diff --git a/go.mod b/go.mod
index 71c5fdb0..ecf94bbb 100644
--- a/go.mod
+++ b/go.mod
@@ -1,22 +1,23 @@
 module github.com/crc-org/macadam
 
 go 1.21
 
+toolchain go1.22.5
+
 require (
 	github.com/containers/podman/v5 v5.1.1
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/spf13/cobra v1.8.0
+	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5 // indirect
 )
 
-require github.com/containers/common v0.59.1
+require github.com/containers/common v0.60.0
 
 require (
 	dario.cat/mergo v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
-	github.com/BurntSushi/toml v1.3.2 // indirect
+	github.com/BurntSushi/toml v1.4.0 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
-	github.com/Microsoft/hcsshim v0.12.3 // indirect
+	github.com/Microsoft/hcsshim v0.12.5 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -27,32 +28,32 @@ require (
 	github.com/chzyer/readline v1.5.1 // indirect
 	github.com/cilium/ebpf v0.11.0 // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
-	github.com/containerd/containerd v1.7.17 // indirect
+	github.com/containerd/containerd v1.7.18 // indirect
 	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/containers/buildah v1.36.0 // indirect
 	github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363 // indirect
-	github.com/containers/image/v5 v5.31.0 // indirect
+	github.com/containers/image/v5 v5.32.0 // indirect
 	github.com/containers/libhvee v0.7.1 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/ocicrypt v1.1.10 // indirect
+	github.com/containers/ocicrypt v1.2.0 // indirect
 	github.com/containers/psgo v1.9.0 // indirect
-	github.com/containers/storage v1.54.0 // indirect
+	github.com/containers/storage v1.55.0 // indirect
 	github.com/containers/winquit v1.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 // indirect
 	github.com/crc-org/crc/v2 v2.36.0 // indirect
 	github.com/crc-org/vfkit v0.5.1 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
-	github.com/cyphar/filepath-securejoin v0.2.5 // indirect
+	github.com/cyphar/filepath-securejoin v0.3.1 // indirect
 	github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect
 	github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/distribution/reference v0.6.0 // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker v26.1.3+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.8.1 // indirect
+	github.com/docker/docker v27.1.1+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.8.2 // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -61,7 +62,7 @@ require (
 	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/gin-gonic/gin v1.9.1 // indirect
-	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
+	github.com/go-jose/go-jose/v4 v4.0.2 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
@@ -83,7 +84,7 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/go-containerregistry v0.19.1 // indirect
+	github.com/google/go-containerregistry v0.20.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
@@ -92,11 +93,11 @@ require (
 	github.com/jinzhu/copier v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.8 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
+	github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect
 	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
@@ -110,9 +111,9 @@ require (
 	github.com/moby/buildkit v0.12.5 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/patternmatcher v0.6.0 // indirect
-	github.com/moby/sys/mountinfo v0.7.1 // indirect
+	github.com/moby/sys/mountinfo v0.7.2 // indirect
 	github.com/moby/sys/sequential v0.5.0 // indirect
-	github.com/moby/sys/user v0.1.0 // indirect
+	github.com/moby/sys/user v0.2.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -120,7 +121,7 @@ require (
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.1.0 // indirect
-	github.com/opencontainers/runc v1.1.12 // indirect
+	github.com/opencontainers/runc v1.1.13 // indirect
 	github.com/opencontainers/runtime-spec v1.2.0 // indirect
 	github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
@@ -136,10 +137,10 @@ require (
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
 	github.com/sigstore/fulcio v1.4.5 // indirect
 	github.com/sigstore/rekor v1.3.6 // indirect
-	github.com/sigstore/sigstore v1.8.3 // indirect
+	github.com/sigstore/sigstore v1.8.4 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
-	github.com/sylabs/sif/v2 v2.16.0 // indirect
+	github.com/sylabs/sif/v2 v2.18.0 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
@@ -149,7 +150,7 @@ require (
 	github.com/ugorji/go/codec v1.2.12 // indirect
 	github.com/ulikunitz/xz v0.5.12 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/vbauerster/mpb/v8 v8.7.3 // indirect
+	github.com/vbauerster/mpb/v8 v8.7.4 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
@@ -159,18 +160,17 @@ require (
 	go.opentelemetry.io/otel/metric v1.24.0 // indirect
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	golang.org/x/arch v0.7.0 // indirect
-	golang.org/x/crypto v0.23.0 // indirect
-	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
-	golang.org/x/net v0.25.0 // indirect
+	golang.org/x/crypto v0.25.0 // indirect
+	golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
+	golang.org/x/net v0.26.0 // indirect
 	golang.org/x/sync v0.7.0 // indirect
-	golang.org/x/sys v0.20.0 // indirect
-	golang.org/x/term v0.20.0 // indirect
-	golang.org/x/text v0.15.0 // indirect
+	golang.org/x/sys v0.22.0 // indirect
+	golang.org/x/term v0.22.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
-	google.golang.org/grpc v1.62.1 // indirect
+	google.golang.org/grpc v1.64.1 // indirect
 	google.golang.org/protobuf v1.34.1 // indirect
-	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	tags.cncf.io/container-device-interface v0.7.2 // indirect
+	tags.cncf.io/container-device-interface v0.8.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 9534c605..313017e5 100644
--- a/go.sum
+++ b/go.sum
@@ -8,12 +8,12 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
-github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0=
-github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ=
+github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0=
+github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
@@ -55,8 +55,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
 github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
-github.com/containerd/containerd v1.7.17 h1:KjNnn0+tAVQHAoaWRjmdak9WlvnFR/8rU1CHHy8Rm2A=
-github.com/containerd/containerd v1.7.17/go.mod h1:vK+hhT4TIv2uejlcDlbVIc8+h/BqtKLIyNrtCZol8lI=
+github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao=
+github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4=
 github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
 github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@@ -67,29 +67,29 @@ github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9Fqctt
 github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
 github.com/containers/buildah v1.36.0 h1:e369nE9bx0yJtPVRDMsbr0OzkW59XCYAl+5poGhFjcs=
 github.com/containers/buildah v1.36.0/go.mod h1:qlEF4RuCnzEUTQhAnCyGr5WoYNZaU0k2mPcZscUR//c=
-github.com/containers/common v0.59.1 h1:7VkmJN3YvD0jLFwaUjLHSRJ98JLffydiyOJjYr0dUTo=
-github.com/containers/common v0.59.1/go.mod h1:53VicJCZ2AD0O+Br7VVoyrS7viXF4YmwlTIocWUT8XE=
+github.com/containers/common v0.60.0 h1:QMNygqiiit9LU/yqee9Dv0N0oQ+rQq41ElfdOBQtw7w=
+github.com/containers/common v0.60.0/go.mod h1:dtKVe11xkV89tqzRX9s/B0ORjeB2dy5UB46aGjunMn8=
 github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363 h1:EqWMZeFa08y2c1GniaFkfjlO5AjegoG2foWo6NlDfUY=
 github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363/go.mod h1:KN4qqZfwVBzvqlN1Ytbhf84sOzftw+R8YL9bixQlr2Y=
-github.com/containers/image/v5 v5.31.0 h1:eDFVlz5XaYICxe9dXpf23htEKvyosgkl62mJlIATXE4=
-github.com/containers/image/v5 v5.31.0/go.mod h1:5QfOqSackPkSbF7Qxc1DnVNnPJKQ+KWLkfEfDpK590Q=
+github.com/containers/image/v5 v5.32.0 h1:yjbweazPfr8xOzQ2hkkYm1A2V0jN96/kES6Gwyxj7hQ=
+github.com/containers/image/v5 v5.32.0/go.mod h1:x5e0RDfGaY6bnQ13gJ2LqbfHvzssfB/y5a8HduGFxJc=
 github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4=
 github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrCPX0ic=
-github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
+github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
+github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
 github.com/containers/podman/v5 v5.1.1 h1:Rm0BdJ2gyvf0atynwHsBdHX7NVmnHSAZnyQM6bMLDww=
 github.com/containers/podman/v5 v5.1.1/go.mod h1:AAzQ0cVMH8XymapWXCPbxBXah/oEn47dlT6hY4zFwtk=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
 github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.54.0 h1:xwYAlf6n9OnIlURQLLg3FYHbO74fQ/2W2N6EtQEUM4I=
-github.com/containers/storage v1.54.0/go.mod h1:PlMOoinRrBSnhYODLxt4EXl0nmJt+X0kjG0Xdt9fMTw=
+github.com/containers/storage v1.55.0 h1:wTWZ3YpcQf1F+dSP4KxG9iqDfpQY1otaUXjPpffuhgg=
+github.com/containers/storage v1.55.0/go.mod h1:28cB81IDk+y7ok60Of6u52RbCeBRucbFOeLunhER1RQ=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc=
 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09/go.mod h1:m2r/smMKsKwgMSAoFKHaa68ImdCSNuKE1MxvQ64xuCQ=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/crc-org/crc/v2 v2.36.0 h1:ZgxVhBytVs6U1uyj21wto6IjgTVgQdL0kXKMpn1NGzQ=
 github.com/crc-org/crc/v2 v2.36.0/go.mod h1:u0CBlfUpGNrTJt2RGRrvzXqbjNHBjv7iIJklNNJ5SlI=
 github.com/crc-org/vfkit v0.5.1 h1:r1zNf1g1bLbgu5BgIQodirvYaIGWJQ91eS/PIgNO6lo=
@@ -98,8 +98,8 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
 github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
-github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
-github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE=
+github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -112,14 +112,14 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc=
-github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE=
+github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
-github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
+github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
+github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
+github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
@@ -144,8 +144,8 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
 github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
 github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
-github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
-github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
+github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
+github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
 github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -222,8 +222,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
-github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-containerregistry v0.20.0 h1:wRqHpOeVh3DnenOrPy9xDOLdnLatiGuuNRVelR2gSbg=
+github.com/google/go-containerregistry v0.20.0/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -254,8 +254,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
 github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -268,8 +268,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
-github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo=
-github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU=
+github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I=
+github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -299,12 +299,12 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
 github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
-github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
-github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
+github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
 github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
-github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
-github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
+github.com/moby/sys/user v0.2.0 h1:OnpapJsRp25vkhw8TFG6OLJODNh/3rEwRWtJ3kakwRM=
+github.com/moby/sys/user v0.2.0/go.mod h1:RYstrcWOJpVh+6qzUqp2bU3eaRpdiQeKGlKitaH0PM8=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -317,16 +317,16 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.18.0 h1:W9Y7IWXxPUpAit9ieMOLI7PJZGaW22DTKgiVAuhDTLc=
-github.com/onsi/ginkgo/v2 v2.18.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
 github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
 github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
 github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
-github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
+github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstuUhmRs=
+github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA=
 github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
 github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc=
@@ -355,8 +355,8 @@ github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZ
 github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
 github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
 github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -381,12 +381,12 @@ github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc
 github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8=
 github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
 github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
-github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4=
-github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs=
+github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w=
+github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
@@ -405,8 +405,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/sylabs/sif/v2 v2.16.0 h1:2eqaBaQQsn5DZTzm3QZm0HupZQEjNXfxRnCmtyCihEU=
-github.com/sylabs/sif/v2 v2.16.0/go.mod h1:d5TxgD/mhMUU3kWLmZmWJQ99Wg0asaTP0bq3ezR1xpg=
+github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI=
+github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
@@ -425,8 +425,8 @@ github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
 github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
 github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
-github.com/vbauerster/mpb/v8 v8.7.3 h1:n/mKPBav4FFWp5fH4U0lPpXfiOmCEgl5Yx/NM3tKJA0=
-github.com/vbauerster/mpb/v8 v8.7.3/go.mod h1:9nFlNpDGVoTmQ4QvNjSLtwLmAFjwmq0XaAF26toHGNM=
+github.com/vbauerster/mpb/v8 v8.7.4 h1:p4f16iMfUt3PkAC73SCzAtgtSf8TYDqEbJUT3odPrPo=
+github.com/vbauerster/mpb/v8 v8.7.4/go.mod h1:r1B5k2Ljj5KJFCekfihbiqyV4VaaRTANYmvWA2btufI=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
@@ -436,7 +436,6 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
 github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
 go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
@@ -467,21 +466,17 @@ golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -491,13 +486,9 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
-golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -505,8 +496,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -519,12 +508,9 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -532,25 +518,16 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -562,10 +539,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
-golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -576,8 +551,8 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -585,8 +560,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
+google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -603,8 +578,6 @@ google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
-gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -618,5 +591,5 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
-tags.cncf.io/container-device-interface v0.7.2 h1:MLqGnWfOr1wB7m08ieI4YJ3IoLKKozEnnNYBtacDPQU=
-tags.cncf.io/container-device-interface v0.7.2/go.mod h1:Xb1PvXv2BhfNb3tla4r9JL129ck1Lxv9KuU6eVOfKto=
+tags.cncf.io/container-device-interface v0.8.0 h1:8bCFo/g9WODjWx3m6EYl3GfUG31eKJbaggyBDxEldRc=
+tags.cncf.io/container-device-interface v0.8.0/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y=
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 3651cfa9..639e6c39 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -9,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
 changelog; this information is also in the git tag annotations (e.g. `git show
 v0.4.0`).
 
-This library requires Go 1.13 or newer; add it to your go.mod with:
+This library requires Go 1.18 or newer; add it to your go.mod with:
 
     % go get github.com/BurntSushi/toml@latest
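The README bump above reflects that toml v1.4.0 drops pre-generics Go support. For orientation before the vendored API changes below, here is a minimal decoding sketch against the v1.4.0 `Decode` signature; the `Config` struct and the inline document are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Config is a hypothetical target struct; TOML keys are matched to
// exported field names case-insensitively.
type Config struct {
	Title string
	Port  int
}

func main() {
	var conf Config
	doc := "title = \"demo\"\nport = 8080\n"
	// Decode takes the document as a string and a pointer to the target.
	if _, err := toml.Decode(doc, &conf); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", conf) // {Title:demo Port:8080}
}
```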
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index 4d38f3bf..7aaf462c 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -6,7 +6,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"io/fs"
 	"math"
 	"os"
 	"reflect"
@@ -18,13 +18,13 @@ import (
 // Unmarshaler is the interface implemented by objects that can unmarshal a
 // TOML description of themselves.
 type Unmarshaler interface {
-	UnmarshalTOML(interface{}) error
+	UnmarshalTOML(any) error
 }
 
 // Unmarshal decodes the contents of data in TOML format into a pointer v.
 //
 // See [Decoder] for a description of the decoding process.
-func Unmarshal(data []byte, v interface{}) error {
+func Unmarshal(data []byte, v any) error {
 	_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
 	return err
 }
@@ -32,12 +32,12 @@ func Unmarshal(data []byte, v interface{}) error {
 // Decode the TOML data in to the pointer v.
 //
 // See [Decoder] for a description of the decoding process.
-func Decode(data string, v interface{}) (MetaData, error) {
+func Decode(data string, v any) (MetaData, error) {
 	return NewDecoder(strings.NewReader(data)).Decode(v)
 }
 
 // DecodeFile reads the contents of a file and decodes it with [Decode].
-func DecodeFile(path string, v interface{}) (MetaData, error) {
+func DecodeFile(path string, v any) (MetaData, error) {
 	fp, err := os.Open(path)
 	if err != nil {
 		return MetaData{}, err
@@ -46,6 +46,17 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
 	return NewDecoder(fp).Decode(v)
 }
 
+// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
+// [Decode].
+func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
+
 // Primitive is a TOML value that hasn't been decoded into a Go value.
 //
 // This type can be used for any value, which will cause decoding to be delayed.
@@ -58,7 +69,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
 // overhead of reflection. They can be useful when you don't know the exact type
 // of TOML data until runtime.
 type Primitive struct {
-	undecoded interface{}
+	undecoded any
 	context   Key
 }
 
@@ -122,7 +133,7 @@ var (
 )
 
 // Decode TOML data in to the pointer `v`.
-func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+func (dec *Decoder) Decode(v any) (MetaData, error) {
 	rv := reflect.ValueOf(v)
 	if rv.Kind() != reflect.Ptr {
 		s := "%q"
@@ -136,8 +147,8 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
 		return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
 	}
 
-	// Check if this is a supported type: struct, map, interface{}, or something
-	// that implements UnmarshalTOML or UnmarshalText.
+	// Check if this is a supported type: struct, map, any, or something that
+	// implements UnmarshalTOML or UnmarshalText.
 	rv = indirect(rv)
 	rt := rv.Type()
 	if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
@@ -148,7 +159,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
 
 	// TODO: parser should read from io.Reader? Or at the very least, make it
 	// read from []byte rather than string
-	data, err := ioutil.ReadAll(dec.r)
+	data, err := io.ReadAll(dec.r)
 	if err != nil {
 		return MetaData{}, err
 	}
@@ -179,7 +190,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
 // will only reflect keys that were decoded. Namely, any keys hidden behind a
 // Primitive will be considered undecoded. Executing this method will update the
 // undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
 	md.context = primValue.context
 	defer func() { md.context = nil }()
 	return md.unify(primValue.undecoded, rvalue(v))
@@ -190,7 +201,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
 //
 // Any type mismatch produces an error. Finding a type that we don't know
 // how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unify(data any, rv reflect.Value) error {
 	// Special case. Look for a `Primitive` value.
 	// TODO: #76 would make this superfluous after implemented.
 	if rv.Type() == primitiveType {
@@ -207,7 +218,11 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
 
 	rvi := rv.Interface()
 	if v, ok := rvi.(Unmarshaler); ok {
-		return v.UnmarshalTOML(data)
+		err := v.UnmarshalTOML(data)
+		if err != nil {
+			return md.parseErr(err)
+		}
+		return nil
 	}
 	if v, ok := rvi.(encoding.TextUnmarshaler); ok {
 		return md.unifyText(data, v)
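The hunk above starts wrapping errors returned by a custom `UnmarshalTOML` in `md.parseErr`, so they come back annotated with document position. A sketch of a type using that hook; the `Level` type and its values are made up for illustration:

```go
package config

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Level is a hypothetical enum-like type; it implements toml.Unmarshaler,
// so with the change above the error it returns is wrapped into a
// position-aware ParseError instead of surfacing bare.
type Level int

func (l *Level) UnmarshalTOML(v any) error {
	s, ok := v.(string)
	if !ok {
		return fmt.Errorf("level must be a string, got %T", v)
	}
	switch s {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown level %q", s)
	}
	return nil
}

var _ toml.Unmarshaler = (*Level)(nil) // compile-time interface check
```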
@@ -227,14 +242,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
 		return md.unifyInt(data, rv)
 	}
 	switch k {
-	case reflect.Ptr:
-		elem := reflect.New(rv.Type().Elem())
-		err := md.unify(data, reflect.Indirect(elem))
-		if err != nil {
-			return err
-		}
-		rv.Set(elem)
-		return nil
 	case reflect.Struct:
 		return md.unifyStruct(data, rv)
 	case reflect.Map:
@@ -258,14 +265,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
 	return md.e("unsupported type %s", rv.Kind())
 }
 
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
-	tmap, ok := mapping.(map[string]interface{})
+func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error {
+	tmap, ok := mapping.(map[string]any)
 	if !ok {
 		if mapping == nil {
 			return nil
 		}
-		return md.e("type mismatch for %s: expected table but found %T",
-			rv.Type().String(), mapping)
+		return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping))
 	}
 
 	for key, datum := range tmap {
@@ -304,14 +310,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
 	return nil
 }
 
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error {
 	keyType := rv.Type().Key().Kind()
 	if keyType != reflect.String && keyType != reflect.Interface {
 		return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
 			keyType, rv.Type())
 	}
 
-	tmap, ok := mapping.(map[string]interface{})
+	tmap, ok := mapping.(map[string]any)
 	if !ok {
 		if tmap == nil {
 			return nil
@@ -347,7 +353,7 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
 	return nil
 }
 
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyArray(data any, rv reflect.Value) error {
 	datav := reflect.ValueOf(data)
 	if datav.Kind() != reflect.Slice {
 		if !datav.IsValid() {
@@ -361,7 +367,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
 	return md.unifySliceArray(datav, rv)
 }
 
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifySlice(data any, rv reflect.Value) error {
 	datav := reflect.ValueOf(data)
 	if datav.Kind() != reflect.Slice {
 		if !datav.IsValid() {
@@ -388,7 +394,7 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
 	return nil
 }
 
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyString(data any, rv reflect.Value) error {
 	_, ok := rv.Interface().(json.Number)
 	if ok {
 		if i, ok := data.(int64); ok {
@@ -408,7 +414,7 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
 	return md.badtype("string", data)
 }
 
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error {
 	rvk := rv.Kind()
 
 	if num, ok := data.(float64); ok {
@@ -429,7 +435,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
 	if num, ok := data.(int64); ok {
 		if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
 			(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
-			return md.parseErr(errParseRange{i: num, size: rvk.String()})
+			return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()})
 		}
 		rv.SetFloat(float64(num))
 		return nil
@@ -438,7 +444,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
 	return md.badtype("float", data)
 }
 
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyInt(data any, rv reflect.Value) error {
 	_, ok := rv.Interface().(time.Duration)
 	if ok {
 		// Parse as string duration, and fall back to regular integer parsing
@@ -481,7 +487,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
 	return nil
 }
 
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyBool(data any, rv reflect.Value) error {
 	if b, ok := data.(bool); ok {
 		rv.SetBool(b)
 		return nil
@@ -489,12 +495,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
 	return md.badtype("boolean", data)
 }
 
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyAnything(data any, rv reflect.Value) error {
 	rv.Set(reflect.ValueOf(data))
 	return nil
 }
 
-func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
+func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error {
 	var s string
 	switch sdata := data.(type) {
 	case Marshaler:
@@ -523,13 +529,13 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
 		return md.badtype("primitive (string-like)", data)
 	}
 	if err := v.UnmarshalText([]byte(s)); err != nil {
-		return err
+		return md.parseErr(err)
 	}
 	return nil
}
 
-func (md *MetaData) badtype(dst string, data interface{}) error {
-	return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
+func (md *MetaData) badtype(dst string, data any) error {
+	return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst)
 }
 
 func (md *MetaData) parseErr(err error) error {
@@ -543,7 +549,7 @@ func (md *MetaData) parseErr(err error) error {
 	}
 }
 
-func (md *MetaData) e(format string, args ...interface{}) error {
+func (md *MetaData) e(format string, args ...any) error {
 	f := "toml: "
 	if len(md.context) > 0 {
 		f = fmt.Sprintf("toml: (last key %q): ", md.context)
@@ -556,7 +562,7 @@ func (md *MetaData) e(format string, args ...interface{}) error {
 }
 
 // rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
+func rvalue(v any) reflect.Value {
 	return indirect(reflect.ValueOf(v))
 }
 
@@ -600,3 +606,8 @@ func isUnifiable(rv reflect.Value) bool {
 	}
 	return false
 }
+
+// fmt %T with "interface {}" replaced with "any", which is far more readable.
+func fmtType(t any) string {
+	return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any")
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
deleted file mode 100644
index 086d0b68..00000000
--- a/vendor/github.com/BurntSushi/toml/decode_go116.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-package toml
-
-import (
-	"io/fs"
-)
-
-// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
-// [Decode].
-func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
-	fp, err := fsys.Open(path)
-	if err != nil {
-		return MetaData{}, err
-	}
-	defer fp.Close()
-	return NewDecoder(fp).Decode(v)
-}
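With the module now requiring Go 1.18, `DecodeFS` no longer needs a go1.16 build tag: the deleted decode_go116.go above is the counterpart of the `DecodeFS` addition near the top of decode.go. A usage sketch, assuming a hypothetical `config.toml` embedded next to the source file:

```go
package main

import (
	"embed"
	"fmt"

	"github.com/BurntSushi/toml"
)

//go:embed config.toml
var configFS embed.FS // assumes a config.toml sits next to this file

func main() {
	var conf struct {
		Title string
	}
	// DecodeFS opens the path inside any fs.FS (here an embed.FS) and
	// decodes it with Decode.
	if _, err := toml.DecodeFS(configFS, "config.toml", &conf); err != nil {
		panic(err)
	}
	fmt.Println(conf.Title)
}
```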
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
index b9e30971..155709a8 100644
--- a/vendor/github.com/BurntSushi/toml/deprecated.go
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -15,15 +15,15 @@ type TextMarshaler encoding.TextMarshaler
 // Deprecated: use encoding.TextUnmarshaler
 type TextUnmarshaler encoding.TextUnmarshaler
 
+// DecodeReader is an alias for NewDecoder(r).Decode(v).
+//
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) }
+
 // PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
 //
 // Deprecated: use MetaData.PrimitiveDecode.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
+func PrimitiveDecode(primValue Primitive, v any) error {
 	md := MetaData{decoded: make(map[string]struct{})}
 	return md.unify(primValue.undecoded, rvalue(v))
 }
-
-// DecodeReader is an alias for NewDecoder(r).Decode(v).
-//
-// Deprecated: use NewDecoder(reader).Decode(&value).
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
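deprecated.go now spells out the replacement for `DecodeReader` in its Deprecated note. A sketch of the recommended form; `decodeFrom` is an illustrative helper name, not part of the library:

```go
package config

import (
	"io"

	"github.com/BurntSushi/toml"
)

// decodeFrom shows the replacement the deprecation note recommends:
// NewDecoder(r).Decode(&value) instead of the DecodeReader alias.
func decodeFrom(r io.Reader) (map[string]any, error) {
	var v map[string]any
	if _, err := toml.NewDecoder(r).Decode(&v); err != nil {
		return nil, err
	}
	return v, nil
}
```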
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 9cd25d75..73366c0d 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -2,6 +2,7 @@ package toml import ( "bufio" + "bytes" "encoding" "encoding/json" "errors" @@ -76,6 +77,17 @@ type Marshaler interface { MarshalTOML() ([]byte, error) } +// Marshal returns a TOML representation of the Go value. +// +// See [Encoder] for a description of the encoding process. +func Marshal(v any) ([]byte, error) { + buff := new(bytes.Buffer) + if err := NewEncoder(buff).Encode(v); err != nil { + return nil, err + } + return buff.Bytes(), nil +} + // Encoder encodes a Go to a TOML document. // // The mapping between Go values and TOML values should be precisely the same as @@ -115,26 +127,21 @@ type Marshaler interface { // NOTE: only exported keys are encoded due to the use of reflection. Unexported // keys are silently discarded. type Encoder struct { - // String to use for a single indentation level; default is two spaces. - Indent string - + Indent string // string for a single indentation level; default is two spaces. + hasWritten bool // written any output to w yet? w *bufio.Writer - hasWritten bool // written any output to w yet? } // NewEncoder create a new Encoder. func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } + return &Encoder{w: bufio.NewWriter(w), Indent: " "} } // Encode writes a TOML representation of the Go value to the [Encoder]'s writer. // // An error is returned if the value given cannot be encoded to a valid TOML // document. -func (enc *Encoder) Encode(v interface{}) error { +func (enc *Encoder) Encode(v any) error { rv := eindirect(reflect.ValueOf(v)) err := enc.safeEncode(Key([]string{}), rv) if err != nil { @@ -280,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Float32: f := rv.Float() if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } enc.wf("nan") } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") } else { enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) } case reflect.Float64: f := rv.Float() if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } enc.wf("nan") } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") } else { enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) } @@ -304,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Interface: enc.eElement(rv.Elem()) default: - encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) + encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface()))) } } @@ -712,7 +731,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { } } -func (enc *Encoder) wf(format string, v ...interface{}) { +func (enc *Encoder) wf(format string, v ...any) { _, err := fmt.Fprintf(enc.w, format, v...) 
if err != nil { encPanic(err) diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index efd68865..b45a3f45 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -114,13 +114,22 @@ func (pe ParseError) ErrorWithPosition() string { msg, pe.Position.Line, col, col+pe.Position.Len) } if pe.Position.Line > 2 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) } if pe.Position.Line > 1 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2])) } - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + + /// Expand tabs, so that the ^^^s are at the correct position, but leave + /// "column 10-13" intact. Adjusting this to the visual column would be + /// better, but we don't know the tabsize of the user in their editor, which + /// can be 8, 4, 2, or something else. We can't know. So leaving it as the + /// character index is probably the "most correct". + expanded := expandTab(lines[pe.Position.Line-1]) + diff := len(expanded) - len(lines[pe.Position.Line-1]) + + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } @@ -159,17 +168,47 @@ func (pe ParseError) column(lines []string) int { return col } +func expandTab(s string) string { + var ( + b strings.Builder + l int + fill = func(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = ' ' + } + return string(b) + } + ) + b.Grow(len(s)) + for _, r := range s { + switch r { + case '\t': + tw := 8 - l%8 + b.WriteString(fill(tw)) + l += tw + default: + b.WriteRune(r) + l += 1 + } + } + return b.String() +} + type ( errLexControl struct{ r rune } errLexEscape struct{ r rune } errLexUTF8 struct{ b byte } - errLexInvalidNum struct{ v string } - errLexInvalidDate struct{ v string } + errParseDate struct{ v string } errLexInlineTableNL struct{} errLexStringNL struct{} errParseRange struct { - i interface{} // int or float - size string // "int64", "uint16", etc. + i any // int or float + size string // "int64", "uint16", etc. 
+ } + errUnsafeFloat struct { + i interface{} // float32 or float64 + size string // "float32" or "float64" } errParseDuration struct{ d string } ) @@ -183,18 +222,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape func (e errLexEscape) Usage() string { return usageEscape } func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } func (e errLexUTF8) Usage() string { return "" } -func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } -func (e errLexInvalidNum) Usage() string { return "" } -func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } -func (e errLexInvalidDate) Usage() string { return "" } +func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) } +func (e errParseDate) Usage() string { return usageDate } func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } func (e errLexStringNL) Usage() string { return usageStringNewline } func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } func (e errParseRange) Usage() string { return usageIntOverflow } -func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } -func (e errParseDuration) Usage() string { return usageDuration } +func (e errUnsafeFloat) Error() string { + return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size) +} +func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } const usageEscape = ` A '\' inside a "-delimited string is interpreted as an escape character. @@ -251,19 +292,35 @@ bug in the program that uses too small of an integer. The maximum and minimum values are: size │ lowest │ highest - ───────┼────────────────┼────────── + ───────┼────────────────┼────────────── int8 │ -128 │ 127 int16 │ -32,768 │ 32,767 int32 │ -2,147,483,648 │ 2,147,483,647 int64 │ -9.2 × 10¹⁸ │ 9.2 × 10¹⁸ uint8 │ 0 │ 255 - uint16 │ 0 │ 65535 - uint32 │ 0 │ 4294967295 + uint16 │ 0 │ 65,535 + uint32 │ 0 │ 4,294,967,295 uint64 │ 0 │ 1.8 × 10¹⁹ int refers to int32 on 32-bit systems and int64 on 64-bit systems. ` +const usageUnsafeFloat = ` +This number is outside of the "safe" range for floating point numbers; whole +(non-fractional) numbers outside the below range can not always be represented +accurately in a float, leading to some loss of accuracy. + +Explicitly mark a number as a fractional unit by adding ".0", which will incur +some loss of accuracy; for example: + + f = 2_000_000_000.0 + +Accuracy ranges: + + float32 = 16,777,215 + float64 = 9,007,199,254,740,991 +` + const usageDuration = ` A duration must be as "number<unit>[number<unit> ...]", without any spaces. Valid units are: @@ -277,3 +334,23 @@ A duration must be as "number<unit>[number<unit> ...]", without any spaces. Valid units are: You can combine multiple units; for example "5m10s" for 5 minutes and 10 seconds. ` + +const usageDate = ` +A TOML datetime must be in one of the following formats: + + 2006-01-02T15:04:05Z07:00 Date and time, with timezone. + 2006-01-02T15:04:05 Date and time, but without timezone. + 2006-01-02 Date without a time or timezone. + 15:04:05 Just a time, without any timezone.
+ +Seconds may optionally have a fraction, up to nanosecond precision: + + 15:04:05.123 + 15:04:05.856018510 +` + +// TOML 1.1: +// The seconds part in times is optional, and may be omitted: +// 2006-01-02T15:04Z07:00 +// 2006-01-02T15:04 +// 15:04 diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 3545a6ad..a1016d98 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -17,6 +17,7 @@ const ( itemEOF itemText itemString + itemStringEsc itemRawString itemMultilineString itemRawMultilineString @@ -53,6 +54,7 @@ type lexer struct { state stateFn items chan item tomlNext bool + esc bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -164,7 +166,7 @@ func (lx *lexer) next() (r rune) { } r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) - if r == utf8.RuneError { + if r == utf8.RuneError && w == 1 { lx.error(errLexUTF8{lx.input[lx.pos]}) return utf8.RuneError } @@ -270,7 +272,7 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { } // errorf is like error, and creates a new error. -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { +func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() pos.Line-- @@ -333,9 +335,7 @@ func lexTopEnd(lx *lexer) stateFn { lx.emit(itemEOF) return nil } - return lx.errorf( - "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", - r) + return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r) } // lexTable lexes the beginning of a table. Namely, it makes sure that @@ -698,7 +698,12 @@ func lexString(lx *lexer) stateFn { return lexStringEscape case r == '"': lx.backup() - lx.emit(itemString) + if lx.esc { + lx.esc = false + lx.emit(itemStringEsc) + } else { + lx.emit(itemString) + } lx.next() lx.ignore() return lx.pop() @@ -748,6 +753,7 @@ func lexMultilineString(lx *lexer) stateFn { lx.backup() /// backup: don't include the """ in the item. lx.backup() lx.backup() + lx.esc = false lx.emit(itemMultilineString) lx.next() /// Read over ''' again and discard it. 
lx.next() @@ -837,6 +843,7 @@ func lexMultilineStringEscape(lx *lexer) stateFn { } func lexStringEscape(lx *lexer) stateFn { + lx.esc = true r := lx.next() switch r { case 'e': @@ -879,10 +886,8 @@ func lexHexEscape(lx *lexer) stateFn { var r rune for i := 0; i < 2; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected two hexadecimal digits after '\x', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current()) } } return lx.pop() @@ -892,10 +897,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 4; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected four hexadecimal digits after '\u', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current()) } } return lx.pop() @@ -905,10 +908,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 8; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected eight hexadecimal digits after '\U', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current()) } } return lx.pop() @@ -975,7 +976,7 @@ func lexDatetime(lx *lexer) stateFn { // lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. func lexHexInteger(lx *lexer) stateFn { r := lx.next() - if isHexadecimal(r) { + if isHex(r) { return lexHexInteger } switch r { @@ -1109,7 +1110,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { return lexOctalInteger case 'x': r = lx.peek() - if !isHexadecimal(r) { + if !isHex(r) { lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) } return lexHexInteger @@ -1207,7 +1208,7 @@ func (itype itemType) String() string { return "EOF" case itemText: return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString: return "String" case itemBool: return "Bool" @@ -1240,7 +1241,7 @@ func (itype itemType) String() string { } func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) + return fmt.Sprintf("(%s, %s)", item.typ, item.val) } func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } @@ -1256,10 +1257,7 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n func isDigit(r rune) bool { return r >= '0' && r <= '9' } func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') -} - +func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { if tomlNext { return (r >= 'A' && r <= 'Z') || diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 2e78b24e..e6145373 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -13,7 +13,7 @@ type MetaData struct { context Key // Used only during decoding. keyInfo map[string]keyInfo - mapping map[string]interface{} + mapping map[string]any keys []Key decoded map[string]struct{} data []byte // Input file; for errors. 
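Review note: the encode.go hunk a little earlier in this diff adds a package-level Marshal, the counterpart to the existing Unmarshal, so callers no longer have to wire up an Encoder and bytes.Buffer by hand. A minimal sketch (type and values invented):

    package main

    import (
        "fmt"
        "log"

        "github.com/BurntSushi/toml"
    )

    type server struct {
        Host string `toml:"host"`
        Port int    `toml:"port"`
    }

    func main() {
        out, err := toml.Marshal(server{Host: "localhost", Port: 8080})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Print(string(out)) // host = "localhost" \n port = 8080
    }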
@@ -31,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool { } var ( - hash map[string]interface{} + hash map[string]any ok bool - hashOrVal interface{} = md.mapping + hashOrVal any = md.mapping ) for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { + if hash, ok = hashOrVal.(map[string]any); !ok { return false } if hashOrVal, ok = hash[k]; !ok { @@ -94,28 +94,55 @@ func (md *MetaData) Undecoded() []Key { type Key []string func (k Key) String() string { - ss := make([]string, len(k)) - for i := range k { - ss[i] = k.maybeQuoted(i) + // This is called quite often, so it's a bit funky to make it faster. + var b strings.Builder + b.Grow(len(k) * 25) +outer: + for i, kk := range k { + if i > 0 { + b.WriteByte('.') + } + if kk == "" { + b.WriteString(`""`) + } else { + for _, r := range kk { + // "Inline" isBareKeyChar + if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') { + b.WriteByte('"') + b.WriteString(dblQuotedReplacer.Replace(kk)) + b.WriteByte('"') + continue outer + } + } + b.WriteString(kk) + } } - return strings.Join(ss, ".") + return b.String() } func (k Key) maybeQuoted(i int) string { if k[i] == "" { return `""` } - for _, c := range k[i] { - if !isBareKeyChar(c, false) { - return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + for _, r := range k[i] { + if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' { + continue } + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` } return k[i] } +// Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { + if cap(k) > len(k) { + return append(k, piece) + } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece return newKey } + +func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece. +func (k Key) last() string { return k[len(k)-1] } // last piece of this key. diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 9c191536..11ac3108 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -2,6 +2,7 @@ package toml import ( "fmt" + "math" "os" "strconv" "strings" @@ -20,9 +21,9 @@ type parser struct { ordered []Key // List of keys in the order that they appear in the TOML data. - keyInfo map[string]keyInfo // Map keyname → info about the TOML key. - mapping map[string]interface{} // Map keyname → key value. - implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]any // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). } type keyInfo struct { @@ -49,6 +50,7 @@ func parse(data string) (p *parser, err error) { // it anyway. 
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] + //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -71,7 +73,7 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), - mapping: make(map[string]interface{}), + mapping: make(map[string]any), lx: lex(data, tomlNext), ordered: make([]Key, 0), implicits: make(map[string]struct{}), @@ -97,7 +99,7 @@ func (p *parser) panicErr(it item, err error) { }) } -func (p *parser) panicItemf(it item, format string, v ...interface{}) { +func (p *parser) panicItemf(it item, format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), Position: it.pos, @@ -106,7 +108,7 @@ func (p *parser) panicItemf(it item, format string, v ...interface{}) { }) } -func (p *parser) panicf(format string, v ...interface{}) { +func (p *parser) panicf(format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), Position: p.pos, @@ -139,7 +141,7 @@ func (p *parser) nextPos() item { return it } -func (p *parser) bug(format string, v ...interface{}) { +func (p *parser) bug(format string, v ...any) { panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) } @@ -194,11 +196,11 @@ func (p *parser) topLevel(item item) { p.assertEqual(itemKeyEnd, k.typ) /// The current key is the last part. - p.currentKey = key[len(key)-1] + p.currentKey = key.last() /// All the other parts (if any) are the context; need to set each part /// as implicit. - context := key[:len(key)-1] + context := key.parent() for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } @@ -207,7 +209,8 @@ func (p *parser) topLevel(item item) { /// Set value. vItem := p.next() val, typ := p.value(vItem, false) - p.set(p.currentKey, val, typ, vItem.pos) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, vItem.pos) /// Remove the context we added (preserving any context from [tbl] lines). p.context = outerContext @@ -222,7 +225,7 @@ func (p *parser) keyString(it item) string { switch it.typ { case itemText: return it.val - case itemString, itemMultilineString, + case itemString, itemStringEsc, itemMultilineString, itemRawString, itemRawMultilineString: s, _ := p.value(it, false) return s.(string) @@ -239,9 +242,11 @@ var datetimeRepl = strings.NewReplacer( // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. 
-func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { +func (p *parser) value(it item, parentIsArray bool) (any, tomlType) { switch it.typ { case itemString: + return it.val, p.typeOfPrimitive(it) + case itemStringEsc: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) @@ -274,7 +279,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { panic("unreachable") } -func (p *parser) valueInteger(it item) (interface{}, tomlType) { +func (p *parser) valueInteger(it item) (any, tomlType) { if !numUnderscoresOK(it.val) { p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) } @@ -298,7 +303,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) { return num, p.typeOfPrimitive(it) } -func (p *parser) valueFloat(it item) (interface{}, tomlType) { +func (p *parser) valueFloat(it item) (any, tomlType) { parts := strings.FieldsFunc(it.val, func(r rune) bool { switch r { case '.', 'e', 'E': @@ -322,7 +327,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) } val := strings.Replace(it.val, "_", "", -1) - if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + signbit := false + if val == "+nan" || val == "-nan" { + signbit = val == "-nan" val = "nan" } num, err := strconv.ParseFloat(val, 64) @@ -333,6 +340,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { p.panicItemf(it, "Invalid float value: %q", it.val) } } + if signbit { + num = math.Copysign(num, -1) + } return num, p.typeOfPrimitive(it) } @@ -352,7 +362,7 @@ var dtTypes = []struct { {"15:04", internal.LocalTime, true}, } -func (p *parser) valueDatetime(it item) (interface{}, tomlType) { +func (p *parser) valueDatetime(it item) (any, tomlType) { it.val = datetimeRepl.Replace(it.val) var ( t time.Time @@ -365,26 +375,44 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { + if missingLeadingZero(it.val, dt.fmt) { + p.panicErr(it, errParseDate{it.val}) + } ok = true break } } if !ok { - p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + p.panicErr(it, errParseDate{it.val}) } return t, p.typeOfPrimitive(it) } -func (p *parser) valueArray(it item) (interface{}, tomlType) { +// Go's time.Parse() will accept numbers without a leading zero; there isn't any +// way to require it. https://github.com/golang/go/issues/29911 +// +// Depend on the fact that the separators (- and :) should always be at the same +// location. +func missingLeadingZero(d, l string) bool { + for i, c := range []byte(l) { + if c == '.' || c == 'Z' { + return false + } + if (c < '0' || c > '9') && d[i] != c { + return true + } + } + return false +} + +func (p *parser) valueArray(it item) (any, tomlType) { p.setType(p.currentKey, tomlArray, it.pos) var ( - types []tomlType - - // Initialize to a non-nil empty slice. This makes it consistent with - // how S = [] decodes into a non-nil slice inside something like struct - // { S []string }. See #338 - array = []interface{}{} + // Initialize to a non-nil slice to make it consistent with how S = [] + // decodes into a non-nil slice inside something like struct { S + // []string }. 
See #338 + array = make([]any, 0, 2) ) for it = p.next(); it.typ != itemArrayEnd; it = p.next() { if it.typ == itemCommentStart { @@ -394,21 +422,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) { val, typ := p.value(it, true) array = append(array, val) - types = append(types, typ) - // XXX: types isn't used here, we need it to record the accurate type + // XXX: type isn't used here, we need it to record the accurate type // information. // // Not entirely sure how to best store this; could use "key[0]", // "key[1]" notation, or maybe store it on the Array type? - _ = types + _ = typ } return array, tomlArray } -func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { +func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) { var ( - hash = make(map[string]interface{}) + topHash = make(map[string]any) outerContext = p.context outerKey = p.currentKey ) @@ -436,11 +463,11 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom p.assertEqual(itemKeyEnd, k.typ) /// The current key is the last part. - p.currentKey = key[len(key)-1] + p.currentKey = key.last() /// All the other parts (if any) are the context; need to set each part /// as implicit. - context := key[:len(key)-1] + context := key.parent() for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } @@ -448,7 +475,21 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom /// Set the value. val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ, it.pos) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, it.pos) + + hash := topHash + for _, c := range context { + h, ok := hash[c] + if !ok { + h = make(map[string]any) + hash[c] = h + } + hash, ok = h.(map[string]any) + if !ok { + p.panicf("%q is not a table", p.context) + } + } hash[p.currentKey] = val /// Restore context. @@ -456,7 +497,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom } p.context = outerContext p.currentKey = outerKey - return hash, tomlHash + return topHash, tomlHash } // numHasLeadingZero checks if this number has leading zeroes, allowing for '0', @@ -486,9 +527,9 @@ func numUnderscoresOK(s string) bool { } } - // isHexadecimal is a superset of all the permissable characters - // surrounding an underscore. - accept = isHexadecimal(r) + // isHexis a superset of all the permissable characters surrounding an + // underscore. + accept = isHex(r) } return accept } @@ -511,21 +552,19 @@ func numPeriodsOK(s string) bool { // Establishing the context also makes sure that the key isn't a duplicate, and // will create implicit hashes automatically. func (p *parser) addContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. + /// Always start at the top level and drill down for our context. hashContext := p.mapping - keyContext := make(Key, 0) + keyContext := make(Key, 0, len(key)-1) - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] + /// We only need implicit hashes for the parents. + for _, k := range key.parent() { + _, ok := hashContext[k] keyContext = append(keyContext, k) // No key? Make an implicit hash and move on. 
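Review note: the valueInlineTable rewrite above (accumulating into topHash and re-walking the dotted-key context for every key) should make dotted keys inside inline tables produce properly nested tables rather than landing flat in a single hash. Roughly, assuming the hunk behaves as it reads (document invented):

    var m map[string]any
    _, err := toml.Decode(`point = { x.min = 1, x.max = 9 }`, &m)
    // err == nil; m["point"] decodes as
    //   map[string]any{"x": map[string]any{"min": int64(1), "max": int64(9)}}
    // where the old code dropped the "x" level and put "min"/"max"
    // directly into the returned hash.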
if !ok { p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) + hashContext[k] = make(map[string]any) } // If the hash context is actually an array of tables, then set @@ -534,9 +573,9 @@ func (p *parser) addContext(key Key, array bool) { // Otherwise, it better be a table, since this MUST be a key group (by // virtue of it not being the last element in a key). switch t := hashContext[k].(type) { - case []map[string]interface{}: + case []map[string]any: hashContext = t[len(t)-1] - case map[string]interface{}: + case map[string]any: hashContext = t default: p.panicf("Key '%s' was already created as a hash.", keyContext) @@ -547,39 +586,33 @@ func (p *parser) addContext(key Key, array bool) { if array { // If this is the first element for this array, then allocate a new // list of tables for it. - k := key[len(key)-1] + k := key.last() if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 4) + hashContext[k] = make([]map[string]any, 0, 4) } // Add a new table. But make sure the key hasn't already been used // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) + if hash, ok := hashContext[k].([]map[string]any); ok { + hashContext[k] = append(hash, make(map[string]any)) } else { p.panicf("Key '%s' was already created and cannot be used as an array.", key) } } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) + p.setValue(key.last(), make(map[string]any)) } - p.context = append(p.context, key[len(key)-1]) -} - -// set calls setValue and setType. -func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { - p.setValue(key, val) - p.setType(key, typ, pos) + p.context = append(p.context, key.last()) } // setValue sets the given key to the given value in the current context. // It will make sure that the key hasn't already been defined, account for // implicit key groups. -func (p *parser) setValue(key string, value interface{}) { +func (p *parser) setValue(key string, value any) { var ( - tmpHash interface{} + tmpHash any ok bool hash = p.mapping - keyContext Key + keyContext = make(Key, 0, len(p.context)+1) ) for _, k := range p.context { keyContext = append(keyContext, k) @@ -587,11 +620,11 @@ func (p *parser) setValue(key string, value interface{}) { p.bug("Context for key '%s' has not been established.", keyContext) } switch t := tmpHash.(type) { - case []map[string]interface{}: + case []map[string]any: // The context is a table of hashes. Pick the most recent table // defined as the current hash. hash = t[len(t)-1] - case map[string]interface{}: + case map[string]any: hash = t default: p.panicf("Key '%s' has already been defined.", keyContext) @@ -618,9 +651,8 @@ func (p *parser) setValue(key string, value interface{}) { p.removeImplicit(keyContext) return } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. + // Otherwise, we have a concrete key trying to override a previous key, + // which is *always* wrong. p.panicf("Key '%s' has already been defined.", keyContext) } @@ -683,8 +715,11 @@ func stripFirstNewline(s string) string { // the next newline. After a line-ending backslash, all whitespace is removed // until the next non-whitespace character. 
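Review note: the comment above is TOML's line-ending-backslash rule, implemented by the stripEscapedNewlines rebuild just below (the change pre-grows the strings.Builder to the input length). The behavior being preserved, for reference:

    var v struct{ Msg string }
    _, _ = toml.Decode(`msg = """\
hello \
    world"""`, &v)
    // v.Msg == "hello world": each line-ending backslash removes the
    // newline and all whitespace up to the next non-whitespace rune.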
func (p *parser) stripEscapedNewlines(s string) string { - var b strings.Builder - var i int + var ( + b strings.Builder + i int + ) + b.Grow(len(s)) for { ix := strings.Index(s[i:], `\`) if ix < 0 { @@ -714,9 +749,8 @@ func (p *parser) stripEscapedNewlines(s string) string { continue } if !strings.Contains(s[i:j], "\n") { - // This is not a line-ending backslash. - // (It's a bad escape sequence, but we can let - // replaceEscapes catch it.) + // This is not a line-ending backslash. (It's a bad escape sequence, + // but we can let replaceEscapes catch it.) i++ continue } @@ -727,79 +761,78 @@ func (p *parser) stripEscapedNewlines(s string) string { } func (p *parser) replaceEscapes(it item, str string) string { - replaced := make([]rune, 0, len(str)) - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) + var ( + b strings.Builder + skip = 0 + ) + b.Grow(len(str)) + for i, c := range str { + if skip > 0 { + skip-- continue } - r += 1 - if r >= len(s) { + if c != '\\' { + b.WriteRune(c) + continue + } + + if i >= len(str) { p.bug("Escape sequence at end of string.") return "" } - switch s[r] { + switch str[i+1] { default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) + p.bug("Expected valid escape code after \\, but got %q.", str[i+1]) case ' ', '\t': - p.panicItemf(it, "invalid escape: '\\%c'", s[r]) + p.panicItemf(it, "invalid escape: '\\%c'", str[i+1]) case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 + b.WriteByte(0x08) + skip = 1 case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 + b.WriteByte(0x09) + skip = 1 case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 + b.WriteByte(0x0a) + skip = 1 case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 + b.WriteByte(0x0c) + skip = 1 case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 + b.WriteByte(0x0d) + skip = 1 case 'e': if p.tomlNext { - replaced = append(replaced, rune(0x001B)) - r += 1 + b.WriteByte(0x1b) + skip = 1 } case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 + b.WriteByte(0x22) + skip = 1 case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 + b.WriteByte(0x5c) + skip = 1 + // The lexer guarantees the correct number of characters are present; + // don't need to check here. case 'x': if p.tomlNext { - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) - replaced = append(replaced, escaped) - r += 3 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 } case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) - replaced = append(replaced, escaped) - r += 5 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) + b.WriteRune(escaped) + skip = 5 case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) 
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) - replaced = append(replaced, escaped) - r += 9 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10]) + b.WriteRune(escaped) + skip = 9 } } - return string(replaced) + return b.String() } -func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { - s := string(bs) +func (p *parser) asciiEscapeToUnicode(it item, s string) rune { hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go index 254ca82e..10c51f7e 100644 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -25,10 +25,8 @@ type field struct { // breaking ties with index sequence. type byName []field -func (x byName) Len() int { return len(x) } - +func (x byName) Len() int { return len(x) } func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - func (x byName) Less(i, j int) bool { if x[i].name != x[j].name { return x[i].name < x[j].name @@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool { // byIndex sorts field by index sequence. type byIndex []field -func (x byIndex) Len() int { return len(x) } - +func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go index 4e90d773..1c090d33 100644 --- a/vendor/github.com/BurntSushi/toml/type_toml.go +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool { type tomlBaseType string -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} +func (btype tomlBaseType) typeString() string { return string(btype) } +func (btype tomlBaseType) String() string { return btype.typeString() } var ( tomlInteger tomlBaseType = "Integer" @@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType { return tomlFloat case itemDatetime: return tomlDatetime - case itemString: + case itemString, itemStringEsc: return tomlString case itemMultilineString: return tomlString diff --git a/vendor/github.com/Microsoft/hcsshim/hnsaccelnet.go b/vendor/github.com/Microsoft/hcsshim/hnsaccelnet.go new file mode 100644 index 00000000..86c7c22a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnsaccelnet.go @@ -0,0 +1,46 @@ +//go:build windows + +package hcsshim + +import ( + "errors" + + "github.com/Microsoft/hcsshim/internal/hns" +) + +// HNSNnvManagementMacAddress represents management mac address +// which needs to be excluded from VF reassignment +type HNSNnvManagementMacAddress = hns.HNSNnvManagementMacAddress + +// HNSNnvManagementMacList represents a list of management +// mac addresses for exclusion from VF reassignment +type HNSNnvManagementMacList = hns.HNSNnvManagementMacList + +var ( + ErrorEmptyMacAddressList = errors.New("management mac_address list is empty") +) + +// SetNnvManagementMacAddresses sets a list of +// management mac addresses in hns for exclusion from VF reassignment. 
+func SetNnvManagementMacAddresses(managementMacAddresses []string) (*HNSNnvManagementMacList, error) { + if len(managementMacAddresses) == 0 { + return nil, ErrorEmptyMacAddressList + } + nnvManagementMacList := &HNSNnvManagementMacList{} + for _, mac := range managementMacAddresses { + nnvManagementMacList.MacAddressList = append(nnvManagementMacList.MacAddressList, HNSNnvManagementMacAddress{MacAddress: mac}) + } + return nnvManagementMacList.Set() +} + +// GetNnvManagementMacAddresses retrieves a list of +// management mac addresses in hns for exclusion from VF reassignment. +func GetNnvManagementMacAddresses() (*HNSNnvManagementMacList, error) { + return hns.GetNnvManagementMacAddressList() +} + +// DeleteNnvManagementMacAddresses delete list of +// management mac addresses in hns which are excluded from VF reassignment. +func DeleteNnvManagementMacAddresses() (*HNSNnvManagementMacList, error) { + return hns.DeleteNnvManagementMacAddressList() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go new file mode 100644 index 00000000..82ca5bae --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go @@ -0,0 +1,60 @@ +//go:build windows + +package hns + +import ( + "encoding/json" + + "github.com/sirupsen/logrus" +) + +// HNSNnvManagementMacAddress represents management mac address +// which needs to be excluded from VF reassignment +type HNSNnvManagementMacAddress struct { + MacAddress string `json:",omitempty"` +} + +// HNSNnvManagementMacList represents a list of management +// mac addresses for exclusion from VF reassignment +type HNSNnvManagementMacList struct { + MacAddressList []HNSNnvManagementMacAddress `json:",omitempty"` +} + +// HNSNnvManagementMacRequest makes a HNS call to modify/query NnvManagementMacList +func HNSNnvManagementMacRequest(method, path, request string) (*HNSNnvManagementMacList, error) { + nnvManagementMacList := &HNSNnvManagementMacList{} + err := hnsCall(method, "/accelnet/"+path, request, &nnvManagementMacList) + if err != nil { + return nil, err + } + return nnvManagementMacList, nil +} + +// Set ManagementMacAddressList by sending "POST" NnvManagementMacRequest to HNS. +func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMacList, error) { + operation := "Set" + title := "hcsshim::nnvManagementMacList::" + operation + logrus.Debugf(title+" id=%s", nnvManagementMacList.MacAddressList) + + jsonString, err := json.Marshal(nnvManagementMacList) + if err != nil { + return nil, err + } + return HNSNnvManagementMacRequest("POST", "", string(jsonString)) +} + +// Get ManagementMacAddressList by sending "GET" NnvManagementMacRequest to HNS. +func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { + operation := "Get" + title := "hcsshim::nnvManagementMacList::" + operation + logrus.Debugf(title) + return HNSNnvManagementMacRequest("GET", "", "") +} + +// Delete ManagementMacAddressList by sending "DELETE" NnvManagementMacRequest to HNS. 
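Review note: the pair of new hnsaccelnet.go files (public wrappers above, the HNS wire calls in this internal package) add a small AccelNet surface for keeping management MAC addresses out of VF reassignment; all of it is gated on go:build windows. A hedged usage sketch (MAC value invented):

    // Register MACs that HNS must not reassign to virtual functions.
    macs := []string{"00-15-5D-00-00-01"}
    if _, err := hcsshim.SetNnvManagementMacAddresses(macs); err != nil {
        return err
    }
    current, _ := hcsshim.GetNnvManagementMacAddresses() // inspect the exclusion list
    _, _ = hcsshim.DeleteNnvManagementMacAddresses()     // clear it again
    _ = current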
+func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { + operation := "Delete" + title := "hcsshim::nnvManagementMacList::" + operation + logrus.Debugf(title) + return HNSNnvManagementMacRequest("DELETE", "", "") +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go index 59366441..6238e103 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -10,6 +10,28 @@ import ( "github.com/sirupsen/logrus" ) +// EndpointState represents the states of an HNS Endpoint lifecycle. +type EndpointState uint16 + +// EndpointState const +// The lifecycle of an Endpoint goes through created, attached, AttachedSharing - endpoint is being shared with other containers, +// detached, after being attached, degraded and finally destroyed. +// Note: This attribute is used by calico to define stale containers and is dependent on HNS v1 api, if we move to HNS v2 api we will need +// to update the current calico code and cordinate the change with calico. Reach out to Microsoft to facilate the change via HNS. +const ( + Uninitialized EndpointState = iota + Created EndpointState = 1 + Attached EndpointState = 2 + AttachedSharing EndpointState = 3 + Detached EndpointState = 4 + Degraded EndpointState = 5 + Destroyed EndpointState = 6 +) + +func (es EndpointState) String() string { + return [...]string{"Uninitialized", "Attached", "AttachedSharing", "Detached", "Degraded", "Destroyed"}[es] +} + // HNSEndpoint represents a network endpoint in HNS type HNSEndpoint struct { Id string `json:"ID,omitempty"` @@ -34,6 +56,7 @@ type HNSEndpoint struct { Namespace *Namespace `json:",omitempty"` EncapOverhead uint16 `json:",omitempty"` SharedContainers []string `json:",omitempty"` + State EndpointState `json:",omitempty"` } // SystemType represents the type of the system on which actions are done diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go index 082c018a..e97e4f63 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -57,9 +57,10 @@ type PaPolicy struct { type OutboundNatPolicy struct { Policy - VIP string `json:"VIP,omitempty"` - Exceptions []string `json:"ExceptionList,omitempty"` - Destinations []string `json:",omitempty"` + VIP string `json:"VIP,omitempty"` + Exceptions []string `json:"ExceptionList,omitempty"` + Destinations []string `json:",omitempty"` + MaxPortPoolUsage uint16 `json:",omitempty"` } type ProxyPolicy struct { diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go deleted file mode 100644 index 87622559..00000000 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errdefs defines the common errors used throughout containerd -// packages. -// -// Use with fmt.Errorf to add context to an error. -// -// To detect an error class, use the IsXXX functions to tell whether an error -// is of a certain type. -// -// The functions ToGRPC and FromGRPC can be used to map server-side and -// client-side errors to the correct types. -package errdefs - -import ( - "context" - "errors" -) - -// Definitions of common error types used throughout containerd. All containerd -// errors returned by most packages will map into one of these errors classes. -// Packages should return errors of these types when they want to instruct a -// client to take a particular action. -// -// For the most part, we just try to provide local grpc errors. Most conditions -// map very well to those defined by grpc. -var ( - ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. - ErrInvalidArgument = errors.New("invalid argument") - ErrNotFound = errors.New("not found") - ErrAlreadyExists = errors.New("already exists") - ErrFailedPrecondition = errors.New("failed precondition") - ErrUnavailable = errors.New("unavailable") - ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented -) - -// IsInvalidArgument returns true if the error is due to an invalid argument -func IsInvalidArgument(err error) bool { - return errors.Is(err, ErrInvalidArgument) -} - -// IsNotFound returns true if the error is due to a missing object -func IsNotFound(err error) bool { - return errors.Is(err, ErrNotFound) -} - -// IsAlreadyExists returns true if the error is due to an already existing -// metadata item -func IsAlreadyExists(err error) bool { - return errors.Is(err, ErrAlreadyExists) -} - -// IsFailedPrecondition returns true if an operation could not proceed to the -// lack of a particular condition -func IsFailedPrecondition(err error) bool { - return errors.Is(err, ErrFailedPrecondition) -} - -// IsUnavailable returns true if the error is due to a resource being unavailable -func IsUnavailable(err error) bool { - return errors.Is(err, ErrUnavailable) -} - -// IsNotImplemented returns true if the error is due to not being implemented -func IsNotImplemented(err error) bool { - return errors.Is(err, ErrNotImplemented) -} - -// IsCanceled returns true if the error is due to `context.Canceled`. -func IsCanceled(err error) bool { - return errors.Is(err, context.Canceled) -} - -// IsDeadlineExceeded returns true if the error is due to -// `context.DeadlineExceeded`. -func IsDeadlineExceeded(err error) bool { - return errors.Is(err, context.DeadlineExceeded) -} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go deleted file mode 100644 index 7a9b33e0..00000000 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errdefs - -import ( - "context" - "fmt" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ToGRPC will attempt to map the backend containerd error into a grpc error, -// using the original error message as a description. -// -// Further information may be extracted from certain errors depending on their -// type. -// -// If the error is unmapped, the original error will be returned to be handled -// by the regular grpc error handling stack. -func ToGRPC(err error) error { - if err == nil { - return nil - } - - if isGRPCError(err) { - // error has already been mapped to grpc - return err - } - - switch { - case IsInvalidArgument(err): - return status.Errorf(codes.InvalidArgument, err.Error()) - case IsNotFound(err): - return status.Errorf(codes.NotFound, err.Error()) - case IsAlreadyExists(err): - return status.Errorf(codes.AlreadyExists, err.Error()) - case IsFailedPrecondition(err): - return status.Errorf(codes.FailedPrecondition, err.Error()) - case IsUnavailable(err): - return status.Errorf(codes.Unavailable, err.Error()) - case IsNotImplemented(err): - return status.Errorf(codes.Unimplemented, err.Error()) - case IsCanceled(err): - return status.Errorf(codes.Canceled, err.Error()) - case IsDeadlineExceeded(err): - return status.Errorf(codes.DeadlineExceeded, err.Error()) - } - - return err -} - -// ToGRPCf maps the error to grpc error codes, assembling the formatting string -// and combining it with the target error string. -// -// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -} - -// FromGRPC returns the underlying error from a grpc service based on the grpc error code -func FromGRPC(err error) error { - if err == nil { - return nil - } - - var cls error // divide these into error classes, becomes the cause - - switch code(err) { - case codes.InvalidArgument: - cls = ErrInvalidArgument - case codes.AlreadyExists: - cls = ErrAlreadyExists - case codes.NotFound: - cls = ErrNotFound - case codes.Unavailable: - cls = ErrUnavailable - case codes.FailedPrecondition: - cls = ErrFailedPrecondition - case codes.Unimplemented: - cls = ErrNotImplemented - case codes.Canceled: - cls = context.Canceled - case codes.DeadlineExceeded: - cls = context.DeadlineExceeded - default: - cls = ErrUnknown - } - - msg := rebaseMessage(cls, err) - if msg != "" { - err = fmt.Errorf("%s: %w", msg, cls) - } else { - err = cls - } - - return err -} - -// rebaseMessage removes the repeats for an error at the end of an error -// string. This will happen when taking an error over grpc then remapping it. -// -// Effectively, we just remove the string of cls from the end of err if it -// appears there. 
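Review note: the wholesale deletions here (errdefs, and the log shim further down) track containerd's extraction of those packages into standalone modules; the platforms files below switch their imports from github.com/containerd/containerd/{errdefs,log} to github.com/containerd/errdefs and github.com/containerd/log. Downstream code migrates the same way; a sketch (helper name invented):

    import (
        "context"

        "github.com/containerd/errdefs"
        "github.com/containerd/log"
    )

    // Same helpers as before; only the import paths change.
    func reportMissing(ctx context.Context, err error) {
        if errdefs.IsNotFound(err) {
            log.G(ctx).WithError(err).Debug("object not found")
        }
    }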
-func rebaseMessage(cls error, err error) string { - desc := errDesc(err) - clss := cls.Error() - if desc == clss { - return "" - } - - return strings.TrimSuffix(desc, ": "+clss) -} - -func isGRPCError(err error) bool { - _, ok := status.FromError(err) - return ok -} - -func code(err error) codes.Code { - if s, ok := status.FromError(err); ok { - return s.Code() - } - return codes.Unknown -} - -func errDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/vendor/github.com/containerd/containerd/log/context_deprecated.go b/vendor/github.com/containerd/containerd/log/context_deprecated.go deleted file mode 100644 index 9e9e8b49..00000000 --- a/vendor/github.com/containerd/containerd/log/context_deprecated.go +++ /dev/null @@ -1,149 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - - "github.com/containerd/log" -) - -// G is a shorthand for [GetLogger]. -// -// Deprecated: use [log.G]. -var G = log.G - -// L is an alias for the standard logger. -// -// Deprecated: use [log.L]. -var L = log.L - -// Fields type to pass to "WithFields". -// -// Deprecated: use [log.Fields]. -type Fields = log.Fields - -// Entry is a logging entry. -// -// Deprecated: use [log.Entry]. -type Entry = log.Entry - -// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using -// zeros to ensure the formatted time is always the same number of -// characters. -// -// Deprecated: use [log.RFC3339NanoFixed]. -const RFC3339NanoFixed = log.RFC3339NanoFixed - -// Level is a logging level. -// -// Deprecated: use [log.Level]. -type Level = log.Level - -// Supported log levels. -const ( - // TraceLevel level. - // - // Deprecated: use [log.TraceLevel]. - TraceLevel Level = log.TraceLevel - - // DebugLevel level. - // - // Deprecated: use [log.DebugLevel]. - DebugLevel Level = log.DebugLevel - - // InfoLevel level. - // - // Deprecated: use [log.InfoLevel]. - InfoLevel Level = log.InfoLevel - - // WarnLevel level. - // - // Deprecated: use [log.WarnLevel]. - WarnLevel Level = log.WarnLevel - - // ErrorLevel level - // - // Deprecated: use [log.ErrorLevel]. - ErrorLevel Level = log.ErrorLevel - - // FatalLevel level. - // - // Deprecated: use [log.FatalLevel]. - FatalLevel Level = log.FatalLevel - - // PanicLevel level. - // - // Deprecated: use [log.PanicLevel]. - PanicLevel Level = log.PanicLevel -) - -// SetLevel sets log level globally. It returns an error if the given -// level is not supported. -// -// Deprecated: use [log.SetLevel]. -func SetLevel(level string) error { - return log.SetLevel(level) -} - -// GetLevel returns the current log level. -// -// Deprecated: use [log.GetLevel]. -func GetLevel() log.Level { - return log.GetLevel() -} - -// OutputFormat specifies a log output format. -// -// Deprecated: use [log.OutputFormat]. -type OutputFormat = log.OutputFormat - -// Supported log output formats. 
-const ( - // TextFormat represents the text logging format. - // - // Deprecated: use [log.TextFormat]. - TextFormat log.OutputFormat = "text" - - // JSONFormat represents the JSON logging format. - // - // Deprecated: use [log.JSONFormat]. - JSONFormat log.OutputFormat = "json" -) - -// SetFormat sets the log output format. -// -// Deprecated: use [log.SetFormat]. -func SetFormat(format OutputFormat) error { - return log.SetFormat(format) -} - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -// -// Deprecated: use [log.WithLogger]. -func WithLogger(ctx context.Context, logger *log.Entry) context.Context { - return log.WithLogger(ctx, logger) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -// -// Deprecated: use [log.GetLogger]. -func GetLogger(ctx context.Context) *log.Entry { - return log.GetLogger(ctx) -} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index 8c600fc9..91f50e8c 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -20,7 +20,7 @@ import ( "runtime" "sync" - "github.com/containerd/containerd/log" + "github.com/containerd/log" ) // Present the ARM instruction set architecture, eg: v7, v8 diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go index 722d86c3..e07aa99c 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go @@ -24,7 +24,7 @@ import ( "runtime" "strings" - "github.com/containerd/containerd/errdefs" + "github.com/containerd/errdefs" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go index fa5f19c4..8cbcbb24 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go @@ -22,7 +22,7 @@ import ( "fmt" "runtime" - "github.com/containerd/containerd/errdefs" + "github.com/containerd/errdefs" ) func getCPUVariant() (string, error) { diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 56613b07..44bc24a5 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -116,7 +116,7 @@ import ( specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/containerd/containerd/errdefs" + "github.com/containerd/errdefs" ) var ( diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index ebd76b38..1f4f925f 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -12,7 +12,6 @@ import ( "strings" "time" - "github.com/containers/common/libimage/manifests" "github.com/containers/common/libimage/platform" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/retry" @@ -32,12 +31,6 @@ const ( defaultRetryDelay = time.Second ) -// LookupReferenceFunc return an image reference based on the 
specified one. -// The returned reference can return custom ImageSource or ImageDestination -// objects which intercept or filter blobs, manifests, and signatures as -// they are read and written. -type LookupReferenceFunc = manifests.LookupReferenceFunc - // CopyOptions allow for customizing image-copy operations. type CopyOptions struct { // If set, will be used for copying the image. Fields below may diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index f0cf2e5b..2465d370 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -88,6 +88,8 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp return tree, nil } + filterInvalidValue := `invalid image filter %q: must be in the format "filter=value or filter!=value"` + var wantedReferenceMatches, unwantedReferenceMatches []string filters := map[string][]filterFunc{} duplicate := map[string]string{} @@ -101,7 +103,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp } else { split = strings.SplitN(f, "=", 2) if len(split) != 2 { - return nil, fmt.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value") + return nil, fmt.Errorf(filterInvalidValue, f) } } @@ -195,7 +197,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp filter = filterBefore(until) default: - return nil, fmt.Errorf("unsupported image filter %q", key) + return nil, fmt.Errorf(filterInvalidValue, key) } if negate { filter = negateFilter(filter) diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index 66829c46..9af120e2 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -135,7 +135,7 @@ func (r *Runtime) layerTree(ctx context.Context, images []*Image) (*layerTree, e // mistake. Users may not be able to recover, so we're now // throwing a warning to guide them to resolve the issue and // turn the errors non-fatal. - logrus.Warnf("Top layer %s of image %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", topLayer, img.ID()) + logrus.Warnf("Top layer %s of image %s not found in layer tree. The storage may be corrupted, consider running `podman system check`.", topLayer, img.ID()) continue } node.images = append(node.images, img) @@ -234,7 +234,7 @@ func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*I // mistake. Users may not be able to recover, so we're now // throwing a warning to guide them to resolve the issue and // turn the errors non-fatal. - logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", parent.TopLayer()) + logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system check`.", parent.TopLayer()) return children, nil } @@ -336,7 +336,7 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { // mistake. Users may not be able to recover, so we're now // throwing a warning to guide them to resolve the issue and // turn the errors non-fatal. - logrus.Warnf("Layer %s not found in layer tree. 
The storage may be corrupted, consider running `podman system reset`.", child.TopLayer()) + logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system check`.", child.TopLayer()) return nil, nil } diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index e52d5469..5093ea3e 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -6,6 +6,7 @@ import ( "context" "errors" "fmt" + "maps" "slices" "time" @@ -20,7 +21,6 @@ import ( structcopier "github.com/jinzhu/copier" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" ) // NOTE: the abstractions and APIs here are a first step to further merge @@ -238,9 +238,7 @@ func (m *ManifestList) Inspect() (*define.ManifestListData, error) { for i, manifest := range ociFormat.Manifests { inspectList.Manifests[i].Annotations = manifest.Annotations inspectList.Manifests[i].ArtifactType = manifest.ArtifactType - if manifest.URLs != nil { - inspectList.Manifests[i].URLs = slices.Clone(manifest.URLs) - } + inspectList.Manifests[i].URLs = slices.Clone(manifest.URLs) inspectList.Manifests[i].Data = manifest.Data inspectList.Manifests[i].Files, err = m.list.Files(manifest.Digest) if err != nil { @@ -252,10 +250,7 @@ func (m *ManifestList) Inspect() (*define.ManifestListData, error) { if platform == nil { platform = &imgspecv1.Platform{} } - var osFeatures []string - if platform.OSFeatures != nil { - osFeatures = slices.Clone(platform.OSFeatures) - } + osFeatures := slices.Clone(platform.OSFeatures) inspectList.Subject = &define.ManifestListDescriptor{ Platform: manifest.Schema2PlatformSpec{ OS: platform.OS, @@ -483,23 +478,23 @@ func (m *ManifestList) AddArtifact(ctx context.Context, options *ManifestListAdd // Options for annotating a manifest list. type ManifestListAnnotateOptions struct { - // Add the specified annotations to the added image. + // Add the specified annotations to the added image. Empty values are ignored. Annotations map[string]string - // Add the specified architecture to the added image. + // Add the specified architecture to the added image. Empty values are ignored. Architecture string - // Add the specified features to the added image. + // Add the specified features to the added image. Empty values are ignored. Features []string - // Add the specified OS to the added image. + // Add the specified OS to the added image. Empty values are ignored. OS string - // Add the specified OS features to the added image. + // Add the specified OS features to the added image. Empty values are ignored. OSFeatures []string - // Add the specified OS version to the added image. + // Add the specified OS version to the added image. Empty values are ignored. OSVersion string - // Add the specified variant to the added image. + // Add the specified variant to the added image. Empty values are ignored unless Architecture is set to a non-empty value. Variant string - // Add the specified annotations to the index itself. + // Add the specified annotations to the index itself. Empty values are ignored. IndexAnnotations map[string]string - // Set the subject to which the index refers. + // Set the subject to which the index refers. Empty values are ignored. 
Subject string } @@ -536,7 +531,7 @@ func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAn return err } } - if len(options.Variant) > 0 { + if len(options.Architecture) != 0 || len(options.Variant) > 0 { if err := m.list.SetVariant(d, options.Variant); err != nil { return err } diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 2213e735..a1625bb1 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "maps" "mime" "net/http" "os" @@ -40,7 +41,6 @@ import ( imgspec "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" ) const ( @@ -236,11 +236,7 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string, // Files returns the list of files associated with a particular artifact // instance in the image index, primarily for display purposes. func (l *list) Files(instanceDigest digest.Digest) ([]string, error) { - filesList, ok := l.artifacts.Files[instanceDigest] - if ok { - return slices.Clone(filesList), nil - } - return nil, nil + return slices.Clone(l.artifacts.Files[instanceDigest]), nil } // instanceByFile returns the instanceDigest of the first manifest in the index @@ -640,9 +636,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag if instanceInfo.OS == "" { instanceInfo.OS = config.OS instanceInfo.OSVersion = config.OSVersion - if config.OSFeatures != nil { - instanceInfo.OSFeatures = slices.Clone(config.OSFeatures) - } + instanceInfo.OSFeatures = slices.Clone(config.OSFeatures) } if instanceInfo.Architecture == "" { instanceInfo.Architecture = config.Architecture @@ -906,9 +900,7 @@ func (l *list) AddArtifact(ctx context.Context, sys *types.SystemContext, option Subject: subject, } // Add in annotations, more or less exactly as specified. - if options.Annotations != nil { - artifactManifest.Annotations = maps.Clone(options.Annotations) - } + artifactManifest.Annotations = maps.Clone(options.Annotations) // Encode and save the data we care about. artifactManifestBytes, err := json.Marshal(artifactManifest) diff --git a/vendor/github.com/containers/common/libimage/types.go b/vendor/github.com/containers/common/libimage/types.go new file mode 100644 index 00000000..348375d8 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/types.go @@ -0,0 +1,9 @@ +package libimage + +import "github.com/containers/common/libimage/manifests" + +// LookupReferenceFunc return an image reference based on the specified one. +// The returned reference can return custom ImageSource or ImageDestination +// objects which intercept or filter blobs, manifests, and signatures as +// they are read and written. 
+type LookupReferenceFunc = manifests.LookupReferenceFunc diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_bsd.go similarity index 93% rename from vendor/github.com/containers/common/pkg/config/config_freebsd.go rename to vendor/github.com/containers/common/pkg/config/config_bsd.go index 5b7f55a7..e378a072 100644 --- a/vendor/github.com/containers/common/pkg/config/config_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/config_bsd.go @@ -1,3 +1,5 @@ +//go:build (freebsd || netbsd || openbsd) + package config const ( diff --git a/vendor/github.com/containers/common/pkg/config/default_freebsd.go b/vendor/github.com/containers/common/pkg/config/default_bsd.go similarity index 95% rename from vendor/github.com/containers/common/pkg/config/default_freebsd.go rename to vendor/github.com/containers/common/pkg/config/default_bsd.go index 1110edd0..c619e178 100644 --- a/vendor/github.com/containers/common/pkg/config/default_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/default_bsd.go @@ -1,3 +1,5 @@ +//go:build (freebsd || netbsd || openbsd) + package config // DefaultInitPath is the default path to the container-init binary. diff --git a/vendor/github.com/containers/common/pkg/config/default_common.go b/vendor/github.com/containers/common/pkg/config/default_common.go index 2caa3f01..ea5ded36 100644 --- a/vendor/github.com/containers/common/pkg/config/default_common.go +++ b/vendor/github.com/containers/common/pkg/config/default_common.go @@ -1,4 +1,4 @@ -//go:build !freebsd +//go:build !freebsd && !netbsd package config diff --git a/vendor/github.com/containers/common/pkg/password/password_supported.go b/vendor/github.com/containers/common/pkg/password/password_supported.go index 4761b3ff..52a56b63 100644 --- a/vendor/github.com/containers/common/pkg/password/password_supported.go +++ b/vendor/github.com/containers/common/pkg/password/password_supported.go @@ -1,4 +1,4 @@ -//go:build linux || darwin || freebsd +//go:build linux || darwin || freebsd || netbsd package password diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 5484a318..aec8361b 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.59.1" +const Version = "0.60.0" diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index 52556304..318450a3 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -73,7 +73,7 @@ type bpCompressionStepData struct { operation bpcOperation // What we are actually doing uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. - uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. + uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. 
srcCompressorName string // Compressor name to record in the blob info cache for the source blob. uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob. closers []io.Closer // Objects to close after the upload is done, if any. @@ -323,7 +323,11 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) } } - if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression { + if d.srcCompressorName == "" || d.uploadedCompressorName == "" { + return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)", + d.srcCompressorName, d.uploadedCompressorName) + } + if d.uploadedCompressorName != internalblobinfocache.UnknownCompression { if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName { // HACK: Don’t record zstd:chunked algorithms. // There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions, @@ -337,7 +341,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf } } if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && - d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression { + d.srcCompressorName != internalblobinfocache.UnknownCompression { if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName { // HACK: Don’t record zstd:chunked algorithms, see above. c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 9d544f56..17cc8c83 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -409,7 +409,6 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, // copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". 
func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) { srcInfos := ic.src.LayerInfos() - numLayers := len(srcInfos) updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) if err != nil { return nil, err } @@ -440,7 +439,7 @@ // copyGroup is used to determine if all layers are copied copyGroup := sync.WaitGroup{} - data := make([]copyLayerData, numLayers) + data := make([]copyLayerData, len(srcInfos)) copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) { defer ic.c.concurrentBlobCopiesSemaphore.Release(1) defer copyGroup.Done() @@ -463,9 +462,7 @@ // Decide which layers to encrypt layersToEncrypt := set.New[int]() - var encryptAll bool if ic.c.options.OciEncryptLayers != nil { - encryptAll = len(*ic.c.options.OciEncryptLayers) == 0 totalLayers := len(srcInfos) for _, l := range *ic.c.options.OciEncryptLayers { switch { @@ -478,7 +475,7 @@ } } - if encryptAll { + if len(*ic.c.options.OciEncryptLayers) == 0 { // “encrypt all layers” for i := 0; i < len(srcInfos); i++ { layersToEncrypt.Add(i) } @@ -493,8 +490,7 @@ defer copyGroup.Wait() for i, srcLayer := range srcInfos { - err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1) - if err != nil { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. return fmt.Errorf("copying layer: %w", err) } @@ -509,8 +505,8 @@ } compressionAlgos := set.New[string]() - destInfos := make([]types.BlobInfo, numLayers) - diffIDs := make([]digest.Digest, numLayers) + destInfos := make([]types.BlobInfo, len(srcInfos)) + diffIDs := make([]digest.Digest, len(srcInfos)) for i, cld := range data { if cld.err != nil { return nil, cld.err diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 94cbcb1d..97d97fed 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -86,11 +86,9 @@ type extensionSignatureList struct { Signatures []extensionSignature `json:"signatures"` } +// bearerToken records a cached token we can use to authenticate.
type bearerToken struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` + token string expirationTime time.Time } @@ -147,25 +145,6 @@ const ( noAuth ) -func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { - token := new(bearerToken) - if err := json.Unmarshal(blob, &token); err != nil { - return nil, err - } - if token.Token == "" { - token.Token = token.AccessToken - } - if token.ExpiresIn < minimumTokenLifetimeSeconds { - token.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) - } - if token.IssuedAt.IsZero() { - token.IssuedAt = time.Now().UTC() - } - token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) - return token, nil -} - // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { if sys != nil && sys.DockerCertPath != "" { @@ -774,7 +753,7 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope token = *t c.tokenCache.Store(cacheKey, token) } - registryToken = token.Token + registryToken = token.token } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken)) return nil @@ -827,12 +806,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall return nil, err } - tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) - if err != nil { - return nil, err - } - - return newBearerTokenFromJSONBlob(tokenBlob) + return newBearerTokenFromHTTPResponseBody(res) } func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, @@ -878,12 +852,50 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, if err := httpResponseToError(res, "Requesting bearer token"); err != nil { return nil, err } - tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) + + return newBearerTokenFromHTTPResponseBody(res) +} + +// newBearerTokenFromHTTPResponseBody parses a http.Response to obtain a bearerToken. +// The caller is still responsible for ensuring res.Body is closed. 
+func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error) { + blob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) if err != nil { return nil, err } - return newBearerTokenFromJSONBlob(tokenBlob) + var token struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + expirationTime time.Time + } + if err := json.Unmarshal(blob, &token); err != nil { + const bodySampleLength = 50 + bodySample := blob + if len(bodySample) > bodySampleLength { + bodySample = bodySample[:bodySampleLength] + } + return nil, fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err) + } + + bt := &bearerToken{ + token: token.Token, + } + if bt.token == "" { + bt.token = token.AccessToken + } + + if token.ExpiresIn < minimumTokenLifetimeSeconds { + token.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) + } + if token.IssuedAt.IsZero() { + token.IssuedAt = time.Now().UTC() + } + bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) + return bt, nil } // detectPropertiesHelper performs the work of detectProperties which executes diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 4c80bb2b..9741afc3 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -14,6 +14,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) // Image is a Docker-specific implementation of types.ImageCloser with a few extra methods @@ -90,6 +91,14 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. } for _, tag := range tagsHolder.Tags { if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values + // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory, + // contrary to the tag format specified in + // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 , + // include digests in the list. 
+ if _, err := digest.Parse(tag); err == nil { + logrus.Debugf("Ignoring invalid tag %q matching a digest format", tag) + continue + } return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err) } tags = append(tags, tag) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 7f59ea3f..7f7a74bd 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -361,8 +361,6 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) continue } - } - if !candidate.UnknownLocation { if candidate.CompressionAlgorithm != nil { logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name()) } else { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index a2b6dbed..c8f6ba30 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -1,7 +1,9 @@ package docker import ( + "bytes" "context" + "encoding/json" "errors" "fmt" "io" @@ -11,6 +13,7 @@ import ( "net/http" "net/url" "os" + "os/exec" "strings" "sync" @@ -162,6 +165,34 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica client.Close() return nil, err } + + if h, err := sysregistriesv2.AdditionalLayerStoreAuthHelper(endpointSys); err == nil && h != "" { + acf := map[string]struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + IdentityToken string `json:"identityToken,omitempty"` + }{ + physicalRef.ref.String(): { + Username: client.auth.Username, + Password: client.auth.Password, + IdentityToken: client.auth.IdentityToken, + }, + } + acfD, err := json.Marshal(acf) + if err != nil { + logrus.Warnf("failed to marshal auth config: %v", err) + } else { + cmd := exec.Command(h) + cmd.Stdin = bytes.NewReader(acfD) + if err := cmd.Run(); err != nil { + var stderr string + if ee, ok := err.(*exec.ExitError); ok { + stderr = string(ee.Stderr) + } + logrus.Warnf("Failed to call additional-layer-store-auth-helper (stderr:%s): %v", stderr, err) + } + } + } return s, nil } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index 67b4cfeb..fe78efae 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -1,6 +1,7 @@ package manifest import ( + "bytes" "encoding/json" "fmt" "maps" @@ -296,29 +297,51 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation }, } for i, component := range components { - var platform *imgspecv1.Platform - if component.Platform != nil { - platformCopy := ociPlatformClone(*component.Platform) - platform = &platformCopy - } - m := imgspecv1.Descriptor{ - MediaType: component.MediaType, - ArtifactType: component.ArtifactType, - Size: component.Size, - Digest: component.Digest, - URLs: slices.Clone(component.URLs), - Annotations: maps.Clone(component.Annotations), - Platform: platform, - } - index.Manifests[i] = m + 
index.Manifests[i] = oci1DescriptorClone(component) } return &index } +func oci1DescriptorClone(d imgspecv1.Descriptor) imgspecv1.Descriptor { + var platform *imgspecv1.Platform + if d.Platform != nil { + platformCopy := ociPlatformClone(*d.Platform) + platform = &platformCopy + } + return imgspecv1.Descriptor{ + MediaType: d.MediaType, + Digest: d.Digest, + Size: d.Size, + URLs: slices.Clone(d.URLs), + Annotations: maps.Clone(d.Annotations), + Data: bytes.Clone(d.Data), + Platform: platform, + ArtifactType: d.ArtifactType, + } +} + // OCI1IndexPublicClone creates a deep copy of the passed-in index. // This is publicly visible as c/image/manifest.OCI1IndexClone. func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic { - return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations) + var subject *imgspecv1.Descriptor + if index.Subject != nil { + s := oci1DescriptorClone(*index.Subject) + subject = &s + } + manifests := make([]imgspecv1.Descriptor, len(index.Manifests)) + for i, m := range index.Manifests { + manifests[i] = oci1DescriptorClone(m) + } + return &OCI1IndexPublic{ + Index: imgspecv1.Index{ + Versioned: index.Versioned, + MediaType: index.MediaType, + ArtifactType: index.ArtifactType, + Manifests: manifests, + Subject: subject, + Annotations: maps.Clone(index.Annotations), + }, + } } // ToOCI1Index returns the index encoded as an OCI1 index. diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go index 037572b0..b413ec51 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go @@ -74,3 +74,15 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { logrus.Debugf("Using SQLite blob info cache at %s", path) return cache } + +// CleanupDefaultCache removes the blob info cache directory. +// It deletes the cache directory but it does not affect any file or memory buffer currently +// in use. +func CleanupDefaultCache(sys *types.SystemContext) error { + dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID()) + if err != nil { + // Mirror the DefaultCache behavior that does not fail in this case + return nil + } + return os.RemoveAll(dir) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 1185e9d4..32aaa4d6 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -27,7 +27,7 @@ type cache struct { uncompressedDigests map[digest.Digest]digest.Digest digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference - compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest + compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest } // New returns a BlobInfoCache implementation which is in-memory only. 
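The `CleanupDefaultCache` function added above intentionally mirrors `DefaultCache`: when the cache directory cannot be resolved it returns nil rather than failing, so callers can treat cleanup as best-effort. A minimal usage sketch (illustrative only, not part of this diff; both function names and signatures are taken from the hunk above):

```go
package main

import (
	"log"

	"github.com/containers/image/v5/pkg/blobinfocache"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{}

	// DefaultCache does not fail; it falls back to an in-memory cache if
	// the on-disk cache cannot be opened.
	cache := blobinfocache.DefaultCache(sys)
	_ = cache // ... use the cache while copying images ...

	// Reset-style cleanup: remove the on-disk cache directory. Mirroring
	// DefaultCache, this is a no-op when the directory cannot be determined.
	if err := blobinfocache.CleanupDefaultCache(sys); err != nil {
		log.Printf("removing blob info cache: %v", err)
	}
}
```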
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 45427a35..1b161474 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -248,6 +248,11 @@ type V2RegistriesConf struct { // potentially use all unqualified-search registries ShortNameMode string `toml:"short-name-mode"` + // AdditionalLayerStoreAuthHelper is a helper binary that receives + // registry credentials and passes them to the Additional Layer Store for + // registry authentication. These credentials are only collected when pulling (not pushing). + AdditionalLayerStoreAuthHelper string `toml:"additional-layer-store-auth-helper"` + shortNameAliasConf // If you add any field, make sure to update Nonempty() below. @@ -825,6 +830,16 @@ func CredentialHelpers(sys *types.SystemContext) ([]string, error) { return config.partialV2.CredentialHelpers, nil } +// AdditionalLayerStoreAuthHelper returns the helper for passing registry +// credentials to the Additional Layer Store. +func AdditionalLayerStoreAuthHelper(sys *types.SystemContext) (string, error) { + config, err := getConfig(sys) + if err != nil { + return "", err + } + return config.partialV2.AdditionalLayerStoreAuthHelper, nil +} + // refMatchingSubdomainPrefix returns the length of ref // iff ref, which is a registry, repository namespace, repository or image reference (as formatted by // reference.Domain(), reference.Named.Name() or reference.Reference.String() @@ -1051,6 +1066,11 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { c.shortNameMode = updates.shortNameMode } + // == Merge AdditionalLayerStoreAuthHelper: + if updates.partialV2.AdditionalLayerStoreAuthHelper != "" { + c.partialV2.AdditionalLayerStoreAuthHelper = updates.partialV2.AdditionalLayerStoreAuthHelper + } + // == Merge aliasCache: // We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf. c.aliasCache.updateWithConfigurationFrom(updates.aliasCache) diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go index 5d6c1ac8..bd1979ae 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -15,6 +15,7 @@ import ( "github.com/containers/image/v5/signature/internal" "github.com/containers/storage/pkg/homedir" + // This is fallback code; the primary recommendation is to use the gpgme mechanism // implementation, which is out-of-process and more appropriate for handling long-term private key material // than any Go implementation.
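For context on the new `additional-layer-store-auth-helper` registries.conf field above: at pull time the configured helper binary is executed with a JSON credential map written to its stdin, keyed by image reference, as the `newImageSourceAttempt` hunk earlier shows. A hypothetical helper consuming that input might look like the sketch below; the JSON field names are copied from the diff, while the program structure and logging are assumptions:

```go
package main

import (
	"encoding/json"
	"log"
	"os"
)

// creds matches the JSON shape marshaled in newImageSourceAttempt.
type creds struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	IdentityToken string `json:"identityToken,omitempty"`
}

func main() {
	// c/image writes a single JSON object on stdin, keyed by image reference.
	var byRef map[string]creds
	if err := json.NewDecoder(os.Stdin).Decode(&byRef); err != nil {
		log.Fatalf("decoding credentials: %v", err)
	}
	for ref := range byRef {
		// A real helper would forward the credentials to the Additional
		// Layer Store here; this stub only records what it received.
		log.Printf("received credentials for %s", ref)
	}
}
```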
@@ -150,7 +151,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [ return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) } if md.SignedBy == nil { - return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)) + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Key not found for key ID %x in signature", md.SignedByKeyId)) } if md.Signature != nil { if md.Signature.SigLifetimeSecs != nil { diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index a0b34741..92dfebd3 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -325,7 +325,13 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces if out.UncompressedDigest != "" { // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is // responsible for ensuring blobDigest has been validated. + if out.CompressedDigest != blobDigest { + return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q", + out.CompressedDigest, blobDigest) + } s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + // We trust ApplyDiffWithDiffer to validate or create both values correctly. + options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) } else { // Don’t identify layers by TOC if UncompressedDigest is available. // - Using UncompressedDigest allows image reuse with non-partially-pulled layers diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 0b5c3bd7..4d889f80 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 31 + VersionMinor = 32 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/github.com/containers/ocicrypt/.golangci.yml b/vendor/github.com/containers/ocicrypt/.golangci.yml index d3800d1e..bf39af83 100644 --- a/vendor/github.com/containers/ocicrypt/.golangci.yml +++ b/vendor/github.com/containers/ocicrypt/.golangci.yml @@ -7,7 +7,7 @@ linters: - goimports - revive - ineffassign - - vet + - govet - unused - misspell diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go index 0c485d51..b8436a8d 100644 --- a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go +++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go @@ -96,9 +96,8 @@ func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) { return v, ok } else if v, ok := lbco.Private.CipherOptions[key]; ok { return v, ok - } else { - return nil, false } + return nil, false } func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer { diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go index 3912e82d..3bba4669 100644 --- a/vendor/github.com/containers/ocicrypt/gpg.go +++ b/vendor/github.com/containers/ocicrypt/gpg.go @@ -79,9 +79,8 @@ func 
GuessGPGVersion() GPGVersion { return GPGv2 } else if err := exec.Command("gpg", "--version").Run(); err == nil { return GPGv1 - } else { - return GPGVersionUndetermined } + return GPGVersionUndetermined } // NewGPGClient creates a new GPGClient object representing the given version diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go index 24e1d619..c1bdd6fb 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go @@ -24,7 +24,7 @@ import ( "github.com/containers/ocicrypt/config" "github.com/containers/ocicrypt/keywrap" "github.com/containers/ocicrypt/utils" - "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v4" ) type jweKeyWrapper struct { @@ -65,7 +65,11 @@ func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([] } func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) { - jwe, err := jose.ParseEncrypted(string(jweString)) + // cf. list of algorithms in func addPubKeys() below + keyEncryptionAlgorithms := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256, jose.ECDH_ES_A128KW, jose.ECDH_ES_A192KW, jose.ECDH_ES_A256KW} + // accept all algorithms defined in RFC 7518, section 5.1 + contentEncryption := []jose.ContentEncryption{jose.A128CBC_HS256, jose.A192CBC_HS384, jose.A256CBC_HS512, jose.A128GCM, jose.A192GCM, jose.A256GCM} + jwe, err := jose.ParseEncrypted(string(jweString), keyEncryptionAlgorithms, contentEncryption) if err != nil { return nil, errors.New("jose.ParseEncrypted failed") } diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go index ddb244a8..6ac0fcb9 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go @@ -124,9 +124,8 @@ func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []b } return protocolOuput.KeyWrapResults.Annotation, nil - } else { - return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") } + return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") } return nil, nil @@ -162,9 +161,8 @@ func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString } return protocolOuput.KeyUnwrapResults.OptsData, nil - } else { - return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") } + return nil, errors.New("Unsupported keyprovider invocation. 
Supported invocation methods are grpc and cmd") } func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) { diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go index 160f747b..f653f2ef 100644 --- a/vendor/github.com/containers/ocicrypt/utils/utils.go +++ b/vendor/github.com/containers/ocicrypt/utils/utils.go @@ -26,7 +26,7 @@ import ( "strings" "github.com/containers/ocicrypt/crypto/pkcs11" - "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v4" "golang.org/x/crypto/openpgp" ) diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index c2474c7f..50b98761 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -23,7 +23,7 @@ env: # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20240513t140131z-f40f39d13" + IMAGE_SUFFIX: "c20240529t141726z-f40f39d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml index 20968466..ec11f5da 100644 --- a/vendor/github.com/containers/storage/.golangci.yml +++ b/vendor/github.com/containers/storage/.golangci.yml @@ -1,11 +1,7 @@ --- run: concurrency: 6 - deadline: 5m - skip-dirs-use-default: true + timeout: 5m linters: enable: - gofumpt - disable: - - errcheck - - staticcheck diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 7ee2642f..96ea9f9a 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -53,6 +53,8 @@ local-cross cross: ## cross build the binaries for arm, darwin, and freebsd os=`echo $${target} | cut -f1 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \ suffix=$${os}.$${arch} ; \ + echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) ./... ; \ + env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) ./... 
|| exit 1 ; \ echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \ env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \ done diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index b7921ae8..094d6ad0 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.54.0 +1.55.0 diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go index e58084fc..7176ba36 100644 --- a/vendor/github.com/containers/storage/check.go +++ b/vendor/github.com/containers/storage/check.go @@ -304,7 +304,14 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) { archiveErr = err } // consume any trailer after the EOF marker - io.Copy(io.Discard, diffReader) + if _, err := io.Copy(io.Discard, diffReader); err != nil { + err = fmt.Errorf("layer %s: consume any trailer after the EOF marker: %w", layerID, err) + if isReadWrite { + report.Layers[layerID] = append(report.Layers[layerID], err) + } else { + report.ROLayers[layerID] = append(report.ROLayers[layerID], err) + } + } wg.Done() }(id, reader) wg.Wait() @@ -366,7 +373,7 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) { if options.LayerMountable { func() { // Mount the layer. - mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel}) + mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel, Options: []string{"ro"}}) if err != nil { err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) if isReadWrite { @@ -955,6 +962,9 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6 mtime: mtime, } } + case tar.TypeXGlobalHeader: + // ignore, since even though it looks like a valid pathname, it doesn't end + // up on the filesystem default: // treat these as TypeReg items delete(c.directory, components[0]) @@ -966,9 +976,6 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6 mode: mode, mtime: mtime, } - case tar.TypeXGlobalHeader: - // ignore, since even though it looks like a valid pathname, it doesn't end - // up on the filesystem } return } diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index e00314d3..c12d4ca5 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -30,7 +30,6 @@ import ( "io" "io/fs" "os" - "os/exec" "path" "path/filepath" "strings" @@ -75,8 +74,6 @@ func init() { type Driver struct { sync.Mutex root string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap ctr *graphdriver.RefCounter pathCacheLock sync.Mutex pathCache map[string]string @@ -129,22 +126,16 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) a := &Driver{ root: home, - uidMaps: options.UIDMaps, - gidMaps: options.GIDMaps, pathCache: make(map[string]string), ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), locker: locker.New(), mountOptions: mountOptions, } - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } // 
Create the root aufs driver dir and return // if it already exists // If not populate the dir structure - if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil { + if err := os.MkdirAll(home, 0o700); err != nil { if os.IsExist(err) { return a, nil } @@ -157,7 +148,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Populate the dir structure for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil { + if err := os.MkdirAll(path.Join(home, p), 0o700); err != nil { return nil, err } } @@ -191,13 +182,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } // Return a nil error if the kernel supports aufs -// We cannot modprobe because inside dind modprobe fails -// to run func supportsAufs() error { - // We can try to modprobe aufs first before looking at - // proc/filesystems for when aufs is supported - exec.Command("modprobe", "aufs").Run() - if unshare.IsRootless() { return ErrAufsNested } @@ -334,7 +319,7 @@ func (a *Driver) createDirsFor(id, parent string) error { // The path of directories are /mnt/ // and /diff/ for _, p := range paths { - rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair() + rootPair := idtools.IDPair{UID: 0, GID: 0} rootPerms := defaultPerms if parent != "" { st, err := system.Stat(path.Join(a.rootPath(), p, parent)) @@ -355,7 +340,9 @@ func (a *Driver) createDirsFor(id, parent string) error { // Remove will unmount and remove the given id. func (a *Driver) Remove(id string) error { a.locker.Lock(id) - defer a.locker.Unlock(id) + defer func() { + _ = a.locker.Unlock(id) + }() a.pathCacheLock.Lock() mountpoint, exists := a.pathCache[id] a.pathCacheLock.Unlock() @@ -446,7 +433,10 @@ func atomicRemove(source string) error { // This will mount the dir at its given path func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { a.locker.Lock(id) - defer a.locker.Unlock(id) + defer func() { + _ = a.locker.Unlock(id) + }() + parents, err := a.getParentLayerPaths(id) if err != nil && !os.IsNotExist(err) { return "", err @@ -483,7 +473,10 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { // Put unmounts and updates list of active mounts. func (a *Driver) Put(id string) error { a.locker.Lock(id) - defer a.locker.Unlock(id) + defer func() { + _ = a.locker.Unlock(id) + }() + a.pathCacheLock.Lock() m, exists := a.pathCache[id] if !exists { @@ -506,7 +499,9 @@ func (a *Driver) Put(id string) error { // For AUFS, it queries the mountpoint for this ID. 
func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { a.locker.Lock(id) - defer a.locker.Unlock(id) + defer func() { + _ = a.locker.Unlock(id) + }() a.pathCacheLock.Lock() m, exists := a.pathCache[id] if !exists { @@ -689,7 +684,9 @@ func (a *Driver) Cleanup() error { func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) { defer func() { if err != nil { - Unmount(target) + if err1 := Unmount(target); err1 != nil { + logrus.Warnf("Unmount %q: %v", target, err1) + } } }() diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 11ae5636..7a0f9cbc 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -66,11 +66,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites) } - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil { + if err := os.MkdirAll(filepath.Join(home, "subvolumes"), 0o700); err != nil { return nil, err } @@ -85,8 +81,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) driver := &Driver{ home: home, - uidMaps: options.UIDMaps, - gidMaps: options.GIDMaps, options: opt, } @@ -129,8 +123,6 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) { type Driver struct { // root of the file system home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap options btrfsOptions quotaEnabled bool once sync.Once @@ -481,11 +473,7 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { quotas := d.quotasDir() subvolumes := d.subvolumesDir() - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil { + if err := os.MkdirAll(subvolumes, 0o700); err != nil { return err } if parent == "" { @@ -523,7 +511,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { return err } - if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil { + if err := os.MkdirAll(quotas, 0o700); err != nil { return err } if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil { @@ -531,14 +519,6 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { } } - // if we have a remapped root (user namespaces enabled), change the created snapshot - // dir ownership to match - if rootUID != 0 || rootGID != 0 { - if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { - return err - } - } - mountLabel := "" if opts != nil { mountLabel = opts.MountLabel diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index ca43c3f0..d728e919 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -4,11 +4,12 @@ import ( "bytes" "errors" "fmt" + "io/fs" "os" 
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/reexec" - "github.com/opencontainers/selinux/pkg/pwalk" + "github.com/opencontainers/selinux/pkg/pwalkdir" ) const ( @@ -54,13 +55,14 @@ func chownByMapsMain() { chowner := newLChowner() - chown := func(path string, info os.FileInfo, _ error) error { - if path == "." { + var chown fs.WalkDirFunc = func(path string, d fs.DirEntry, _ error) error { + info, err := d.Info() + if path == "." || err != nil { return nil } return chowner.LChown(path, info, toHost, toContainer) } - if err := pwalk.Walk(".", chown); err != nil { + if err := pwalkdir.Walk(".", chown); err != nil { fmt.Fprintf(os.Stderr, "error during chown: %v", err) os.Exit(1) } diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go index 9a1c6751..a65f0d58 100644 --- a/vendor/github.com/containers/storage/drivers/chroot_unix.go +++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go @@ -1,5 +1,5 @@ -//go:build linux || darwin || freebsd || solaris -// +build linux darwin freebsd solaris +//go:build !windows +// +build !windows package graphdriver diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index 9c3d7c66..87894094 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -50,13 +50,13 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c defer srcFile.Close() if *copyWithFileClone { - _, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd()) - if err == nil { + _, _, errno := unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd()) + if errno == 0 { return nil } *copyWithFileClone = false - if err == unix.EXDEV { + if errno == unix.EXDEV { *copyWithFileRange = false } } diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index b42ba075..b37837b0 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -193,6 +193,7 @@ type DriverWithDifferOutput struct { UIDs []uint32 GIDs []uint32 UncompressedDigest digest.Digest + CompressedDigest digest.Digest Metadata string BigData map[string][]byte TarSplit []byte @@ -215,20 +216,25 @@ const ( DifferOutputFormatFlat ) +// DifferFsVerity is a part of the experimental Differ interface and should not be used from outside of c/storage. +// It configures the fsverity requirement. type DifferFsVerity int const ( // DifferFsVerityDisabled means no fs-verity is used DifferFsVerityDisabled = iota - // DifferFsVerityEnabled means fs-verity is used when supported - DifferFsVerityEnabled + // DifferFsVerityIfAvailable means fs-verity is used when supported by + // the underlying kernel and filesystem. + DifferFsVerityIfAvailable - // DifferFsVerityRequired means fs-verity is required + // DifferFsVerityRequired means fs-verity is required. Note this is not + // currently set or exposed by the overlay driver. DifferFsVerityRequired ) -// DifferOptions overrides how the differ work +// DifferOptions is a part of the experimental Differ interface and should not be used from outside of c/storage. +// It overrides how the differ works. 
type DifferOptions struct { // Format defines the destination directory layout format Format DifferOutputFormat @@ -377,8 +383,6 @@ type Options struct { ImageStore string DriverPriority []string DriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap ExperimentalEnabled bool } diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index d8139f65..d75c5fdf 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -263,7 +263,11 @@ func supportsIdmappedLowerLayers(home string) (bool, error) { if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { return false, fmt.Errorf("create mapped mount: %w", err) } - defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH) + defer func() { + if err := unix.Unmount(lowerMappedDir, unix.MNT_DETACH); err != nil { + logrus.Warnf("Unmount %q: %v", lowerMappedDir, err) + } + }() opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir) flags := uintptr(0) diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go index 8f07c236..973cd786 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -56,7 +57,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644) if err != nil { - return fmt.Errorf("failed to open output file %q: %w", destFile, err) + return &fs.PathError{Op: "openat", Path: destFile, Err: err} } outFd := os.NewFile(uintptr(fd), "outFd") @@ -117,7 +118,7 @@ func hasACL(path string) (bool, error) { fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { - return false, err + return false, &fs.PathError{Op: "openat", Path: path, Err: err} } defer unix.Close(fd) // do not worry about checking the magic number, if the file is invalid @@ -125,7 +126,7 @@ func hasACL(path string) (bool, error) { flags := make([]byte, 4) nread, err := unix.Pread(fd, flags, 8) if err != nil { - return false, err + return false, fmt.Errorf("pread %q: %w", path, err) } if nread != 4 { return false, fmt.Errorf("failed to read flags from %q", path) @@ -150,5 +151,8 @@ func mountComposefsBlob(dataDir, mountPoint string) error { mountOpts += ",noacl" } - return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts) + if err := unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts); err != nil { + return fmt.Errorf("failed to mount erofs image at %q: %w", mountPoint, err) + } + return nil } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 8b6f64b1..8ed35745 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "os/exec" "path" @@ -119,8 +120,6 @@ type Driver struct { home string runhome string imageStore string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap ctr 
*graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions @@ -332,13 +331,9 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) backingFs = fsName runhome := filepath.Join(options.RunRoot, filepath.Base(home)) - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0o755, 0, 0); err != nil { + if err := os.MkdirAll(path.Join(home, linkDir), 0o755); err != nil { return nil, err } @@ -348,7 +343,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } - if err := idtools.MkdirAllAs(runhome, 0o700, rootUID, rootGID); err != nil { + if err := os.MkdirAll(runhome, 0o700); err != nil { return nil, err } @@ -373,9 +368,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) return nil, err } } else { - if opts.forceMask != nil { - return nil, errors.New("'force_mask' is supported only with 'mount_program'") - } // check if they are running over btrfs, aufs, overlay, or ecryptfs switch fsMagic { case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: @@ -457,8 +449,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) home: home, imageStore: options.ImageStore, runhome: runhome, - uidMaps: options.UIDMaps, - gidMaps: options.GIDMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)), supportsDType: supportsDType, usingMetacopy: usingMetacopy, @@ -698,12 +688,8 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { } func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { - // We can try to modprobe overlay first - selinuxLabelTest := selinux.PrivContainerMountLabel() - exec.Command("modprobe", "overlay").Run() - logLevel := logrus.ErrorLevel if unshare.IsRootless() { logLevel = logrus.DebugLevel @@ -831,7 +817,9 @@ func (d *Driver) useNaiveDiff() bool { logrus.Info(nativeDiffCacheText) useNaiveDiffOnly = true } - cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText) + if err := cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText); err != nil { + logrus.Warnf("Recording overlay native-diff support status: %v", err) + } }) return useNaiveDiffOnly } @@ -860,14 +848,14 @@ func (d *Driver) Status() [][2]string { // Metadata returns meta data about the overlay driver such as // LowerDir, UpperDir, WorkDir and MergeDir used to store data. 
func (d *Driver) Metadata(id string) (map[string]string, error) { - dir := d.dir(id) + dir, _, inAdditionalStore := d.dir2(id, false) if err := fileutils.Exists(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, "work"), - "MergedDir": path.Join(dir, "merged"), + "MergedDir": d.getMergedDir(id, dir, inAdditionalStore), "UpperDir": path.Join(dir, "diff"), } @@ -983,6 +971,10 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts } } + if d.options.forceMask != nil && d.options.mountProgram == "" { + return fmt.Errorf("overlay: force_mask option for writeable layers is only supported with a mount_program") + } + if _, ok := opts.StorageOpt["size"]; !ok { if opts.StorageOpt == nil { opts.StorageOpt = map[string]string{} @@ -1021,8 +1013,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl disableQuota := readOnly - uidMaps := d.uidMaps - gidMaps := d.gidMaps + var uidMaps []idtools.IDMap + var gidMaps []idtools.IDMap if opts != nil && opts.IDMappings != nil { uidMaps = opts.IDMappings.UIDs() @@ -1047,14 +1039,23 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil { return err } + + st := idtools.Stat{IDs: idPair, Mode: defaultPerms} + if parent != "" { parentBase := d.dir(parent) - st, err := system.Stat(filepath.Join(parentBase, "diff")) - if err != nil { - return err + parentDiff := filepath.Join(parentBase, "diff") + if xSt, err := idtools.GetContainersOverrideXattr(parentDiff); err == nil { + st = xSt + } else { + systemSt, err := system.Stat(parentDiff) + if err != nil { + return err + } + st.IDs.UID = int(systemSt.UID()) + st.IDs.GID = int(systemSt.GID()) + st.Mode = os.FileMode(systemSt.Mode()) } - rootUID = int(st.UID()) - rootGID = int(st.GID()) } if err := fileutils.Lexists(dir); err == nil { @@ -1100,22 +1101,21 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl } } - perms := defaultPerms + forcedSt := st if d.options.forceMask != nil { - perms = *d.options.forceMask + forcedSt.IDs = idPair + forcedSt.Mode = *d.options.forceMask } - if parent != "" { - parentBase := d.dir(parent) - st, err := system.Stat(filepath.Join(parentBase, "diff")) - if err != nil { - return err - } - perms = os.FileMode(st.Mode()) + diff := path.Join(dir, "diff") + if err := idtools.MkdirAs(diff, forcedSt.Mode, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil { + return err } - if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil { - return err + if d.options.forceMask != nil { + if err := idtools.SetContainersOverrideXattr(diff, st); err != nil { + return err + } } lid := generateID(idLength) @@ -1130,16 +1130,16 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl return err } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil { return err } // if no parent directory, create a dummy lower directory and skip writing a "lowers" file if parent == "" { - return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, rootUID, rootGID) + return 
idtools.MkdirAs(path.Join(dir, "empty"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID) } lower, err := d.getLower(parent) @@ -1283,12 +1283,6 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { } func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string { - if uidMaps == nil { - uidMaps = d.uidMaps - } - if gidMaps == nil { - gidMaps = d.gidMaps - } if uidMaps != nil { var uids, gids bytes.Buffer if len(uidMaps) == 1 && uidMaps[0].Size == 1 { @@ -1539,11 +1533,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO for err == nil { absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) diffN++ - st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) - if err == nil && !permsKnown { - perms = os.FileMode(st.Mode()) - permsKnown = true - } + err = fileutils.Exists(filepath.Join(dir, nameWithSuffix("diff", diffN))) } idmappedMountProcessPid := -1 @@ -1561,7 +1551,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO composefsMounts := []string{} defer func() { for _, m := range composefsMounts { - defer unix.Unmount(m, unix.MNT_DETACH) + defer func(m string) { + if err := unix.Unmount(m, unix.MNT_DETACH); err != nil { + logrus.Warnf("Unmount %q: %v", m, err) + } + }(m) } }() @@ -1665,7 +1659,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO skipIDMappingLayers[composefsMount] = composefsMount // overlay takes a reference on the mount, so it is safe to unmount // the mapped idmounts as soon as the final overlay file system is mounted. - defer unix.Unmount(composefsMount, unix.MNT_DETACH) + defer func() { + if err := unix.Unmount(composefsMount, unix.MNT_DETACH); err != nil { + logrus.Warnf("Unmount %q: %v", composefsMount, err) + } + }() } absLowers = append(absLowers, composefsMount) continue @@ -1705,10 +1703,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } - mergedDir := path.Join(dir, "merged") + mergedDir := d.getMergedDir(id, dir, inAdditionalStore) // Attempt to create the merged dir only if it doesn't exist. if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) { - if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err } } @@ -1772,7 +1770,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // overlay takes a reference on the mount, so it is safe to unmount // the mapped idmounts as soon as the final overlay file system is mounted. 
- defer unix.Unmount(root, unix.MNT_DETACH) + defer func() { + if err := unix.Unmount(root, unix.MNT_DETACH); err != nil { + logrus.Warnf("Unmount %q: %v", root, err) + } + }() } // relative path to the layer through the id mapped mount @@ -1854,7 +1856,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { return mountOverlayFrom(d.home, source, target, mType, flags, label) } - mountTarget = path.Join(id, "merged") + if !inAdditionalStore { + mountTarget = path.Join(id, "merged") + } } // overlay has a check in place to prevent mounting the same file system twice @@ -1873,13 +1877,26 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return mergedDir, nil } +// getMergedDir returns the directory path that should be used as the mount point for the overlayfs. +func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string { + // If the layer is in an additional store, the lock we hold might only be a reading lock. To prevent + // races with other processes, use a private directory under the main store rundir. At this point, the + // current process is holding an exclusive lock on the store, and since the rundir cannot be shared for + // different stores, it is safe to assume the current process has exclusive access to it. + if inAdditionalStore { + return path.Join(d.runhome, id, "merged") + } + return path.Join(dir, "merged") +} + // Put unmounts the mount path created for the given id. func (d *Driver) Put(id string) error { dir, _, inAdditionalStore := d.dir2(id, false) if err := fileutils.Exists(dir); err != nil { return err } - mountpoint := path.Join(dir, "merged") + mountpoint := d.getMergedDir(id, dir, inAdditionalStore) + if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } @@ -1936,7 +1953,15 @@ func (d *Driver) Put(id string) error { } } - if !inAdditionalStore { + if inAdditionalStore { + // check the base name for extra safety + if strings.HasPrefix(mountpoint, d.runhome) && filepath.Base(mountpoint) == "merged" { + err := os.RemoveAll(filepath.Dir(mountpoint)) + if err != nil { + logrus.Warningf("Failed to remove mountpoint %s overlay: %s: %v", id, mountpoint, err) + } + } + } else { uid, gid := int(0), int(0) fi, err := os.Stat(mountpoint) if err != nil { @@ -1953,7 +1978,7 @@ func (d *Driver) Put(id string) error { // rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains // its atomic semantic. In this way the "merged" directory is never removed.
if err := unix.Rename(tmpMountpoint, mountpoint); err != nil { - logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err) + logrus.Debugf("Failed to replace mountpoint %s overlay: %s: %v", id, mountpoint, err) return fmt.Errorf("replacing mount point %q: %w", mountpoint, err) } } @@ -2024,11 +2049,27 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { } type overlayFileGetter struct { - diffDirs []string + diffDirs []string + composefsMounts map[string]*os.File // map from diff dir to the directory with the composefs blob mounted } func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) { + buf := make([]byte, unix.PathMax) for _, d := range g.diffDirs { + if f, found := g.composefsMounts[d]; found { + // there is no *at equivalent for getxattr, but it can be emulated by opening the file under /proc/self/fd/$FD/$PATH + len, err := unix.Getxattr(fmt.Sprintf("/proc/self/fd/%d/%s", int(f.Fd()), path), "trusted.overlay.redirect", buf) + if err != nil { + if errors.Is(err, unix.ENODATA) { + continue + } + return nil, &fs.PathError{Op: "getxattr", Path: path, Err: err} + } + + // the xattr value is the path to the file in the composefs layer diff directory + return os.Open(filepath.Join(d, string(buf[:len]))) + } + f, err := os.Open(filepath.Join(d, path)) if err == nil { return f, nil @@ -2041,7 +2082,16 @@ func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) { } func (g *overlayFileGetter) Close() error { - return nil + var errs *multierror.Error + for _, f := range g.composefsMounts { + if err := f.Close(); err != nil { + errs = multierror.Append(errs, err) + } + if err := unix.Rmdir(f.Name()); err != nil { + errs = multierror.Append(errs, err) + } + } + return errs.ErrorOrNil() } func (d *Driver) getStagingDir(id string) string { @@ -2052,10 +2102,7 @@ func (d *Driver) getStagingDir(id string) string { // DiffGetter returns a FileGetCloser that can read files from the directory that // contains files for the layer differences, either for this layer, or one of our // lowers if we're just a template directory. Used for direct access for tar-split. -func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - if d.usingComposefs { - return nil, nil - } +func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error) { p, err := d.getDiffPath(id) if err != nil { return nil, err @@ -2064,7 +2111,43 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { if err != nil { return nil, err } - return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil + + // map from diff dir to the directory with the composefs blob mounted + composefsMounts := make(map[string]*os.File) + defer func() { + if Err != nil { + for _, f := range composefsMounts { + f.Close() + if err := unix.Rmdir(f.Name()); err != nil && !os.IsNotExist(err) { + logrus.Warnf("Failed to remove %s: %v", f.Name(), err) + } + } + } + }() + diffDirs := append([]string{p}, paths...) 
+ for _, diffDir := range diffDirs { + // diffDir has the form $GRAPH_ROOT/overlay/$ID/diff, so grab the $ID from the parent directory + id := path.Base(path.Dir(diffDir)) + composefsBlob := d.getComposefsData(id) + if fileutils.Exists(composefsBlob) != nil { + // not a composefs layer, ignore it + continue + } + dir, err := os.MkdirTemp(d.runhome, "composefs-mnt") + if err != nil { + return nil, err + } + if err := mountComposefsBlob(composefsBlob, dir); err != nil { + return nil, err + } + fd, err := os.Open(dir) + if err != nil { + return nil, err + } + composefsMounts[diffDir] = fd + _ = unix.Unmount(dir, unix.MNT_DETACH) + } + return &overlayFileGetter{diffDirs: diffDirs, composefsMounts: composefsMounts}, nil } // CleanupStagingDirectory cleanups the staging directory. @@ -2100,9 +2183,16 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) { // ApplyDiffWithDiffer applies the changes in the new layer using the specified function func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) { var idMappings *idtools.IDMappings + var forceMask *os.FileMode + if options != nil { idMappings = options.Mappings + forceMask = options.ForceMask + } + if d.options.forceMask != nil { + forceMask = d.options.forceMask } + if idMappings == nil { idMappings = &idtools.IDMappings{} } @@ -2120,8 +2210,8 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App return graphdriver.DriverWithDifferOutput{}, err } perms := defaultPerms - if d.options.forceMask != nil { - perms = *d.options.forceMask + if forceMask != nil { + perms = *forceMask } applyDir = filepath.Join(layerDir, "dir") if err := os.Mkdir(applyDir, perms); err != nil { @@ -2155,7 +2245,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App } if d.usingComposefs { differOptions.Format = graphdriver.DifferOutputFormatFlat - differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled + differOptions.UseFsVerity = graphdriver.DifferFsVerityIfAvailable } out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{ UIDMaps: idMappings.UIDs(), @@ -2163,6 +2253,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App IgnoreChownErrors: d.options.ignoreChownErrors, WhiteoutFormat: d.getWhiteoutFormat(), InUserNS: unshare.IsRootless(), + ForceMask: forceMask, }, &differOptions) out.Target = applyDir @@ -2342,14 +2433,18 @@ func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent strin // layers. 
diffPath, err := d.getDiffPath(id) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get diff path: %w", err) } layers, err := d.getLowerDiffPaths(id) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get lower diff path: %w", err) } - return archive.OverlayChanges(layers, diffPath) + c, err := archive.OverlayChanges(layers, diffPath) + if err != nil { + return nil, fmt.Errorf("computing changes: %w", err) + } + return c, nil } // AdditionalImageStores returns additional image stores supported by the driver @@ -2476,6 +2571,19 @@ func nameWithSuffix(name string, number int) string { return fmt.Sprintf("%s%d", name, number) } +func validateOneAdditionalLayerPath(target string) error { + for _, p := range []string{ + filepath.Join(target, "diff"), + filepath.Join(target, "info"), + filepath.Join(target, "blob"), + } { + if err := fileutils.Exists(p); err != nil { + return err + } + } + return nil +} + func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (string, error) { refElem := base64.StdEncoding.EncodeToString([]byte(ref)) for _, ls := range d.options.layerStores { @@ -2484,18 +2592,11 @@ func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (st ref = refElem } target := path.Join(ls.path, ref, tocDigest.String()) - // Check if all necessary files exist - for _, p := range []string{ - filepath.Join(target, "diff"), - filepath.Join(target, "info"), - filepath.Join(target, "blob"), - } { - if err := fileutils.Exists(p); err != nil { - wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err) - return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown) - } - } - return target, nil + err := validateOneAdditionalLayerPath(target) + if err == nil { + return target, nil + } + logrus.Debugf("additional Layer Store %v failed to stat additional layer: %v", ls, err) } return "", fmt.Errorf("additional layer (%q, %q) not found: %w", tocDigest, ref, graphdriver.ErrLayerUnknown) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go similarity index 86% rename from vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go rename to vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go index 88bfbf9c..bc80301d 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go @@ -1,5 +1,5 @@ -//go:build linux && cgo -// +build linux,cgo +//go:build linux && cgo && !exclude_disk_quota +// +build linux,cgo,!exclude_disk_quota package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go new file mode 100644 index 00000000..1340c45b --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go @@ -0,0 +1,18 @@ +//go:build linux && (!cgo || exclude_disk_quota) +// +build linux +// +build !cgo exclude_disk_quota + +package overlay + +import ( + "path" + + "github.com/containers/storage/pkg/directory" +) + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For Overlay, it attempts to check the XFS quota for size, and falls back to +// finding the size of the "diff" directory. 
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(path.Join(d.dir(id), "diff")) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go index d4f540c9..0577711b 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -5,18 +5,8 @@ package overlay import ( "fmt" - "path" - - "github.com/containers/storage/pkg/directory" ) -// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. -// For Overlay, it attempts to check the XFS quota for size, and falls back to -// finding the size of the "diff" directory. -func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { - return directory.Usage(path.Join(d.dir(id), "diff")) -} - func getComposeFsHelper() (string, error) { return "", fmt.Errorf("composefs not supported on this build") } diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go index b0623bda..92e4001d 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go @@ -19,16 +19,6 @@ package quota #include #include -#ifndef FS_XFLAG_PROJINHERIT -struct fsxattr { - __u32 fsx_xflags; - __u32 fsx_extsize; - __u32 fsx_nextents; - __u32 fsx_projid; - unsigned char fsx_pad[12]; -}; -#define FS_XFLAG_PROJINHERIT 0x00000200 -#endif #ifndef FS_IOC_FSGETXATTR #define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) #endif @@ -357,7 +347,6 @@ func setProjectID(targetPath string, projectID uint32) error { return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) } fsx.fsx_projid = C.__u32(projectID) - fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index db903211..f60ec17b 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -33,12 +33,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) d := &Driver{ name: "vfs", home: home, - idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), imageStore: options.ImageStore, } - rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(filepath.Join(home, "dir"), 0o700, rootIDs); err != nil { + if err := os.MkdirAll(filepath.Join(home, "dir"), 0o700); err != nil { return nil, err } for _, option := range options.DriverOptions { @@ -79,7 +77,6 @@ type Driver struct { name string home string additionalHomes []string - idMappings *idtools.IDMappings ignoreChownErrors bool naiveDiff graphdriver.DiffDriver updater graphdriver.LayerIDMapUpdater @@ -152,14 +149,21 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool return fmt.Errorf("--storage-opt is not supported for vfs") } - idMappings := d.idMappings + var uidMaps []idtools.IDMap + var gidMaps []idtools.IDMap + if opts != nil && opts.IDMappings != nil { - idMappings = opts.IDMappings + uidMaps = opts.IDMappings.UIDs() + gidMaps = 
opts.IDMappings.GIDs() + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err } dir := d.dir2(id, ro) - rootIDs := idMappings.RootPair() - if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil { + if err := os.MkdirAll(filepath.Dir(dir), 0o700); err != nil { return err } @@ -174,21 +178,24 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool rootPerms = os.FileMode(0o700) } + idPair := idtools.IDPair{UID: rootUID, GID: rootGID} if parent != "" { st, err := system.Stat(d.dir(parent)) if err != nil { return err } rootPerms = os.FileMode(st.Mode()) - rootIDs.UID = int(st.UID()) - rootIDs.GID = int(st.GID()) + idPair.UID = int(st.UID()) + idPair.GID = int(st.GID()) } - if err := idtools.MkdirAndChown(dir, rootPerms, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dir, rootPerms, idPair); err != nil { return err } labelOpts := []string{"level:s0"} if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { - label.SetFileLabel(dir, mountLabel) + if err := label.SetFileLabel(dir, mountLabel); err != nil { + logrus.Debugf("Set %s label to %q file ended with error: %v", mountLabel, dir, err) + } } if parent != "" { parentDir, err := d.Get(parent, graphdriver.MountOpts{}) diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index e0228978..0e859a93 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -106,11 +106,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) } - rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps) - if err != nil { - return nil, fmt.Errorf("failed to get root uid/gid: %w", err) - } - if err := idtools.MkdirAllAs(base, 0o700, rootUID, rootGID); err != nil { + if err := os.MkdirAll(base, 0o700); err != nil { return nil, fmt.Errorf("failed to create '%s': %w", base, err) } @@ -118,8 +114,6 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { dataset: rootDataset, options: options, filesystemsCache: filesystemsCache, - uidMaps: opt.UIDMaps, - gidMaps: opt.GIDMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), } return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil @@ -177,8 +171,6 @@ type Driver struct { options zfsOptions sync.Mutex // protects filesystem cache against concurrent access filesystemsCache map[string]bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap ctr *graphdriver.RefCounter } @@ -248,7 +240,9 @@ func (d *Driver) cloneFilesystem(name, parentName string) error { } if err != nil { - snapshot.Destroy(zfs.DestroyDeferDeletion) + if err1 := snapshot.Destroy(zfs.DestroyDeferDeletion); err1 != nil { + logrus.Warnf("Destroy zfs.DestroyDeferDeletion: %v", err1) + } return err } return snapshot.Destroy(zfs.DestroyDeferDeletion) @@ -448,12 +442,8 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr opts := label.FormatMountLabel(mountOptions, options.MountLabel) logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts) - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return "", err - } // Create the target directories if they 
don't exist - if err := idtools.MkdirAllAs(mountpoint, 0o755, rootUID, rootGID); err != nil { + if err := os.MkdirAll(mountpoint, 0o755); err != nil { return "", err } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index f1325262..6caf28ab 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -2529,7 +2529,9 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver layer.GIDs = diffOutput.GIDs updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID) layer.UncompressedDigest = diffOutput.UncompressedDigest - updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID) + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, diffOutput.CompressedDigest, layer.ID) + layer.CompressedDigest = diffOutput.CompressedDigest + updateDigestMap(&r.bytocsum, layer.TOCDigest, diffOutput.TOCDigest, layer.ID) layer.TOCDigest = diffOutput.TOCDigest layer.UncompressedSize = diffOutput.Size layer.Metadata = diffOutput.Metadata diff --git a/vendor/github.com/containers/storage/lockfile_compat.go b/vendor/github.com/containers/storage/lockfile_compat.go index 64020388..ec98b40c 100644 --- a/vendor/github.com/containers/storage/lockfile_compat.go +++ b/vendor/github.com/containers/storage/lockfile_compat.go @@ -5,7 +5,7 @@ import ( ) // Deprecated: Use lockfile.*LockFile. -type Locker = lockfile.Locker //lint:ignore SA1019 // lockfile.Locker is deprecated +type Locker = lockfile.Locker //nolint:staticcheck // SA1019 lockfile.Locker is deprecated // Deprecated: Use lockfile.GetLockFile. func GetLockfile(path string) (lockfile.Locker, error) { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 77c9c818..13a45895 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -70,6 +70,8 @@ type ( } ) +const PaxSchilyXattr = "SCHILY.xattr." + const ( tarExt = "tar" solaris = "solaris" @@ -169,10 +171,17 @@ func DetectCompression(source []byte) Compression { } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. 
-func DecompressStream(archive io.Reader) (io.ReadCloser, error) { +func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) + + defer func() { + if Err != nil { + p.Put(buf) + } + }() + if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and @@ -189,6 +198,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: + cleanup := func() { + p.Put(buf) + } + if rc, canUse := tryProcFilter([]string{"pigz", "-d"}, buf, cleanup); canUse { + return rc, nil + } gzReader, err := gzip.NewReader(buf) if err != nil { return nil, err @@ -207,6 +222,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return readBufWrapper, nil case Zstd: + cleanup := func() { + p.Put(buf) + } + if rc, canUse := tryProcFilter([]string{"zstd", "-d"}, buf, cleanup); canUse { + return rc, nil + } return zstdReader(buf) default: return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) @@ -214,9 +235,16 @@ } // CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { +func CompressStream(dest io.Writer, compression Compression) (_ io.WriteCloser, Err error) { p := pools.BufioWriter32KPool buf := p.Get(dest) + + defer func() { + if Err != nil { + p.Put(buf) + } + }() + switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) @@ -391,11 +419,11 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro return hdr, nil } -// ReadSecurityXattrToTarHeader reads security.capability, security,image +// readSecurityXattrToTarHeader reads security.capability, security.ima // xattrs from filesystem to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - if hdr.Xattrs == nil { - hdr.Xattrs = make(map[string]string) +func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + if hdr.PAXRecords == nil { + hdr.PAXRecords = make(map[string]string) } for _, xattr := range []string{"security.capability", "security.ima"} { capability, err := system.Lgetxattr(path, xattr) @@ -403,14 +431,14 @@ return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err) } if capability != nil { - hdr.Xattrs[xattr] = string(capability) + hdr.PAXRecords[PaxSchilyXattr+xattr] = string(capability) } } return nil } -// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header -func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error { +// readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header +func readUserXattrToTarHeader(path string, hdr *tar.Header) error { xattrs, err := system.Llistxattr(path) if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { return err @@ -425,10 +453,10 @@ } return err } - if hdr.Xattrs == nil { - hdr.Xattrs = make(map[string]string) + if hdr.PAXRecords == nil { + hdr.PAXRecords = make(map[string]string) } -
hdr.Xattrs[key] = string(value) + hdr.PAXRecords[PaxSchilyXattr+key] = string(value) } } return nil @@ -516,10 +544,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { if err != nil { return err } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + if err := readSecurityXattrToTarHeader(path, hdr); err != nil { return err } - if err := ReadUserXattrToTarHeader(path, hdr); err != nil { + if err := readUserXattrToTarHeader(path, hdr); err != nil { return err } if err := ReadFileFlagsToTarHeader(path, hdr); err != nil { @@ -642,7 +670,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } } - case tar.TypeReg, tar.TypeRegA: + case tar.TypeReg: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile @@ -701,8 +729,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { - value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777) - if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { + value := idtools.Stat{ + IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}, + Mode: hdrInfo.Mode() & 0o7777, + } + if err := idtools.SetContainersOverrideXattr(path, value); err != nil { return err } } @@ -753,11 +784,15 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } var errs []string - for key, value := range hdr.Xattrs { - if _, found := xattrsToIgnore[key]; found { + for key, value := range hdr.PAXRecords { + xattrKey, ok := strings.CutPrefix(key, PaxSchilyXattr) + if !ok { + continue + } + if _, found := xattrsToIgnore[xattrKey]; found { continue } - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil { if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. 
However only @@ -1113,9 +1148,14 @@ loop: } } - if options.ForceMask != nil && rootHdr != nil { - value := fmt.Sprintf("%d:%d:0%o", rootHdr.Uid, rootHdr.Gid, rootHdr.Mode) - if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { + if options.ForceMask != nil { + value := idtools.Stat{Mode: 0o755} + if rootHdr != nil { + value.IDs.UID = rootHdr.Uid + value.IDs.GID = rootHdr.Gid + value.Mode = os.FileMode(rootHdr.Mode) + } + if err := idtools.SetContainersOverrideXattr(dest, value); err != nil { return err } } @@ -1337,7 +1377,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id } } else if runtime.GOOS == darwin { uid, gid = hdr.Uid, hdr.Gid - if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok { + if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok { attrs := strings.Split(string(xstat), ":") if len(attrs) == 3 { val, err := strconv.ParseUint(attrs[0], 10, 32) diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go index 4d362f07..a754d0d4 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go @@ -1,5 +1,5 @@ -//go:build freebsd || darwin -// +build freebsd darwin +//go:build netbsd || freebsd || darwin +// +build netbsd freebsd darwin package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 02995d76..eae60a30 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -48,8 +48,8 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi return nil, err } if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, getOverlayOpaqueXattrName()) + if hdr.PAXRecords != nil { + delete(hdr.PAXRecords, PaxSchilyXattr+getOverlayOpaqueXattrName()) } // If there are no lower layers, then it can't have been deleted in this layer. if len(o.rolayers) == 0 { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index f8414717..dc308120 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -316,7 +316,11 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) // with respect to the parent layers func OverlayChanges(layers []string, rw string) ([]Change, error) { dc := func(root, path string, fi os.FileInfo) (string, error) { - return overlayDeletedFile(layers, root, path, fi) + r, err := overlayDeletedFile(layers, root, path, fi) + if err != nil { + return "", fmt.Errorf("overlay deleted file query: %w", err) + } + return r, nil } return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) } @@ -351,7 +355,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str // If the directory isn't marked as opaque, then it's just a normal directory. 
opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName()) if err != nil { - return "", err + return "", fmt.Errorf("failed querying overlay opaque xattr: %w", err) } if len(opaque) != 1 || opaque[0] != 'y' { return "", err } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go index 6b2e5938..25429406 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -31,9 +31,9 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta ownerChanged || oldStat.Rdev() != newStat.Rdev() || oldStat.Flags() != newStat.Flags() || + !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || // Don't look at size for dirs, it's not a good measure of change - (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + ((oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR) && (oldStat.Size() != newStat.Size())) { return true } return false diff --git a/vendor/github.com/containers/storage/pkg/archive/filter.go b/vendor/github.com/containers/storage/pkg/archive/filter.go new file mode 100644 index 00000000..9902a1ef --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/filter.go @@ -0,0 +1,73 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "os/exec" + "strings" + "sync" +) + +var filterPath sync.Map + +func getFilterPath(name string) string { + path, ok := filterPath.Load(name) + if ok { + return path.(string) + } + + path, err := exec.LookPath(name) + if err != nil { + path = "" + } + + filterPath.Store(name, path) + return path.(string) +} + +type errorRecordingReader struct { + r io.Reader + err error +} + +func (r *errorRecordingReader) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + if r.err == nil && err != io.EOF { + r.err = err + } + return n, err +} + +// tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout. +// cleanup() is a caller provided function that will be called when the command finishes running, regardless of +// whether it succeeds or fails. +// If the command is not found, it returns (nil, false) and the cleanup function is not called. +func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadCloser, bool) { + path := getFilterPath(args[0]) + if path == "" { + return nil, false + } + + var stderrBuf bytes.Buffer + + inputWithError := &errorRecordingReader{r: input} + + r, w := io.Pipe() + cmd := exec.Command(path, args[1:]...)
+ cmd.Stdin = inputWithError + cmd.Stdout = w + cmd.Stderr = &stderrBuf + go func() { + err := cmd.Run() + // if there is an error reading from input, prefer to return that error + if inputWithError.err != nil { + err = inputWithError.err + } else if err != nil && stderrBuf.Len() > 0 { + err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err) + } + w.CloseWithError(err) // CloseWithError(nil) == Close() + cleanup() + }() + return r, true +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go index f7a16e9f..a0a578d3 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go @@ -10,9 +10,11 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string, ) error { + _ = root // Restricting the operation to this root is not implemented on macOS return archive.Unpack(decompressedArchive, dest, options) } func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + _ = root // Restricting the operation to this root is not implemented on macOS return archive.TarWithOptions(srcPath, options) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go index 259f8c99..9907d24c 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -107,12 +107,15 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T w.Close() if err := cmd.Wait(); err != nil { + errorOut := fmt.Errorf("unpacking failed (error: %w; output: %s)", err, output) // when `xz -d -c -q | storage-untar ...` failed on storage-untar side, // we need to exhaust `xz`'s output, otherwise the `xz` side will be // pending on write pipe forever - io.Copy(io.Discard, decompressedArchive) + if _, err := io.Copy(io.Discard, decompressedArchive); err != nil { + return fmt.Errorf("%w\nexhausting input failed (error: %w)", errorOut, err) + } - return fmt.Errorf("processing tar file(%s): %w", output, err) + return errorOut } return nil } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go index 09ef6d5d..5b8acdab 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -19,10 +19,13 @@ import ( // Old root is removed after the call to pivot_root so it is no longer available under the new root. // This is similar to how libcontainer sets up a container's rootfs func chroot(path string) (err error) { - caps, err := capability.NewPid(0) + caps, err := capability.NewPid2(0) if err != nil { return err } + if err := caps.Load(); err != nil { + return err + } // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host // environment not in the chroot from untrusted files.
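The new pkg/archive/filter.go above, together with the DecompressStream changes earlier in this patch, lets gzip and zstd payloads be decompressed by an external process (pigz -d or zstd -d) when one is installed on the host, instead of the in-process Go decompressors. The following standalone sketch illustrates the same pattern using only the Go standard library; the decompressGzip helper and the pigz fallback choice are illustrative assumptions, not the vendored API:

package main

import (
	"compress/gzip"
	"io"
	"log"
	"os"
	"os/exec"
)

// decompressGzip prefers an external "pigz -d" process when one is on PATH:
// the compressed stream is wired to the child's stdin and its stdout is
// exposed through an io.Pipe, so decompression runs concurrently in a
// separate process. Otherwise it falls back to the in-process gzip reader.
func decompressGzip(src io.Reader) (io.ReadCloser, error) {
	if path, err := exec.LookPath("pigz"); err == nil {
		r, w := io.Pipe()
		cmd := exec.Command(path, "-d")
		cmd.Stdin = src
		cmd.Stdout = w
		go func() {
			// Propagate the child's result to the read side;
			// CloseWithError(nil) behaves like Close().
			w.CloseWithError(cmd.Run())
		}()
		return r, nil
	}
	return gzip.NewReader(src)
}

func main() {
	// Usage example: decompress a gzip stream from stdin to stdout.
	rc, err := decompressGzip(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}

Unlike this sketch, the vendored tryProcFilter also wraps the input in errorRecordingReader and captures the child's stderr, so a failure on the read side is reported in preference to the child process's exit error.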
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index 71ed094d..cdcd9fdc 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -40,11 +40,13 @@ func applyLayer() { } // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) + oldMask, err := system.Umask(0) if err != nil { fatal(err) } + defer func() { + _, _ = system.Umask(oldMask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above. + }() if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { fatal(err) diff --git a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go similarity index 87% rename from vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go rename to vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go index 45d76ec3..09e75680 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go +++ b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go @@ -2,10 +2,15 @@ package chunked import ( "encoding/binary" + "fmt" "hash/crc32" "io" + + "github.com/docker/go-units" ) +const bloomFilterMaxLength = 100 * units.MB // max size for bloom filter + type bloomFilter struct { bitArray []uint64 k uint32 @@ -79,6 +84,10 @@ func readBloomFilter(reader io.Reader) (*bloomFilter, error) { if err := binary.Read(reader, binary.LittleEndian, &k); err != nil { return nil, err } + // sanity check + if bloomFilterLen > bloomFilterMaxLength { + return nil, fmt.Errorf("bloom filter length %d exceeds max length %d", bloomFilterLen, bloomFilterMaxLength) + } bloomFilterArray := make([]uint64, bloomFilterLen) if err := binary.Read(reader, binary.LittleEndian, &bloomFilterArray); err != nil { return nil, err diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 34d1b92f..d49ddfed 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -18,6 +18,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/chunked/internal" "github.com/containers/storage/pkg/ioutils" + "github.com/docker/go-units" jsoniter "github.com/json-iterator/go" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" @@ -34,6 +35,8 @@ const ( // https://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html bloomFilterScale = 10 // how much bigger is the bloom filter than the number of entries bloomFilterHashes = 3 // number of hash functions for the bloom filter + + maxTagsLen = 100 * units.MB // max size for tags len ) type cacheFile struct { @@ -77,7 +80,9 @@ var ( func (c *layer) release() { runtime.SetFinalizer(c, nil) if c.mmapBuffer != nil { - unix.Munmap(c.mmapBuffer) + if err := unix.Munmap(c.mmapBuffer); err != nil { + logrus.Warnf("Error Munmap: layer %q: %v", c.id, err) + } } } @@ -189,7 +194,9 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) { } defer func() { if errRet != nil && mmapBuffer != nil { - unix.Munmap(mmapBuffer) + if err := unix.Munmap(mmapBuffer); err != nil { + logrus.Warnf("Error Munmap: layer %q: %v", layerID, err) 
+ } } }() cacheFile, err := readCacheFileFromMemory(buffer) @@ -280,6 +287,13 @@ func (c *layersCache) load() error { newLayers = append(newLayers, l) continue } + + if r.ReadOnly { + // if the layer is coming from a read-only store, do not attempt + // to write to it. + continue + } + // the cache file is either not present or broken. Try to generate it from the TOC. l, err = c.createCacheFileFromTOC(r.ID) if err != nil { @@ -635,6 +649,14 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) { if err := binary.Read(bigData, binary.LittleEndian, &fnamesLen); err != nil { return nil, err } + + if tagsLen > maxTagsLen { + return nil, fmt.Errorf("tags len %d exceeds the maximum allowed size %d", tagsLen, maxTagsLen) + } + if digestLen > tagLen { + return nil, fmt.Errorf("digest len %d exceeds the tag len %d", digestLen, tagLen) + } + tags := make([]byte, tagsLen) if _, err := bigData.Read(tags); err != nil { return nil, err @@ -643,6 +665,10 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) { // retrieve the unread part of the buffer. remaining := bigDataBuffer[len(bigDataBuffer)-bigData.Len():] + if vdataLen >= uint64(len(remaining)) { + return nil, fmt.Errorf("vdata len %d exceeds the remaining buffer size %d", vdataLen, len(remaining)) + } + vdata := remaining[:vdataLen] fnames := remaining[vdataLen:] @@ -901,7 +927,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { s := iter.ReadString() d, err := digest.Parse(s) if err != nil { - return nil, fmt.Errorf("Invalid tarSplitDigest %q: %w", s, err) + return nil, fmt.Errorf("invalid tarSplitDigest %q: %w", s, err) } toc.TarSplitDigest = d diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 7b3879a9..633740a2 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -5,13 +5,16 @@ import ( "errors" "fmt" "io" + "maps" "strconv" + "time" "github.com/containers/storage/pkg/chunked/internal" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/vbatts/tar-split/archive/tar" + expMaps "golang.org/x/exp/maps" ) var typesToTar = map[string]byte{ @@ -209,20 +212,162 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di } decodedTarSplit := []byte{} - if tarSplitChunk.Offset > 0 { + if toc.TarSplitDigest != "" { + if tarSplitChunk.Offset <= 0 { + return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey) + } tarSplit, err := readBlob(tarSplitChunk.Length) if err != nil { return nil, nil, nil, 0, err } - decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String()) if err != nil { return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err) } + // We use the TOC for creating on-disk files, but the tar-split for creating metadata + // when exporting the layer contents. Ensure the two match, otherwise local inspection of a container + // might be misleading about the exported contents. 
+ if err := ensureTOCMatchesTarSplit(toc, decodedTarSplit); err != nil { + return nil, nil, nil, 0, fmt.Errorf("tar-split and TOC data is inconsistent: %w", err) + } + } else if tarSplitChunk.Offset > 0 { + // We must ignore the tar-split when the digest is not present in the TOC, because we can't authenticate it. + // + // But if we asked for the chunk, now we must consume the data to not block the producer. + // Ideally the GetBlobAt API should be changed so that this is not necessary. + _, err := readBlob(tarSplitChunk.Length) + if err != nil { + return nil, nil, nil, 0, err + } } return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err } +// ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries. +func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { + pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries + for i := range toc.Entries { + e := &toc.Entries[i] + if e.Type != internal.TypeChunk { + if _, ok := pendingFiles[e.Name]; ok { + return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name) + } + pendingFiles[e.Name] = e + } + } + + if err := iterateTarSplit(tarSplit, func(hdr *tar.Header) error { + e, ok := pendingFiles[hdr.Name] + if !ok { + return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name) + } + delete(pendingFiles, hdr.Name) + expected, err := internal.NewFileMetadata(hdr) + if err != nil { + return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err) + } + if err := ensureFileMetadataAttributesMatch(e, &expected); err != nil { + return fmt.Errorf("TOC and tar-split metadata doesn't match: %w", err) + } + + return nil + }); err != nil { + return err + } + if len(pendingFiles) != 0 { + remaining := expMaps.Keys(pendingFiles) + if len(remaining) > 5 { + remaining = remaining[:5] // Just to limit the size of the output. + } + return fmt.Errorf("TOC contains entries not present in tar-split, incl. %q", remaining) + } + return nil +} + +// ensureTimePointersMatch ensures that a and b are equal +func ensureTimePointersMatch(a, b *time.Time) error { + // We didn't always use "timeIfNotZero" when creating the TOC, so treat time.IsZero the same as nil. + // The archive/tar code turns time.IsZero() timestamps into a Unix timestamp of 0 when writing, but turns a Unix timestamp of 0 + // when reading into a (local-timezone) Jan 1 1970, which is not IsZero(). So, treat that the same as IsZero as well. + unixZero := time.Unix(0, 0) + if a != nil && (a.IsZero() || a.Equal(unixZero)) { + a = nil + } + if b != nil && (b.IsZero() || b.Equal(unixZero)) { + b = nil + } + switch { + case a == nil && b == nil: + return nil + case a == nil: + return fmt.Errorf("nil != %v", *b) + case b == nil: + return fmt.Errorf("%v != nil", *a) + default: + if a.Equal(*b) { + return nil + } + return fmt.Errorf("%v != %v", *a, *b) + } +} + +// ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data +// in the tar stream or matching contents) +func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error { + // Keep this in sync with internal.FileMetadata!
+ + if a.Type != b.Type { + return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type) + } + if a.Name != b.Name { + return fmt.Errorf("mismatch of Name: %q != %q", a.Name, b.Name) + } + if a.Linkname != b.Linkname { + return fmt.Errorf("mismatch of Linkname: %q != %q", a.Linkname, b.Linkname) + } + if a.Mode != b.Mode { + return fmt.Errorf("mismatch of Mode: %q != %q", a.Mode, b.Mode) + } + if a.Size != b.Size { + return fmt.Errorf("mismatch of Size: %q != %q", a.Size, b.Size) + } + if a.UID != b.UID { + return fmt.Errorf("mismatch of UID: %q != %q", a.UID, b.UID) + } + if a.GID != b.GID { + return fmt.Errorf("mismatch of GID: %q != %q", a.GID, b.GID) + } + + if err := ensureTimePointersMatch(a.ModTime, b.ModTime); err != nil { + return fmt.Errorf("mismatch of ModTime: %w", err) + } + if err := ensureTimePointersMatch(a.AccessTime, b.AccessTime); err != nil { + return fmt.Errorf("mismatch of AccessTime: %w", err) + } + if err := ensureTimePointersMatch(a.ChangeTime, b.ChangeTime); err != nil { + return fmt.Errorf("mismatch of ChangeTime: %w", err) + } + if a.Devmajor != b.Devmajor { + return fmt.Errorf("mismatch of Devmajor: %q != %q", a.Devmajor, b.Devmajor) + } + if a.Devminor != b.Devminor { + return fmt.Errorf("mismatch of Devminor: %q != %q", a.Devminor, b.Devminor) + } + if !maps.Equal(a.Xattrs, b.Xattrs) { + return fmt.Errorf("mismatch of Xattrs: %q != %q", a.Xattrs, b.Xattrs) + } + + // Digest is not compared + // Offset is not compared + // EndOffset is not compared + + // ChunkSize is not compared + // ChunkOffset is not compared + // ChunkDigest is not compared + // ChunkType is not compared + return nil +} + func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) { d, err := digest.Parse(expectedCompressedChecksum) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index a2fd904c..65496974 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -7,7 +7,6 @@ package compressor import ( "bufio" "bytes" - "encoding/base64" "io" "github.com/containers/storage/pkg/chunked/internal" @@ -369,34 +368,14 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r } } - typ, err := internal.GetType(hdr.Typeflag) + mainEntry, err := internal.NewFileMetadata(hdr) if err != nil { return err } - xattrs := make(map[string]string) - for k, v := range hdr.Xattrs { - xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v)) - } - entries := []internal.FileMetadata{ - { - Type: typ, - Name: hdr.Name, - Linkname: hdr.Linkname, - Mode: hdr.Mode, - Size: hdr.Size, - UID: hdr.Uid, - GID: hdr.Gid, - ModTime: &hdr.ModTime, - AccessTime: &hdr.AccessTime, - ChangeTime: &hdr.ChangeTime, - Devmajor: hdr.Devmajor, - Devminor: hdr.Devminor, - Xattrs: xattrs, - Digest: checksum, - Offset: startOffset, - EndOffset: lastOffset, - }, - } + mainEntry.Digest = checksum + mainEntry.Offset = startOffset + mainEntry.EndOffset = lastOffset + entries := []internal.FileMetadata{mainEntry} for i := 1; i < len(chunks); i++ { entries = append(entries, internal.FileMetadata{ Type: internal.TypeChunk, diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index 701b6aa5..d98cee09 100644 --- 
a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -1,13 +1,16 @@ +//go:build unix + package dump import ( "bufio" + "encoding/base64" "fmt" "io" "path/filepath" + "reflect" "strings" "time" - "unicode" "github.com/containers/storage/pkg/chunked/internal" "golang.org/x/sys/unix" @@ -20,20 +23,26 @@ const ( ESCAPE_LONE_DASH ) -func escaped(val string, escape int) string { +func escaped(val []byte, escape int) string { noescapeSpace := escape&NOESCAPE_SPACE != 0 escapeEqual := escape&ESCAPE_EQUAL != 0 escapeLoneDash := escape&ESCAPE_LONE_DASH != 0 - length := len(val) - - if escapeLoneDash && val == "-" { + if escapeLoneDash && len(val) == 1 && val[0] == '-' { return fmt.Sprintf("\\x%.2x", val[0]) } + // This is intended to match the C isprint API with LC_CTYPE=C + isprint := func(c byte) bool { + return c >= 32 && c < 127 + } + // This is intended to match the C isgraph API with LC_CTYPE=C + isgraph := func(c byte) bool { + return c > 32 && c < 127 + } + var result string - for i := 0; i < length; i++ { - c := val[i] + for _, c := range []byte(val) { hexEscape := false var special string @@ -50,9 +59,9 @@ func escaped(val string, escape int) string { hexEscape = escapeEqual default: if noescapeSpace { - hexEscape = !unicode.IsPrint(rune(c)) + hexEscape = !isprint(c) } else { - hexEscape = !unicode.IsPrint(rune(c)) || unicode.IsSpace(rune(c)) + hexEscape = !isgraph(c) } } @@ -67,8 +76,8 @@ func escaped(val string, escape int) string { return result } -func escapedOptional(val string, escape int) string { - if val == "" { +func escapedOptional(val []byte, escape int) string { + if len(val) == 0 { return "-" } return escaped(val, escape) @@ -104,10 +113,31 @@ func sanitizeName(name string) string { return path } -func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { +func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { path := sanitizeName(entry.Name) - if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil { + parent := filepath.Dir(path) + if _, found := added[parent]; !found && path != "/" { + parentEntry := &internal.FileMetadata{ + Name: parent, + Type: internal.TypeDir, + Mode: 0o755, + } + if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil { + return err + } + + } + if e, found := added[path]; found { + // if the entry was already added, make sure it has the same data + if !reflect.DeepEqual(*e, *entry) { + return fmt.Errorf("entry %q already added with different data", path) + } + return nil + } + added[path] = entry + + if _, err := fmt.Fprint(out, escaped([]byte(path), ESCAPE_STANDARD)); err != nil { return err } @@ -151,7 +181,7 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri } } - if _, err := fmt.Fprintf(out, escapedOptional(payload, ESCAPE_LONE_DASH)); err != nil { + if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil { return err } @@ -165,14 +195,18 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri return err } digest := verityDigests[payload] - if _, err := fmt.Fprintf(out, escapedOptional(digest, ESCAPE_LONE_DASH)); err != nil { + if _, err := fmt.Fprint(out, escapedOptional([]byte(digest), ESCAPE_LONE_DASH)); err != nil { return err } - for k, v := range 
entry.Xattrs { - name := escaped(k, ESCAPE_EQUAL) - value := escaped(v, ESCAPE_EQUAL) + for k, vEncoded := range entry.Xattrs { + v, err := base64.StdEncoding.DecodeString(vEncoded) + if err != nil { + return fmt.Errorf("decode xattr %q: %w", k, err) + } + name := escaped([]byte(k), ESCAPE_EQUAL) + value := escaped(v, ESCAPE_EQUAL) if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil { return err } @@ -201,6 +235,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, }() links := make(map[string]int) + added := make(map[string]*internal.FileMetadata) for _, e := range toc.Entries { if e.Linkname == "" { continue @@ -211,14 +246,14 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, links[e.Linkname] = links[e.Linkname] + 1 } - if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") { + if len(toc.Entries) == 0 { root := &internal.FileMetadata{ Name: "/", Type: internal.TypeDir, Mode: 0o755, } - if err := dumpNode(w, links, verityDigests, root); err != nil { + if err := dumpNode(w, added, links, verityDigests, root); err != nil { pipeW.CloseWithError(err) closed = true return @@ -229,7 +264,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, if e.Type == internal.TypeChunk { continue } - if err := dumpNode(w, links, verityDigests, &e); err != nil { + if err := dumpNode(w, added, links, verityDigests, &e); err != nil { pipeW.CloseWithError(err) closed = true return diff --git a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go new file mode 100644 index 00000000..4fc4864a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go @@ -0,0 +1,605 @@ +package chunked + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "sync/atomic" + "syscall" + "time" + + driversCopy "github.com/containers/storage/drivers/copy" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked/internal" + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/vbatts/tar-split/archive/tar" + "golang.org/x/sys/unix" +) + +// procPathForFile returns an absolute path in /proc which +// refers to the file; see procPathForFd. +func procPathForFile(f *os.File) string { + return procPathForFd(int(f.Fd())) +} + +// procPathForFd returns an absolute path in /proc which +// refers to the file; this allows passing a file descriptor +// in places that don't accept a file descriptor. +func procPathForFd(fd int) string { + return fmt.Sprintf("/proc/self/fd/%d", fd) +} + +// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that +// are not part of the TOC document. +// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk. +type fileMetadata struct { + internal.FileMetadata + + // chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg. + chunks []*internal.FileMetadata + + // skipSetAttrs is set when the file attributes must not be + // modified, e.g. it is a hard link from a different source, + // or a composefs file. + skipSetAttrs bool +} + +func doHardLink(dirfd, srcFd int, destFile string) error { + destDir, destBase := filepath.Split(destFile) + destDirFd := dirfd + if destDir != "" && destDir != "." 
{ + f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + doLink := func() error { + // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses + // /proc/self/fd doesn't and can be used with rootless. + srcPath := procPathForFd(srcFd) + err := unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW) + if err != nil { + return &fs.PathError{Op: "linkat", Path: destFile, Err: err} + } + return nil + } + + err := doLink() + + // if the destination exists, unlink it first and try again + if err != nil && os.IsExist(err) { + if err := unix.Unlinkat(destDirFd, destBase, 0); err != nil { + return err + } + return doLink() + } + return err +} + +func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { + destFile := fileMetadata.Name + src := procPathForFd(srcFd) + st, err := os.Stat(src) + if err != nil { + return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err) + } + + copyWithFileRange, copyWithFileClone := true, true + + if useHardLinks { + err := doHardLink(dirfd, srcFd, destFile) + if err == nil { + // if the file was deduplicated with a hard link, skip overriding file metadata. + fileMetadata.skipSetAttrs = true + return nil, st.Size(), nil + } + } + + // If the destination file already exists, we shouldn't blow it away + dstFile, err := openFileUnderRoot(dirfd, destFile, newFileFlags, mode) + if err != nil { + return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err) + } + + err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) + if err != nil { + dstFile.Close() + return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err) + } + return dstFile, st.Size(), nil +} + +func timeToTimespec(time *time.Time) (ts unix.Timespec) { + if time == nil || time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return unix.NsecToTimespec(time.UnixNano()) +} + +// chown changes the owner and group of the file at the specified path under the directory +// pointed by dirfd. +// If nofollow is true, the function will not follow symlinks. +// If path is empty, the function will change the owner and group of the file descriptor. +// absolutePath is the absolute path of the file, used only for error messages. 
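
An aside on the magic constant in `timeToTimespec` above: a `tv_nsec` of `((1 << 30) - 2)` is `UTIME_OMIT` from utimensat(2), which tells the kernel to leave that timestamp unchanged. A minimal, Linux-only check (standalone sketch, not part of the vendored code):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// x/sys/unix exposes the same value timeToTimespec hard-codes.
	fmt.Printf("0x%x\n", unix.UTIME_OMIT)           // 0x3ffffffe
	fmt.Println(unix.UTIME_OMIT == ((1 << 30) - 2)) // true
}
```
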
+func chown(dirfd int, path string, uid, gid int, nofollow bool, absolutePath string) error { + var err error + flags := 0 + if nofollow { + flags |= unix.AT_SYMLINK_NOFOLLOW + } else if path == "" { + flags |= unix.AT_EMPTY_PATH + } + err = unix.Fchownat(dirfd, path, uid, gid, flags) + if err == nil { + return nil + } + if errors.Is(err, syscall.EINVAL) { + return fmt.Errorf(`potentially insufficient UIDs or GIDs available in the user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, path, err) + } + return &fs.PathError{Op: "fchownat", Path: absolutePath, Err: err} +} + +// setFileAttrs sets the file attributes for file given metadata +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error { + if metadata.skipSetAttrs { + return nil + } + if file == nil { + return errors.New("invalid file") + } + fd := int(file.Fd()) + + t, err := typeToTarType(metadata.Type) + if err != nil { + return err + } + + // If it is a symlink, force to use the path + if t == tar.TypeSymlink { + usePath = true + } + + baseName := "" + if usePath { + dirName := filepath.Dir(metadata.Name) + if dirName != "" { + parentFd, err := openFileUnderRoot(dirfd, dirName, unix.O_PATH|unix.O_DIRECTORY, 0) + if err != nil { + return err + } + defer parentFd.Close() + + dirfd = int(parentFd.Fd()) + } + baseName = filepath.Base(metadata.Name) + } + + doChown := func() error { + var err error + if usePath { + err = chown(dirfd, baseName, metadata.UID, metadata.GID, true, metadata.Name) + } else { + err = chown(fd, "", metadata.UID, metadata.GID, false, metadata.Name) + } + if options.IgnoreChownErrors { + return nil + } + return err + } + + doSetXattr := func(k string, v []byte) error { + err := unix.Fsetxattr(fd, k, v, 0) + if err != nil { + return &fs.PathError{Op: "fsetxattr", Path: metadata.Name, Err: err} + } + return nil + } + + doUtimes := func() error { + ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} + var err error + if usePath { + err = unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) + } else { + err = unix.UtimesNanoAt(unix.AT_FDCWD, procPathForFd(fd), ts, 0) + } + if err != nil { + return &fs.PathError{Op: "utimensat", Path: metadata.Name, Err: err} + } + return nil + } + + doChmod := func() error { + var err error + op := "" + if usePath { + err = unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + op = "fchmodat" + } else { + err = unix.Fchmod(fd, uint32(mode)) + op = "fchmod" + } + if err != nil { + return &fs.PathError{Op: op, Path: metadata.Name, Err: err} + } + return nil + } + + if err := doChown(); err != nil { + return err + } + + canIgnore := func(err error) bool { + return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) + } + + for k, v := range metadata.Xattrs { + if _, found := xattrsToIgnore[k]; found { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return fmt.Errorf("decode xattr %q: %w", v, err) + } + if err := doSetXattr(k, data); !canIgnore(err) { + return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err) + } + } + + if err := doUtimes(); !canIgnore(err) { + return err + } + + if err := doChmod(); !canIgnore(err) { + return err + } + return nil +} + +func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + root := 
procPathForFd(dirfd) + + targetRoot, err := os.Readlink(root) + if err != nil { + return -1, err + } + + hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 + + var fd int + // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the + // last component as the path to openat(). + if hasNoFollow { + dirName, baseName := filepath.Split(name) + if dirName != "" && dirName != "." { + newRoot, err := securejoin.SecureJoin(root, dirName) + if err != nil { + return -1, err + } + root = newRoot + } + + parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0) + if err != nil { + return -1, &fs.PathError{Op: "open", Path: root, Err: err} + } + defer unix.Close(parentDirfd) + + fd, err = unix.Openat(parentDirfd, baseName, int(flags), uint32(mode)) + if err != nil { + return -1, &fs.PathError{Op: "openat", Path: name, Err: err} + } + } else { + newPath, err := securejoin.SecureJoin(root, name) + if err != nil { + return -1, err + } + fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode)) + if err != nil { + return -1, &fs.PathError{Op: "openat", Path: newPath, Err: err} + } + } + + target, err := os.Readlink(procPathForFd(fd)) + if err != nil { + unix.Close(fd) + return -1, err + } + + // Add an additional check to make sure the opened fd is inside the rootfs + if !strings.HasPrefix(target, targetRoot) { + unix.Close(fd) + return -1, fmt.Errorf("while resolving %q. It resolves outside the root directory", name) + } + + return fd, err +} + +func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + how := unix.OpenHow{ + Flags: flags, + Mode: uint64(mode & 0o7777), + Resolve: unix.RESOLVE_IN_ROOT, + } + fd, err := unix.Openat2(dirfd, name, &how) + if err != nil { + return -1, &fs.PathError{Op: "openat2", Path: name, Err: err} + } + return fd, nil +} + +// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid +// using it again. +var skipOpenat2 int32 + +// openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a +// userspace lookup. +func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + var fd int + var err error + if name == "" { + fd, err := unix.Dup(dirfd) + if err != nil { + return -1, fmt.Errorf("failed to duplicate file descriptor %d: %w", dirfd, err) + } + return fd, nil + } + if atomic.LoadInt32(&skipOpenat2) > 0 { + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } else { + fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode) + // If the function failed with ENOSYS, switch off the support for openat2 + // and fallback to using safejoin. + if err != nil && errors.Is(err, unix.ENOSYS) { + atomic.StoreInt32(&skipOpenat2, 1) + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } + } + return fd, err +} + +// openFileUnderRoot safely opens a file under the specified root directory using openat2 +// dirfd is an open file descriptor to the target checkout directory. +// name is the path to open relative to dirfd. +// flags are the flags to pass to the open syscall. +// mode specifies the mode to use for newly created files. 
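
The fallback logic above keys off `ENOSYS` to detect kernels without openat2(2) and then switches to the securejoin-based userspace lookup. A hypothetical standalone probe of the same signal (`supportsOpenat2` is illustrative only; note that seccomp filters can surface errnos other than `ENOSYS`, which this sketch, like the vendored code, treats as "supported"):

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// supportsOpenat2 reports whether openat2(2) with RESOLVE_IN_ROOT works here,
// using the same ENOSYS test the fallback above relies on.
func supportsOpenat2() bool {
	how := unix.OpenHow{
		Flags:   unix.O_PATH | unix.O_CLOEXEC,
		Resolve: unix.RESOLVE_IN_ROOT,
	}
	fd, err := unix.Openat2(unix.AT_FDCWD, ".", &how)
	if err != nil {
		return !errors.Is(err, unix.ENOSYS)
	}
	unix.Close(fd)
	return true
}

func main() {
	fmt.Println("openat2 supported:", supportsOpenat2())
}
```
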
+func openFileUnderRoot(dirfd int, name string, flags uint64, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + hasCreate := (flags & unix.O_CREAT) != 0 + if errors.Is(err, unix.ENOENT) && hasCreate { + parent := filepath.Dir(name) + if parent != "" { + newDirfd, err2 := openOrCreateDirUnderRoot(dirfd, parent, 0) + if err2 == nil { + defer newDirfd.Close() + fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } + } + } + return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) +} + +// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. +// dirfd is an open file descriptor to the target checkout directory. +// name is the path to open relative to dirfd. +// mode specifies the mode to use for newly created files. +func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + if errors.Is(err, unix.ENOENT) { + parent := filepath.Dir(name) + if parent != "" { + pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode) + if err2 != nil { + return nil, err + } + defer pDir.Close() + + baseName := filepath.Base(name) + + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, uint32(mode)); err2 != nil { + return nil, &fs.PathError{Op: "mkdirat", Path: name, Err: err2} + } + + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } + } + return nil, err +} + +// appendHole creates a hole with the specified size at the open fd. +// fd is the open file descriptor. +// name is the path to use for error messages. +// size is the size of the hole to create. +func appendHole(fd int, name string, size int64) error { + off, err := unix.Seek(fd, size, unix.SEEK_CUR) + if err != nil { + return &fs.PathError{Op: "seek", Path: name, Err: err} + } + // Make sure the file size is changed. It might be the last hole and no other data written afterwards. + if err := unix.Ftruncate(fd, off); err != nil { + return &fs.PathError{Op: "ftruncate", Path: name, Err: err} + } + return nil +} + +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error { + parent, base := filepath.Split(name) + parentFd := dirfd + if parent != "" && parent != "." 
{ + parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0) + if err != nil { + return err + } + defer parentFile.Close() + parentFd = int(parentFile.Fd()) + } + + if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { + if !os.IsExist(err) { + return &fs.PathError{Op: "mkdirat", Path: name, Err: err} + } + } + + file, err := openFileUnderRoot(parentFd, base, unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err != nil { + return err + } + defer file.Close() + + return setFileAttrs(dirfd, file, mode, metadata, options, false) +} + +func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error { + sourceFile, err := openFileUnderRoot(dirfd, metadata.Linkname, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer sourceFile.Close() + + err = doHardLink(dirfd, int(sourceFile.Fd()), metadata.Name) + if err != nil { + return err + } + + newFile, err := openFileUnderRoot(dirfd, metadata.Name, unix.O_WRONLY|unix.O_NOFOLLOW, 0) + if err != nil { + // If the target is a symlink, open the file with O_PATH. + if errors.Is(err, unix.ELOOP) { + newFile, err := openFileUnderRoot(dirfd, metadata.Name, unix.O_PATH|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, true) + } + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, false) +} + +func safeSymlink(dirfd int, metadata *fileMetadata) error { + destDir, destBase := filepath.Split(metadata.Name) + destDirFd := dirfd + if destDir != "" && destDir != "." { + f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { + return &fs.PathError{Op: "symlinkat", Path: metadata.Name, Err: err} + } + return nil +} + +type whiteoutHandler struct { + Dirfd int + Root string +} + +func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { + file, err := openOrCreateDirUnderRoot(d.Dirfd, path, 0) + if err != nil { + return err + } + defer file.Close() + + if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { + return &fs.PathError{Op: "fsetxattr", Path: path, Err: err} + } + return nil +} + +func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { + dir, base := filepath.Split(path) + dirfd := d.Dirfd + if dir != "" && dir != "." { + dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0) + if err != nil { + return err + } + defer dir.Close() + + dirfd = int(dir.Fd()) + } + + if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { + return &fs.PathError{Op: "mknodat", Path: path, Err: err} + } + + return nil +} + +func (d whiteoutHandler) Chown(path string, uid, gid int) error { + file, err := openFileUnderRoot(d.Dirfd, path, unix.O_PATH, 0) + if err != nil { + return err + } + defer file.Close() + + return chown(int(file.Fd()), "", uid, gid, false, path) +} + +type readerAtCloser interface { + io.ReaderAt + io.Closer +} + +// seekableFile is a struct that wraps an *os.File to provide an ImageSourceSeekable. 
+type seekableFile struct {
+	reader readerAtCloser
+}
+
+func (f *seekableFile) Close() error {
+	return f.reader.Close()
+}
+
+func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	streams := make(chan io.ReadCloser)
+	errs := make(chan error)
+
+	go func() {
+		for _, chunk := range chunks {
+			streams <- io.NopCloser(io.NewSectionReader(f.reader, int64(chunk.Offset), int64(chunk.Length)))
+		}
+		close(streams)
+		close(errs)
+	}()
+
+	return streams, errs, nil
+}
+
+func newSeekableFile(reader readerAtCloser) *seekableFile {
+	return &seekableFile{reader: reader}
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
index 5decbfb6..2a24e4bf 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -5,25 +5,57 @@
 // larger software like the graph drivers.
 
 import (
-	"archive/tar"
 	"bytes"
+	"encoding/base64"
 	"encoding/binary"
 	"fmt"
 	"io"
+	"strings"
 	"time"
 
+	"github.com/containers/storage/pkg/archive"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/klauspost/compress/zstd"
 	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
 )
 
+// TOC is short for Table of Contents and is used by the zstd:chunked
+// file format to effectively add an overall index into the contents
+// of a tarball; it also includes file metadata.
 type TOC struct {
-	Version        int            `json:"version"`
-	Entries        []FileMetadata `json:"entries"`
-	TarSplitDigest digest.Digest  `json:"tarSplitDigest,omitempty"`
+	// Version is currently expected to be 1
+	Version int `json:"version"`
+	// Entries is the list of file metadata in this TOC.
+	// The ordering in this array currently defaults to being the same
+	// as that of the tar stream; however, this should not be relied on.
+	Entries []FileMetadata `json:"entries"`
+	// TarSplitDigest is the checksum of the "tar-split" data which
+	// is included as a distinct skippable zstd frame before the TOC.
+	TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
 }
 
+// FileMetadata is an entry in the TOC that includes both generic file metadata
+// that duplicates what can be found in the tar header (and should match), but
+// also special/custom content (see below).
+//
+// Regular files may optionally be represented as a sequence of "chunks",
+// which may be ChunkTypeData or ChunkTypeZeros (and ChunkTypeData boundaries
+// are heuristically determined to increase chance of chunk matching / reuse
+// similar to rsync). In that case, the regular file is represented
+// as an initial TypeReg entry (with all metadata for the file as a whole)
+// immediately followed by zero or more TypeChunk entries (containing only Type,
+// Name and Chunk* fields); if there is at least one TypeChunk entry, the Chunk*
+// fields are relevant in all of these entries, including the initial
+// TypeReg one.
+//
+// Note that the metadata here, when fetched by a zstd:chunked aware client,
+// is used instead of that in the tar stream. The contents of the tar stream
+// are not used in this scenario.
 type FileMetadata struct {
+	// If you add any fields, update ensureFileMetadataAttributesMatch as well!
+
+	// The metadata below largely duplicates that in the tar headers.
 	Type     string            `json:"type"`
 	Name     string            `json:"name"`
 	Linkname string            `json:"linkName,omitempty"`
@@ -37,9 +69,11 @@ type FileMetadata struct {
 	Devmajor int64             `json:"devMajor,omitempty"`
 	Devminor int64             `json:"devMinor,omitempty"`
 	Xattrs   map[string]string `json:"xattrs,omitempty"`
-	Digest    string `json:"digest,omitempty"`
-	Offset    int64  `json:"offset,omitempty"`
-	EndOffset int64  `json:"endOffset,omitempty"`
+	// Digest is a hexadecimal sha256 checksum of the file contents; it
+	// is empty for empty files
+	Digest    string `json:"digest,omitempty"`
+	Offset    int64  `json:"offset,omitempty"`
+	EndOffset int64  `json:"endOffset,omitempty"`
 
 	ChunkSize   int64  `json:"chunkSize,omitempty"`
 	ChunkOffset int64  `json:"chunkOffset,omitempty"`
@@ -53,19 +87,23 @@
 )
 
 const (
+	// The following types correspond to regular types of entries that can
+	// appear in a tar archive.
 	TypeReg     = "reg"
-	TypeChunk   = "chunk"
 	TypeLink    = "hardlink"
 	TypeChar    = "char"
 	TypeBlock   = "block"
 	TypeDir     = "dir"
 	TypeFifo    = "fifo"
 	TypeSymlink = "symlink"
+	// TypeChunk is special; in zstd:chunked not only are files individually
+	// compressed and indexable, there is a "rolling checksum" used to compute
+	// "chunks" of individual file contents, that are also added to the TOC
+	TypeChunk = "chunk"
 )
 
 var TarTypes = map[byte]string{
 	tar.TypeReg:     TypeReg,
-	tar.TypeRegA:    TypeReg,
 	tar.TypeLink:    TypeLink,
 	tar.TypeChar:    TypeChar,
 	tar.TypeBlock:   TypeBlock,
@@ -83,11 +121,23 @@ func GetType(t byte) (string, error) {
 }
 
 const (
+	// ManifestChecksumKey is a hexadecimal sha256 digest of the compressed manifest digest.
 	ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
-	ManifestInfoKey     = "io.github.containers.zstd-chunked.manifest-position"
-	TarSplitInfoKey     = "io.github.containers.zstd-chunked.tarsplit-position"
-
-	TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // Deprecated: Use the TOC.TarSplitDigest field instead, this annotation is no longer read nor written.
+	// ManifestInfoKey is an annotation that signals the start of the TOC (manifest)
+	// contents which are embedded as a skippable zstd frame. It has a format of
+	// four decimal integers separated by `:` as follows:
+	// <offset>:<length>:<uncompressed length>:<type>
+	// The <type> is ManifestTypeCRFS which should have the value `1`.
+	ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
+	// TarSplitInfoKey is an annotation that signals the start of the "tar-split" metadata
+	// contents which are embedded as a skippable zstd frame. It has a format of
+	// three decimal integers separated by `:` as follows:
+	// <offset>:<length>:<uncompressed length>
+	TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
+
+	// TarSplitChecksumKey is no longer used and is replaced by the TOC.TarSplitDigest field instead.
+	// The value is retained here as a comment as a historical reference for older zstd:chunked images.
+	// TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
 
 	// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
 	ManifestTypeCRFS = 1
@@ -232,3 +282,43 @@ func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
 
 	return manifestDataLE
 }
+
+// timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
+func timeIfNotZero(t *time.Time) *time.Time {
+	if t == nil || t.IsZero() {
+		return nil
+	}
+	return t
+}
+
+// NewFileMetadata creates a basic FileMetadata entry for hdr.
+// The caller must set Digest/Offset/EndOffset, and the Chunk* values, separately.
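
As an aside on the `<offset>:<length>:<uncompressed length>:<type>` format documented for `ManifestInfoKey` above, here is a hypothetical parser sketch (`manifestPosition` and `parseManifestPosition` are illustrative names, not part of the vendored code):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// manifestPosition holds the four decimal fields of the
// zstd-chunked.manifest-position annotation value.
type manifestPosition struct {
	Offset, Length, UncompressedLength, Type uint64
}

func parseManifestPosition(v string) (manifestPosition, error) {
	parts := strings.Split(v, ":")
	if len(parts) != 4 {
		return manifestPosition{}, fmt.Errorf("expected 4 fields, got %d", len(parts))
	}
	var fields [4]uint64
	for i, p := range parts {
		n, err := strconv.ParseUint(p, 10, 64)
		if err != nil {
			return manifestPosition{}, fmt.Errorf("field %d: %w", i, err)
		}
		fields[i] = n
	}
	return manifestPosition{fields[0], fields[1], fields[2], fields[3]}, nil
}

func main() {
	p, err := parseManifestPosition("123456:789:4096:1") // 1 == ManifestTypeCRFS
	fmt.Println(p, err)
}
```
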
+func NewFileMetadata(hdr *tar.Header) (FileMetadata, error) { + typ, err := GetType(hdr.Typeflag) + if err != nil { + return FileMetadata{}, err + } + xattrs := make(map[string]string) + for k, v := range hdr.PAXRecords { + xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr) + if !ok { + continue + } + xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v)) + } + return FileMetadata{ + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + UID: hdr.Uid, + GID: hdr.Gid, + ModTime: timeIfNotZero(&hdr.ModTime), + AccessTime: timeIfNotZero(&hdr.AccessTime), + ChangeTime: timeIfNotZero(&hdr.ChangeTime), + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index e001022c..403d7d5a 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -8,20 +8,18 @@ import ( "fmt" "hash" "io" + "io/fs" "os" "path/filepath" "reflect" "sort" "strings" "sync" - "sync/atomic" "syscall" - "time" "github.com/containerd/stargz-snapshotter/estargz" storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" - driversCopy "github.com/containers/storage/drivers/copy" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" @@ -29,8 +27,6 @@ import ( "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" - "github.com/containers/storage/types" - securejoin "github.com/cyphar/filepath-securejoin" jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" @@ -42,9 +38,8 @@ import ( const ( maxNumberMissingChunks = 1024 - autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them. + autoMergePartsThreshold = 1024 // if the gap between two ranges is below this threshold, automatically merge them. newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY) - containersOverrideXattr = "user.containers.override_stat" bigDataKey = "zstd-chunked-manifest" chunkedData = "zstd-chunked-data" chunkedLayerDataKey = "zstd-chunked-layer-data" @@ -59,21 +54,6 @@ const ( copyGoRoutines = 32 ) -// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that -// are not part of the TOC document. -// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk. -type fileMetadata struct { - internal.FileMetadata - - // chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg. - chunks []*internal.FileMetadata - - // skipSetAttrs is set when the file attributes must not be - // modified, e.g. it is a hard link from a different source, - // or a composefs file. 
- skipSetAttrs bool -} - type compressedFileType int type chunkedDiffer struct { @@ -111,7 +91,7 @@ type chunkedDiffer struct { blobSize int64 - storeOpts *types.StoreOptions + pullOptions map[string]string useFsVerity graphdriver.DifferFsVerity fsVerityDigests map[string]string @@ -127,98 +107,7 @@ type chunkedLayerData struct { Format graphdriver.DifferOutputFormat `json:"format"` } -func timeToTimespec(time *time.Time) (ts unix.Timespec) { - if time == nil || time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return unix.NsecToTimespec(time.UnixNano()) -} - -func doHardLink(srcFd int, destDirFd int, destBase string) error { - doLink := func() error { - // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses - // /proc/self/fd doesn't and can be used with rootless. - srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) - return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW) - } - - err := doLink() - - // if the destination exists, unlink it first and try again - if err != nil && os.IsExist(err) { - unix.Unlinkat(destDirFd, destBase, 0) - return doLink() - } - return err -} - -func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { - destFile := fileMetadata.Name - src := fmt.Sprintf("/proc/self/fd/%d", srcFd) - st, err := os.Stat(src) - if err != nil { - return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err) - } - - copyWithFileRange, copyWithFileClone := true, true - - if useHardLinks { - destDirPath := filepath.Dir(destFile) - destBase := filepath.Base(destFile) - destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode) - if err == nil { - defer destDir.Close() - - err := doHardLink(srcFd, int(destDir.Fd()), destBase) - if err == nil { - // if the file was deduplicated with a hard link, skip overriding file metadata. 
- fileMetadata.skipSetAttrs = true - return nil, st.Size(), nil - } - } - } - - // If the destination file already exists, we shouldn't blow it away - dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode) - if err != nil { - return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err) - } - - err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) - if err != nil { - dstFile.Close() - return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err) - } - return dstFile, st.Size(), nil -} - -type seekableFile struct { - file *os.File -} - -func (f *seekableFile) Close() error { - return f.file.Close() -} - -func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - streams := make(chan io.ReadCloser) - errs := make(chan error) - - go func() { - for _, chunk := range chunks { - streams <- io.NopCloser(io.NewSectionReader(f.file, int64(chunk.Offset), int64(chunk.Length))) - } - close(streams) - close(errs) - }() - - return streams, errs, nil -} - -func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) { +func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) { diff, err := archive.DecompressStream(payload) if err != nil { return 0, nil, "", nil, err @@ -226,7 +115,7 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) if err != nil { - return 0, nil, "", nil, err + return 0, nil, "", nil, &fs.PathError{Op: "open", Path: destDirectory, Err: err} } f := os.NewFile(uintptr(fd), destDirectory) @@ -240,7 +129,7 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se } convertedOutputDigester := digest.Canonical.Digester() - copied, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff) + copied, err := io.CopyBuffer(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff, c.copyBuffer) if err != nil { f.Close() return 0, nil, "", nil, err @@ -249,21 +138,15 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se f.Close() return 0, nil, "", nil, err } - is := seekableFile{ - file: f, - } - return copied, &is, convertedOutputDigester.Digest(), newAnnotations, nil + return copied, newSeekableFile(f), convertedOutputDigester.Digest(), newAnnotations, nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
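
The `GetBlobAt` implementations above (the one removed here and its replacement in the new filesystem_linux.go) share the same contract: chunks are streamed in request order and both channels are closed when the goroutine finishes, so the consumer must drain `streams` before checking `errs`. A self-contained consumer sketch under that assumed contract, with a toy in-memory source standing in for `seekableFile` (all names local to the sketch):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// Minimal local stand-in for the package type of the same name.
type ImageSourceChunk struct{ Offset, Length uint64 }

type blobSource interface {
	GetBlobAt([]ImageSourceChunk) (chan io.ReadCloser, chan error, error)
}

// memSource mimics seekableFile.GetBlobAt over an in-memory buffer.
type memSource struct{ data []byte }

func (m memSource) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	streams := make(chan io.ReadCloser)
	errs := make(chan error)
	go func() {
		r := bytes.NewReader(m.data)
		for _, c := range chunks {
			streams <- io.NopCloser(io.NewSectionReader(r, int64(c.Offset), int64(c.Length)))
		}
		close(streams)
		close(errs)
	}()
	return streams, errs, nil
}

// drain reads every requested chunk in order, then checks the error channel
// once the stream channel is exhausted.
func drain(src blobSource, chunks []ImageSourceChunk) ([][]byte, error) {
	streams, errs, err := src.GetBlobAt(chunks)
	if err != nil {
		return nil, err
	}
	var out [][]byte
	for r := range streams {
		data, err := io.ReadAll(r)
		r.Close()
		if err != nil {
			return nil, err
		}
		out = append(out, data)
	}
	if err := <-errs; err != nil { // a closed channel yields nil
		return nil, err
	}
	return out, nil
}

func main() {
	src := memSource{data: []byte("hello, zstd:chunked world")}
	parts, err := drain(src, []ImageSourceChunk{{Offset: 0, Length: 5}, {Offset: 7, Length: 4}})
	fmt.Printf("%q %v\n", parts, err) // ["hello" "zstd"] <nil>
}
```
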
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { - storeOpts, err := types.DefaultStoreOptions() - if err != nil { - return nil, err - } + pullOptions := store.PullOptions() - if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) { + if !parseBooleanPullOption(pullOptions, "enable_partial_images", true) { return nil, errors.New("enable_partial_images not configured") } @@ -279,21 +162,21 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges if err != nil { return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err) } - return makeZstdChunkedDiffer(ctx, store, blobSize, zstdChunkedTOCDigest, annotations, iss, &storeOpts) + return makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions) } if hasEstargzTOC { estargzTOCDigest, err := digest.Parse(estargzTOCDigestString) if err != nil { return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err) } - return makeEstargzChunkedDiffer(ctx, store, blobSize, estargzTOCDigest, iss, &storeOpts) + return makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions) } - return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts) + return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions) } -func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { - if !parseBooleanPullOption(storeOpts, "convert_images", false) { +func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) { + if !parseBooleanPullOption(pullOptions, "convert_images", false) { return nil, errors.New("convert_images not configured") } @@ -309,12 +192,12 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige convertToZstdChunked: true, copyBuffer: makeCopyBuffer(), layersCache: layersCache, - storeOpts: storeOpts, + pullOptions: pullOptions, stream: iss, }, nil } -func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { +func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) { manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) @@ -333,14 +216,14 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in layersCache: layersCache, manifest: manifest, toc: toc, - storeOpts: storeOpts, + pullOptions: pullOptions, stream: iss, tarSplit: tarSplit, tocOffset: tocOffset, }, nil } -func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { +func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, 
pullOptions map[string]string) (*chunkedDiffer, error) { manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) @@ -358,7 +241,7 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize fileType: fileTypeEstargz, layersCache: layersCache, manifest: manifest, - storeOpts: storeOpts, + pullOptions: pullOptions, stream: iss, tocOffset: tocOffset, }, nil @@ -375,15 +258,15 @@ func makeCopyBuffer() []byte { // dirfd is an open file descriptor to the destination root directory. // useHardLinks defines whether the deduplication can be performed using hard links. func copyFileFromOtherLayer(file *fileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { - srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0) + srcDirfd, err := unix.Open(source, unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { - return false, nil, 0, fmt.Errorf("open source file: %w", err) + return false, nil, 0, &fs.PathError{Op: "open", Path: source, Err: err} } defer unix.Close(srcDirfd) - srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0) + srcFile, err := openFileUnderRoot(srcDirfd, name, unix.O_RDONLY|syscall.O_CLOEXEC, 0) if err != nil { - return false, nil, 0, fmt.Errorf("open source file under target rootfs (%s): %w", name, err) + return false, nil, 0, err } defer srcFile.Close() @@ -420,7 +303,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool { return false } - path := fmt.Sprintf("/proc/self/fd/%d", fd) + path := procPathForFd(fd) listXattrs, err := system.Llistxattr(path) if err != nil { @@ -476,7 +359,7 @@ func findFileInOSTreeRepos(file *fileMetadata, ostreeRepos []string, dirfd int, if st.Size() != file.Size { continue } - fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0) + fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK|unix.O_CLOEXEC, 0) if err != nil { logrus.Debugf("could not open sourceFile %s: %v", sourceFile, err) return false, nil, 0, nil @@ -585,15 +468,15 @@ type missingPart struct { } func (o *originFile) OpenFile() (io.ReadCloser, error) { - srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) + srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { - return nil, fmt.Errorf("open source file: %w", err) + return nil, &fs.PathError{Op: "open", Path: o.Root, Err: err} } defer unix.Close(srcDirfd) - srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) + srcFile, err := openFileUnderRoot(srcDirfd, o.Path, unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { - return nil, fmt.Errorf("open source file under target rootfs: %w", err) + return nil, err } if _, err := srcFile.Seek(o.Offset, 0); err != nil { @@ -603,253 +486,6 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) { return srcFile, nil } -// setFileAttrs sets the file attributes for file given metadata -func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error { - if metadata.skipSetAttrs { - return nil - } - if file == nil || file.Fd() < 0 { - return errors.New("invalid file") - } - fd := int(file.Fd()) - - t, err := typeToTarType(metadata.Type) - if err != nil { - return err - } - - // If it is a symlink, force to use the path - if t == tar.TypeSymlink { - usePath = true - } - - baseName := "" - if usePath { - dirName := filepath.Dir(metadata.Name) - if dirName != "" { - 
parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0) - if err != nil { - return err - } - defer parentFd.Close() - - dirfd = int(parentFd.Fd()) - } - baseName = filepath.Base(metadata.Name) - } - - doChown := func() error { - if usePath { - return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) - } - return unix.Fchown(fd, metadata.UID, metadata.GID) - } - - doSetXattr := func(k string, v []byte) error { - return unix.Fsetxattr(fd, k, v, 0) - } - - doUtimes := func() error { - ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} - if usePath { - return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) - } - return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) - } - - doChmod := func() error { - if usePath { - return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) - } - return unix.Fchmod(fd, uint32(mode)) - } - - if err := doChown(); err != nil { - if !options.IgnoreChownErrors { - return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err) - } - } - - canIgnore := func(err error) bool { - return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) - } - - for k, v := range metadata.Xattrs { - if _, found := xattrsToIgnore[k]; found { - continue - } - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return fmt.Errorf("decode xattr %q: %w", v, err) - } - if err := doSetXattr(k, data); !canIgnore(err) { - return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err) - } - } - - if err := doUtimes(); !canIgnore(err) { - return fmt.Errorf("set utimes for %q: %w", metadata.Name, err) - } - - if err := doChmod(); !canIgnore(err) { - return fmt.Errorf("chmod %q: %w", metadata.Name, err) - } - return nil -} - -func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { - root := fmt.Sprintf("/proc/self/fd/%d", dirfd) - - targetRoot, err := os.Readlink(root) - if err != nil { - return -1, err - } - - hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 - - var fd int - // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the - // last component as the path to openat(). - if hasNoFollow { - dirName := filepath.Dir(name) - if dirName != "" { - newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name)) - if err != nil { - return -1, err - } - root = newRoot - } - - parentDirfd, err := unix.Open(root, unix.O_PATH, 0) - if err != nil { - return -1, err - } - defer unix.Close(parentDirfd) - - fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode)) - if err != nil { - return -1, err - } - } else { - newPath, err := securejoin.SecureJoin(root, name) - if err != nil { - return -1, err - } - fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode)) - if err != nil { - return -1, err - } - } - - target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd)) - if err != nil { - unix.Close(fd) - return -1, err - } - - // Add an additional check to make sure the opened fd is inside the rootfs - if !strings.HasPrefix(target, targetRoot) { - unix.Close(fd) - return -1, fmt.Errorf("while resolving %q. 
It resolves outside the root directory", name) - } - - return fd, err -} - -func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { - how := unix.OpenHow{ - Flags: flags, - Mode: uint64(mode & 0o7777), - Resolve: unix.RESOLVE_IN_ROOT, - } - return unix.Openat2(dirfd, name, &how) -} - -// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid -// using it again. -var skipOpenat2 int32 - -// openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a -// userspace lookup. -func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { - var fd int - var err error - if atomic.LoadInt32(&skipOpenat2) > 0 { - fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) - } else { - fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode) - // If the function failed with ENOSYS, switch off the support for openat2 - // and fallback to using safejoin. - if err != nil && errors.Is(err, unix.ENOSYS) { - atomic.StoreInt32(&skipOpenat2, 1) - fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) - } - } - return fd, err -} - -// openFileUnderRoot safely opens a file under the specified root directory using openat2 -// name is the path to open relative to dirfd. -// dirfd is an open file descriptor to the target checkout directory. -// flags are the flags to pass to the open syscall. -// mode specifies the mode to use for newly created files. -func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { - fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) - if err == nil { - return os.NewFile(uintptr(fd), name), nil - } - - hasCreate := (flags & unix.O_CREAT) != 0 - if errors.Is(err, unix.ENOENT) && hasCreate { - parent := filepath.Dir(name) - if parent != "" { - newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) - if err2 == nil { - defer newDirfd.Close() - fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) - if err == nil { - return os.NewFile(uintptr(fd), name), nil - } - } - } - } - return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) -} - -// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. -// name is the path to open relative to dirfd. -// dirfd is an open file descriptor to the target checkout directory. -// mode specifies the mode to use for newly created files. 
-func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) { - fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode) - if err == nil { - return os.NewFile(uintptr(fd), name), nil - } - - if errors.Is(err, unix.ENOENT) { - parent := filepath.Dir(name) - if parent != "" { - pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode) - if err2 != nil { - return nil, err - } - defer pDir.Close() - - baseName := filepath.Base(name) - - if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0o755); err2 != nil { - return nil, err - } - - fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode) - if err == nil { - return os.NewFile(uintptr(fd), name), nil - } - } - } - return nil, err -} - func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { switch { case partCompression == fileTypeHole: @@ -918,23 +554,14 @@ func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { return nil } -// appendHole creates a hole with the specified size at the open fd. -func appendHole(fd int, size int64) error { - off, err := unix.Seek(fd, size, unix.SEEK_CUR) - if err != nil { - return err - } - // Make sure the file size is changed. It might be the last hole and no other data written afterwards. - if err := unix.Ftruncate(fd, off); err != nil { - return err - } - return nil -} - func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { switch compression { case fileTypeZstdChunked: - defer c.zstdReader.Reset(nil) + defer func() { + if err := c.zstdReader.Reset(nil); err != nil { + logrus.Warnf("release of references to the previous zstd reader failed: %v", err) + } + }() if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { return err } @@ -948,7 +575,7 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT return err } case fileTypeHole: - if err := appendHole(int(destFile.file.Fd()), size); err != nil { + if err := appendHole(int(destFile.file.Fd()), destFile.metadata.Name, size); err != nil { return err } if destFile.hash != nil { @@ -977,7 +604,7 @@ type destinationFile struct { } func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { - file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) + file, err := openFileUnderRoot(dirfd, metadata.Name, newFileFlags, 0) if err != nil { return nil, err } @@ -1080,7 +707,7 @@ func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error { return nil } -func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { +func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { var destFile *destinationFile filesToClose := make(chan *destinationFile, 3) @@ -1294,7 +921,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { return newMissingParts } -func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { +func (c 
*chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { var chunksToRequest []ImageSourceChunk calculateChunksToRequest := func() { @@ -1333,164 +960,9 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st return err } - if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { - return err - } - return nil -} - -func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error { - parent := filepath.Dir(name) - base := filepath.Base(name) - - parentFd := dirfd - if parent != "." { - parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0) - if err != nil { - return err - } - defer parentFile.Close() - parentFd = int(parentFile.Fd()) - } - - if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { - if !os.IsExist(err) { - return fmt.Errorf("mkdir %q: %w", name, err) - } - } - - file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0) - if err != nil { - return err - } - defer file.Close() - - return setFileAttrs(dirfd, file, mode, metadata, options, false) -} - -func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error { - sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) - if err != nil { - return err - } - defer sourceFile.Close() - - destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) - destDirFd := dirfd - if destDir != "." { - f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) - if err != nil { - return err - } - defer f.Close() - destDirFd = int(f.Fd()) - } - - err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase) - if err != nil { - return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) - } - - newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0) - if err != nil { - // If the target is a symlink, open the file with O_PATH. - if errors.Is(err, unix.ELOOP) { - newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0) - if err != nil { - return err - } - defer newFile.Close() - - return setFileAttrs(dirfd, newFile, mode, metadata, options, true) - } - return err - } - defer newFile.Close() - - return setFileAttrs(dirfd, newFile, mode, metadata, options, false) -} - -func safeSymlink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error { - destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) - destDirFd := dirfd - if destDir != "." 
{ - f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) - if err != nil { - return err - } - defer f.Close() - destDirFd = int(f.Fd()) - } - - if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { - return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) - } - return nil -} - -type whiteoutHandler struct { - Dirfd int - Root string -} - -func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { - file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0) - if err != nil { + if err := c.storeMissingFiles(streams, errs, dirfd, missingParts, options); err != nil { return err } - defer file.Close() - - if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { - return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err) - } - return nil -} - -func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { - dir := filepath.Dir(path) - base := filepath.Base(path) - - dirfd := d.Dirfd - if dir != "" { - dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0) - if err != nil { - return err - } - defer dir.Close() - - dirfd = int(dir.Fd()) - } - - if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { - return fmt.Errorf("mknod %q: %w", path, err) - } - - return nil -} - -func checkChownErr(err error, name string, uid, gid int) error { - if errors.Is(err, syscall.EINVAL) { - return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err) - } - return err -} - -func (d whiteoutHandler) Chown(path string, uid, gid int) error { - file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0) - if err != nil { - return err - } - defer file.Close() - - if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil { - var stat unix.Stat_t - if unix.Fstat(int(file.Fd()), &stat) == nil { - if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) { - return nil - } - } - return checkChownErr(err, path, uid, gid) - } return nil } @@ -1501,8 +973,8 @@ type hardLinkToCreate struct { metadata *fileMetadata } -func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool { - if value, ok := storeOpts.PullOptions[name]; ok { +func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool { + if value, ok := pullOptions[name]; ok { return strings.ToLower(value) == "true" } return def @@ -1515,10 +987,10 @@ type findAndCopyFileOptions struct { } func reopenFileReadOnly(f *os.File) (*os.File, error) { - path := fmt.Sprintf("/proc/self/fd/%d", f.Fd()) + path := procPathForFile(f) fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { - return nil, err + return nil, &fs.PathError{Op: "open", Path: path, Err: err} } return os.NewFile(uintptr(fd), f.Name()), nil } @@ -1636,7 +1108,7 @@ func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, r := io.TeeReader(payload, originalRawDigester.Hash()) // copy the entire tarball and compute its digest - _, err = io.Copy(destination, r) + _, err = io.CopyBuffer(destination, r, c.copyBuffer) return originalRawDigester.Digest(), err } @@ -1654,13 +1126,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff // stream to use for reading the zstd:chunked or Estargz file. 
stream := c.stream + var compressedDigest digest.Digest var uncompressedDigest digest.Digest var convertedBlobSize int64 if c.convertToZstdChunked { fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) if err != nil { - return graphdriver.DriverWithDifferOutput{}, err + return graphdriver.DriverWithDifferOutput{}, &fs.PathError{Op: "open", Path: dest, Err: err} } blobFile := os.NewFile(uintptr(fd), "blob-file") defer func() { @@ -1670,7 +1143,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff }() // calculate the checksum before accessing the file. - compressedDigest, err := c.copyAllBlobToFile(blobFile) + compressedDigest, err = c.copyAllBlobToFile(blobFile) if err != nil { return graphdriver.DriverWithDifferOutput{}, err } @@ -1683,7 +1156,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff return graphdriver.DriverWithDifferOutput{}, err } - tarSize, fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile) + tarSize, fileSource, diffID, annotations, err := c.convertTarToZstdChunked(dest, blobFile) if err != nil { return graphdriver.DriverWithDifferOutput{}, err } @@ -1756,14 +1229,15 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff }, TOCDigest: c.tocDigest, UncompressedDigest: uncompressedDigest, + CompressedDigest: compressedDigest, } // When the hard links deduplication is used, file attributes are ignored because setting them // modifies the source file as well. - useHardLinks := parseBooleanPullOption(c.storeOpts, "use_hard_links", false) + useHardLinks := parseBooleanPullOption(c.pullOptions, "use_hard_links", false) // List of OSTree repositories to use for deduplication - ostreeRepos := strings.Split(c.storeOpts.PullOptions["ostree_repos"], ":") + ostreeRepos := strings.Split(c.pullOptions["ostree_repos"], ":") whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) @@ -1790,16 +1264,19 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if options.ForceMask != nil { uid, gid, mode, err := archive.GetFileOwner(dest) if err == nil { - value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) - if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { + value := idtools.Stat{ + IDs: idtools.IDPair{UID: int(uid), GID: int(gid)}, + Mode: os.FileMode(mode), + } + if err := idtools.SetContainersOverrideXattr(dest, value); err != nil { return output, err } } } - dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0) + dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH|unix.O_CLOEXEC, 0) if err != nil { - return output, fmt.Errorf("cannot open %q: %w", dest, err) + return output, &fs.PathError{Op: "open", Path: dest, Err: err} } defer unix.Close(dirfd) @@ -1812,7 +1289,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff for _, e := range mergedEntries { d := e.Name[0:2] if _, found := createdDirs[d]; !found { - unix.Mkdirat(dirfd, d, 0o755) + if err := unix.Mkdirat(dirfd, d, 0o755); err != nil { + return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err} + } createdDirs[d] = struct{}{} } } @@ -1868,11 +1347,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } filesToWaitFor := 0 - for i, r := range mergedEntries { + for i := range mergedEntries { + r := &mergedEntries[i] if options.ForceMask != nil { - value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&0o7777) 
- r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) - r.Mode = int64(*options.ForceMask) + value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode)) + if r.Xattrs == nil { + r.Xattrs = make(map[string]string) + } + r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) } mode := os.FileMode(r.Mode) @@ -1916,12 +1398,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if r.Size == 0 { // Used to have a scope for cleanup. createEmptyFile := func() error { - file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0) + file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0) if err != nil { return err } defer file.Close() - if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil { + if err := setFileAttrs(dirfd, file, mode, r, options, false); err != nil { return err } return nil @@ -1936,7 +1418,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if r.Name == "" || r.Name == "." { output.RootDirMode = &mode } - if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { + if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil { return output, err } continue @@ -1950,12 +1432,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff dest: dest, dirfd: dirfd, mode: mode, - metadata: &r, + metadata: r, }) continue case tar.TypeSymlink: - if err := safeSymlink(dirfd, mode, &r, options); err != nil { + if err := safeSymlink(dirfd, r); err != nil { return output, err } continue @@ -2057,7 +1539,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } // There are some missing files. Prepare a multirange request for the missing chunks. if len(missingParts) > 0 { - if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil { + if err := c.retrieveMissingFiles(stream, dirfd, missingParts, options); err != nil { return output, err } } @@ -2167,13 +1649,13 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the // same digest as chunk.ChunkDigest func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { - parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0) if err != nil { return false } defer unix.Close(parentDirfd) - fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0) + fd, err := openFileUnderRoot(parentDirfd, path, unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { return false } diff --git a/vendor/github.com/containers/storage/pkg/chunked/tar_split_linux.go b/vendor/github.com/containers/storage/pkg/chunked/tar_split_linux.go new file mode 100644 index 00000000..aeb1698d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/tar_split_linux.go @@ -0,0 +1,68 @@ +package chunked + +import ( + "bytes" + "fmt" + "io" + + "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/storage" +) + +// iterateTarSplit calls handler for each tar header in tarSplit +func iterateTarSplit(tarSplit []byte, handler func(hdr *tar.Header) error) error { + // This, strictly speaking, hard-codes undocumented assumptions about how github.com/vbatts/tar-split/tar/asm.NewInputTarStream + // forms the tar-split contents. 
Pragmatically, NewInputTarStream should always produce storage.FileType entries at least + for every non-empty file, which constrains it basically to the output we expect. + // + // Specifically, we assume: + // - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions + // - (There is a FileType entry for every tar header, we ignore it) + // - Trailing padding of a file, if any, is included in the next SegmentType entry + // - At the end, there may be SegmentType entries just for the terminating zero blocks. + + unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit)) + for { + tsEntry, err := unpacker.Next() + if err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("reading tar-split entries: %w", err) + } + switch tsEntry.Type { + case storage.SegmentType: + payload := tsEntry.Payload + // This is horrible, but we don’t know how much padding to skip. (It can be computed from the previous hdr.Size for non-sparse + // files, but for sparse files that is set to the logical size.) + // + // First, assume that all padding is zero bytes. + // A tar header starts with a file name, which might in principle be empty, but + // at least https://github.com/opencontainers/image-spec/blob/main/layer.md#populate-initial-filesystem suggests that + // the tar name should never be empty (it should be ".", or maybe "./"). + // + // This will cause us to skip all zero bytes in the trailing blocks, but that’s fine. + i := 0 + for i < len(payload) && payload[i] == 0 { + i++ + } + payload = payload[i:] + tr := tar.NewReader(bytes.NewReader(payload)) + hdr, err := tr.Next() + if err != nil { + if err == io.EOF { // Probably the last entry, but let’s let the unpacker drive that. + break + } + return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err) + } + if err := handler(hdr); err != nil { + return err + } + + case storage.FileType: + // Nothing + default: + return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type) + } + } +} diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index 7f49d029..560df3cf 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -75,10 +75,6 @@ type OptionsConfig struct { // Size Size string `toml:"size,omitempty"` - // RemapUIDs is a list of default UID mappings to use for layers. - RemapUIDs string `toml:"remap-uids,omitempty"` - // RemapGIDs is a list of default GID mappings to use for layers. - RemapGIDs string `toml:"remap-gids,omitempty"` // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` @@ -90,13 +86,6 @@ type OptionsConfig struct { // files and directories. ForceMask os.FileMode `toml:"force_mask,omitempty"` - // RemapUser is the name of one or more entries in /etc/subuid which - // should be used to set up default UID mappings. - RemapUser string `toml:"remap-user,omitempty"` - // RemapGroup is the name of one or more entries in /etc/subgid which - // should be used to set up default GID mappings. - RemapGroup string `toml:"remap-group,omitempty"` - // RootAutoUsernsUser is the name of one or more entries in /etc/subuid and // /etc/subgid which should be used to set up automatically a userns.
RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"` diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 36e1bdd5..dd6c02a7 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,5 +1,5 @@ -//go:build linux || darwin || freebsd || solaris -// +build linux darwin freebsd solaris +//go:build !windows +// +build !windows package directory diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go deleted file mode 100644 index 9057fe1b..00000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build !linux && !darwin && !freebsd && !windows -// +build !linux,!darwin,!freebsd,!windows - -package homedir - -// Copyright 2013-2018 Docker, Inc. -// NOTE: this package has originally been copied from github.com/docker/docker. - -import ( - "errors" - "os" - "path/filepath" -) - -// GetRuntimeDir is unsupported on non-linux system. -func GetRuntimeDir() (string, error) { - return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") -} - -// StickRuntimeDirContents is unsupported on non-linux system. -func StickRuntimeDirContents(files []string) ([]string, error) { - return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go index 87484d95..35b93e62 100644 --- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go @@ -10,6 +10,7 @@ import ( "syscall" "github.com/containers/storage/pkg/idtools" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -61,12 +62,20 @@ func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0) // just wait for the SIGKILL for { - syscall.Pause() + _ = syscall.Pause() } } cleanupFunc := func() { - unix.Kill(int(pid), unix.SIGKILL) - _, _ = unix.Wait4(int(pid), nil, 0, nil) + err1 := unix.Kill(int(pid), unix.SIGKILL) + if err1 != nil && err1 != syscall.ESRCH { + logrus.Warnf("kill process pid: %d with SIGKILL ended with error: %v", int(pid), err1) + } + if err1 != nil { + return + } + if _, err := unix.Wait4(int(pid), nil, 0, nil); err != nil { + logrus.Warnf("wait4 pid: %d ended with error: %v", int(pid), err) + } } writeMappings := func(fname string, idmap []idtools.IDMap) error { mappings := "" diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index ef5a9525..dc963481 100644 --- 
a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -367,21 +367,77 @@ func checkChownErr(err error, name string, uid, gid int) error { return err } +// Stat contains file states that can be overridden with ContainersOverrideXattr. +type Stat struct { + IDs IDPair + Mode os.FileMode +} + +// FormatContainersOverrideXattr will format the given uid, gid, and mode into a string +// that can be used as the value for the ContainersOverrideXattr xattr. +func FormatContainersOverrideXattr(uid, gid, mode int) string { + return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777) +} + +// GetContainersOverrideXattr will get and decode ContainersOverrideXattr. +func GetContainersOverrideXattr(path string) (Stat, error) { + var stat Stat + xstat, err := system.Lgetxattr(path, ContainersOverrideXattr) + if err != nil { + return stat, err + } + + attrs := strings.Split(string(xstat), ":") + if len(attrs) != 3 { + return stat, fmt.Errorf("malformed %s: expected 3 colon-separated values, got %d", + ContainersOverrideXattr, len(attrs)) + } + + value, err := strconv.ParseUint(attrs[0], 10, 32) + if err != nil { + return stat, fmt.Errorf("failed to parse UID: %w", err) + } + + stat.IDs.UID = int(value) + + value, err = strconv.ParseUint(attrs[1], 10, 32) + if err != nil { + return stat, fmt.Errorf("failed to parse GID: %w", err) + } + + stat.IDs.GID = int(value) + + value, err = strconv.ParseUint(attrs[2], 8, 32) + if err != nil { + return stat, fmt.Errorf("failed to parse mode: %w", err) + } + + stat.Mode = os.FileMode(value) + + return stat, nil +} + +// SetContainersOverrideXattr will encode and set ContainersOverrideXattr. +func SetContainersOverrideXattr(path string, stat Stat) error { + value := FormatContainersOverrideXattr(stat.IDs.UID, stat.IDs.GID, int(stat.Mode)) + return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0) +} + func SafeChown(name string, uid, gid int) error { if runtime.GOOS == "darwin" { - var mode uint64 = 0o0700 + var mode os.FileMode = 0o0700 xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) if err == nil { attrs := strings.Split(string(xstat), ":") if len(attrs) == 3 { val, err := strconv.ParseUint(attrs[2], 8, 32) if err == nil { - mode = val + mode = os.FileMode(val) } } } - value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) - if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { + value := Stat{IDPair{uid, gid}, mode} + if err = SetContainersOverrideXattr(name, value); err != nil { return err } uid = os.Getuid() @@ -397,19 +453,19 @@ func SafeChown(name string, uid, gid int) error { func SafeLchown(name string, uid, gid int) error { if runtime.GOOS == "darwin" { - var mode uint64 = 0o0700 + var mode os.FileMode = 0o0700 xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) if err == nil { attrs := strings.Split(string(xstat), ":") if len(attrs) == 3 { val, err := strconv.ParseUint(attrs[2], 8, 32) if err == nil { - mode = val + mode = os.FileMode(val) } } } - value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) - if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { + value := Stat{IDPair{uid, gid}, mode} + if err = SetContainersOverrideXattr(name, value); err != nil { return err } uid = os.Getuid() diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go index 2a8c85ad..fd6addd7 100644 ---
a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -150,10 +150,13 @@ func (w *atomicFileWriter) complete(commit bool) (retErr error) { } defer func() { - w.closeTempFile() + err := w.closeTempFile() if retErr != nil || w.writeErr != nil { os.Remove(w.f.Name()) } + if retErr == nil { + retErr = err + } }() if commit { diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go index 51014757..25a71ac9 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -415,7 +415,9 @@ func (l *LockFile) lock(lType lockType) { // Optimization: only use the (expensive) syscall when // the counter is 0. In this case, we're either the first // reader lock or a writer lock. - lockHandle(l.fd, lType, false) + if err := lockHandle(l.fd, lType, false); err != nil { + panic(err) + } } l.lockType = lType l.locked = true @@ -426,10 +428,13 @@ func (l *LockFile) lock(lType lockType) { // command. func (l *LockFile) tryLock(lType lockType) error { var success bool + var rwMutexUnlocker func() if lType == readLock { success = l.rwMutex.TryRLock() + rwMutexUnlocker = l.rwMutex.RUnlock } else { success = l.rwMutex.TryLock() + rwMutexUnlocker = l.rwMutex.Unlock } if !success { return fmt.Errorf("resource temporarily unavailable") @@ -440,7 +445,7 @@ func (l *LockFile) tryLock(lType lockType) error { // If we're the first reference on the lock, we need to open the file again. fd, err := openLock(l.file, l.ro) if err != nil { - l.rwMutex.Unlock() + rwMutexUnlocker() return err } l.fd = fd @@ -450,7 +455,7 @@ func (l *LockFile) tryLock(lType lockType) error { // reader lock or a writer lock. if err = lockHandle(l.fd, lType, true); err != nil { closeHandle(fd) - l.rwMutex.Unlock() + rwMutexUnlocker() return err } } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 0eff003b..6c8399f9 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -1,5 +1,5 @@ -//go:build linux || solaris || darwin || freebsd -// +build linux solaris darwin freebsd +//go:build !windows +// +build !windows package lockfile diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index 40d8fd2b..067dd7cd 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -6,10 +6,12 @@ package loopback import ( "errors" "fmt" + "io/fs" "os" "syscall" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // Loopback related errors @@ -39,7 +41,7 @@ func getNextFreeLoopbackIndex() (int, error) { return index, err } -func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { +func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { // Read information about the loopback file. 
var st syscall.Stat_t err = syscall.Fstat(int(sparseFile.Fd()), &st) @@ -48,31 +50,51 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File return nil, ErrAttachLoopbackDevice } + // upper bound to avoid infinite loop + remaining := 1000 + // Start looking for a free /dev/loop for { - target := fmt.Sprintf("/dev/loop%d", index) - index++ - - fi, err := os.Stat(target) - if err != nil { - if os.IsNotExist(err) { - logrus.Error("There are no more loopback devices available.") - } + if remaining == 0 { + logrus.Errorf("No free loopback devices available") return nil, ErrAttachLoopbackDevice } + remaining-- - if fi.Mode()&os.ModeDevice != os.ModeDevice { - logrus.Errorf("Loopback device %s is not a block device.", target) - continue + index, err := getNextFreeLoopbackIndex() + if err != nil { + logrus.Debugf("Error retrieving the next available loopback: %s", err) + return nil, err } + target := fmt.Sprintf("/dev/loop%d", index) + // OpenFile adds O_CLOEXEC loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644) if err != nil { + // The kernel returns ENXIO when opening a device that is in the "deleting" or "rundown" state, so + // just treat ENXIO as if the device does not exist. + if errors.Is(err, fs.ErrNotExist) || errors.Is(err, unix.ENXIO) { + // Another process could have taken the loopback device in the meantime. So repeat + // the process with the next loopback device. + continue + } logrus.Errorf("Opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice } + fi, err := loopFile.Stat() + if err != nil { + loopFile.Close() + logrus.Errorf("Stat loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + if fi.Mode()&os.ModeDevice != os.ModeDevice { + loopFile.Close() + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + // Try to attach to the loop file if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { loopFile.Close() @@ -124,14 +146,6 @@ func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) { } func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) { - // Try to retrieve the next available loopback device via syscall. - // If it fails, we discard error and start looping for a - // loopback from index 0. 
- startIndex, err := getNextFreeLoopbackIndex() - if err != nil { - logrus.Debugf("Error retrieving the next available loopback: %s", err) - } - var sparseFile *os.File // OpenFile adds O_CLOEXEC @@ -146,7 +160,7 @@ func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err erro } defer sparseFile.Close() - loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile) + loopFile, err := openNextAvailableLoopback(sparseName, sparseFile) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go index cbc0299f..2d9e75ea 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go @@ -1,5 +1,18 @@ package mount -import "github.com/moby/sys/mountinfo" +import ( + "fmt" + "os" -var PidMountInfo = mountinfo.PidMountInfo + "github.com/moby/sys/mountinfo" +) + +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return mountinfo.GetMountsFromReader(f, nil) +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go index c4816c13..217e2fe8 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -1,5 +1,5 @@ -//go:build linux || freebsd || darwin -// +build linux freebsd darwin +//go:build !windows +// +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index a8dc1ba0..32e8d7dc 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -526,8 +526,11 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) { } else { // If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able // to use unshare(), so don't bother creating a new user namespace at this point. - capabilities, err := capability.NewPid(0) + capabilities, err := capability.NewPid2(0) + bailOnError(err, "Initializing a new Capabilities object for pid 0") + err = capabilities.Load() bailOnError(err, "Reading the current capabilities sets") + if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) { return } @@ -587,7 +590,12 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) { cmd.Hook = func(int) error { go func() { for receivedSignal := range interrupted { - cmd.Cmd.Process.Signal(receivedSignal) + if err := cmd.Cmd.Process.Signal(receivedSignal); err != nil { + logrus.Warnf( + "Failed to send signal %d to process (PID %d): %v", + receivedSignal, cmd.Cmd.Process.Pid, err, + ) + } } }() return nil diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 0f8d1f02..94a9b36d 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -19,6 +19,10 @@ driver = "overlay" # Temporary storage location runroot = "/run/containers/storage" +# Priority list for the storage drivers that will be tested one +# after the other to pick the storage driver if it is not defined.
+# driver_priority = ["overlay", "btrfs"] + # Primary Read/Write location of container storage # When changing the graphroot location on an SELINUX system, you must # ensure the labeling matches the default locations labels with the @@ -77,28 +81,6 @@ additionalimagestores = [ # operation so it is not enabled by default. pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""} -# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of -# a container, to the UIDs/GIDs as they should appear outside of the container, -# and the length of the range of UIDs/GIDs. Additional mapped sets can be -# listed and will be heeded by libraries, but there are limits to the number of -# mappings which the kernel will allow when you later attempt to run a -# container. -# -# remap-uids = "0:1668442479:65536" -# remap-gids = "0:1668442479:65536" - -# Remap-User/Group is a user name which can be used to look up one or more UID/GID -# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting -# with an in-container ID of 0 and then a host-level ID taken from the lowest -# range that matches the specified name, and using the length of that range. -# Additional ranges are then assigned, using the ranges which specify the -# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID, -# until all of the entries have been used for maps. This setting overrides the -# Remap-UIDs/GIDs setting. -# -# remap-user = "containers" -# remap-group = "containers" - # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID # ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned # to containers configured to create automatically a user namespace. Containers diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd index 43278a1f..5f421e0c 100644 --- a/vendor/github.com/containers/storage/storage.conf-freebsd +++ b/vendor/github.com/containers/storage/storage.conf-freebsd @@ -39,27 +39,6 @@ graphroot = "/var/db/containers/storage" additionalimagestores = [ ] -# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of -# a container, to the UIDs/GIDs as they should appear outside of the container, -# and the length of the range of UIDs/GIDs. Additional mapped sets can be -# listed and will be heeded by libraries, but there are limits to the number of -# mappings which the kernel will allow when you later attempt to run a -# container. -# -# remap-uids = 0:1668442479:65536 -# remap-gids = 0:1668442479:65536 - -# Remap-User/Group is a user name which can be used to look up one or more UID/GID -# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting -# with an in-container ID of 0 and then a host-level ID taken from the lowest -# range that matches the specified name, and using the length of that range. -# Additional ranges are then assigned, using the ranges which specify the -# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID, -# until all of the entries have been used for maps. -# -# remap-user = "containers" -# remap-group = "containers" - # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID # ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned # to containers configured to create automatically a user namespace. 
Containers diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 957675ba..efcecfae 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -1088,8 +1088,6 @@ func (s *store) createGraphDriverLocked() (drivers.Driver, error) { RunRoot: s.runRoot, DriverPriority: s.graphDriverPriority, DriverOptions: s.graphOptions, - UIDMaps: s.uidMap, - GIDMaps: s.gidMap, } return drivers.New(s.graphDriverName, config) } @@ -1437,7 +1435,9 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { return true } -// putLayer requires the rlstore, rlstores, as well as s.containerStore (even if not an argument to this function) to be locked for write. +// On entry: +// - rlstore must be locked for writing +// - rlstores MUST NOT be locked func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) { var parentLayer *Layer var options LayerOptions @@ -1474,6 +1474,11 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare return nil, -1, ErrLayerUnknown } parentLayer = ilayer + + if err := s.containerStore.startWriting(); err != nil { + return nil, -1, err + } + defer s.containerStore.stopWriting() containers, err := s.containerStore.Containers() if err != nil { return nil, -1, err @@ -1490,6 +1495,13 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare gidMap = ilayer.GIDMap } } else { + // FIXME? It’s unclear why we are holding containerStore locked here at all + // (and because we are not modifying it, why it is a write lock, not a read lock). 
+ if err := s.containerStore.startWriting(); err != nil { + return nil, -1, err + } + defer s.containerStore.stopWriting() + if !options.HostUIDMapping && len(options.UIDMap) == 0 { uidMap = s.uidMap } @@ -1497,23 +1509,17 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare gidMap = s.gidMap } } - layerOptions := LayerOptions{ - OriginalDigest: options.OriginalDigest, - OriginalSize: options.OriginalSize, - UncompressedDigest: options.UncompressedDigest, - Flags: options.Flags, - } if s.canUseShifting(uidMap, gidMap) { - layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} + options.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} } else { - layerOptions.IDMappingOptions = types.IDMappingOptions{ + options.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: options.HostUIDMapping, HostGIDMapping: options.HostGIDMapping, UIDMap: copyIDMap(uidMap), GIDMap: copyIDMap(gidMap), } } - return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo) + return rlstore.create(id, parentLayer, names, mountLabel, nil, &options, writeable, diff, slo) } func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { @@ -1525,10 +1531,6 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w return nil, -1, err } defer rlstore.stopWriting() - if err := s.containerStore.startWriting(); err != nil { - return nil, -1, err - } - defer s.containerStore.stopWriting() return s.putLayer(rlstore, rlstores, id, parent, names, mountLabel, writeable, lOptions, diff, nil) } @@ -2844,7 +2846,7 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) { exists := store.Exists(id) store.stopReading() if exists { - return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrLayerUnknown) + return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly) } } @@ -2928,14 +2930,40 @@ func (s *store) Unmount(id string, force bool) (bool, error) { } func (s *store) Changes(from, to string) ([]archive.Change, error) { - if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]archive.Change, bool, error) { + // NaiveDiff could cause mounts to happen without a lock, so be safe + // and treat the .Diff operation as a Mount. + // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver + // in startUsingGraphDriver(). 
+ if err := s.startUsingGraphDriver(); err != nil { + return nil, err + } + defer s.stopUsingGraphDriver() + + rlstore, lstores, err := s.bothLayerStoreKindsLocked() + if err != nil { + return nil, err + } + if err := rlstore.startWriting(); err != nil { + return nil, err + } + if rlstore.Exists(to) { + res, err := rlstore.Changes(from, to) + rlstore.stopWriting() + return res, err + } + rlstore.stopWriting() + + for _, s := range lstores { + store := s + if err := store.startReading(); err != nil { + return nil, err + } if store.Exists(to) { res, err := store.Changes(from, to) - return res, true, err + store.stopReading() + return res, err } - return nil, false, nil - }); done { - return res, err + store.stopReading() } return nil, ErrLayerUnknown } @@ -2966,12 +2994,30 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro } defer s.stopUsingGraphDriver() - layerStores, err := s.allLayerStoresLocked() + rlstore, lstores, err := s.bothLayerStoreKindsLocked() if err != nil { return nil, err } - for _, s := range layerStores { + if err := rlstore.startWriting(); err != nil { + return nil, err + } + if rlstore.Exists(to) { + rc, err := rlstore.Diff(from, to, options) + if rc != nil && err == nil { + wrapped := ioutils.NewReadCloserWrapper(rc, func() error { + err := rc.Close() + rlstore.stopWriting() + return err + }) + return wrapped, nil + } + rlstore.stopWriting() + return rc, err + } + rlstore.stopWriting() + + for _, s := range lstores { store := s if err := store.startReading(); err != nil { return nil, err @@ -3009,16 +3055,14 @@ func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) { return layer, err } if err == nil { + // This code path exists only for cmd/containers/storage.applyDiffUsingStagingDirectory; we have tests that + // assume layer creation and applying a staged layer are separate steps. Production pull code always uses the + // other path, where layer creation is atomic. return layer, rlstore.applyDiffFromStagingDirectory(args.ID, args.DiffOutput, args.DiffOptions) } // if the layer doesn't exist yet, try to create it. 
- if err := s.containerStore.startWriting(); err != nil { - return nil, err - } - defer s.containerStore.stopWriting() - slo := stagedLayerOptions{ DiffOutput: args.DiffOutput, DiffOptions: args.DiffOptions, diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index 03e5f7ab..f1a900b8 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -352,7 +352,7 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) { } if opts.GraphDriverName == "" { - if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) { + if canUseRootlessOverlay() { opts.GraphDriverName = overlayDriver } else { opts.GraphDriverName = "vfs" @@ -481,33 +481,6 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro if config.Storage.Options.MountOpt != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt)) } - - uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") - if err != nil { - return err - } - gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") - if err != nil { - return err - } - - if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { - config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser - } - if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" { - config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup - } - if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" { - mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) - if err != nil { - logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) - return err - } - uidmap = mappings.UIDs() - gidmap = mappings.GIDs() - } - storeOptions.UIDMap = uidmap - storeOptions.GIDMap = gidmap storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser if config.Storage.Options.AutoUsernsMinSize > 0 { storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_bsd.go similarity index 90% rename from vendor/github.com/containers/storage/types/options_freebsd.go rename to vendor/github.com/containers/storage/types/options_bsd.go index be2bc2f2..040fdc79 100644 --- a/vendor/github.com/containers/storage/types/options_freebsd.go +++ b/vendor/github.com/containers/storage/types/options_bsd.go @@ -1,3 +1,5 @@ +//go:build freebsd || netbsd + package types const ( @@ -14,6 +16,6 @@ var ( ) // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers -func canUseRootlessOverlay(home, runhome string) bool { +func canUseRootlessOverlay() bool { return false } diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go index 3eecc2b8..27ba6a06 100644 --- a/vendor/github.com/containers/storage/types/options_darwin.go +++ b/vendor/github.com/containers/storage/types/options_darwin.go @@ -11,6 +11,6 @@ const ( var defaultOverrideConfigFile = "/etc/containers/storage.conf" // canUseRootlessOverlay returns 
true if the overlay driver can be used for rootless containers -func canUseRootlessOverlay(home, runhome string) bool { +func canUseRootlessOverlay() bool { return false } diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go index a28e8288..09cbae54 100644 --- a/vendor/github.com/containers/storage/types/options_linux.go +++ b/vendor/github.com/containers/storage/types/options_linux.go @@ -22,7 +22,7 @@ var ( ) // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers -func canUseRootlessOverlay(home, runhome string) bool { +func canUseRootlessOverlay() bool { // we check first for fuse-overlayfs since it is cheaper. if path, _ := exec.LookPath("fuse-overlayfs"); path != "" { return true diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go index c1bea9fa..99a67ff2 100644 --- a/vendor/github.com/containers/storage/types/options_windows.go +++ b/vendor/github.com/containers/storage/types/options_windows.go @@ -14,6 +14,6 @@ var ( ) // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers -func canUseRootlessOverlay(home, runhome string) bool { +func canUseRootlessOverlay() bool { return false } diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf index c42d33fb..761b3a79 100644 --- a/vendor/github.com/containers/storage/types/storage_test.conf +++ b/vendor/github.com/containers/storage/types/storage_test.conf @@ -25,16 +25,6 @@ rootless_storage_path = "$HOME/$UID/containers/storage" additionalimagestores = [ ] -# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of -# a container, to the UIDs/GIDs as they should appear outside of the container, -# and the length of the range of UIDs/GIDs. Additional mapped sets can be -# listed and will be heeded by libraries, but there are limits to the number of -# mappings which the kernel will allow when you later attempt to run a -# container. -# -remap-uids = "0:1000000000:30000" -remap-gids = "0:1500000000:60000" - [storage.options.overlay] # mountopt specifies comma separated list of extra mount options diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index b313a472..73fcd240 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -66,7 +66,10 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio return } - ReloadConfigurationFile(configFile, storeOptions) + if err := ReloadConfigurationFile(configFile, storeOptions); err != nil { + logrus.Warningf("Failed to reload %q: %v", configFile, err) + return + } prevReloadConfig.storeOptions = storeOptions prevReloadConfig.mod = mtime diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md new file mode 100644 index 00000000..7436896e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -0,0 +1,138 @@ +# Changelog # +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/).
+ +## [Unreleased] ## + +## [0.3.1] - 2024-07-23 ## + +### Changed ### +- By allowing `Open(at)InRoot` to opt out of the extra work done by `MkdirAll` + to do the necessary "partial lookups", `Open(at)InRoot` now does less work + for both implementations (resulting in a many-fold decrease in the number of + operations for `openat2`, and a modest improvement for non-`openat2`) and is + far more likely to match the correct `openat2(RESOLVE_IN_ROOT)` + behaviour. +- We now use `readlinkat(fd, "")` where possible. For `Open(at)InRoot` this + effectively just means that we no longer risk getting spurious errors during + rename races. However, for our hardened procfs handler, this in theory should + prevent mount attacks from tricking us when doing magic-link readlinks (even + when using the unsafe host `/proc` handle). Unfortunately `Reopen` is still + potentially vulnerable to those kinds of somewhat-esoteric attacks. + + Technically this [will only work on post-2.6.39 kernels][linux-readlinkat-emptypath] + but it seems incredibly unlikely anyone is using `filepath-securejoin` on a + pre-2011 kernel. + +### Fixed ### +- Several improvements were made to the errors returned by `Open(at)InRoot` and + `MkdirAll` when dealing with invalid paths under the emulated (i.e. + non-`openat2`) implementation. Previously, some paths would return the wrong + error (`ENOENT` when the last component was a non-directory), and other paths + would be returned as though they were acceptable (trailing-slash components + after a non-directory would be ignored by `Open(at)InRoot`). + + These changes were done to match `openat2`'s behaviour and are purely a + consistency fix (most users are going to be using `openat2` anyway). + +[linux-readlinkat-emptypath]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=65cfc6722361570bfe255698d9cd4dccaf47570d + +## [0.3.0] - 2024-07-11 ## + +### Added ### +- A new set of `*os.File`-based APIs has been added. These are adapted from + [libpathrs][] and we strongly suggest using them if possible (as they provide + far more protection against attacks than `SecureJoin`): + + - `Open(at)InRoot` resolves a path inside a rootfs and returns an `*os.File` + handle to the path. Note that the handle returned is an `O_PATH` handle, + which cannot be used for reading or writing (as well as some other + operations -- [see open(2) for more details][open.2]) + + - `Reopen` takes an `O_PATH` file handle and safely re-opens it to upgrade + it to a regular handle. This can also be used with non-`O_PATH` handles, + but `O_PATH` is the most obvious application. + + - `MkdirAll` is an implementation of `os.MkdirAll` that is safe to use to + create a directory tree within a rootfs. + + As these are new APIs, they may change in the future. However, they should be + safe to start migrating to as we have extensive tests ensuring they behave + correctly and are safe against various races and other attacks. + +[libpathrs]: https://github.com/openSUSE/libpathrs +[open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html + +## [0.2.5] - 2024-05-03 ## + +### Changed ### +- Some minor changes were made to how lexical components (like `..` and `.`) + are handled during path generation in `SecureJoin`. There is no behaviour + change as a result of this fix (the resulting paths are the same). + +### Fixed ### +- The error returned when we hit a symlink loop now references the correct + path.
(#10) + +## [0.2.4] - 2023-09-06 ## + +### Security ### +- This release fixes a potential security issue in filepath-securejoin when + used on Windows ([GHSA-6xv5-86q9-7xr8][], which could be used to generate + paths outside of the provided rootfs in certain cases), as well as improving + the overall behaviour of filepath-securejoin when dealing with Windows paths + that contain volume names. Thanks to Paulo Gomes for discovering and fixing + these issues. + +### Fixed ### +- Switch to GitHub Actions for CI so we can test on Windows as well as Linux + and MacOS. + +[GHSA-6xv5-86q9-7xr8]: https://github.com/advisories/GHSA-6xv5-86q9-7xr8 + +## [0.2.3] - 2021-06-04 ## + +### Changed ### +- Switch to Go 1.13-style `%w` error wrapping, letting us drop the dependency + on `github.com/pkg/errors`. + +## [0.2.2] - 2018-09-05 ## + +### Changed ### +- Use `syscall.ELOOP` as the base error for symlink loops, rather than our own + (internal) error. This allows callers to more easily use `errors.Is` to check + for this case. + +## [0.2.1] - 2018-09-05 ## + +### Fixed ### +- Use our own `IsNotExist` implementation, which lets us handle `ENOTDIR` + properly within `SecureJoin`. + +## [0.2.0] - 2017-07-19 ## + +We now have 100% test coverage! + +### Added ### +- Add a `SecureJoinVFS` API that can be used for mocking (as we do in our new + tests) or for implementing custom handling of lookup operations (such as for + rootless containers, where work is necessary to access directories with weird + modes because we don't have `CAP_DAC_READ_SEARCH` or `CAP_DAC_OVERRIDE`). + +## 0.1.0 - 2017-07-19 + +This is our first release of `github.com/cyphar/filepath-securejoin`, +containing a full implementation with a coverage of 93.5% (the only missing +cases are the error cases, which are hard to mocktest at the moment). + +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...HEAD +[0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.5...v0.3.0 +[0.2.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.4...v0.2.5 +[0.2.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.3...v0.2.4 +[0.2.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.2...v0.2.3 +[0.2.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.1...v0.2.2 +[0.2.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.1.0...v0.2.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE index bec842f2..cb1ab88d 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/LICENSE +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -1,5 +1,5 @@ Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -Copyright (C) 2017 SUSE LLC. All rights reserved. +Copyright (C) 2017-2024 SUSE LLC. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index 4eca0f23..253956f8 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -2,31 +2,24 @@ [![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml) -An implementation of `SecureJoin`, a [candidate for inclusion in the Go -standard library][go#20126]. The purpose of this function is to be a "secure" -alternative to `filepath.Join`, and in particular it provides certain -guarantees that are not provided by `filepath.Join`. - -> **NOTE**: This code is *only* safe if you are not at risk of other processes -> modifying path components after you've used `SecureJoin`. If it is possible -> for a malicious process to modify path components of the resolved path, then -> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There -> are some Linux kernel patches I'm working on which might allow for a better -> solution.][lwn-obeneath] -> -> In addition, with a slightly modified API it might be possible to use -> `O_PATH` and verify that the opened path is actually the resolved one -- but -> I have not done that yet. I might add it in the future as a helper function -> to help users verify the path (we can't just return `/proc/self/fd/` -> because that doesn't always work transparently for all users). - -This is the function prototype: +### Old API ### -```go -func SecureJoin(root, unsafePath string) (string, error) -``` +This library was originally just an implementation of `SecureJoin` which was +[intended to be included in the Go standard library][go#20126] as a safer +`filepath.Join` that would restrict the path lookup to be inside a root +directory. + +The implementation was based on code that existed in several container +runtimes. Unfortunately, this API is **fundamentally unsafe** against attackers +that can modify path components after `SecureJoin` returns and before the +caller uses the path, allowing for some fairly trivial TOCTOU attacks. + +`SecureJoin` (and `SecureJoinVFS`) are still provided by this library to +support legacy users, but new users are strongly suggested to avoid using +`SecureJoin` and instead use the [new api](#new-api) or switch to +[libpathrs][libpathrs]. 
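As a rough usage sketch of how the two new calls compose (the rootfs path and file name below are placeholders, not taken from the upstream README): resolve a path strictly inside a root with `OpenInRoot`, then upgrade the returned `O_PATH` handle with `Reopen` before reading from it.

```go
package main

import (
	"fmt"
	"io"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	// Resolve "etc/passwd" inside the rootfs; symlink components cannot
	// escape /srv/rootfs. The result is a restricted O_PATH handle.
	handle, err := securejoin.OpenInRoot("/srv/rootfs", "etc/passwd")
	if err != nil {
		panic(err)
	}
	defer handle.Close()

	// Upgrade the O_PATH handle to a regular read-only descriptor.
	f, err := securejoin.Reopen(handle, unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	data, err := io.ReadAll(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}
```

The two-step split is deliberate: the O_PATH handle pins the resolved inode, and `Reopen` upgrades that same inode rather than re-resolving the path.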
-This library **guarantees** the following: +With the above limitations in mind, this library guarantees the following: * If no error is set, the resulting string **must** be a child path of `root` and will not contain any symlink path components (they will all be @@ -47,7 +40,7 @@ This library **guarantees** the following: A (trivial) implementation of this function on GNU/Linux systems could be done with the following (note that this requires root privileges and is far more opaque than the implementation in this library, and also requires that -`readlink` is inside the `root` path): +`readlink` is inside the `root` path and is trustworthy): ```go package securejoin @@ -70,9 +63,105 @@ func SecureJoin(root, unsafePath string) (string, error) { } ``` -[lwn-obeneath]: https://lwn.net/Articles/767547/ +[libpathrs]: https://github.com/openSUSE/libpathrs [go#20126]: https://github.com/golang/go/issues/20126 +### New API ### + +While we recommend users switch to [libpathrs][libpathrs] as soon as it has a +stable release, some methods implemented by libpathrs have been ported to this +library to ease the transition. These APIs are only supported on Linux. + +These APIs are implemented such that `filepath-securejoin` will +opportunistically use certain newer kernel APIs that make these operations far +more secure. In particular: + +* All of the lookup operations will use [`openat2`][openat2.2] on new enough + kernels (Linux 5.6 or later) to restrict lookups through magic-links and + bind-mounts (for certain operations) and to make use of `RESOLVE_IN_ROOT` to + efficiently resolve symlinks within a rootfs. + +* The APIs provide hardening against a malicious `/proc` mount to either detect + or avoid being tricked by a `/proc` that is not legitimate. This is done + using [`openat2`][openat2.2] for all users, and privileged users will also be + further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2] + (Linux 4.18 or later). + +[openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html +[fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md +[open_tree.2]: https://github.com/brauner/man-pages-md/blob/main/open_tree.md + +#### `OpenInRoot` #### + +```go +func OpenInRoot(root, unsafePath string) (*os.File, error) +func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) +func Reopen(handle *os.File, flags int) (*os.File, error) +``` + +`OpenInRoot` is a much safer version of + +```go +path, err := securejoin.SecureJoin(root, unsafePath) +file, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) +``` + +that protects against various race attacks that could lead to serious security +issues, depending on the application. Note that the returned `*os.File` is an +`O_PATH` file descriptor, which is quite restricted. Callers will probably need +to use `Reopen` to get a more usable handle (this split is done to provide +useful features like PTY spawning and to avoid users accidentally opening bad +inodes that could cause a DoS). + +Callers need to be careful in how they use the returned `*os.File`. Usually it +is only safe to operate on the handle directly, and it is very easy to create a +security issue. [libpathrs][libpathrs] provides far more helpers to make using +these handles safer -- there is currently no plan to port them to +`filepath-securejoin`. + +`OpenatInRoot` is like `OpenInRoot` except that the root is provided using an +`*os.File`. 
This allows you to ensure that multiple `OpenatInRoot` (or +`MkdirAllHandle`) calls are operating on the same rootfs. + +> **NOTE**: Unlike `SecureJoin`, `OpenInRoot` will error out as soon as it hits +> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` +> which treated non-existent components as though they were real directories, +> and would allow for partial resolution of dangling symlinks. These behaviours +> are at odds with how Linux treats non-existent paths and dangling symlinks, +> and so these are no longer allowed. + +#### `MkdirAll` #### + +```go +func MkdirAll(root, unsafePath string, mode int) error +func MkdirAllHandle(root *os.File, unsafePath string, mode int) (*os.File, error) +``` + +`MkdirAll` is a much safer version of + +```go +path, err := securejoin.SecureJoin(root, unsafePath) +err = os.MkdirAll(path, mode) +``` + +that protects against the same kinds of races that `OpenInRoot` protects +against. + +`MkdirAllHandle` is like `MkdirAll` except that the root is provided using an +`*os.File` (the reason for this is the same as with `OpenatInRoot`) and an +`*os.File` of the final created directory is returned (this directory is +guaranteed to be effectively identical to the directory created by +`MkdirAllHandle`, which is not possible to ensure by just using `OpenatInRoot` +after `MkdirAll`). + +> **NOTE**: Unlike `SecureJoin`, `MkdirAll` will error out as soon as it hits +> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` +> which treated non-existent components as though they were real directories, +> and would allow for partial resolution of dangling symlinks. These behaviours +> are at odds with how Linux treats non-existent paths and dangling symlinks, +> and so these are no longer allowed. This means that `MkdirAll` will not +> create non-existent directories referenced by a dangling symlink. + ### License ### The license of this project is the same as Go, which is a BSD 3-clause license diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index 3a4036fb..9e11b32f 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.2.5 +0.3.1 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index 5ac23b99..bd86a48b 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -1,5 +1,5 @@ // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -41,6 +41,12 @@ func IsNotExist(err error) bool { // replaced with symlinks on the filesystem) after this function has returned. // Such a symlink race is necessarily out-of-scope of SecureJoin. // +// NOTE: Due to the above limitation, Linux users are strongly encouraged to +// use OpenInRoot instead, which does safely protect against these kinds of +// attacks. There is no way to solve this problem with SecureJoinVFS because +// the API is fundamentally wrong (you cannot return a "safe" path string and +// guarantee it won't be modified afterwards). 
+// // Volume names in unsafePath are always discarded, regardless if they are // provided via direct input or when evaluating symlinks. Therefore: // diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go new file mode 100644 index 00000000..290befa1 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go @@ -0,0 +1,389 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + "slices" + "strings" + + "golang.org/x/sys/unix" +) + +type symlinkStackEntry struct { + // (dir, remainingPath) is what we would've returned if the link didn't + // exist. This matches what openat2(RESOLVE_IN_ROOT) would return in + // this case. + dir *os.File + remainingPath string + // linkUnwalked is the remaining path components from the original + // Readlink which we have yet to walk. When this slice is empty, we + // drop the link from the stack. + linkUnwalked []string +} + +func (se symlinkStackEntry) String() string { + return fmt.Sprintf("<%s>/%s [->%s]", se.dir.Name(), se.remainingPath, strings.Join(se.linkUnwalked, "/")) +} + +func (se symlinkStackEntry) Close() { + _ = se.dir.Close() +} + +type symlinkStack []*symlinkStackEntry + +func (s *symlinkStack) IsEmpty() bool { + return s == nil || len(*s) == 0 +} + +func (s *symlinkStack) Close() { + if s != nil { + for _, link := range *s { + link.Close() + } + // TODO: Switch to clear once we switch to Go 1.21. + *s = nil + } +} + +var ( + errEmptyStack = errors.New("[internal] stack is empty") + errBrokenSymlinkStack = errors.New("[internal error] broken symlink stack") +) + +func (s *symlinkStack) popPart(part string) error { + if s == nil || s.IsEmpty() { + // If there is nothing in the symlink stack, then the part was from the + // real path provided by the user, and this is a no-op. + return errEmptyStack + } + if part == "." { + // "." components are no-ops -- we drop them when doing SwapLink. + return nil + } + + tailEntry := (*s)[len(*s)-1] + + // Double-check that we are popping the component we expect. + if len(tailEntry.linkUnwalked) == 0 { + return fmt.Errorf("%w: trying to pop component %q of empty stack entry %s", errBrokenSymlinkStack, part, tailEntry) + } + headPart := tailEntry.linkUnwalked[0] + if headPart != part { + return fmt.Errorf("%w: trying to pop component %q but the last stack entry is %s (%q)", errBrokenSymlinkStack, part, tailEntry, headPart) + } + + // Drop the component, but keep the entry around in case we are dealing + // with a "tail-chained" symlink. + tailEntry.linkUnwalked = tailEntry.linkUnwalked[1:] + return nil +} + +func (s *symlinkStack) PopPart(part string) error { + if err := s.popPart(part); err != nil { + if errors.Is(err, errEmptyStack) { + // Skip empty stacks. + err = nil + } + return err + } + + // Clean up any of the trailing stack entries that are empty. + for lastGood := len(*s) - 1; lastGood >= 0; lastGood-- { + entry := (*s)[lastGood] + if len(entry.linkUnwalked) > 0 { + break + } + entry.Close() + (*s) = (*s)[:lastGood] + } + return nil +} + +func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) error { + if s == nil { + return nil + } + // Split the link target and clean up any "" parts. 
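The stack push that follows splits the link target into components and drops empty and `"."` parts; as a standalone demonstration of that same transform (not part of the library):

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	// "a//./b/c/" splits into ["a" "" "." "b" "c" ""]; DeleteFunc then
	// removes the "" and "." entries, leaving only real components.
	parts := slices.DeleteFunc(
		strings.Split("a//./b/c/", "/"),
		func(part string) bool { return part == "" || part == "." })
	fmt.Println(parts) // [a b c]
}
```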
+	linkTargetParts := slices.DeleteFunc(
+		strings.Split(linkTarget, "/"),
+		func(part string) bool { return part == "" || part == "." })
+
+	// Copy the directory so the caller doesn't close our copy.
+	dirCopy, err := dupFile(dir)
+	if err != nil {
+		return err
+	}
+
+	// Add to the stack.
+	*s = append(*s, &symlinkStackEntry{
+		dir:           dirCopy,
+		remainingPath: remainingPath,
+		linkUnwalked:  linkTargetParts,
+	})
+	return nil
+}
+
+func (s *symlinkStack) SwapLink(linkPart string, dir *os.File, remainingPath, linkTarget string) error {
+	// If we are currently inside a symlink resolution, remove the symlink
+	// component from the last symlink entry, but don't remove the entry even
+	// if it's empty. If we are a "tail-chained" symlink (a trailing symlink we
+	// hit during a symlink resolution) we need to keep the old symlink until
+	// we finish the resolution.
+	if err := s.popPart(linkPart); err != nil {
+		if !errors.Is(err, errEmptyStack) {
+			return err
+		}
+		// Push the component regardless of whether the stack was empty.
+	}
+	return s.push(dir, remainingPath, linkTarget)
+}
+
+func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) {
+	if s == nil || s.IsEmpty() {
+		return nil, "", false
+	}
+	tailEntry := (*s)[0]
+	*s = (*s)[1:]
+	return tailEntry.dir, tailEntry.remainingPath, true
+}
+
+// partialLookupInRoot tries to look up as much of the requested path as
+// possible within the provided root (a-la RESOLVE_IN_ROOT) and opens the
+// final existing component of the requested path, returning a file handle to
+// the final existing component and a string containing the remaining path
+// components.
+func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) {
+	return lookupInRoot(root, unsafePath, true)
+}
+
+func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) {
+	handle, remainingPath, err := lookupInRoot(root, unsafePath, false)
+	if remainingPath != "" && err == nil {
+		// should never happen
+		err = fmt.Errorf("[bug] non-empty remaining path when doing a non-partial lookup: %q", remainingPath)
+	}
+	// lookupInRoot(partial=false) will always close the handle if an error is
+	// returned, so no need to double-check here.
+	return handle, err
+}
+
+func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) {
+	unsafePath = filepath.ToSlash(unsafePath) // noop
+
+	// This is very similar to SecureJoin, except that we operate on the
+	// components using file descriptors. We then return the last component we
+	// managed to open, along with the remaining path components not opened.
+
+	// Try to use openat2 if possible.
+	if hasOpenat2() {
+		return lookupOpenat2(root, unsafePath, partial)
+	}
+
+	// Get the "actual" root path from /proc/self/fd. This is necessary if the
+	// root is some magic-link like /proc/$pid/root, in which case we want to
+	// make sure when we do checkProcSelfFdPath that we are using the correct
+	// root path.
+	logicalRootPath, err := procSelfFdReadlink(root)
+	if err != nil {
+		return nil, "", fmt.Errorf("get real root path: %w", err)
+	}
+
+	currentDir, err := dupFile(root)
+	if err != nil {
+		return nil, "", fmt.Errorf("clone root fd: %w", err)
+	}
+	defer func() {
+		// If a handle is not returned, close the internal handle.
+		if Handle == nil {
+			_ = currentDir.Close()
+		}
+	}()
+
+	// symlinkStack is used to emulate how openat2(RESOLVE_IN_ROOT) treats
+	// dangling symlinks.
If we hit a non-existent path while resolving a + // symlink, we need to return the (dir, remainingPath) that we had when we + // hit the symlink (treating the symlink as though it were a regular file). + // The set of (dir, remainingPath) sets is stored within the symlinkStack + // and we add and remove parts when we hit symlink and non-symlink + // components respectively. We need a stack because of recursive symlinks + // (symlinks that contain symlink components in their target). + // + // Note that the stack is ONLY used for book-keeping. All of the actual + // path walking logic is still based on currentPath/remainingPath and + // currentDir (as in SecureJoin). + var symStack *symlinkStack + if partial { + symStack = new(symlinkStack) + defer symStack.Close() + } + + var ( + linksWalked int + currentPath string + remainingPath = unsafePath + ) + for remainingPath != "" { + // Save the current remaining path so if the part is not real we can + // return the path including the component. + oldRemainingPath := remainingPath + + // Get the next path component. + var part string + if i := strings.IndexByte(remainingPath, '/'); i == -1 { + part, remainingPath = remainingPath, "" + } else { + part, remainingPath = remainingPath[:i], remainingPath[i+1:] + } + // If we hit an empty component, we need to treat it as though it is + // "." so that trailing "/" and "//" components on a non-directory + // correctly return the right error code. + if part == "" { + part = "." + } + + // Apply the component lexically to the path we are building. + // currentPath does not contain any symlinks, and we are lexically + // dealing with a single component, so it's okay to do a filepath.Clean + // here. + nextPath := path.Join("/", currentPath, part) + // If we logically hit the root, just clone the root rather than + // opening the part and doing all of the other checks. + if nextPath == "/" { + if err := symStack.PopPart(part); err != nil { + return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err) + } + // Jump to root. + rootClone, err := dupFile(root) + if err != nil { + return nil, "", fmt.Errorf("clone root fd: %w", err) + } + _ = currentDir.Close() + currentDir = rootClone + currentPath = nextPath + continue + } + + // Try to open the next component. + nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + switch { + case err == nil: + st, err := nextDir.Stat() + if err != nil { + _ = nextDir.Close() + return nil, "", fmt.Errorf("stat component %q: %w", part, err) + } + + switch st.Mode() & os.ModeType { + case os.ModeSymlink: + // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See + // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and + // fstatat() with empty relative pathnames"). + linkDest, err := readlinkatFile(nextDir, "") + // We don't need the handle anymore. + _ = nextDir.Close() + if err != nil { + return nil, "", err + } + + linksWalked++ + if linksWalked > maxSymlinkLimit { + return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP} + } + + // Swap out the symlink's component for the link entry itself. + if err := symStack.SwapLink(part, currentDir, oldRemainingPath, linkDest); err != nil { + return nil, "", fmt.Errorf("walking into symlink %q failed: push symlink: %w", part, err) + } + + // Update our logical remaining path. + remainingPath = linkDest + "/" + remainingPath + // Absolute symlinks reset any work we've already done. 
+				if path.IsAbs(linkDest) {
+					// Jump to root.
+					rootClone, err := dupFile(root)
+					if err != nil {
+						return nil, "", fmt.Errorf("clone root fd: %w", err)
+					}
+					_ = currentDir.Close()
+					currentDir = rootClone
+					currentPath = "/"
+				}
+
+			default:
+				// If we are dealing with a directory, simply walk into it.
+				_ = currentDir.Close()
+				currentDir = nextDir
+				currentPath = nextPath
+
+				// The part was real, so drop it from the symlink stack.
+				if err := symStack.PopPart(part); err != nil {
+					return nil, "", fmt.Errorf("walking into directory %q failed: %w", part, err)
+				}
+
+				// If we are operating on a .., make sure we haven't escaped.
+				// We only have to check for ".." here because walking down
+				// into a regular component cannot cause you to escape. This
+				// mirrors the logic in RESOLVE_IN_ROOT, except we have to
+				// check every ".." rather than only checking after a rename
+				// or mount on the system.
+				if part == ".." {
+					// Make sure the root hasn't moved.
+					if err := checkProcSelfFdPath(logicalRootPath, root); err != nil {
+						return nil, "", fmt.Errorf("root path moved during lookup: %w", err)
+					}
+					// Make sure the path is what we expect.
+					fullPath := logicalRootPath + nextPath
+					if err := checkProcSelfFdPath(fullPath, currentDir); err != nil {
+						return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err)
+					}
+				}
+			}
+
+		default:
+			if !partial {
+				return nil, "", err
+			}
+			// If there are any remaining components in the symlink stack, we
+			// are still within a symlink resolution and thus we hit a dangling
+			// symlink. So pretend that the first symlink in the stack we hit
+			// was an ENOENT (to match openat2).
+			if oldDir, remainingPath, ok := symStack.PopTopSymlink(); ok {
+				_ = currentDir.Close()
+				return oldDir, remainingPath, err
+			}
+			// We have hit a final component that doesn't exist, so we have our
+			// partial open result. Note that we have to use the OLD remaining
+			// path, since the lookup failed.
+			return currentDir, oldRemainingPath, err
+		}
+	}
+
+	// If the unsafePath had a trailing slash, we need to make sure we try to
+	// do a relative "." open so that we will correctly return an error when
+	// the final component is a non-directory (to match openat2). In the
+	// context of openat2, a trailing slash and a trailing "/." are completely
+	// equivalent.
+	if strings.HasSuffix(unsafePath, "/") {
+		nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+		if err != nil {
+			if !partial {
+				_ = currentDir.Close()
+				currentDir = nil
+			}
+			return currentDir, "", err
+		}
+		_ = currentDir.Close()
+		currentDir = nextDir
+	}
+
+	// All of the components existed!
+	return currentDir, "", nil
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
new file mode 100644
index 00000000..ad2bd797
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
@@ -0,0 +1,229 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
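Stepping back from the diff for a moment: the `..` handling in the lookup loop above re-verifies both the root and the current directory against `/proc/self/fd`. The following standalone sketch shows the bare shape of that check, using a hypothetical helper without the library's procfs hardening:

```go
//go:build linux

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

// expectPath is a hypothetical helper (not part of filepath-securejoin)
// that checks an O_PATH handle still refers to the expected path by
// reading the /proc/self/fd magic-link for its fd.
func expectPath(f *os.File, expected string) error {
	got, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", f.Fd()))
	if err != nil {
		return err
	}
	if got != expected {
		return fmt.Errorf("possible breakout: handle is at %q, expected %q", got, expected)
	}
	return nil
}

func main() {
	dir, err := os.OpenFile("/tmp", unix.O_PATH|unix.O_CLOEXEC, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer dir.Close()
	if err := expectPath(dir, "/tmp"); err != nil {
		log.Fatal(err)
	}
	log.Println("handle still points at /tmp")
}
```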
+ +package securejoin + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "slices" + "strings" + + "golang.org/x/sys/unix" +) + +var ( + errInvalidMode = errors.New("invalid permission mode") + errPossibleAttack = errors.New("possible attack detected") +) + +// MkdirAllHandle is equivalent to MkdirAll, except that it is safer to use in +// two respects: +// +// - The caller provides the root directory as an *os.File (preferably O_PATH) +// handle. This means that the caller can be sure which root directory is +// being used. Note that this can be emulated by using /proc/self/fd/... as +// the root path with MkdirAll. +// +// - Once all of the directories have been created, an *os.File (O_PATH) handle +// to the directory at unsafePath is returned to the caller. This is done in +// an effectively-race-free way (an attacker would only be able to swap the +// final directory component), which is not possible to emulate with +// MkdirAll. +// +// In addition, the returned handle is obtained far more efficiently than doing +// a brand new lookup of unsafePath (such as with SecureJoin or openat2) after +// doing MkdirAll. If you intend to open the directory after creating it, you +// should use MkdirAllHandle. +func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) { + // Make sure there are no os.FileMode bits set. + if mode&^0o7777 != 0 { + return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode) + } + + // Try to open as much of the path as possible. + currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath) + defer func() { + if Err != nil { + _ = currentDir.Close() + } + }() + if err != nil && !errors.Is(err, unix.ENOENT) { + return nil, fmt.Errorf("find existing subpath of %q: %w", unsafePath, err) + } + + // If there is an attacker deleting directories as we walk into them, + // detect this proactively. Note this is guaranteed to detect if the + // attacker deleted any part of the tree up to currentDir. + // + // Once we walk into a dead directory, partialLookupInRoot would not be + // able to walk further down the tree (directories must be empty before + // they are deleted), and if the attacker has removed the entire tree we + // can be sure that anything that was originally inside a dead directory + // must also be deleted and thus is a dead directory in its own right. + // + // This is mostly a quality-of-life check, because mkdir will simply fail + // later if the attacker deletes the tree after this check. + if err := isDeadInode(currentDir); err != nil { + return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err) + } + + // Re-open the path to match the O_DIRECTORY reopen loop later (so that we + // always return a non-O_PATH handle). We also check that we actually got a + // directory. + if reopenDir, err := Reopen(currentDir, unix.O_DIRECTORY|unix.O_CLOEXEC); errors.Is(err, unix.ENOTDIR) { + return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR) + } else if err != nil { + return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err) + } else { + _ = currentDir.Close() + currentDir = reopenDir + } + + remainingParts := strings.Split(remainingPath, string(filepath.Separator)) + if slices.Contains(remainingParts, "..") { + // The path contained ".." components after the end of the "real" + // components. We could try to safely resolve ".." 
here but that would
+		// add a bunch of extra logic for something that it's not clear even
+		// needs to be supported. So just return an error.
+		//
+		// If we do filepath.Clean(remainingPath) then we end up with the
+		// problem that ".." can erase a trailing dangling symlink and produce
+		// a path that doesn't quite match what the user asked for.
+		return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath)
+	}
+
+	// Make sure the mode doesn't have any type bits.
+	mode &^= unix.S_IFMT
+	// What properties do we expect any newly created directories to have?
+	var (
+		// While umask(2) is a per-thread property, and thus this value could
+		// vary between threads, a functioning Go program would only change a
+		// thread's umask while that thread is locked with LockOSThread, so we
+		// don't need to LockOSThread for this entire mkdirat loop (if we are
+		// in the locked thread with a different umask, we are already locked
+		// and there's nothing for us to do -- and if not then it doesn't
+		// matter which thread we run on and there's nothing for us to do).
+		expectedMode = uint32(unix.S_IFDIR | (mode &^ getUmask()))
+
+		// We would want to get the fs[ug]id here, but we can't access those
+		// from userspace. In practice, nobody uses setfs[ug]id() anymore, so
+		// just use the effective [ug]id (which is equivalent to the fs[ug]id
+		// for programs that don't use setfs[ug]id).
+		expectedUid = uint32(unix.Geteuid())
+		expectedGid = uint32(unix.Getegid())
+	)
+
+	// Create the remaining components.
+	for _, part := range remainingParts {
+		switch part {
+		case "", ".":
+			// Skip over no-op paths.
+			continue
+		}
+
+		// NOTE: mkdir(2) will not follow trailing symlinks, so we can safely
+		// create the final component without worrying about symlink-exchange
+		// attacks.
+		if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil {
+			err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err}
+			// Make the error a bit nicer if the directory is dead.
+			if err2 := isDeadInode(currentDir); err2 != nil {
+				err = fmt.Errorf("%w (%w)", err, err2)
+			}
+			return nil, err
+		}
+
+		// Get a handle to the next component. O_DIRECTORY means we don't need
+		// to use O_PATH.
+		var nextDir *os.File
+		if hasOpenat2() {
+			nextDir, err = openat2File(currentDir, part, &unix.OpenHow{
+				Flags:   unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC,
+				Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV,
+			})
+		} else {
+			nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+		}
+		if err != nil {
+			return nil, err
+		}
+		_ = currentDir.Close()
+		currentDir = nextDir
+
+		// Make sure that the directory matches what we expect. An attacker
+		// could have swapped the directory between us making it and opening
+		// it. There's no way for us to be sure that the directory is
+		// _precisely_ the same as the directory we created, but if we are in
+		// an empty directory with the same owner and mode as the one we
+		// created then there is nothing the attacker could do with this new
+		// directory that they couldn't do with the old one.
+		if stat, err := fstat(currentDir); err != nil {
+			return nil, fmt.Errorf("check newly created directory: %w", err)
+		} else {
+			if stat.Mode != expectedMode {
+				return nil, fmt.Errorf("%w: newly created directory %q has incorrect mode 0o%.3o (expected 0o%.3o)", errPossibleAttack, currentDir.Name(), stat.Mode, expectedMode)
+			}
+			if stat.Uid != expectedUid || stat.Gid != expectedGid {
+				return nil, fmt.Errorf("%w: newly created directory %q has incorrect owner %d:%d (expected %d:%d)", errPossibleAttack, currentDir.Name(), stat.Uid, stat.Gid, expectedUid, expectedGid)
+			}
+			// Check that the directory is empty. We only need to check for
+			// a single entry, and we should get EOF if the directory is
+			// empty.
+			_, err := currentDir.Readdirnames(1)
+			if !errors.Is(err, io.EOF) {
+				if err == nil {
+					err = fmt.Errorf("%w: newly created directory %q is non-empty", errPossibleAttack, currentDir.Name())
+				}
+				return nil, fmt.Errorf("check if newly created directory %q is empty: %w", currentDir.Name(), err)
+			}
+			// Reset the offset.
+			_, _ = currentDir.Seek(0, unix.SEEK_SET)
+		}
+	}
+	return currentDir, nil
+}
+
+// MkdirAll is a race-safe alternative to the Go stdlib's os.MkdirAll function,
+// where the new directory is guaranteed to be within the root directory (if an
+// attacker can move directories from inside the root to outside the root, the
+// created directory tree might be outside of the root but the key constraint
+// is that at no point will we walk outside of the directory tree we are
+// creating).
+//
+// Effectively, MkdirAll(root, unsafePath, mode) is equivalent to
+//
+//	path, _ := securejoin.SecureJoin(root, unsafePath)
+//	err := os.MkdirAll(path, mode)
+//
+// But is much safer. The above implementation is unsafe because if an attacker
+// can modify the filesystem tree between SecureJoin and MkdirAll, it is
+// possible for MkdirAll to resolve unsafe symlink components and create
+// directories outside of the root.
+//
+// If you plan to open the directory after you have created it or want to use
+// an open directory handle as the root, you should use MkdirAllHandle instead.
+// This function is a wrapper around MkdirAllHandle.
+//
+// NOTE: The mode argument must be set to the unix mode bits (unix.S_I...), not
+// the Go generic mode bits (os.Mode...).
+func MkdirAll(root, unsafePath string, mode int) error {
+	rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return err
+	}
+	defer rootDir.Close()
+
+	f, err := MkdirAllHandle(rootDir, unsafePath, mode)
+	if err != nil {
+		return err
+	}
+	_ = f.Close()
+	return nil
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
new file mode 100644
index 00000000..52dce76f
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
@@ -0,0 +1,101 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	"golang.org/x/sys/unix"
+)
+
+// OpenatInRoot is equivalent to OpenInRoot, except that the root is provided
+// using an *os.File handle, to ensure that the correct root directory is used.
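Before the diff moves into `open_linux.go`'s `OpenatInRoot` (whose doc comment appears above), here is a minimal usage sketch of the two mkdir entry points just defined; the rootfs path is hypothetical, and the mode uses plain Unix permission bits as the `MkdirAll` doc comment requires:

```go
package main

import (
	"log"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	// One-shot variant: create a/b/c inside a hypothetical rootfs.
	if err := securejoin.MkdirAll("/srv/rootfs", "a/b/c", 0o755); err != nil {
		log.Fatal(err)
	}

	// Handle-based variant: pin the root first, then get back a handle
	// to the final created directory.
	root, err := os.OpenFile("/srv/rootfs", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer root.Close()

	dir, err := securejoin.MkdirAllHandle(root, "a/b/c/d", 0o755)
	if err != nil {
		log.Fatal(err)
	}
	defer dir.Close()
	log.Println("created:", dir.Name())
}
```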
+func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { + handle, err := completeLookupInRoot(root, unsafePath) + if err != nil { + return nil, &os.PathError{Op: "securejoin.OpenInRoot", Path: unsafePath, Err: err} + } + return handle, nil +} + +// OpenInRoot safely opens the provided unsafePath within the root. +// Effectively, OpenInRoot(root, unsafePath) is equivalent to +// +// path, _ := securejoin.SecureJoin(root, unsafePath) +// handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) +// +// But is much safer. The above implementation is unsafe because if an attacker +// can modify the filesystem tree between SecureJoin and OpenFile, it is +// possible for the returned file to be outside of the root. +// +// Note that the returned handle is an O_PATH handle, meaning that only a very +// limited set of operations will work on the handle. This is done to avoid +// accidentally opening an untrusted file that could cause issues (such as a +// disconnected TTY that could cause a DoS, or some other issue). In order to +// use the returned handle, you can "upgrade" it to a proper handle using +// Reopen. +func OpenInRoot(root, unsafePath string) (*os.File, error) { + rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + defer rootDir.Close() + return OpenatInRoot(rootDir, unsafePath) +} + +// Reopen takes an *os.File handle and re-opens it through /proc/self/fd. +// Reopen(file, flags) is effectively equivalent to +// +// fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd()) +// os.OpenFile(fdPath, flags|unix.O_CLOEXEC) +// +// But with some extra hardenings to ensure that we are not tricked by a +// maliciously-configured /proc mount. While this attack scenario is not +// common, in container runtimes it is possible for higher-level runtimes to be +// tricked into configuring an unsafe /proc that can be used to attack file +// operations. See CVE-2019-19921 for more details. +func Reopen(handle *os.File, flags int) (*os.File, error) { + procRoot, err := getProcRoot() + if err != nil { + return nil, err + } + + // We can't operate on /proc/thread-self/fd/$n directly when doing a + // re-open, so we need to open /proc/thread-self/fd and then open a single + // final component. + procFdDir, closer, err := procThreadSelf(procRoot, "fd/") + if err != nil { + return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) + } + defer procFdDir.Close() + defer closer() + + // Try to detect if there is a mount on top of the magic-link we are about + // to open. If we are using unsafeHostProcRoot(), this could change after + // we check it (and there's nothing we can do about that) but for + // privateProcRoot() this should be guaranteed to be safe (at least since + // Linux 5.12[1], when anonymous mount namespaces were completely isolated + // from external mounts including mount propagation events). + // + // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts + // onto targets that reside on shared mounts"). + fdStr := strconv.Itoa(int(handle.Fd())) + if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil { + return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) + } + + flags |= unix.O_CLOEXEC + // Rather than just wrapping openatFile, open-code it so we can copy + // handle.Name(). 
+ reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) + if err != nil { + return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) + } + return os.NewFile(uintptr(reopenFd), handle.Name()), nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go new file mode 100644 index 00000000..921b3e1d --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go @@ -0,0 +1,141 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "testing" + + "golang.org/x/sys/unix" +) + +var ( + hasOpenat2Bool bool + hasOpenat2Once sync.Once + + testingForceHasOpenat2 *bool +) + +func hasOpenat2() bool { + if testing.Testing() && testingForceHasOpenat2 != nil { + return *testingForceHasOpenat2 + } + hasOpenat2Once.Do(func() { + fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, + }) + if err == nil { + hasOpenat2Bool = true + _ = unix.Close(fd) + } + }) + return hasOpenat2Bool +} + +func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { + // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve + // ".." while a mount or rename occurs anywhere on the system. This could + // happen spuriously, or as the result of an attacker trying to mess with + // us during lookup. + // + // In addition, scoped lookups have a "safety check" at the end of + // complete_walk which will return -EXDEV if the final path is not in the + // root. + return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && + (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) +} + +const scopedLookupMaxRetries = 10 + +func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) { + fullPath := dir.Name() + "/" + path + // Make sure we always set O_CLOEXEC. + how.Flags |= unix.O_CLOEXEC + var tries int + for tries < scopedLookupMaxRetries { + fd, err := unix.Openat2(int(dir.Fd()), path, how) + if err != nil { + if scopedLookupShouldRetry(how, err) { + // We retry a couple of times to avoid the spurious errors, and + // if we are being attacked then returning -EAGAIN is the best + // we can do. + tries++ + continue + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} + } + // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. + // NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise + // you'll get infinite recursion here. 
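As an aside for readers unfamiliar with `openat2(2)`: the sketch below shows the bare syscall pattern that `openat2File` wraps, including the bounded retry on `-EAGAIN`/`-EXDEV` discussed above (the rootfs path is hypothetical):

```go
//go:build linux

package main

import (
	"errors"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	root, err := os.Open("/srv/rootfs") // hypothetical rootfs
	if err != nil {
		log.Fatal(err)
	}
	defer root.Close()

	how := &unix.OpenHow{
		Flags:   unix.O_PATH | unix.O_CLOEXEC,
		Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
	}
	var fd int
	for tries := 0; tries < 10; tries++ { // mirrors scopedLookupMaxRetries
		fd, err = unix.Openat2(int(root.Fd()), "etc/passwd", how)
		if err == nil || !(errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) {
			break
		}
	}
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)
	log.Println("opened etc/passwd inside the root")
}
```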
+ if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { + if actualPath, err := rawProcSelfFdReadlink(fd); err == nil { + fullPath = actualPath + } + } + return os.NewFile(uintptr(fd), fullPath), nil + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack} +} + +func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) { + if !partial { + file, err := openat2File(root, unsafePath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + return file, "", err + } + return partialLookupOpenat2(root, unsafePath) +} + +// partialLookupOpenat2 is an alternative implementation of +// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a +// handle to the deepest existing child of the requested path within the root. +func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) { + // TODO: Implement this as a git-bisect-like binary search. + + unsafePath = filepath.ToSlash(unsafePath) // noop + endIdx := len(unsafePath) + var lastError error + for endIdx > 0 { + subpath := unsafePath[:endIdx] + + handle, err := openat2File(root, subpath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + if err == nil { + // Jump over the slash if we have a non-"" remainingPath. + if endIdx < len(unsafePath) { + endIdx += 1 + } + // We found a subpath! + return handle, unsafePath[endIdx:], lastError + } + if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { + // That path doesn't exist, let's try the next directory up. + endIdx = strings.LastIndexByte(subpath, '/') + lastError = err + continue + } + return nil, "", fmt.Errorf("open subpath: %w", err) + } + // If we couldn't open anything, the whole subpath is missing. Return a + // copy of the root fd so that the caller doesn't close this one by + // accident. + rootClone, err := dupFile(root) + if err != nil { + return nil, "", err + } + return rootClone, unsafePath, lastError +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go new file mode 100644 index 00000000..949fb5f2 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go @@ -0,0 +1,59 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +func dupFile(f *os.File) (*os.File, error) { + fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) + } + return os.NewFile(uintptr(fd), f.Name()), nil +} + +func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.O_CLOEXEC + fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode)) + if err != nil { + return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err} + } + // All of the paths we use with openatFile(2) are guaranteed to be + // lexically safe, so we can use path.Join here. 
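`partialLookupOpenat2` above peels components off the right-hand end of the path until an open succeeds; the loop shape on its own, independent of `openat2` (a toy demonstration):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	unsafePath := "a/b/c/d"
	// Try the full path first, then successively shorter prefixes.
	for endIdx := len(unsafePath); endIdx > 0; endIdx = strings.LastIndexByte(unsafePath[:endIdx], '/') {
		fmt.Println("try:", unsafePath[:endIdx])
	}
	// Output: a/b/c/d, a/b/c, a/b, a
}
```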
+	fullPath := filepath.Join(dir.Name(), path)
+	return os.NewFile(uintptr(fd), fullPath), nil
+}
+
+func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) {
+	var stat unix.Stat_t
+	if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil {
+		return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err}
+	}
+	return stat, nil
+}
+
+func readlinkatFile(dir *os.File, path string) (string, error) {
+	size := 4096
+	for {
+		linkBuf := make([]byte, size)
+		n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf)
+		if err != nil {
+			return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err}
+		}
+		if n != size {
+			return string(linkBuf[:n]), nil
+		}
+		// Possible truncation, resize the buffer.
+		size *= 2
+	}
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
new file mode 100644
index 00000000..adf0bd08
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
@@ -0,0 +1,474 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+func fstat(f *os.File) (unix.Stat_t, error) {
+	var stat unix.Stat_t
+	if err := unix.Fstat(int(f.Fd()), &stat); err != nil {
+		return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err}
+	}
+	return stat, nil
+}
+
+func fstatfs(f *os.File) (unix.Statfs_t, error) {
+	var statfs unix.Statfs_t
+	if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil {
+		return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err}
+	}
+	return statfs, nil
+}
+
+// The kernel guarantees that the root inode of a procfs mount has an
+// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO.
+const (
+	procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC
+	procRootIno    = 1      // PROC_ROOT_INO
+)
+
+func verifyProcRoot(procRoot *os.File) error {
+	if statfs, err := fstatfs(procRoot); err != nil {
+		return err
+	} else if statfs.Type != procSuperMagic {
+		return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
+	}
+	if stat, err := fstat(procRoot); err != nil {
+		return err
+	} else if stat.Ino != procRootIno {
+		return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino)
+	}
+	return nil
+}
+
+var (
+	hasNewMountApiBool bool
+	hasNewMountApiOnce sync.Once
+)
+
+func hasNewMountApi() bool {
+	hasNewMountApiOnce.Do(func() {
+		// All of the pieces of the new mount API we use (fsopen, fsconfig,
+		// fsmount, open_tree) were added together in Linux 5.2[1,2], so we can
+		// just check for one of the syscalls and the others should also be
+		// available.
+		//
+		// Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
+		// This is equivalent to openat(2), but tells us if open_tree is
+		// available (and thus all of the other basic new mount API syscalls).
+		// open_tree(2) is the most light-weight syscall to test here.
+		//
+		// [1]: merge commit 400913252d09
+		// [2]:
+		fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
+		if err == nil {
+			hasNewMountApiBool = true
+			_ = unix.Close(fd)
+		}
+	})
+	return hasNewMountApiBool
+}
+
+func fsopen(fsName string, flags int) (*os.File, error) {
+	// Make sure we always set O_CLOEXEC.
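`verifyProcRoot` above leans on two kernel invariants (`PROC_SUPER_MAGIC` and `PROC_ROOT_INO`); a standalone check of the host `/proc` using the same constants looks roughly like this (a sketch only, without the library's mount-handle hardening):

```go
//go:build linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stfs unix.Statfs_t
	if err := unix.Statfs("/proc", &stfs); err != nil {
		log.Fatal(err)
	}
	var st unix.Stat_t
	if err := unix.Stat("/proc", &st); err != nil {
		log.Fatal(err)
	}
	// 0x9fa0 is PROC_SUPER_MAGIC; inode 1 is PROC_ROOT_INO.
	if stfs.Type != 0x9fa0 || st.Ino != 1 {
		log.Fatal("/proc does not look like a real procfs root")
	}
	log.Println("/proc looks like procfs")
}
```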
+ flags |= unix.FSOPEN_CLOEXEC + fd, err := unix.Fsopen(fsName, flags) + if err != nil { + return nil, os.NewSyscallError("fsopen "+fsName, err) + } + return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil +} + +func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.FSMOUNT_CLOEXEC + fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) + if err != nil { + return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) + } + return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil +} + +func newPrivateProcMount() (*os.File, error) { + procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC) + if err != nil { + return nil, err + } + defer procfsCtx.Close() + + // Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors. + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") + + // Get an actual handle. + if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { + return nil, os.NewSyscallError("fsconfig create procfs", err) + } + return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) +} + +func openTree(dir *os.File, path string, flags uint) (*os.File, error) { + dirFd := -int(unix.EBADF) + dirName := "." + if dir != nil { + dirFd = int(dir.Fd()) + dirName = dir.Name() + } + // Make sure we always set O_CLOEXEC. + flags |= unix.OPEN_TREE_CLOEXEC + fd, err := unix.OpenTree(dirFd, path, flags) + if err != nil { + return nil, &os.PathError{Op: "open_tree", Path: path, Err: err} + } + return os.NewFile(uintptr(fd), dirName+"/"+path), nil +} + +func clonePrivateProcMount() (_ *os.File, Err error) { + // Try to make a clone without using AT_RECURSIVE if we can. If this works, + // we can be sure there are no over-mounts and so if the root is valid then + // we're golden. Otherwise, we have to deal with over-mounts. + procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE) + if err != nil || testingForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { + procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) + } + if err != nil { + return nil, fmt.Errorf("creating a detached procfs clone: %w", err) + } + defer func() { + if Err != nil { + _ = procfsHandle.Close() + } + }() + if err := verifyProcRoot(procfsHandle); err != nil { + return nil, err + } + return procfsHandle, nil +} + +func privateProcRoot() (*os.File, error) { + if !hasNewMountApi() || testingForceGetProcRootUnsafe() { + return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) + } + // Try to create a new procfs mount from scratch if we can. This ensures we + // can get a procfs mount even if /proc is fake (for whatever reason). + procRoot, err := newPrivateProcMount() + if err != nil || testingForcePrivateProcRootOpenTree(procRoot) { + // Try to clone /proc then... 
+		procRoot, err = clonePrivateProcMount()
+	}
+	return procRoot, err
+}
+
+var (
+	procRootHandle *os.File
+	procRootError  error
+	procRootOnce   sync.Once
+
+	errUnsafeProcfs = errors.New("unsafe procfs detected")
+)
+
+func unsafeHostProcRoot() (_ *os.File, Err error) {
+	procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if Err != nil {
+			_ = procRoot.Close()
+		}
+	}()
+	if err := verifyProcRoot(procRoot); err != nil {
+		return nil, err
+	}
+	return procRoot, nil
+}
+
+func doGetProcRoot() (*os.File, error) {
+	procRoot, err := privateProcRoot()
+	if err != nil {
+		// Fall back to using a /proc handle if making a private mount failed.
+		// If we have openat2, at least we can avoid some kinds of over-mount
+		// attacks, but without openat2 there's not much we can do.
+		procRoot, err = unsafeHostProcRoot()
+	}
+	return procRoot, err
+}
+
+func getProcRoot() (*os.File, error) {
+	procRootOnce.Do(func() {
+		procRootHandle, procRootError = doGetProcRoot()
+	})
+	return procRootHandle, procRootError
+}
+
+var (
+	haveProcThreadSelf     bool
+	haveProcThreadSelfOnce sync.Once
+)
+
+type procThreadSelfCloser func()
+
+// procThreadSelf returns a handle to /proc/thread-self/<subpath> (or an
+// equivalent handle on older kernels where /proc/thread-self doesn't exist).
+// Once finished with the handle, you must call the returned closer function
+// (runtime.UnlockOSThread). You must not pass the returned *os.File to other
+// Go threads or use the handle after calling the closer.
+//
+// This is similar to ProcThreadSelf from runc, but with extra hardening
+// applied and using *os.File.
+func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) {
+	haveProcThreadSelfOnce.Do(func() {
+		// If the kernel doesn't support thread-self, it doesn't matter which
+		// /proc handle we use.
+		_, err := fstatatFile(procRoot, "thread-self", unix.AT_SYMLINK_NOFOLLOW)
+		haveProcThreadSelf = (err == nil)
+	})
+
+	// We need to lock our thread until the caller is done with the handle
+	// because between getting the handle and using it we could get interrupted
+	// by the Go runtime and hit the case where the underlying thread is
+	// swapped out and the original thread is killed, resulting in
+	// pull-your-hair-out-hard-to-debug issues in the caller.
+	runtime.LockOSThread()
+	defer func() {
+		if Err != nil {
+			runtime.UnlockOSThread()
+		}
+	}()
+
+	// Figure out what prefix we want to use.
+	threadSelf := "thread-self/"
+	if !haveProcThreadSelf || testingForceProcSelfTask() {
+		// Pre-3.17 kernels don't have /proc/thread-self, so do it manually.
+		threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/"
+		if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || testingForceProcSelf() {
+			// In this case, we are running in a pid namespace that doesn't
+			// match the /proc mount we have. This can happen inside runc.
+			//
+			// Unfortunately, there is no nice way to get the correct TID to
+			// use here because of the age of the kernel, so we have to just
+			// use /proc/self and hope that it works.
+			threadSelf = "self/"
+		}
+	}
+
+	// Grab the handle.
+	var (
+		handle *os.File
+		err    error
+	)
+	if hasOpenat2() {
+		// We prefer being able to use RESOLVE_NO_XDEV if we can, to be
+		// absolutely sure we are operating on a clean /proc handle that
+		// doesn't have any cheeky overmounts that could trick us (including
+		// symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't
+		// strictly needed, but just use it since we have it.
+		//
+		// NOTE: /proc/self is technically a magic-link (the contents of the
+		// symlink are generated dynamically), but it doesn't use
+		// nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it.
+		//
+		// NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses
+		// procSelfFdReadlink to clean up the returned f.Name() if we use
+		// RESOLVE_IN_ROOT (which would lead to an infinite recursion).
+		handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{
+			Flags:   unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC,
+			Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
+		})
+		if err != nil {
+			return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+		}
+	} else {
+		handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+		if err != nil {
+			return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+		}
+		defer func() {
+			if Err != nil {
+				_ = handle.Close()
+			}
+		}()
+		// We can't detect bind-mounts of different parts of procfs on top of
+		// /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we
+		// aren't on the wrong filesystem here.
+		if statfs, err := fstatfs(handle); err != nil {
+			return nil, nil, err
+		} else if statfs.Type != procSuperMagic {
+			return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
+		}
+	}
+	return handle, runtime.UnlockOSThread, nil
+}
+
+var (
+	hasStatxMountIdBool bool
+	hasStatxMountIdOnce sync.Once
+)
+
+func hasStatxMountId() bool {
+	hasStatxMountIdOnce.Do(func() {
+		var (
+			stx unix.Statx_t
+			// We don't care which mount ID we get. The kernel will give us the
+			// unique one if it is supported.
+			wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
+		)
+		err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx)
+		hasStatxMountIdBool = (err == nil && (stx.Mask&wantStxMask != 0))
+	})
+	return hasStatxMountIdBool
+}
+
+func getMountId(dir *os.File, path string) (uint64, error) {
+	// If we don't have statx(STATX_MNT_ID*) support, we can't do anything.
+	if !hasStatxMountId() {
+		return 0, nil
+	}
+
+	var (
+		stx unix.Statx_t
+		// We don't care which mount ID we get. The kernel will give us the
+		// unique one if it is supported.
+		wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
+	)
+
+	err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx)
+	if stx.Mask&wantStxMask == 0 {
+		// It's not a kernel limitation, for some reason we couldn't get a
+		// mount ID. Assume it's some kind of attack.
+		err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs)
+	}
+	if err != nil {
+		return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err}
+	}
+	return stx.Mnt_id, nil
+}
+
+func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error {
+	// Get the mntId of our procfs handle.
+	expectedMountId, err := getMountId(procRoot, "")
+	if err != nil {
+		return err
+	}
+	// Get the mntId of the target magic-link.
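`getMountId` above queries the mount ID via `statx(2)`; stripped of the feature-detection and fallback logic, the raw query looks like this (a sketch):

```go
//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// Ask for either mount-ID flavour; the kernel fills in what it supports.
	mask := unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
	if err := unix.Statx(unix.AT_FDCWD, "/proc", 0, mask, &stx); err != nil {
		log.Fatal(err)
	}
	if stx.Mask&uint32(mask) == 0 {
		log.Fatal("kernel did not report a mount ID")
	}
	fmt.Printf("mount ID of /proc: %d\n", stx.Mnt_id)
}
```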
+	gotMountId, err := getMountId(dir, path)
+	if err != nil {
+		return err
+	}
+	// As long as the directory mount is alive, even with wrapping mount IDs,
+	// we would expect to see a different mount ID here. (Of course, if we're
+	// using unsafeHostProcRoot() then an attacker could change this after we
+	// did this check.)
+	if expectedMountId != gotMountId {
+		return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId)
+	}
+	return nil
+}
+
+func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) {
+	fdPath := fmt.Sprintf("fd/%d", fd)
+	procFdLink, closer, err := procThreadSelf(procRoot, fdPath)
+	if err != nil {
+		return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err)
+	}
+	defer procFdLink.Close()
+	defer closer()
+
+	// Try to detect if there is a mount on top of the magic-link. Since we
+	// use the handle directly rather than re-resolving the path, this should
+	// be safe in general (a mount on top of the path afterwards would not
+	// affect the handle itself) and will definitely be safe if we are
+	// using privateProcRoot() (at least since Linux 5.12[1], when anonymous
+	// mount namespaces were completely isolated from external mounts including
+	// mount propagation events).
+	//
+	// [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
+	// onto targets that reside on shared mounts").
+	if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil {
+		return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err)
+	}
+
+	// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit
+	// 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty
+	// relative pathnames").
+	return readlinkatFile(procFdLink, "")
+}
+
+func rawProcSelfFdReadlink(fd int) (string, error) {
+	procRoot, err := getProcRoot()
+	if err != nil {
+		return "", err
+	}
+	return doRawProcSelfFdReadlink(procRoot, fd)
+}
+
+func procSelfFdReadlink(f *os.File) (string, error) {
+	return rawProcSelfFdReadlink(int(f.Fd()))
+}
+
+var (
+	errPossibleBreakout = errors.New("possible breakout detected")
+	errInvalidDirectory = errors.New("wandered into deleted directory")
+	errDeletedInode     = errors.New("cannot verify path of deleted inode")
+)
+
+func isDeadInode(file *os.File) error {
+	// If the nlink of a file drops to 0, there is an attacker deleting
+	// directories during our walk, which could result in weird /proc values.
+	// It's better to error out in this case.
+	stat, err := fstat(file)
+	if err != nil {
+		return fmt.Errorf("check for dead inode: %w", err)
+	}
+	if stat.Nlink == 0 {
+		err := errDeletedInode
+		if stat.Mode&unix.S_IFMT == unix.S_IFDIR {
+			err = errInvalidDirectory
+		}
+		return fmt.Errorf("%w %q", err, file.Name())
+	}
+	return nil
+}
+
+func getUmask() int {
+	// umask is a per-thread property, but it is inherited by children, so we
+	// need to lock our OS thread to make sure that no other goroutine runs in
+	// this thread and no goroutines are spawned from this thread until we
+	// revert to the old umask.
+	//
+	// We could parse /proc/self/status to avoid this get-set problem, but
+	// /proc/thread-self requires LockOSThread anyway, so there's no real
+	// benefit over just using umask(2).
+ runtime.LockOSThread() + umask := unix.Umask(0) + unix.Umask(umask) + runtime.UnlockOSThread() + return umask +} + +func checkProcSelfFdPath(path string, file *os.File) error { + if err := isDeadInode(file); err != nil { + return err + } + actualPath, err := procSelfFdReadlink(file) + if err != nil { + return fmt.Errorf("get path of handle: %w", err) + } + if actualPath != path { + return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path) + } + return nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go b/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go new file mode 100644 index 00000000..a3aedf03 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go @@ -0,0 +1,68 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "os" + "testing" +) + +type forceGetProcRootLevel int + +const ( + forceGetProcRootDefault forceGetProcRootLevel = iota + forceGetProcRootOpenTree // force open_tree() + forceGetProcRootOpenTreeAtRecursive // force open_tree(AT_RECURSIVE) + forceGetProcRootUnsafe // force open() +) + +var testingForceGetProcRoot *forceGetProcRootLevel + +func testingCheckClose(check bool, f *os.File) bool { + if check { + if f != nil { + _ = f.Close() + } + return true + } + return false +} + +func testingForcePrivateProcRootOpenTree(f *os.File) bool { + return testing.Testing() && testingForceGetProcRoot != nil && + testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTree, f) +} + +func testingForcePrivateProcRootOpenTreeAtRecursive(f *os.File) bool { + return testing.Testing() && testingForceGetProcRoot != nil && + testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTreeAtRecursive, f) +} + +func testingForceGetProcRootUnsafe() bool { + return testing.Testing() && testingForceGetProcRoot != nil && + *testingForceGetProcRoot >= forceGetProcRootUnsafe +} + +type forceProcThreadSelfLevel int + +const ( + forceProcThreadSelfDefault forceProcThreadSelfLevel = iota + forceProcSelfTask + forceProcSelf +) + +var testingForceProcThreadSelf *forceProcThreadSelfLevel + +func testingForceProcSelfTask() bool { + return testing.Testing() && testingForceProcThreadSelf != nil && + *testingForceProcThreadSelf >= forceProcSelfTask +} + +func testingForceProcSelf() bool { + return testing.Testing() && testingForceProcThreadSelf != nil && + *testingForceProcThreadSelf >= forceProcSelf +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go index a82a5eae..6e27c7dd 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -1,4 +1,4 @@ -// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 36315d42..5f93eeb4 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -10,6 +10,7 @@ Aaron Huslage Aaron L. 
Xu Aaron Lehmann Aaron Welch +Aaron Yoshitake Abel Muiño Abhijeet Kasurde Abhinandan Prativadi @@ -62,6 +63,7 @@ alambike Alan Hoyle Alan Scherger Alan Thompson +Alano Terblanche Albert Callarisa Albert Zhang Albin Kerouanton @@ -141,6 +143,7 @@ Andreas Tiefenthaler Andrei Gherzan Andrei Ushakov Andrei Vagin +Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> Andrew C. Bodine Andrew Clay Shafer Andrew Duckworth @@ -193,6 +196,7 @@ Anton Löfgren Anton Nikitin Anton Polonskiy Anton Tiurin +Antonio Aguilar Antonio Murdaca Antonis Kalipetis Antony Messerli @@ -221,7 +225,6 @@ Avi Das Avi Kivity Avi Miller Avi Vaid -ayoshitake Azat Khuyiyakhmetov Bao Yonglei Bardia Keyoumarsi @@ -316,6 +319,7 @@ Burke Libbey Byung Kang Caleb Spare Calen Pennington +Calvin Liu Cameron Boehmer Cameron Sparr Cameron Spear @@ -362,6 +366,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chentianze Chenyang Yan chenyuzhu Chetan Birajdar @@ -409,6 +414,7 @@ Christopher Crone Christopher Currie Christopher Jones Christopher Latham +Christopher Petito Christopher Rigor Christy Norman Chun Chen @@ -777,6 +783,7 @@ Gabriel L. Somlo Gabriel Linder Gabriel Monroy Gabriel Nicolas Avellaneda +Gabriel Tomitsuka Gaetan de Villele Galen Sampson Gang Qiao @@ -792,6 +799,7 @@ Geoff Levand Geoffrey Bachelet Geon Kim George Kontridze +George Ma George MacRorie George Xie Georgi Hristozov @@ -913,6 +921,7 @@ Illo Abdulrahim Ilya Dmitrichenko Ilya Gusev Ilya Khlopotov +imalasong <2879499479@qq.com> imre Fitos inglesp Ingo Gottwald @@ -930,6 +939,7 @@ J Bruni J. Nunn Jack Danger Canty Jack Laxson +Jack Walker <90711509+j2walker@users.noreply.github.com> Jacob Atzen Jacob Edelman Jacob Tomlinson @@ -989,6 +999,7 @@ Jason Shepherd Jason Smith Jason Sommer Jason Stangroome +Jasper Siepkes Javier Bassi jaxgeller Jay @@ -1100,6 +1111,7 @@ Jon Johnson Jon Surrell Jon Wedaman Jonas Dohse +Jonas Geiler Jonas Heinrich Jonas Pfenniger Jonathan A. Schweder @@ -1267,6 +1279,7 @@ Lakshan Perera Lalatendu Mohanty Lance Chen Lance Kinley +Lars Andringa Lars Butler Lars Kellogg-Stedman Lars R. Damerow @@ -1673,6 +1686,7 @@ Patrick Böänziger Patrick Devine Patrick Haas Patrick Hemmer +Patrick St. laurent Patrick Stapleton Patrik Cyvoct pattichen @@ -1878,6 +1892,7 @@ Royce Remer Rozhnov Alexandr Rudolph Gottesheim Rui Cao +Rui JingAn Rui Lopes Ruilin Li Runshen Zhu @@ -2184,6 +2199,7 @@ Tomek MaÅ„ko Tommaso Visconti Tomoya Tabuchi Tomáš HrÄka +Tomáš Virtus tonic Tonny Xu Tony Abboud @@ -2228,6 +2244,7 @@ Victor I. Wood Victor Lyuboslavsky Victor Marmol Victor Palma +Victor Toni Victor Vieux Victoria Bialas Vijaya Kumar K @@ -2279,6 +2296,7 @@ Wassim Dhif Wataru Ishida Wayne Chang Wayne Song +weebney Weerasak Chongnguluam Wei Fu Wei Wu diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index b11c2fe0..f831735f 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.45" + DefaultVersion = "1.46" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". 
Note that the daemon
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 5677340d..78f0ce1f 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -19,10 +19,10 @@ produces:
 consumes:
   - "application/json"
   - "text/plain"
-basePath: "/v1.45"
+basePath: "/v1.46"
 info:
   title: "Docker Engine API"
-  version: "1.45"
+  version: "1.46"
   x-logo:
     url: "https://docs.docker.com/assets/images/logo-docker-main.png"
   description: |
@@ -55,8 +55,8 @@ info:
     the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
     is returned.
 
-    If you omit the version-prefix, the current version of the API (v1.45) is used.
-    For example, calling `/info` is the same as calling `/v1.45/info`. Using the
+    If you omit the version-prefix, the current version of the API (v1.46) is used.
+    For example, calling `/info` is the same as calling `/v1.46/info`. Using the
     API without a version-prefix is deprecated and will be removed in a future
     release.
 
     Engine releases in the near future should support this version of the API,
@@ -442,6 +442,21 @@ definitions:
         Mode:
           description: "The permission mode for the tmpfs mount in an integer."
          type: "integer"
+        Options:
+          description: |
+            The options to be passed to the tmpfs mount. An array of arrays.
+            Flag options should be provided as 1-length arrays. Other types
+            should be provided as 2-length arrays, where the first item is
+            the key and the second the value.
+          type: "array"
+          items:
+            type: "array"
+            minItems: 1
+            maxItems: 2
+            items:
+              type: "string"
+          example:
+            [["noexec"]]
 
   RestartPolicy:
     description: |
@@ -1198,13 +1213,6 @@ definitions:
   ContainerConfig:
     description: |
       Configuration for a container that is portable between hosts.
-
-      When used as `ContainerConfig` field in an image, `ContainerConfig` is an
-      optional field containing the configuration of the container that was last
-      committed when creating the image.
-
-      Previous versions of Docker builder used this field to store build cache,
-      and it is not in active use anymore.
     type: "object"
     properties:
       Hostname:
@@ -1363,6 +1371,289 @@ definitions:
         type: "string"
         example: ["/bin/sh", "-c"]
 
+  ImageConfig:
+    description: |
+      Configuration of the image. These fields are used as defaults
+      when starting a container from the image.
+    type: "object"
+    properties:
+      Hostname:
+        description: |
+          The hostname to use for the container, as a valid RFC 1123 hostname.
+
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.47. + type: "string" + example: "" + Domainname: + description: | + The domain name to use for the container. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.47. + type: "string" + example: "" + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + AttachStdin: + description: | + Whether to attach to `stdin`. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + AttachStdout: + description: | + Whether to attach to `stdout`. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + AttachStderr: + description: | + Whether to attach to `stderr`. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + OpenStdin: + description: | + Open `stdin` + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + StdinOnce: + description: | + Close `stdin` after one attached client disconnects. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.47. + type: "string" + default: "" + example: "" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: | + Disable networking for the container. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + x-nullable: true + MacAddress: + description: | + MAC address of the container. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.47. + type: "string" + default: "" + example: "" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: | + Timeout to stop a container in seconds. + +
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.47. + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. + example: + "Hostname": "" + "Domainname": "" + "User": "web:web" + "AttachStdin": false + "AttachStdout": false + "AttachStderr": false + "ExposedPorts": { + "80/tcp": {}, + "443/tcp": {} + } + "Tty": false + "OpenStdin": false + "StdinOnce": false + "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + "Cmd": ["/bin/sh"] + "Healthcheck": { + "Test": ["string"], + "Interval": 0, + "Timeout": 0, + "Retries": 0, + "StartPeriod": 0, + "StartInterval": 0 + } + "ArgsEscaped": true + "Image": "" + "Volumes": { + "/app/data": {}, + "/app/config": {} + } + "WorkingDir": "/public/" + "Entrypoint": [] + "OnBuild": [] + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + "StopSignal": "SIGTERM" + "Shell": ["/bin/sh", "-c"] + NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for @@ -1758,21 +2049,6 @@ definitions: format: "dateTime" x-nullable: true example: "2022-02-04T21:20:12.497794809Z" - Container: - description: | - The ID of the container that was used to create the image. - - Depending on how the image was created, this field may be empty. - - **Deprecated**: this field is kept for backward compatibility, but - will be removed in API v1.45. - type: "string" - example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" - ContainerConfig: - description: | - **Deprecated**: this field is kept for backward compatibility, but - will be removed in API v1.45. - $ref: "#/definitions/ContainerConfig" DockerVersion: description: | The version of Docker that was used to build the image. @@ -1780,7 +2056,7 @@ definitions: Depending on how the image was created, this field may be empty. type: "string" x-nullable: false - example: "20.10.7" + example: "27.0.1" Author: description: | Name of the author that was specified when committing the image, or as @@ -1789,7 +2065,7 @@ definitions: x-nullable: false example: "" Config: - $ref: "#/definitions/ContainerConfig" + $ref: "#/definitions/ImageConfig" Architecture: description: | Hardware CPU architecture that the image runs on. @@ -1866,6 +2142,7 @@ definitions: format: "dateTime" example: "2022-02-28T14:40:02.623929178Z" x-nullable: true + ImageSummary: type: "object" x-go-name: "Summary" @@ -2179,72 +2456,129 @@ definitions: type: "object" properties: Name: + description: | + Name of the network. type: "string" + example: "my_network" Id: + description: | + ID that uniquely identifies a network on a single machine. type: "string" + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" + example: "2016-10-19T04:33:30.360899459Z" Scope: + description: | + The level at which the network exists (e.g. 
`swarm` for cluster-wide + or `local` for machine level) type: "string" + example: "local" Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). type: "string" + example: "overlay" EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. type: "boolean" + example: false IPAM: $ref: "#/definitions/IPAM" Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. type: "boolean" + default: false + example: false Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. type: "boolean" + default: false + example: false Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. type: "boolean" + default: false Containers: + description: | + Contains endpoints attached to the network. type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" Options: + description: | + Network-specific options used when creating the network. type: "object" additionalProperties: type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" Labels: + description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" - example: - Name: "net01" - Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: "2016-10-19T04:33:30.360899459Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - IPAM: - Driver: "default" - Config: - - Subnet: "172.19.0.0/16" - Gateway: "172.19.0.1" - Options: - foo: "bar" - Internal: false - Attachable: false - Ingress: false - Containers: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types.
+ type: "array" + items: + $ref: "#/definitions/PeerInfo" + x-nullable: true + # TODO: Add Services (only present when "verbose" is set). + + ConfigReference: + description: | + The config-only network source to provide the configuration for + this network. + type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + example: "config_only_network_01" + IPAM: type: "object" properties: @@ -2252,6 +2586,7 @@ definitions: description: "Name of the IPAM driver to use." type: "string" default: "default" + example: "default" Config: description: | List of IPAM configuration options, specified as a map: @@ -2267,16 +2602,21 @@ definitions: type: "object" additionalProperties: type: "string" + example: + foo: "bar" IPAMConfig: type: "object" properties: Subnet: type: "string" + example: "172.20.0.0/16" IPRange: type: "string" + example: "172.20.10.0/24" Gateway: type: "string" + example: "172.20.10.11" AuxiliaryAddresses: type: "object" additionalProperties: @@ -2287,14 +2627,53 @@ definitions: properties: Name: type: "string" + example: "container_1" EndpointID: type: "string" + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: type: "string" + example: "02:42:ac:13:00:02" IPv4Address: type: "string" + example: "172.19.0.2/16" IPv6Address: type: "string" + example: "" + + PeerInfo: + description: | + PeerInfo represents one peer of an overlay network. + type: "object" + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. + type: "string" + example: "10.133.77.91" + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the network" + type: "string" + x-nullable: false + example: "" BuildInfo: type: "object" @@ -2495,6 +2874,17 @@ definitions: example: - "server_x" - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" # Operational data NetworkID: @@ -2538,17 +2928,6 @@ definitions: type: "integer" format: "int64" example: 64 - DriverOpts: - description: | - DriverOpts is a mapping of driver options and values. These options - are passed directly to the driver and are driver specific. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" DNSNames: description: | List of all DNS names an endpoint has on a specific network. This @@ -3720,6 +4099,13 @@ definitions: but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID.
type: "string" + OomScoreAdj: + type: "integer" + format: "int64" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 0 Configs: description: | Configs contains references to zero or more configs that will be @@ -3916,7 +4302,7 @@ definitions: `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` - `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational @@ -4639,6 +5025,12 @@ definitions: properties: NetworkMode: type: "string" + Annotations: + description: "Arbitrary key-value metadata attached to container" + type: "object" + x-nullable: true + additionalProperties: + type: "string" NetworkSettings: description: "A summary of the container's network settings" type: "object" @@ -4907,7 +5299,7 @@ definitions: Version of the component type: "string" x-nullable: false - example: "19.03.12" + example: "27.0.1" Details: description: | Key/value pairs of strings with additional information about the @@ -4921,17 +5313,17 @@ definitions: Version: description: "The version of the daemon" type: "string" - example: "19.03.12" + example: "27.0.1" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" - example: "1.40" + example: "1.46" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" - example: "1.12" + example: "1.24" GitCommit: description: | The Git commit of the source code that was used to build the daemon @@ -4942,7 +5334,7 @@ definitions: The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" - example: "go1.13.14" + example: "go1.21.12" Os: description: | The operating system that the daemon is running on ("linux" or "windows") @@ -4959,7 +5351,7 @@ definitions: This field is omitted when empty. type: "string" - example: "4.19.76-linuxkit" + example: "6.8.0-31-generic" Experimental: description: | Indicates if the daemon is started with experimental features enabled. @@ -5165,13 +5557,13 @@ definitions: information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. type: "string" - example: "4.9.38-moby" + example: "6.8.0-31-generic" OperatingSystem: description: | - Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" or "Windows Server 2016 Datacenter" type: "string" - example: "Alpine Linux v3.5" + example: "Ubuntu 24.04 LTS" OSVersion: description: | Version of the host's operating system @@ -5182,7 +5574,7 @@ definitions: > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" - example: "16.04" + example: "24.04" OSType: description: | Generic type of the operating system of the host, as returned by the @@ -5284,7 +5676,7 @@ definitions: description: | Version string of the daemon. 
type: "string" - example: "24.0.2" + example: "27.0.1" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) @@ -5436,6 +5828,58 @@ definitions: example: - "/etc/cdi" - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + x-nullable: true + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. + type: "object" + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon. Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct @@ -6288,6 +6732,8 @@ paths: SizeRootFs: 0 HostConfig: NetworkMode: "default" + Annotations: + io.kubernetes.docker.type: "container" NetworkSettings: Networks: bridge: @@ -6323,6 +6769,9 @@ paths: SizeRootFs: 0 HostConfig: NetworkMode: "default" + Annotations: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" NetworkSettings: Networks: bridge: @@ -6351,6 +6800,9 @@ paths: SizeRootFs: 0 HostConfig: NetworkMode: "default" + Annotations: + io.kubernetes.image.id: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + io.kubernetes.image.name: "ubuntu:latest" NetworkSettings: Networks: bridge: @@ -6379,6 +6831,8 @@ paths: SizeRootFs: 0 HostConfig: NetworkMode: "default" + Annotations: + io.kubernetes.config.source: "api" NetworkSettings: Networks: bridge: @@ -8656,6 +9110,11 @@ paths: details. type: "string" required: true + - name: "platform" + in: "query" + description: "Select a platform-specific manifest to be pushed.
OCI platform (JSON encoded)" + type: "string" + x-nullable: true tags: ["Image"] /images/{name}/tag: post: @@ -9104,7 +9563,7 @@ paths: Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` - Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` @@ -10060,19 +10519,9 @@ paths: - "application/json" responses: 201: - description: "No error" + description: "Network created successfully" schema: - type: "object" - title: "NetworkCreateResponse" - properties: - Id: - description: "The ID of the created network." - type: "string" - Warning: - type: "string" - example: - Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" - Warning: "" + $ref: "#/definitions/NetworkCreateResponse" 400: description: "bad parameter" schema: @@ -10104,14 +10553,17 @@ paths: Name: description: "The network's name." type: "string" - CheckDuplicate: - description: | - Deprecated: CheckDuplicate is now always enabled. - type: "boolean" + example: "my_network" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" Internal: description: "Restrict external access to the network." type: "boolean" @@ -10120,55 +10572,55 @@ paths: Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" + example: true Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" + example: true Options: description: "Network specific options to be used by the drivers." type: "object" additionalProperties: type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" - example: - Name: "isolated_nw" - CheckDuplicate: false - Driver: "bridge" - EnableIPv6: true - IPAM: - Driver: "default" - Config: - - Subnet: "172.20.0.0/16" - IPRange: "172.20.10.0/24" - Gateway: "172.20.10.11" - - Subnet: "2001:db8:abcd::/64" - Gateway: "2001:db8:abcd::1011" - Options: - foo: "bar" - Internal: true - Attachable: false - Ingress: false - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: @@ -11274,6 +11726,7 @@ paths: Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" + OomScoreAdj: 0 LogDriver: Name: "json-file" Options: @@ -11426,6 +11879,7 @@ paths: Image: "busybox" Args: - "top" + OomScoreAdj: 0 Resources: Limits: {} Reservations: {} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 882201f0..df791f02 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -2,43 +2,15 @@ package types // import "github.com/docker/docker/api/types" import ( "bufio" + "context" "io" "net" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" - units "github.com/docker/go-units" ) -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - // NewHijackedResponse initializes a HijackedResponse type func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} @@ -101,7 +73,7 @@ type ImageBuildOptions struct { NetworkMode string ShmSize int64 Dockerfile string - Ulimits []*units.Ulimit + Ulimits []*container.Ulimit // BuildArgs needs to be a *string instead of just a string so that // we can tell the difference between "" (empty string) and no value // at all (nil). See the parsing of buildArgs in @@ -122,7 +94,7 @@ type ImageBuildOptions struct { Target string SessionID string Platform string - // Version specifies the version of the unerlying builder to use + // Version specifies the version of the underlying builder to use Version BuilderVersion // BuildID is an optional identifier that can be passed together with the // build request.
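The /networks/create changes above pair with the typed network.CreateOptions and network.CreateResponse that this diff introduces on the Go side (see the network package changes further down). A minimal sketch of creating a network through the updated client; the network name and label are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// CheckDuplicate is gone from the options: duplicate checking is
	// always enabled since API v1.44, so there is nothing left to set.
	resp, err := cli.NetworkCreate(context.Background(), "my_network", network.CreateOptions{
		Driver: "bridge",
		Labels: map[string]string{"com.example.some-label": "some-value"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// resp is the typed network.CreateResponse (Id and Warning).
	fmt.Println("created:", resp.ID, "warning:", resp.Warning)
}
```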
The same identifier can be used to gracefully cancel the @@ -157,34 +129,13 @@ type ImageBuildResponse struct { OSType string } -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - // RequestPrivilegeFunc is a function interface that // clients can supply to retry operations after // getting an authorization error. // This function returns the registry authentication // header value in base 64 format, or an error // if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} +type RequestPrivilegeFunc func(context.Context) (string, error) // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { @@ -289,7 +240,7 @@ type PluginInstallOptions struct { RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry RemoteRef string // RemoteRef is the plugin name on the registry PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error) Args []string } diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index 945b6efa..00000000 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,18 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index 86f46b74..d6b03e8b 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -1,7 +1,6 @@ package container // import "github.com/docker/docker/api/types/container" import ( - "io" "time" "github.com/docker/docker/api/types/strslice" @@ -36,14 +35,6 @@ type StopOptions struct { // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig = dockerspec.HealthcheckConfig -// ExecStartOptions holds the options to start container's exec. 
-type ExecStartOptions struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - ConsoleSize *[2]uint `json:",omitempty"` -} - // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". diff --git a/vendor/github.com/docker/docker/api/types/container/container.go b/vendor/github.com/docker/docker/api/types/container/container.go new file mode 100644 index 00000000..711af12c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container.go @@ -0,0 +1,44 @@ +package container + +import ( + "io" + "os" + "time" +) + +// PruneReport contains the response for Engine API: +// POST "/containers/prune" +type PruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// PathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +type PathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats +// for a container, as produced by the GET "/stats" endpoint. +// +// The OSType field is set to the server's platform to allow +// platform-specific handling of the response. +// +// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse]. +type StatsResponseReader struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/create_request.go b/vendor/github.com/docker/docker/api/types/container/create_request.go new file mode 100644 index 00000000..e98dd6ad --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/create_request.go @@ -0,0 +1,13 @@ +package container + +import "github.com/docker/docker/api/types/network" + +// CreateRequest is the request message sent to the server for container +// create calls. It is a config wrapper that holds the container [Config] +// (portable) and the corresponding [HostConfig] (non-portable) and +// [network.NetworkingConfig]. +type CreateRequest struct { + *Config + HostConfig *HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/exec.go b/vendor/github.com/docker/docker/api/types/container/exec.go new file mode 100644 index 00000000..96093eb5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/exec.go @@ -0,0 +1,43 @@ +package container + +// ExecOptions is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecOptions struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. 
+ ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// ExecStartOptions is a temp struct used by execStart +// Config fields are part of ExecConfig in runconfig package +type ExecStartOptions struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` +} + +// ExecAttachOptions is a temp struct used by execAttach. +// +// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached. +type ExecAttachOptions = ExecStartOptions + +// ExecInspect holds information returned by exec inspect. +type ExecInspect struct { + ExecID string `json:"ID"` + ContainerID string + Running bool + ExitCode int + Pid int +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index efb96266..727da883 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -360,6 +360,12 @@ type LogConfig struct { Config map[string]string } +// Ulimit is an alias for [units.Ulimit], which may be moving to a different +// location or become a local type. This alias is to help transitioning. +// +// Users are recommended to use this alias instead of using [units.Ulimit] directly. +type Ulimit = units.Ulimit + // Resources contains container's resources (cgroups config, ulimits...) type Resources struct { // Applicable to all platforms @@ -387,14 +393,14 @@ type Resources struct { // KernelMemory specifies the kernel memory limit (in bytes) for the container. // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. - KernelMemory int64 `json:",omitempty"` - KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container + KernelMemory int64 `json:",omitempty"` + KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
+ Ulimits []*Ulimit // List of ulimits to be set in the container // Applicable to Windows CPUCount int64 `json:"CpuCount"` // CPU count diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 42132923..cdee49ea 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -9,24 +9,6 @@ func (i Isolation) IsValid() bool { return i.IsDefault() } -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return network.NetworkBridge - } else if n.IsHost() { - return network.NetworkHost - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return network.NetworkNone - } else if n.IsDefault() { - return network.NetworkDefault - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - // IsBridge indicates whether container uses the bridge network stack func (n NetworkMode) IsBridge() bool { return n == network.NetworkBridge @@ -41,3 +23,23 @@ func (n NetworkMode) IsHost() bool { return n == network.NetworkHost } func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() } + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + switch { + case n.IsDefault(): + return network.NetworkDefault + case n.IsBridge(): + return network.NetworkBridge + case n.IsHost(): + return network.NetworkHost + case n.IsNone(): + return network.NetworkNone + case n.IsContainer(): + return "container" + case n.IsUserDefined(): + return n.UserDefined() + default: + return "" + } +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index 154667f4..f0854554 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -2,6 +2,11 @@ package container // import "github.com/docker/docker/api/types/container" import "github.com/docker/docker/api/types/network" +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT func (n NetworkMode) IsBridge() bool { @@ -19,24 +24,24 @@ func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() } -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { - if n.IsDefault() { + switch { + case n.IsDefault(): return network.NetworkDefault - } else if n.IsBridge() { + case n.IsBridge(): return network.NetworkNat - } else if n.IsNone() { + case n.IsHost(): + // Windows currently doesn't support host network-mode, so + // this would currently never happen.
+ return network.NetworkHost + case n.IsNone(): return network.NetworkNone - } else if n.IsContainer() { + case n.IsContainer(): return "container" - } else if n.IsUserDefined() { + case n.IsUserDefined(): return n.UserDefined() + default: + return "" } - - return "" } diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/container/stats.go similarity index 96% rename from vendor/github.com/docker/docker/api/types/stats.go rename to vendor/github.com/docker/docker/api/types/container/stats.go index 20daebed..3b3fb131 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/docker/docker/api/types/container/stats.go @@ -1,6 +1,4 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" +package container import "time" @@ -169,8 +167,10 @@ type Stats struct { MemoryStats MemoryStats `json:"memory_stats,omitempty"` } -// StatsJSON is newly used Networks -type StatsJSON struct { +// StatsResponse is newly used Networks. +// +// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801 +type StatsResponse struct { Stats Name string `json:"name,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 6dbcd922..e225df4e 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,4 +1,5 @@ package events // import "github.com/docker/docker/api/types/events" +import "github.com/docker/docker/api/types/filters" // Type is used for event-types. type Type string @@ -125,3 +126,10 @@ type Message struct { Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` } + +// ListOptions holds parameters to filter events with. +type ListOptions struct { + Since string + Until string + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/image/image.go b/vendor/github.com/docker/docker/api/types/image/image.go index 167df28c..abb7ffd8 100644 --- a/vendor/github.com/docker/docker/api/types/image/image.go +++ b/vendor/github.com/docker/docker/api/types/image/image.go @@ -1,9 +1,47 @@ package image -import "time" +import ( + "io" + "time" +) // Metadata contains engine-local data about the image. type Metadata struct { // LastTagTime is the date and time at which the image was last tagged. LastTagTime time.Time `json:",omitempty"` } + +// PruneReport contains the response for Engine API: +// POST "/images/prune" +type PruneReport struct { + ImagesDeleted []DeleteResponse + SpaceReclaimed uint64 +} + +// LoadResponse returns information to the client about a load process. +// +// TODO(thaJeztah): remove this type, and just use an io.ReadCloser +// +// This type was added in https://github.com/moby/moby/pull/18878, related +// to https://github.com/moby/moby/issues/19177; +// +// Make docker load to output json when the response content type is json +// Swarm hijacks the response from docker load and returns JSON rather +// than plain text like the Engine does. This makes the API library to return +// information to figure that out. 
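The options type for the events endpoint moves from types.EventsOptions to events.ListOptions (added just above); the fields themselves are unchanged, so callers only swap the import and type name. A minimal sketch of streaming image events with the new type, assuming a reachable daemon (the filter value is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// events.ListOptions replaces the removed types.EventsOptions;
	// Since, Until, and Filters carry over as-is.
	msgs, errs := cli.Events(context.Background(), events.ListOptions{
		Filters: filters.NewArgs(filters.Arg("type", "image")),
	})
	for {
		select {
		case m := <-msgs:
			fmt.Println(m.Type, m.Action, m.Actor.ID)
		case err := <-errs:
			log.Fatal(err) // io.EOF when the daemon closes the stream
		}
	}
}
```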
+// +// However the "load" endpoint unconditionally returns JSON; +// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255 +// +// PR https://github.com/moby/moby/pull/21959 made the response-type depend +// on whether "quiet" was set, but this logic got changed in a follow-up +// https://github.com/moby/moby/pull/25557, which made the JSON response-type +// unconditional, but made the output depend on whether "quiet" was set. +// +// We should deprecate the "quiet" option, as it's really a client +// responsibility. +type LoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index c6b1f351..8e32c9af 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -1,6 +1,18 @@ package image -import "github.com/docker/docker/api/types/filters" +import ( + "context" + "io" + + "github.com/docker/docker/api/types/filters" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImportSource holds source information for ImageImport +type ImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} // ImportOptions holds information to import images from the client host. type ImportOptions struct { @@ -27,12 +39,28 @@ type PullOptions struct { // privilege request fails. // // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. - PrivilegeFunc func() (string, error) + PrivilegeFunc func(context.Context) (string, error) Platform string } // PushOptions holds information to push images. -type PushOptions PullOptions +type PushOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. + PrivilegeFunc func(context.Context) (string, error) + + // Platform is an optional field that selects a specific platform to push + // when the image is a multi-platform image. + // Using this will only push a single platform-specific manifest. + Platform *ocispec.Platform `json:",omitempty"` +} // ListOptions holds parameters to list images with. type ListOptions struct { diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 6fe04da2..c68dcf65 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -119,7 +119,11 @@ type TmpfsOptions struct { SizeBytes int64 `json:",omitempty"` // Mode of the tmpfs upon creation Mode os.FileMode `json:",omitempty"` - + // Options to be passed to the tmpfs mount. An array of arrays. Flag + // options should be provided as 1-length arrays. Other types should be + // provided as 2-length arrays, where the first item is the key and the + // second the value.
+ Options [][]string `json:",omitempty"` // TODO(stevvooe): There are several more tmpfs flags, specified in the // daemon, that are accepted. Only the most basic are added for now. // diff --git a/vendor/github.com/docker/docker/api/types/network/create_response.go b/vendor/github.com/docker/docker/api/types/network/create_response.go new file mode 100644 index 00000000..c32b35bf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/create_response.go @@ -0,0 +1,19 @@ +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse NetworkCreateResponse +// +// OK response to NetworkCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created network. + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the network + // Required: true + Warning string `json:"Warning"` +} diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go index 9edd1c38..0fbb40b3 100644 --- a/vendor/github.com/docker/docker/api/types/network/endpoint.go +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -18,6 +18,7 @@ type EndpointSettings struct { // Once the container is running, it becomes operational data (it may contain a // generated address). MacAddress string + DriverOpts map[string]string // Operational data NetworkID string EndpointID string @@ -27,7 +28,6 @@ type EndpointSettings struct { IPv6Gateway string GlobalIPv6Address string GlobalIPv6PrefixLen int - DriverOpts map[string]string // DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to // generate PTR records. DNSNames []string diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index f1f300f3..c8db97a7 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,6 +1,8 @@ package network // import "github.com/docker/docker/api/types/network" import ( + "time" + "github.com/docker/docker/api/types/filters" ) @@ -17,6 +19,82 @@ const ( NetworkNat = "nat" ) +// CreateRequest is the request message sent to the server for network create call. +type CreateRequest struct { + CreateOptions + Name string // Name is the requested name of the network. + + // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client + // package to older daemons. + CheckDuplicate *bool `json:",omitempty"` +} + +// CreateOptions holds options to create a network. +type CreateOptions struct { + Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6. + IPAM *IPAM // IPAM is the network's IP Address Management. + Internal bool // Internal represents if the network is used internal only. + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+ ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. + Options map[string]string // Options specifies the network-specific options to use when creating the network. + Labels map[string]string // Labels holds metadata specific to the network being created. +} + +// ListOptions holds parameters to filter the list of networks with. +type ListOptions struct { + Filters filters.Args +} + +// InspectOptions holds parameters to inspect a network. +type InspectOptions struct { + Scope string + Verbose bool +} + +// ConnectOptions represents the data to be used to connect a container to the +// network. +type ConnectOptions struct { + Container string + EndpointConfig *EndpointSettings `json:",omitempty"` +} + +// DisconnectOptions represents the data to be used to disconnect a container +// from the network. +type DisconnectOptions struct { + Container string + Force bool +} + +// Inspect is the body of the "get network" http response message. +type Inspect struct { + Name string // Name is the name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network was created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]ServiceInfo `json:",omitempty"` +} + +// Summary is used as response when listing networks. It currently is an alias +// for [Inspect], but may diverge in the future, as not all information may +// be included when listing networks. +type Summary = Inspect // Address represents an IP address type Address struct { Addr string @@ -45,6 +123,16 @@ type ServiceInfo struct { Tasks []Task } +// EndpointResource contains network resources allocated and used for a +// container in a network.
+type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + // NetworkingConfig represents the container's networking configuration for each of its interfaces // Carries the networking configs specified in the `docker run` and `docker network connect` commands type NetworkingConfig struct { @@ -70,3 +158,9 @@ var acceptedFilters = map[string]bool{ func ValidateFilters(filter filters.Args) error { return filter.Validate(acceptedFilters) } + +// PruneReport contains the response for Engine API: +// POST "/networks/prune" +type PruneReport struct { + NetworksDeleted []string +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 6bbae93e..75ee07b1 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -84,32 +84,6 @@ type IndexInfo struct { Official bool } -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated. - // - // Deprecated: the "is_automated" field is deprecated and will always be "false". - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} - // DistributionInspect describes the result obtained from contacting the // registry to retrieve image metadata type DistributionInspect struct { diff --git a/vendor/github.com/docker/docker/api/types/registry/search.go b/vendor/github.com/docker/docker/api/types/registry/search.go new file mode 100644 index 00000000..a0a1eec5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/search.go @@ -0,0 +1,47 @@ +package registry + +import ( + "context" + + "github.com/docker/docker/api/types/filters" +) + +// SearchOptions holds parameters to search images with. +type SearchOptions struct { + RegistryAuth string + + // PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can + // supply to retry operations after getting an authorization error. + // + // It must return the registry authentication header value in base64 + // format, or an error if the privilege request fails. + PrivilegeFunc func(context.Context) (string, error) + Filters filters.Args + Limit int +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
+ IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated. + // + // Deprecated: the "is_automated" field is deprecated and will always be "false". + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection of search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 65f61d2d..30e3de70 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -5,7 +5,6 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" ) // DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) @@ -115,5 +114,6 @@ type ContainerSpec struct { Sysctls map[string]string `json:",omitempty"` CapabilityAdd []string `json:",omitempty"` CapabilityDrop []string `json:",omitempty"` - Ulimits []*units.Ulimit `json:",omitempty"` + Ulimits []*container.Ulimit `json:",omitempty"` + OomScoreAdj int64 `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go index 89d4a009..c66a2afb 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -75,8 +75,7 @@ type Info struct { DefaultAddressPools []NetworkAddressPool `json:",omitempty"` CDISpecDirs []string - // Legacy API fields for older API versions. - legacyFields + Containerd *ContainerdInfo `json:",omitempty"` // Warnings contains a slice of warnings that occurred while collecting // system information. These warnings are intended to be informational @@ -85,8 +84,41 @@ type Info struct { Warnings []string } -type legacyFields struct { - ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions. +// ContainerdInfo holds information about the containerd instance used by the daemon. +type ContainerdInfo struct { + // Address is the path to the containerd socket. + Address string `json:",omitempty"` + // Namespaces is the containerd namespaces used by the daemon. + Namespaces ContainerdNamespaces +} + +// ContainerdNamespaces reflects the containerd namespaces used by the daemon. +// +// These namespaces can be configured in the daemon configuration, and are +// considered to be used exclusively by the daemon. +// +// As these namespaces are considered to be exclusively accessed +// by the daemon, it is not recommended to change these values, +// or to change them to a value that is used by other systems, +// such as cri-containerd. +type ContainerdNamespaces struct { + // Containers holds the default containerd namespace used for + // containers managed by the daemon.
+ // + // The default namespace for containers is "moby", but will be + // suffixed with the `<uid>.<gid>` of the remapped `root` if + // user-namespaces are enabled and the containerd image-store + // is used. + Containers string + + // Plugins holds the default containerd namespace used for + // plugins managed by the daemon. + // + // The default namespace for plugins is "moby", but will be + // suffixed with the `<uid>.<gid>` of the remapped `root` if + // user-namespaces are enabled and the containerd image-store + // is used. + Plugins string } // PluginsInfo is a temp struct holding Plugins name diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index ca07162a..fe99b743 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -1,8 +1,6 @@ package types // import "github.com/docker/docker/api/types" import ( - "io" - "os" "time" "github.com/docker/docker/api/types/container" @@ -155,36 +153,13 @@ type Container struct { State string Status string HostConfig struct { - NetworkMode string `json:",omitempty"` + NetworkMode string `json:",omitempty"` + Annotations map[string]string `json:",omitempty"` } NetworkSettings *SummaryNetworkSettings Mounts []MountPoint } -// CopyConfig contains request body of Engine API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. -type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerStats contains response of Engine API: -// GET "/stats" -type ContainerStats struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - // Ping contains response of Engine API: // GET "/_ping" type Ping struct { @@ -230,17 +205,6 @@ type Version struct { BuildTime string `json:",omitempty"` } -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool - // Terminal size [height, width], unused if Tty == false - ConsoleSize *[2]uint `json:",omitempty"` -} - // HealthcheckResult stores information about a single run of a healthcheck probe type HealthcheckResult struct { Start time.Time // Start is the time this check started @@ -281,18 +245,6 @@ type ContainerState struct { Health *Health `json:",omitempty"` } -// ContainerNode stores information about the node that a container -// is running on. It's only used by the Docker Swarm standalone API -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - // ContainerJSONBase contains response of Engine API: // GET "/containers/{name:.*}/json" type ContainerJSONBase struct { @@ -306,7 +258,7 @@ type ContainerJSONBase struct { HostnamePath string HostsPath string LogPath string - Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API + Node *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It will be removed in the next release.
Name string RestartCount int Driver string @@ -423,84 +375,6 @@ type MountPoint struct { Propagation mount.Propagation } -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client - // package to older daemons. - CheckDuplicate bool `json:",omitempty"` - Driver string - Scope string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. 
-type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - // DiskUsageObject represents an object type used for disk usage query filtering. type DiskUsageObject string @@ -533,27 +407,6 @@ type DiskUsage struct { BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. } -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []image.DeleteResponse - SpaceReclaimed uint64 -} - // BuildCachePruneReport contains the response for Engine API: // POST "/build/prune" type BuildCachePruneReport struct { @@ -561,12 +414,6 @@ type BuildCachePruneReport struct { SpaceReclaimed uint64 } -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - // SecretCreateResponse contains the information returned to a client // on the creation of a new secret. type SecretCreateResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go index 231a5cca..43ffe104 100644 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -1,35 +1,210 @@ package types import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/volume" ) -// ImageImportOptions holds information to import images from the client host. +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" // -// Deprecated: use [image.ImportOptions]. -type ImageImportOptions = image.ImportOptions +// Deprecated: use [image.PruneReport]. +type ImagesPruneReport = image.PruneReport -// ImageCreateOptions holds information to create images. +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune". // -// Deprecated: use [image.CreateOptions]. -type ImageCreateOptions = image.CreateOptions +// Deprecated: use [volume.PruneReport]. +type VolumesPruneReport = volume.PruneReport -// ImagePullOptions holds information to pull images. +// NetworkCreateRequest is the request message sent to the server for network create call. 
// -// Deprecated: use [image.PullOptions]. -type ImagePullOptions = image.PullOptions +// Deprecated: use [network.CreateRequest]. +type NetworkCreateRequest = network.CreateRequest -// ImagePushOptions holds information to push images. +// NetworkCreate is the expected body of the "create network" http request message // -// Deprecated: use [image.PushOptions]. -type ImagePushOptions = image.PushOptions +// Deprecated: use [network.CreateOptions]. +type NetworkCreate = network.CreateOptions -// ImageListOptions holds parameters to list images with. +// NetworkListOptions holds parameters to filter the list of networks with. // -// Deprecated: use [image.ListOptions]. -type ImageListOptions = image.ListOptions +// Deprecated: use [network.ListOptions]. +type NetworkListOptions = network.ListOptions -// ImageRemoveOptions holds parameters to remove images. +// NetworkCreateResponse is the response message sent by the server for network create call. // -// Deprecated: use [image.RemoveOptions]. -type ImageRemoveOptions = image.RemoveOptions +// Deprecated: use [network.CreateResponse]. +type NetworkCreateResponse = network.CreateResponse + +// NetworkInspectOptions holds parameters to inspect a network. +// +// Deprecated: use [network.InspectOptions]. +type NetworkInspectOptions = network.InspectOptions + +// NetworkConnect represents the data to be used to connect a container to the network +// +// Deprecated: use [network.ConnectOptions]. +type NetworkConnect = network.ConnectOptions + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +// +// Deprecated: use [network.DisconnectOptions]. +type NetworkDisconnect = network.DisconnectOptions + +// EndpointResource contains network resources allocated and used for a container in a network. +// +// Deprecated: use [network.EndpointResource]. +type EndpointResource = network.EndpointResource + +// NetworkResource is the body of the "get network" http response message. +// +// Deprecated: use [network.Inspect] or [network.Summary] (for list operations). +type NetworkResource = network.Inspect + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +// +// Deprecated: use [network.PruneReport]. +type NetworksPruneReport = network.PruneReport + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +// +// Deprecated: use [container.ExecOptions]. +type ExecConfig = container.ExecOptions + +// ExecStartCheck is a temp struct used by execStart +// Config fields are part of ExecConfig in runconfig package +// +// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions]. +type ExecStartCheck = container.ExecStartOptions + +// ContainerExecInspect holds information returned by exec inspect. +// +// Deprecated: use [container.ExecInspect]. +type ContainerExecInspect = container.ExecInspect + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +// +// Deprecated: use [container.PruneReport]. +type ContainersPruneReport = container.PruneReport + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +// +// Deprecated: use [container.PathStat]. +type ContainerPathStat = container.PathStat + +// CopyToContainerOptions holds information +// about files to copy into a container.
+// +// Deprecated: use [container.CopyToContainerOptions]. +type CopyToContainerOptions = container.CopyToContainerOptions + +// ContainerStats contains response of Engine API: +// GET "/stats" +// +// Deprecated: use [container.StatsResponseReader]. +type ContainerStats = container.StatsResponseReader + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +// +// Deprecated: use [container.ThrottlingData]. +type ThrottlingData = container.ThrottlingData + +// CPUUsage stores all CPU stats aggregated since container inception. +// +// Deprecated: use [container.CPUUsage]. +type CPUUsage = container.CPUUsage + +// CPUStats aggregates and wraps all CPU related info of a container +// +// Deprecated: use [container.CPUStats]. +type CPUStats = container.CPUStats + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +// +// Deprecated: use [container.MemoryStats]. +type MemoryStats = container.MemoryStats + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +// +// Deprecated: use [container.BlkioStatEntry]. +type BlkioStatEntry = container.BlkioStatEntry + +// BlkioStats stores all IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +// +// Deprecated: use [container.BlkioStats]. +type BlkioStats = container.BlkioStats + +// StorageStats is the disk I/O stats for read/write on Windows. +// +// Deprecated: use [container.StorageStats]. +type StorageStats = container.StorageStats + +// NetworkStats aggregates the network stats of one container +// +// Deprecated: use [container.NetworkStats]. +type NetworkStats = container.NetworkStats + +// PidsStats contains the stats of a container's pids +// +// Deprecated: use [container.PidsStats]. +type PidsStats = container.PidsStats + +// Stats is the ultimate struct aggregating all types of stats of one container +// +// Deprecated: use [container.Stats]. +type Stats = container.Stats + +// StatsJSON is a stats response that extends Stats with per-network statistics +// +// Deprecated: use [container.StatsResponse]. +type StatsJSON = container.StatsResponse + +// EventsOptions holds parameters to filter events with. +// +// Deprecated: use [events.ListOptions]. +type EventsOptions = events.ListOptions + +// ImageSearchOptions holds parameters to search images with. +// +// Deprecated: use [registry.SearchOptions]. +type ImageSearchOptions = registry.SearchOptions + +// ImageImportSource holds source information for ImageImport +// +// Deprecated: use [image.ImportSource]. +type ImageImportSource = image.ImportSource + +// ImageLoadResponse returns information to the client about a load process. +// +// Deprecated: use [image.LoadResponse]. +type ImageLoadResponse = image.LoadResponse + +// ContainerNode stores information about the node that a container +// is running on. It's only used by the Docker Swarm standalone API. +// +// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release.
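Because every entry in types_deprecated.go above is a type alias (note the `=`), the old and new names denote the identical type, so existing callers keep compiling while emitting deprecation warnings. A small sketch, assuming only that the aliased packages are importable:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/network"
)

func main() {
	report := network.PruneReport{NetworksDeleted: []string{"demo-net"}}
	// Alias assignment needs no conversion; linters will flag the
	// deprecated name, which is the intended migration signal.
	var legacy types.NetworksPruneReport = report
	fmt.Println(legacy.NetworksDeleted)
}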
+type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go index 8b0dd138..0b9645e0 100644 --- a/vendor/github.com/docker/docker/api/types/volume/options.go +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -6,3 +6,10 @@ import "github.com/docker/docker/api/types/filters" type ListOptions struct { Filters filters.Args } + +// PruneReport contains the response for Engine API: +// POST "/volumes/prune" +type PruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index f2eeb6c5..60d91bc6 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -49,6 +49,8 @@ import ( "net/url" "path" "strings" + "sync" + "sync/atomic" "time" "github.com/docker/docker/api" @@ -131,7 +133,10 @@ type Client struct { negotiateVersion bool // negotiated indicates that API version negotiation took place - negotiated bool + negotiated atomic.Bool + + // negotiateLock is used to single-flight the version negotiation process + negotiateLock sync.Mutex tp trace.TracerProvider @@ -266,7 +271,16 @@ func (cli *Client) Close() error { // be negotiated when making the actual requests, and for which cases // we cannot do the negotiation lazily. func (cli *Client) checkVersion(ctx context.Context) error { - if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated { + if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated.Load() { + // Ensure exclusive write access to version and negotiated fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + + // May have been set during last execution of critical zone + if cli.negotiated.Load() { + return nil + } + ping, err := cli.Ping(ctx) if err != nil { return err @@ -312,6 +326,10 @@ func (cli *Client) ClientVersion() string { // added (1.24). func (cli *Client) NegotiateAPIVersion(ctx context.Context) { if !cli.manualOverride { + // Avoid concurrent modification of version-related fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + ping, err := cli.Ping(ctx) if err != nil { // FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead returning it. @@ -336,6 +354,10 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) { // added (1.24). func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { if !cli.manualOverride { + // Avoid concurrent modification of version-related fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + cli.negotiateAPIVersionPing(pingResponse) } } @@ -361,7 +383,7 @@ func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { // Store the results, so that automatic API version negotiation (if enabled) // won't be performed on the next request. 
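A hedged sketch of what the negotiateLock/atomic.Bool change in client.go above enables: one shared client can now be used from many goroutines without racing on lazy API-version negotiation, which runs single-flight. The image name "alpine:latest" is a placeholder and errors are deliberately ignored:

package main

import (
	"context"
	"sync"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Each call may reach checkVersion concurrently; the Ping
			// and version update now happen at most once.
			_, _, _ = cli.ImageInspectWithRaw(context.Background(), "alpine:latest")
		}()
	}
	wg.Wait()
}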
if cli.negotiateVersion { - cli.negotiated = true + cli.negotiated.Store(true) } } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index 883be7fa..8490a3b1 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -11,11 +11,11 @@ import ( "path/filepath" "strings" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerStatPath returns stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) { query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. @@ -23,14 +23,14 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri response, err := cli.head(ctx, urlStr, query, nil) defer ensureReaderClosed(response) if err != nil { - return types.ContainerPathStat{}, err + return container.PathStat{}, err } return getContainerPathStatFromHeader(response.header) } // CopyToContainer copies content into the container filesystem. // Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error { query := url.Values{} query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. @@ -55,14 +55,14 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str // CopyFromContainer gets the content from the container and returns it as a Reader // for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) { query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. 
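For the container_copy.go migration above, a minimal sketch using the relocated container.PathStat; the container name and path are placeholders, and errors are reduced to panics:

package main

import (
	"context"
	"fmt"
	"io"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Stat now returns container.PathStat instead of types.ContainerPathStat.
	stat, err := cli.ContainerStatPath(ctx, "my-container", "/etc/hostname")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: %d bytes, mode %v\n", stat.Name, stat.Size, stat.Mode)

	rc, _, err := cli.CopyFromContainer(ctx, "my-container", "/etc/hostname")
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_, _ = io.Copy(io.Discard, rc) // rc streams a tar archive of the path
}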
apiPath := "/containers/" + containerID + "/archive" response, err := cli.get(ctx, apiPath, query, nil) if err != nil { - return nil, types.ContainerPathStat{}, err + return nil, container.PathStat{}, err } // In order to get the copy behavior right, we need to know information @@ -78,8 +78,8 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s return response.body, stat, err } -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat +func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) { + var stat container.PathStat encodedStat := header.Get("X-Docker-Container-Path-Stat") statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 526a3876..9379448d 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -6,11 +6,12 @@ import ( "net/http" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/versions" ) // ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) { var response types.IDResponse // Make sure we negotiated (if the client is configured to do so), @@ -22,14 +23,14 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co return response, err } - if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil { + if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil { return response, err } if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil + options.ConsoleSize = nil } - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, options, nil) defer ensureReaderClosed(resp) if err != nil { return response, err @@ -39,7 +40,7 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co } // ContainerExecStart starts an exec process already created in the docker host. -func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config container.ExecStartOptions) error { if versions.LessThan(cli.ClientVersion(), "1.42") { config.ConsoleSize = nil } @@ -52,7 +53,7 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config // It returns a types.HijackedConnection with the hijacked connection // and the a reader to get output. It's up to the called to close // the hijacked connection by calling types.HijackedResponse.Close. 
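A sketch of the exec flow with the new option types (container.ExecOptions and container.ExecStartOptions, replacing types.ExecConfig and types.ExecStartCheck); "my-container" and the command are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	id, err := cli.ContainerExecCreate(ctx, "my-container", container.ExecOptions{
		Cmd: []string{"touch", "/tmp/hello"},
	})
	if err != nil {
		panic(err)
	}
	if err := cli.ContainerExecStart(ctx, id.ID, container.ExecStartOptions{Detach: true}); err != nil {
		panic(err)
	}
	// Inspect may still report Running for a short-lived command; a real
	// caller would poll or attach instead of inspecting immediately.
	inspect, err := cli.ContainerExecInspect(ctx, id.ID)
	if err != nil {
		panic(err)
	}
	fmt.Println("running:", inspect.Running, "exit code:", inspect.ExitCode)
}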
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config container.ExecAttachOptions) (types.HijackedResponse, error) { if versions.LessThan(cli.ClientVersion(), "1.42") { config.ConsoleSize = nil } @@ -62,8 +63,8 @@ func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, confi } // ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) { + var response container.ExecInspect resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) if err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index ca509238..29c922da 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" ) // ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { - var report types.ContainersPruneReport +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) { + var report container.PruneReport if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 3fabb75f..b5641dae 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -4,12 +4,12 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerStats returns near realtime stats for a given container. // It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (container.StatsResponseReader, error) { query := url.Values{} query.Set("stream", "0") if stream { @@ -18,10 +18,10 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) if err != nil { - return types.ContainerStats{}, err + return container.StatsResponseReader{}, err } - return types.ContainerStats{ + return container.StatsResponseReader{ Body: resp.body, OSType: getDockerOS(resp.header.Get("Server")), }, nil @@ -29,17 +29,17 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea // ContainerStatsOneShot gets a single stat entry from a container. 
// It differs from `ContainerStats` in that the API should not wait to prime the stats -func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { +func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (container.StatsResponseReader, error) { query := url.Values{} query.Set("stream", "0") query.Set("one-shot", "1") resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) if err != nil { - return types.ContainerStats{}, err + return container.StatsResponseReader{}, err } - return types.ContainerStats{ + return container.StatsResponseReader{ Body: resp.body, OSType: getDockerOS(resp.header.Get("Server")), }, nil diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index a9c48a92..d3ab26be 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -6,7 +6,6 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" timetypes "github.com/docker/docker/api/types/time" @@ -16,7 +15,7 @@ import ( // by cancelling the context. Once the stream has been completely read an io.EOF error will // be sent over the error channel. If an error is sent all processing will be stopped. It's up // to the caller to reopen the stream in the event of an error by reinvoking this method. -func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { +func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) { messages := make(chan events.Message) errs := make(chan error, 1) @@ -68,7 +67,7 @@ func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-c return messages, errs } -func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { +func buildEventsQueryParams(cliVersion string, options events.ListOptions) (url.Values, error) { query := url.Values{} ref := time.Now() diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index 5a890b0c..43d55eda 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -7,13 +7,12 @@ import ( "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/image" ) // ImageImport creates a new image based on the source options. // It returns the JSON content in the response body. 
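For the container_stats.go changes above, a hedged sketch decoding the one-shot stats body into container.StatsResponse (the type the deprecated types.StatsJSON now aliases); the container name is a placeholder:

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Returns container.StatsResponseReader instead of types.ContainerStats.
	resp, err := cli.ContainerStatsOneShot(ctx, "my-container")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var stats container.StatsResponse
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		panic(err)
	}
	fmt.Println(resp.OSType, stats.CPUStats.CPUUsage.TotalUsage)
}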
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) { +func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) { if ref != "" { // Check if the given image name can be resolved if _, err := reference.ParseNormalizedNamed(ref); err != nil { diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index c825206e..c68f0013 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -6,13 +6,13 @@ import ( "net/http" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) // ImageLoad loads an image in the docker host from the client host. // It's up to the caller to close the io.ReadCloser in the // ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) { v := url.Values{} v.Set("quiet", "0") if quiet { @@ -22,9 +22,9 @@ func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) ( "Content-Type": {"application/x-tar"}, }) if err != nil { - return types.ImageLoadResponse{}, err + return image.LoadResponse{}, err } - return types.ImageLoadResponse{ + return image.LoadResponse{ Body: resp.body, JSON: resp.header.Get("Content-Type") == "application/json", }, nil diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 6b82d6ab..5ee987e2 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" ) // ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { - var report types.ImagesPruneReport +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (image.PruneReport, error) { + var report image.PruneReport if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index 6438cf6a..1634c4c8 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -36,7 +36,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr } diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index e6a6b11e..16f9c465 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -2,7 +2,9 @@ package client // import 
"github.com/docker/docker/client" import ( "context" + "encoding/json" "errors" + "fmt" "io" "net/http" "net/url" @@ -36,9 +38,23 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu } } + if options.Platform != nil { + if err := cli.NewVersionError(ctx, "1.46", "platform"); err != nil { + return nil, err + } + + p := *options.Platform + pJson, err := json.Marshal(p) + if err != nil { + return nil, fmt.Errorf("invalid platform: %v", err) + } + + query.Set("platform", string(pJson)) + } + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr } diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 8971b139..0a074575 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -7,7 +7,6 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" @@ -15,7 +14,7 @@ import ( // ImageSearch makes the docker host search by a term in a remote registry. // The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { +func (cli *Client) ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) { var results []registry.SearchResult query := url.Values{} query.Set("term", term) @@ -34,7 +33,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) defer ensureReaderClosed(resp) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return results, privilegeErr } diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 45d233f2..cc60a5d1 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -50,11 +50,11 @@ type ContainerAPIClient interface { ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) 
(types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExecStart(ctx context.Context, execID string, options container.ExecStartOptions) error ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) @@ -66,18 +66,18 @@ type ContainerAPIClient interface { ContainerRename(ctx context.Context, container, newContainerName string) error ContainerResize(ctx context.Context, container string, options container.ResizeOptions) error ContainerRestart(ctx context.Context, container string, options container.StopOptions) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) - ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) + ContainerStatPath(ctx context.Context, container, path string) (container.PathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (container.StatsResponseReader, error) + ContainerStatsOneShot(ctx context.Context, container string) (container.StatsResponseReader, error) ContainerStart(ctx context.Context, container string, options container.StartOptions) error ContainerStop(ctx context.Context, container string, options container.StopOptions) error ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options container.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) } // DistributionAPIClient defines API client methods for the registry @@ -92,29 +92,29 @@ type ImageAPIClient interface { BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) + ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx 
context.Context, image string) (types.ImageInspect, []byte, error) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error - ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error) } // NetworkAPIClient defines API client methods for the networks type NetworkAPIClient interface { NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) NetworkDisconnect(ctx context.Context, network, container string, force bool) error - NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkInspect(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, error) + NetworkInspectWithRaw(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, []byte, error) + NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) NetworkRemove(ctx context.Context, network string) error - NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (network.PruneReport, error) } // NodeAPIClient defines API client methods for the nodes @@ -165,7 +165,7 @@ type SwarmAPIClient interface { // SystemAPIClient defines API client methods for the system type SystemAPIClient interface { - Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) Info(ctx context.Context) (system.Info, error) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) @@ -179,7 +179,7 @@ type VolumeAPIClient interface { VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error - 
VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (volume.PruneReport, error) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error } diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go index 57189461..8daf8906 100644 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -3,13 +3,12 @@ package client // import "github.com/docker/docker/client" import ( "context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" ) // NetworkConnect connects a container to an existent network in the docker host. func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ + nc := network.ConnectOptions{ Container: containerID, EndpointConfig: config, } diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index d510feb3..850e31cc 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -4,13 +4,13 @@ import ( "context" "encoding/json" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/versions" ) // NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - var response types.NetworkCreateResponse +func (cli *Client) NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) { + var response network.CreateResponse // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. @@ -21,12 +21,13 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options types return response, err } - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, + networkCreateRequest := network.CreateRequest{ + CreateOptions: options, Name: name, } if versions.LessThan(cli.version, "1.44") { - networkCreateRequest.CheckDuplicate = true //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. + enabled := true + networkCreateRequest.CheckDuplicate = &enabled //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. } serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go index dd156766..aaf428d8 100644 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -3,12 +3,15 @@ package client // import "github.com/docker/docker/client" import ( "context" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" ) // NetworkDisconnect disconnects a container from an existent network in the docker host. 
func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} + nd := network.DisconnectOptions{ + Container: containerID, + Force: force, + } resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 0f90e2bb..afc47de6 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -7,25 +7,20 @@ import ( "io" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" ) // NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, error) { networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) return networkResource, err } // NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, []byte, error) { if networkID == "" { - return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + return network.Inspect{}, nil, objectNotFoundError{object: "network", id: networkID} } - var ( - networkResource types.NetworkResource - resp serverResponse - err error - ) query := url.Values{} if options.Verbose { query.Set("verbose", "true") @@ -33,17 +28,19 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, if options.Scope != "" { query.Set("scope", options.Scope) } - resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + + resp, err := cli.get(ctx, "/networks/"+networkID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return networkResource, nil, err + return network.Inspect{}, nil, err } - body, err := io.ReadAll(resp.body) + raw, err := io.ReadAll(resp.body) if err != nil { - return networkResource, nil, err + return network.Inspect{}, nil, err } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&networkResource) - return networkResource, body, err + + var nw network.Inspect + err = json.NewDecoder(bytes.NewReader(raw)).Decode(&nw) + return nw, raw, err } diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index ed2acb55..72957d47 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -5,12 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" ) // NetworkList returns the list of networks configured in the docker host. 
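A short sketch for the network list/inspect migration above, where results are now network.Summary and network.Inspect values rather than types.NetworkResource:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	nets, err := cli.NetworkList(ctx, network.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nets {
		insp, err := cli.NetworkInspect(ctx, n.ID, network.InspectOptions{})
		if err != nil {
			continue // network may have been removed since listing
		}
		fmt.Println(insp.Name, insp.Driver, insp.Scope)
	}
}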
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { +func (cli *Client) NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) { query := url.Values{} if options.Filters.Len() > 0 { //nolint:staticcheck // ignore SA1019 for old code @@ -21,7 +21,7 @@ func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOpt query.Set("filters", filterJSON) } - var networkResources []types.NetworkResource + var networkResources []network.Summary resp, err := cli.get(ctx, "/networks", query, nil) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index 7b5f831e..708cc61a 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" ) // NetworksPrune requests the daemon to delete unused networks -func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { - var report types.NetworksPruneReport +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (network.PruneReport, error) { + var report network.PruneReport if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 69184619..a0d8c350 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -84,7 +84,7 @@ func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { // todo: do inspect before to check existing name before checking privileges - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { ensureReaderClosed(resp) return nil, privilegeErr @@ -105,7 +105,7 @@ func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, ensureReaderClosed(resp) if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(privileges) + accept, err := options.AcceptPermissionsFunc(ctx, privileges) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 50e213b5..6eea9b4e 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -184,10 +184,10 @@ func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { // Checks if client is running with elevated privileges - if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { + if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil { err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be 
run with elevated privileges to connect") } else { - f.Close() + _ = f.Close() err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") } } @@ -278,7 +278,7 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { func ensureReaderClosed(response serverResponse) { if response.body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(io.Discard, response.body, 512) - response.body.Close() + _, _ = io.CopyN(io.Discard, response.body, 512) + _ = response.body.Close() } } diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index 9333f6ee..9b09c30f 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/volume" ) // VolumesPrune requests the daemon to delete unused data -func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { - var report types.VolumesPruneReport +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (volume.PruneReport, error) { + var report volume.PruneReport if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil { return report, err diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 43133a09..cde64f08 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -20,7 +20,6 @@ import ( "syscall" "time" - "github.com/containerd/containerd/pkg/userns" "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" @@ -98,24 +97,16 @@ func NewDefaultArchiver() *Archiver { type breakoutError error const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz - // Zstd is zstd compression algorithm. - Zstd + Uncompressed Compression = 0 // Uncompressed represents the uncompressed. + Bzip2 Compression = 1 // Bzip2 is bzip2 compression algorithm. + Gzip Compression = 2 // Gzip is gzip compression algorithm. + Xz Compression = 3 // Xz is xz compression algorithm. + Zstd Compression = 4 // Zstd is zstd compression algorithm. ) const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. - OverlayWhiteoutFormat + AUFSWhiteoutFormat WhiteoutFormat = 0 // AUFSWhiteoutFormat is the default format for whiteouts + OverlayWhiteoutFormat WhiteoutFormat = 1 // OverlayWhiteoutFormat formats whiteout according to the overlay standard. ) // IsArchivePath checks if the (possibly compressed) file at the given path @@ -159,7 +150,7 @@ func magicNumberMatcher(m []byte) matcher { // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. -// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. 
+// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { @@ -541,8 +532,10 @@ func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOp } // CanonicalTarNameForPath canonicalizes relativePath to a POSIX-style path using -// forward slashes. It is an alias for filepath.ToSlash, which is a no-op on +// forward slashes. It is an alias for [filepath.ToSlash], which is a no-op on // Linux and Unix. +// +// Deprecated: use [filepath.ToSlash]. This function will be removed in the next release. func CanonicalTarNameForPath(relativePath string) string { return filepath.ToSlash(relativePath) } @@ -681,9 +674,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o inUserns, bestEffortXattrs bool chownOpts *idtools.Identity ) + + // TODO(thaJeztah): make opts a required argument. if opts != nil { Lchown = !opts.NoLchown - inUserns = opts.InUserNS + inUserns = opts.InUserNS // TODO(thaJeztah): consider deprecating opts.InUserNS and detect locally. chownOpts = opts.ChownOpts bestEffortXattrs = opts.BestEffortXattrs } @@ -718,6 +713,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns + log.G(context.TODO()).WithFields(log.Fields{"path": path, "type": hdr.Typeflag}).Debug("skipping device nodes in a userns") return nil } // Handle this is an OS-specific way @@ -728,6 +724,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + if inUserns && errors.Is(err, syscall.EPERM) { + // In most cases, cannot create a fifo if running in user namespace + log.G(context.TODO()).WithFields(log.Fields{"error": err, "path": path, "type": hdr.Typeflag}).Debug("creating fifo node in a userns") + return nil + } return err } @@ -771,7 +772,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { msg := "failed to Lchown %q for UID %d, GID %d" - if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { + if inUserns && errors.Is(err, syscall.EINVAL) { msg += " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" } return errors.Wrapf(err, msg, path, hdr.Uid, hdr.Gid) @@ -877,21 +878,16 @@ func NewTarballer(srcPath string, options *TarOptions) (*Tarballer, error) { return nil, err } - whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) - if err != nil { - return nil, err - } - return &Tarballer{ // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. 
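[Illustrative note, not part of the patch] The Compression and WhiteoutFormat constants above were rewritten with explicit values but keep their former iota ordering, so existing callers compile and behave identically. A small sketch, assuming a hypothetical /tmp/some-dir:

package main

import (
	"io"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// archive.Gzip is still 2; only the declaration style changed.
	rdr, err := archive.TarWithOptions("/tmp/some-dir", &archive.TarOptions{
		Compression: archive.Gzip,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rdr.Close()
	if _, err := io.Copy(io.Discard, rdr); err != nil {
		log.Fatal(err)
	}
}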
- srcPath: fixVolumePathPrefix(srcPath), + srcPath: addLongPathPrefix(srcPath), options: options, pm: pm, pipeReader: pipeReader, pipeWriter: pipeWriter, compressWriter: compressWriter, - whiteoutConverter: whiteoutConverter, + whiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), }, nil } @@ -1086,10 +1082,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) - if err != nil { - return err - } + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) // Iterate through the files in the archive. loop: @@ -1452,6 +1445,8 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. +// +// Deprecated: NewTempArchive is only used in tests and will be removed in the next release. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { @@ -1473,6 +1468,8 @@ func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. +// +// Deprecated: TempArchive is only used in tests and will be removed in the next release. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 2c3786cd..45ac2aa6 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -6,19 +6,17 @@ import ( "path/filepath" "strings" + "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/system" "github.com/pkg/errors" "golang.org/x/sys/unix" ) -func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) { +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { if format == OverlayWhiteoutFormat { - if inUserNS { - return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns") - } - return overlayWhiteoutConverter{}, nil + return overlayWhiteoutConverter{} } - return nil, nil + return nil } type overlayWhiteoutConverter struct{} @@ -35,20 +33,25 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os } if fi.Mode()&os.ModeDir != 0 { + opaqueXattrName := "trusted.overlay.opaque" + if userns.RunningInUserNS() { + opaqueXattrName = "user.overlay.opaque" + } + // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + opaque, err := system.Lgetxattr(path, opaqueXattrName) if err != nil { return nil, err } if len(opaque) == 1 && opaque[0] == 'y' { - delete(hdr.PAXRecords, paxSchilyXattr+"trusted.overlay.opaque") + delete(hdr.PAXRecords, paxSchilyXattr+opaqueXattrName) // create a header for the whiteout file // it should inherit some properties from the parent, but be a regular file wo = &tar.Header{ Typeflag: tar.TypeReg, Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Name: 
filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An archive is being created, not extracted. Size: 0, Uid: hdr.Uid, Uname: hdr.Uname, @@ -56,7 +59,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os Gname: hdr.Gname, AccessTime: hdr.AccessTime, ChangeTime: hdr.ChangeTime, - } //#nosec G305 -- An archive is being created, not extracted. + } } } @@ -69,9 +72,14 @@ func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (boo // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay if base == WhiteoutOpaqueDir { - err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + opaqueXattrName := "trusted.overlay.opaque" + if userns.RunningInUserNS() { + opaqueXattrName = "user.overlay.opaque" + } + + err := unix.Setxattr(dir, opaqueXattrName, []byte{'y'}, 0) if err != nil { - return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) + return false, errors.Wrapf(err, "setxattr(%q, %s=y)", dir, opaqueXattrName) } // don't write the file itself return false, err diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go index 3de1d64c..7dee1f7a 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -2,6 +2,6 @@ package archive // import "github.com/docker/docker/pkg/archive" -func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) { - return nil, nil +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index ff59d019..f559a305 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -11,7 +11,6 @@ import ( "strings" "syscall" - "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" "golang.org/x/sys/unix" @@ -21,9 +20,9 @@ func init() { sysStat = statUnix } -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { +// addLongPathPrefix adds the Windows long path prefix to the path provided if +// it does not already have it. It is a no-op on platforms other than Windows. +func addLongPathPrefix(srcPath string) string { return srcPath } @@ -95,7 +94,10 @@ func getFileUIDGID(stat interface{}) (idtools.Identity, error) { } // handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo +// createTarFile to handle the following types of header: Block; Char; Fifo. +// +// Creating device nodes is not supported when running in a user namespace, +// and produces a [syscall.EPERM] in most cases.
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode := uint32(hdr.Mode & 0o7777) switch hdr.Typeflag { @@ -107,12 +109,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode |= unix.S_IFIFO } - err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) - if errors.Is(err, syscall.EPERM) && userns.RunningInUserNS() { - // In most cases, cannot create a device if running in user namespace - err = nil - } - return err + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) } func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go index 09a25832..e25c64b4 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -4,15 +4,27 @@ import ( "archive/tar" "os" "path/filepath" + "strings" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/longpath" ) -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) +// longPathPrefix is the longpath prefix for Windows file paths. +const longPathPrefix = `\\?\` + +// addLongPathPrefix adds the Windows long path prefix to the path provided if +// it does not already have it. It is a no-op on platforms other than Windows. +// +// addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix]. +func addLongPathPrefix(srcPath string) string { + if strings.HasPrefix(srcPath, longPathPrefix) { + return srcPath + } + if strings.HasPrefix(srcPath, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + return longPathPrefix + `UNC` + srcPath[1:] + } + return longPathPrefix + srcPath } // getWalkRoot calculates the root path when performing a TarWithOptions. diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index f9f16c92..5f12ca40 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -23,12 +23,9 @@ import ( type ChangeType int const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete + ChangeModify = 0 // ChangeModify represents the modify operation. + ChangeAdd = 1 // ChangeAdd represents the add operation. + ChangeDelete = 2 // ChangeDelete represents the delete operation. 
) func (c ChangeType) String() string { diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go index 13a7d3c0..28f741a2 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -72,19 +72,23 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) } + s, err := system.Lstat(path) + if err != nil { + return err + } + info := &FileInfo{ name: filepath.Base(relPath), children: make(map[string]*FileInfo), parent: parent, + stat: s, } - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") + // system.Lgetxattr is only implemented on Linux and produces an error + // on other platforms. This code is intentionally left commented-out + // as a reminder to include this code if this would ever be implemented + // on other platforms. + // info.capability, _ = system.Lgetxattr(path, "security.capability") parent.children[info.name] = info diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 318f5942..e080e310 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -102,7 +102,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, continue } } - //#nosec G305 -- The joined path is guarded against path traversal. + // #nosec G305 -- The joined path is guarded against path traversal. path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { @@ -198,7 +198,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } for _, hdr := range dirs { - //#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. + // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return 0, err diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go index 82671d8c..05da97b0 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -9,6 +9,7 @@ import ( // NewAtomicFileWriter returns WriteCloser so that writing to it writes to a // temporary file and closing it atomically changes the temporary file to // destination path. Writing and closing concurrently is not allowed. +// NOTE: umask is not considered for the file's permissions. func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) if err != nil { @@ -26,7 +27,8 @@ func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, err }, nil } -// AtomicWriteFile atomically writes data to a file named by filename. +// AtomicWriteFile atomically writes data to a file named by filename and with the specified permission bits. +// NOTE: umask is not considered for the file's permissions. 
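[Illustrative note, not part of the patch] Usage sketch for the atomic-write helper whose contract the comments above clarify:

package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// The data reaches /tmp/example.json only by renaming a fully written
	// temp file, so readers never observe a torn write. Per the new NOTE,
	// 0o600 is applied as-is; the process umask is not subtracted.
	if err := ioutils.AtomicWriteFile("/tmp/example.json", []byte(`{"debug":true}`), 0o600); err != nil {
		log.Fatal(err)
	}
}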
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { f, err := NewAtomicFileWriter(filename, perm) if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 1c5dde52..00000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package longpath introduces some constants and helper functions for handling -// long paths in Windows. -// -// Long paths are expected to be prepended with "\\?\" and followed by either a -// drive letter, a UNC server\share, or a volume identifier. -package longpath // import "github.com/docker/docker/pkg/longpath" - -import ( - "os" - "runtime" - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix adds the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} - -// MkdirTemp is the equivalent of [os.MkdirTemp], except that on Windows -// the result is in Windows longpath format. On Unix systems it is -// equivalent to [os.MkdirTemp]. -func MkdirTemp(dir, prefix string) (string, error) { - tempDir, err := os.MkdirTemp(dir, prefix) - if err != nil { - return "", err - } - if runtime.GOOS != "windows" { - return tempDir, nil - } - return AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/go-jose/go-jose/v3/jwe.go b/vendor/github.com/go-jose/go-jose/v3/jwe.go deleted file mode 100644 index 4267ac75..00000000 --- a/vendor/github.com/go-jose/go-jose/v3/jwe.go +++ /dev/null @@ -1,295 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "encoding/base64" - "fmt" - "strings" - - "github.com/go-jose/go-jose/v3/json" -) - -// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing. -type rawJSONWebEncryption struct { - Protected *byteBuffer `json:"protected,omitempty"` - Unprotected *rawHeader `json:"unprotected,omitempty"` - Header *rawHeader `json:"header,omitempty"` - Recipients []rawRecipientInfo `json:"recipients,omitempty"` - Aad *byteBuffer `json:"aad,omitempty"` - EncryptedKey *byteBuffer `json:"encrypted_key,omitempty"` - Iv *byteBuffer `json:"iv,omitempty"` - Ciphertext *byteBuffer `json:"ciphertext,omitempty"` - Tag *byteBuffer `json:"tag,omitempty"` -} - -// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing. -type rawRecipientInfo struct { - Header *rawHeader `json:"header,omitempty"` - EncryptedKey string `json:"encrypted_key,omitempty"` -} - -// JSONWebEncryption represents an encrypted JWE object after parsing. 
-type JSONWebEncryption struct { - Header Header - protected, unprotected *rawHeader - recipients []recipientInfo - aad, iv, ciphertext, tag []byte - original *rawJSONWebEncryption -} - -// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing. -type recipientInfo struct { - header *rawHeader - encryptedKey []byte -} - -// GetAuthData retrieves the (optional) authenticated data attached to the object. -func (obj JSONWebEncryption) GetAuthData() []byte { - if obj.aad != nil { - out := make([]byte, len(obj.aad)) - copy(out, obj.aad) - return out - } - - return nil -} - -// Get the merged header values -func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader { - out := rawHeader{} - out.merge(obj.protected) - out.merge(obj.unprotected) - - if recipient != nil { - out.merge(recipient.header) - } - - return out -} - -// Get the additional authenticated data from a JWE object. -func (obj JSONWebEncryption) computeAuthData() []byte { - var protected string - - switch { - case obj.original != nil && obj.original.Protected != nil: - protected = obj.original.Protected.base64() - case obj.protected != nil: - protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected))) - default: - protected = "" - } - - output := []byte(protected) - if obj.aad != nil { - output = append(output, '.') - output = append(output, []byte(base64.RawURLEncoding.EncodeToString(obj.aad))...) - } - - return output -} - -// ParseEncrypted parses an encrypted message in compact or JWE JSON Serialization format. -func ParseEncrypted(input string) (*JSONWebEncryption, error) { - input = stripWhitespace(input) - if strings.HasPrefix(input, "{") { - return parseEncryptedFull(input) - } - - return parseEncryptedCompact(input) -} - -// parseEncryptedFull parses a message in compact format. -func parseEncryptedFull(input string) (*JSONWebEncryption, error) { - var parsed rawJSONWebEncryption - err := json.Unmarshal([]byte(input), &parsed) - if err != nil { - return nil, err - } - - return parsed.sanitized() -} - -// sanitized produces a cleaned-up JWE object from the raw JSON. -func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { - obj := &JSONWebEncryption{ - original: parsed, - unprotected: parsed.Unprotected, - } - - // Check that there is not a nonce in the unprotected headers - if parsed.Unprotected != nil { - if nonce := parsed.Unprotected.getNonce(); nonce != "" { - return nil, ErrUnprotectedNonce - } - } - if parsed.Header != nil { - if nonce := parsed.Header.getNonce(); nonce != "" { - return nil, ErrUnprotectedNonce - } - } - - if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 { - err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected) - if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64()) - } - } - - // Note: this must be called _after_ we parse the protected header, - // otherwise fields from the protected header will not get picked up. 
- var err error - mergedHeaders := obj.mergedHeaders(nil) - obj.Header, err = mergedHeaders.sanitized() - if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders) - } - - if len(parsed.Recipients) == 0 { - obj.recipients = []recipientInfo{ - { - header: parsed.Header, - encryptedKey: parsed.EncryptedKey.bytes(), - }, - } - } else { - obj.recipients = make([]recipientInfo, len(parsed.Recipients)) - for r := range parsed.Recipients { - encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey) - if err != nil { - return nil, err - } - - // Check that there is not a nonce in the unprotected header - if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.getNonce() != "" { - return nil, ErrUnprotectedNonce - } - - obj.recipients[r].header = parsed.Recipients[r].Header - obj.recipients[r].encryptedKey = encryptedKey - } - } - - for _, recipient := range obj.recipients { - headers := obj.mergedHeaders(&recipient) - if headers.getAlgorithm() == "" || headers.getEncryption() == "" { - return nil, fmt.Errorf("go-jose/go-jose: message is missing alg/enc headers") - } - } - - obj.iv = parsed.Iv.bytes() - obj.ciphertext = parsed.Ciphertext.bytes() - obj.tag = parsed.Tag.bytes() - obj.aad = parsed.Aad.bytes() - - return obj, nil -} - -// parseEncryptedCompact parses a message in compact format. -func parseEncryptedCompact(input string) (*JSONWebEncryption, error) { - parts := strings.Split(input, ".") - if len(parts) != 5 { - return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") - } - - rawProtected, err := base64URLDecode(parts[0]) - if err != nil { - return nil, err - } - - encryptedKey, err := base64URLDecode(parts[1]) - if err != nil { - return nil, err - } - - iv, err := base64URLDecode(parts[2]) - if err != nil { - return nil, err - } - - ciphertext, err := base64URLDecode(parts[3]) - if err != nil { - return nil, err - } - - tag, err := base64URLDecode(parts[4]) - if err != nil { - return nil, err - } - - raw := &rawJSONWebEncryption{ - Protected: newBuffer(rawProtected), - EncryptedKey: newBuffer(encryptedKey), - Iv: newBuffer(iv), - Ciphertext: newBuffer(ciphertext), - Tag: newBuffer(tag), - } - - return raw.sanitized() -} - -// CompactSerialize serializes an object using the compact serialization format. -func (obj JSONWebEncryption) CompactSerialize() (string, error) { - if len(obj.recipients) != 1 || obj.unprotected != nil || - obj.protected == nil || obj.recipients[0].header != nil { - return "", ErrNotSupported - } - - serializedProtected := mustSerializeJSON(obj.protected) - - return base64JoinWithDots( - serializedProtected, - obj.recipients[0].encryptedKey, - obj.iv, - obj.ciphertext, - obj.tag, - ), nil -} - -// FullSerialize serializes an object using the full JSON serialization format. 
-func (obj JSONWebEncryption) FullSerialize() string { - raw := rawJSONWebEncryption{ - Unprotected: obj.unprotected, - Iv: newBuffer(obj.iv), - Ciphertext: newBuffer(obj.ciphertext), - EncryptedKey: newBuffer(obj.recipients[0].encryptedKey), - Tag: newBuffer(obj.tag), - Aad: newBuffer(obj.aad), - Recipients: []rawRecipientInfo{}, - } - - if len(obj.recipients) > 1 { - for _, recipient := range obj.recipients { - info := rawRecipientInfo{ - Header: recipient.header, - EncryptedKey: base64.RawURLEncoding.EncodeToString(recipient.encryptedKey), - } - raw.Recipients = append(raw.Recipients, info) - } - } else { - // Use flattened serialization - raw.Header = obj.recipients[0].header - raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey) - } - - if obj.protected != nil { - raw.Protected = newBuffer(mustSerializeJSON(obj.protected)) - } - - return string(mustSerializeJSON(raw)) -} diff --git a/vendor/github.com/go-jose/go-jose/v3/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/.gitignore rename to vendor/github.com/go-jose/go-jose/v4/.gitignore diff --git a/vendor/github.com/go-jose/go-jose/v3/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/.golangci.yml rename to vendor/github.com/go-jose/go-jose/v4/.golangci.yml diff --git a/vendor/github.com/go-jose/go-jose/v3/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/.travis.yml rename to vendor/github.com/go-jose/go-jose/v4/.travis.yml diff --git a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md similarity index 96% rename from vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md rename to vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md index ce2a54eb..28bdd2fc 100644 --- a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md @@ -45,12 +45,6 @@ token". [1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf -# v3.0.3 - -## Fixed - - - Limit decompression output size to prevent a DoS. Backport from v4.0.1. 
- # v3.0.2 ## Fixed diff --git a/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md rename to vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md diff --git a/vendor/github.com/go-jose/go-jose/v3/LICENSE b/vendor/github.com/go-jose/go-jose/v4/LICENSE similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/LICENSE rename to vendor/github.com/go-jose/go-jose/v4/LICENSE diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md similarity index 82% rename from vendor/github.com/go-jose/go-jose/v3/README.md rename to vendor/github.com/go-jose/go-jose/v4/README.md index 282cd9e1..79a7c5ec 100644 --- a/vendor/github.com/go-jose/go-jose/v3/README.md +++ b/vendor/github.com/go-jose/go-jose/v4/README.md @@ -1,17 +1,9 @@ # Go JOSE -### Versions - -[Version 4](https://github.com/go-jose/go-jose) -([branch](https://github.com/go-jose/go-jose/), -[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: - - import "github.com/go-jose/go-jose/v4" - -The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which -are deprecated. - -### Summary +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) +[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions) Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, @@ -43,6 +35,20 @@ of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/curren This is to avoid differences in interpretation of messages between go-jose and libraries in other languages. +### Versions + +[Version 4](https://github.com/go-jose/go-jose) +([branch](https://github.com/go-jose/go-jose/tree/main), +[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: + + import "github.com/go-jose/go-jose/v4" + +The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which +are still useable but not actively developed anymore. + +Version 3, in this repo, is still receiving security fixes but not functionality +updates. + ### Supported algorithms See below for a table of supported algorithms. Algorithm identifiers match @@ -98,11 +104,11 @@ allows attaching a key id. 
## Examples -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3) -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) Examples can be found in the Godoc reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/v3/jose-util) +[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util) subdirectory also contains a small command-line utility which might be useful as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v3/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/SECURITY.md rename to vendor/github.com/go-jose/go-jose/v4/SECURITY.md diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go similarity index 99% rename from vendor/github.com/go-jose/go-jose/v3/asymmetric.go rename to vendor/github.com/go-jose/go-jose/v4/asymmetric.go index d4d4961b..f8d5774e 100644 --- a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go +++ b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go @@ -29,8 +29,8 @@ import ( "fmt" "math/big" - josecipher "github.com/go-jose/go-jose/v3/cipher" - "github.com/go-jose/go-jose/v3/json" + josecipher "github.com/go-jose/go-jose/v4/cipher" + "github.com/go-jose/go-jose/v4/json" ) // A generic RSA-based encrypter/verifier diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go rename to vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go rename to vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go rename to vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go rename to vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go similarity index 99% rename from vendor/github.com/go-jose/go-jose/v3/crypter.go rename to vendor/github.com/go-jose/go-jose/v4/crypter.go index 8870e890..aba08424 100644 --- a/vendor/github.com/go-jose/go-jose/v3/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -22,7 +22,7 @@ import ( "errors" "fmt" - "github.com/go-jose/go-jose/v3/json" + "github.com/go-jose/go-jose/v4/json" ) // Encrypter represents an encrypter which produces an encrypted JWE object. 
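[Illustrative note, not part of the patch] Besides the v3-to-v4 import-path change, the parse entry points now require explicit algorithm allowlists, as the jwe.go and jws.go hunks below introduce. A hypothetical helper sketching the new signatures:

package main

import (
	jose "github.com/go-jose/go-jose/v4"
)

// parseBoth shows the v4 entry points; in v3, ParseEncrypted and ParseSigned
// took only the serialized input. The algorithm choices here are examples.
func parseBoth(rawJWE, rawJWS string) error {
	if _, err := jose.ParseEncrypted(rawJWE,
		[]jose.KeyAlgorithm{jose.ECDH_ES_A256KW},
		[]jose.ContentEncryption{jose.A256GCM},
	); err != nil {
		return err
	}
	_, err := jose.ParseSigned(rawJWS, []jose.SignatureAlgorithm{jose.EdDSA})
	return err
}

func main() {}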
diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v4/doc.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/doc.go rename to vendor/github.com/go-jose/go-jose/v4/doc.go diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go similarity index 92% rename from vendor/github.com/go-jose/go-jose/v3/encoding.go rename to vendor/github.com/go-jose/go-jose/v4/encoding.go index 9f07cfdc..4f6e0d4a 100644 --- a/vendor/github.com/go-jose/go-jose/v3/encoding.go +++ b/vendor/github.com/go-jose/go-jose/v4/encoding.go @@ -27,7 +27,7 @@ import ( "strings" "unicode" - "github.com/go-jose/go-jose/v3/json" + "github.com/go-jose/go-jose/v4/json" ) // Helper function to serialize known-good objects. @@ -106,10 +106,7 @@ func inflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) reader := flate.NewReader(bytes.NewBuffer(input)) - maxCompressedSize := 10 * int64(len(input)) - if maxCompressedSize < 250000 { - maxCompressedSize = 250000 - } + maxCompressedSize := max(250_000, 10*int64(len(input))) limit := maxCompressedSize + 1 n, err := io.CopyN(output, reader, limit) @@ -167,7 +164,7 @@ func (b *byteBuffer) UnmarshalJSON(data []byte) error { return nil } - decoded, err := base64URLDecode(encoded) + decoded, err := base64.RawURLEncoding.DecodeString(encoded) if err != nil { return err } @@ -197,12 +194,6 @@ func (b byteBuffer) toInt() int { return int(b.bigInt().Int64()) } -// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C -func base64URLDecode(value string) ([]byte, error) { - value = strings.TrimRight(value, "=") - return base64.RawURLEncoding.DecodeString(value) -} - func base64EncodeLen(sl []byte) int { return base64.RawURLEncoding.EncodedLen(len(sl)) } diff --git a/vendor/github.com/go-jose/go-jose/v3/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/json/LICENSE rename to vendor/github.com/go-jose/go-jose/v4/json/LICENSE diff --git a/vendor/github.com/go-jose/go-jose/v3/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/json/README.md rename to vendor/github.com/go-jose/go-jose/v4/json/README.md diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/json/decode.go rename to vendor/github.com/go-jose/go-jose/v4/json/decode.go diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/json/encode.go rename to vendor/github.com/go-jose/go-jose/v4/json/encode.go diff --git a/vendor/github.com/go-jose/go-jose/v3/json/indent.go b/vendor/github.com/go-jose/go-jose/v4/json/indent.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/json/indent.go rename to vendor/github.com/go-jose/go-jose/v4/json/indent.go diff --git a/vendor/github.com/go-jose/go-jose/v3/json/scanner.go b/vendor/github.com/go-jose/go-jose/v4/json/scanner.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/json/scanner.go rename to vendor/github.com/go-jose/go-jose/v4/json/scanner.go diff --git a/vendor/github.com/go-jose/go-jose/v3/json/stream.go 
b/vendor/github.com/go-jose/go-jose/v4/json/stream.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/json/stream.go rename to vendor/github.com/go-jose/go-jose/v4/json/stream.go diff --git a/vendor/github.com/go-jose/go-jose/v3/json/tags.go b/vendor/github.com/go-jose/go-jose/v4/json/tags.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/json/tags.go rename to vendor/github.com/go-jose/go-jose/v4/json/tags.go diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go similarity index 64% rename from vendor/gopkg.in/go-jose/go-jose.v2/jwe.go rename to vendor/github.com/go-jose/go-jose/v4/jwe.go index a8966ab8..89f03ee3 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go @@ -18,10 +18,11 @@ package jose import ( "encoding/base64" + "errors" "fmt" "strings" - "gopkg.in/go-jose/go-jose.v2/json" + "github.com/go-jose/go-jose/v4/json" ) // rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing. @@ -86,11 +87,12 @@ func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader { func (obj JSONWebEncryption) computeAuthData() []byte { var protected string - if obj.original != nil && obj.original.Protected != nil { + switch { + case obj.original != nil && obj.original.Protected != nil: protected = obj.original.Protected.base64() - } else if obj.protected != nil { + case obj.protected != nil: protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected))) - } else { + default: protected = "" } @@ -103,29 +105,75 @@ func (obj JSONWebEncryption) computeAuthData() []byte { return output } -// ParseEncrypted parses an encrypted message in compact or full serialization format. -func ParseEncrypted(input string) (*JSONWebEncryption, error) { +func containsKeyAlgorithm(haystack []KeyAlgorithm, needle KeyAlgorithm) bool { + for _, algorithm := range haystack { + if algorithm == needle { + return true + } + } + return false +} + +func containsContentEncryption(haystack []ContentEncryption, needle ContentEncryption) bool { + for _, algorithm := range haystack { + if algorithm == needle { + return true + } + } + return false +} + +// ParseEncrypted parses an encrypted message in JWE Compact or JWE JSON Serialization. +// +// https://datatracker.ietf.org/doc/html/rfc7516#section-3.1 +// https://datatracker.ietf.org/doc/html/rfc7516#section-3.2 +// +// The keyAlgorithms and contentEncryption parameters are used to validate the "alg" and "enc" +// header parameters respectively. They must be nonempty, and each "alg" or "enc" header in +// parsed data must contain a value that is present in the corresponding parameter. That +// includes the protected and unprotected headers as well as all recipients. To accept +// multiple algorithms, pass a slice of all the algorithms you want to accept. +func ParseEncrypted(input string, + keyEncryptionAlgorithms []KeyAlgorithm, + contentEncryption []ContentEncryption, +) (*JSONWebEncryption, error) { input = stripWhitespace(input) if strings.HasPrefix(input, "{") { - return parseEncryptedFull(input) + return ParseEncryptedJSON(input, keyEncryptionAlgorithms, contentEncryption) } - return parseEncryptedCompact(input) + return ParseEncryptedCompact(input, keyEncryptionAlgorithms, contentEncryption) } -// parseEncryptedFull parses a message in compact format. 
-func parseEncryptedFull(input string) (*JSONWebEncryption, error) { +// ParseEncryptedJSON parses a message in JWE JSON Serialization. +// +// https://datatracker.ietf.org/doc/html/rfc7516#section-3.2 +func ParseEncryptedJSON( + input string, + keyEncryptionAlgorithms []KeyAlgorithm, + contentEncryption []ContentEncryption, +) (*JSONWebEncryption, error) { var parsed rawJSONWebEncryption err := json.Unmarshal([]byte(input), &parsed) if err != nil { return nil, err } - return parsed.sanitized() + return parsed.sanitized(keyEncryptionAlgorithms, contentEncryption) } // sanitized produces a cleaned-up JWE object from the raw JSON. -func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { +func (parsed *rawJSONWebEncryption) sanitized( + keyEncryptionAlgorithms []KeyAlgorithm, + contentEncryption []ContentEncryption, +) (*JSONWebEncryption, error) { + if len(keyEncryptionAlgorithms) == 0 { + return nil, errors.New("go-jose/go-jose: no key algorithms provided") + } + if len(contentEncryption) == 0 { + return nil, errors.New("go-jose/go-jose: no content encryption algorithms provided") + } + obj := &JSONWebEncryption{ original: parsed, unprotected: parsed.Unprotected, @@ -184,10 +232,31 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { } } - for _, recipient := range obj.recipients { + for i, recipient := range obj.recipients { headers := obj.mergedHeaders(&recipient) - if headers.getAlgorithm() == "" || headers.getEncryption() == "" { - return nil, fmt.Errorf("go-jose/go-jose: message is missing alg/enc headers") + if headers.getAlgorithm() == "" { + return nil, fmt.Errorf(`go-jose/go-jose: recipient %d: missing header "alg"`, i) + } + if headers.getEncryption() == "" { + return nil, fmt.Errorf(`go-jose/go-jose: recipient %d: missing header "enc"`, i) + } + err := validateAlgEnc(headers, keyEncryptionAlgorithms, contentEncryption) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: recipient %d: %s", i, err) + } + + } + + if obj.protected != nil { + err := validateAlgEnc(*obj.protected, keyEncryptionAlgorithms, contentEncryption) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: protected header: %s", err) + } + } + if obj.unprotected != nil { + err := validateAlgEnc(*obj.unprotected, keyEncryptionAlgorithms, contentEncryption) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: unprotected header: %s", err) } } @@ -199,8 +268,26 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { return obj, nil } -// parseEncryptedCompact parses a message in compact format. -func parseEncryptedCompact(input string) (*JSONWebEncryption, error) { +func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncryption []ContentEncryption) error { + alg := headers.getAlgorithm() + enc := headers.getEncryption() + if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) { + return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms) + } + if enc != "" && !containsContentEncryption(contentEncryption, enc) { + return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption) + } + return nil +} + +// ParseEncryptedCompact parses a message in JWE Compact Serialization.
+// +// https://datatracker.ietf.org/doc/html/rfc7516#section-3.1 +func ParseEncryptedCompact( + input string, + keyAlgorithms []KeyAlgorithm, + contentEncryption []ContentEncryption, +) (*JSONWebEncryption, error) { parts := strings.Split(input, ".") if len(parts) != 5 { return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") @@ -239,7 +326,7 @@ func parseEncryptedCompact(input string) (*JSONWebEncryption, error) { Tag: newBuffer(tag), } - return raw.sanitized() + return raw.sanitized(keyAlgorithms, contentEncryption) } // CompactSerialize serializes an object using the compact serialization format. @@ -251,13 +338,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) { serializedProtected := mustSerializeJSON(obj.protected) - return fmt.Sprintf( - "%s.%s.%s.%s.%s", - base64.RawURLEncoding.EncodeToString(serializedProtected), - base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey), - base64.RawURLEncoding.EncodeToString(obj.iv), - base64.RawURLEncoding.EncodeToString(obj.ciphertext), - base64.RawURLEncoding.EncodeToString(obj.tag)), nil + return base64JoinWithDots( + serializedProtected, + obj.recipients[0].encryptedKey, + obj.iv, + obj.ciphertext, + obj.tag, + ), nil } // FullSerialize serializes an object using the full JSON serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go similarity index 99% rename from vendor/github.com/go-jose/go-jose/v3/jwk.go rename to vendor/github.com/go-jose/go-jose/v4/jwk.go index e4021959..a565aaab 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -35,7 +35,7 @@ import ( "reflect" "strings" - "github.com/go-jose/go-jose/v3/json" + "github.com/go-jose/go-jose/v4/json" ) // rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing. @@ -266,7 +266,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { // x5t parameters are base64url-encoded SHA thumbprints // See RFC 7517, Section 4.8, https://tools.ietf.org/html/rfc7517#section-4.8 - x5tSHA1bytes, err := base64URLDecode(raw.X5tSHA1) + x5tSHA1bytes, err := base64.RawURLEncoding.DecodeString(raw.X5tSHA1) if err != nil { return errors.New("go-jose/go-jose: invalid JWK, x5t header has invalid encoding") } @@ -286,7 +286,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { k.CertificateThumbprintSHA1 = x5tSHA1bytes - x5tSHA256bytes, err := base64URLDecode(raw.X5tSHA256) + x5tSHA256bytes, err := base64.RawURLEncoding.DecodeString(raw.X5tSHA256) if err != nil { return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header has invalid encoding") } diff --git a/vendor/github.com/go-jose/go-jose/v3/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go similarity index 80% rename from vendor/github.com/go-jose/go-jose/v3/jws.go rename to vendor/github.com/go-jose/go-jose/v4/jws.go index e37007db..3a912301 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jws.go +++ b/vendor/github.com/go-jose/go-jose/v4/jws.go @@ -23,7 +23,7 @@ import ( "fmt" "strings" - "github.com/go-jose/go-jose/v3/json" + "github.com/go-jose/go-jose/v4/json" ) // rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing. @@ -75,22 +75,41 @@ type Signature struct { original *rawSignatureInfo } -// ParseSigned parses a signed message in compact or JWS JSON Serialization format. 
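[Illustrative note, not part of the patch] End-to-end sketch of the JWE side introduced above: both allowlists are mandatory and nonempty, and sanitized() checks them against the protected, unprotected, and per-recipient headers before any key material is touched.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	enc, err := jose.NewEncrypter(
		jose.A256GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP_256, Key: &key.PublicKey},
		nil,
	)
	if err != nil {
		log.Fatal(err)
	}
	obj, err := enc.Encrypt([]byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	raw, err := obj.CompactSerialize()
	if err != nil {
		log.Fatal(err)
	}

	// A token naming any alg/enc outside these lists fails at parse time with
	// an "unexpected key algorithm" / "unexpected content encryption" error.
	jwe, err := jose.ParseEncrypted(raw,
		[]jose.KeyAlgorithm{jose.RSA_OAEP_256},
		[]jose.ContentEncryption{jose.A256GCM},
	)
	if err != nil {
		log.Fatal(err)
	}
	pt, err := jwe.Decrypt(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(pt)) // hello
}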
-func ParseSigned(signature string) (*JSONWebSignature, error) { +// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. +// +// https://datatracker.ietf.org/doc/html/rfc7515#section-7 +func ParseSigned( + signature string, + signatureAlgorithms []SignatureAlgorithm, +) (*JSONWebSignature, error) { signature = stripWhitespace(signature) if strings.HasPrefix(signature, "{") { - return parseSignedFull(signature) + return ParseSignedJSON(signature, signatureAlgorithms) } - return parseSignedCompact(signature, nil) + return parseSignedCompact(signature, nil, signatureAlgorithms) +} + +// ParseSignedCompact parses a message in JWS Compact Serialization. +// +// https://datatracker.ietf.org/doc/html/rfc7515#section-7.1 +func ParseSignedCompact( + signature string, + signatureAlgorithms []SignatureAlgorithm, +) (*JSONWebSignature, error) { + return parseSignedCompact(signature, nil, signatureAlgorithms) } // ParseDetached parses a signed message in compact serialization format with detached payload. -func ParseDetached(signature string, payload []byte) (*JSONWebSignature, error) { +func ParseDetached( + signature string, + payload []byte, + signatureAlgorithms []SignatureAlgorithm, +) (*JSONWebSignature, error) { if payload == nil { return nil, errors.New("go-jose/go-jose: nil payload") } - return parseSignedCompact(stripWhitespace(signature), payload) + return parseSignedCompact(stripWhitespace(signature), payload, signatureAlgorithms) } // Get a header value @@ -137,19 +156,36 @@ func (obj JSONWebSignature) computeAuthData(payload []byte, signature *Signature return authData.Bytes(), nil } -// parseSignedFull parses a message in full format. -func parseSignedFull(input string) (*JSONWebSignature, error) { +// ParseSignedJSON parses a message in JWS JSON Serialization. +// +// https://datatracker.ietf.org/doc/html/rfc7515#section-7.2 +func ParseSignedJSON( + input string, + signatureAlgorithms []SignatureAlgorithm, +) (*JSONWebSignature, error) { var parsed rawJSONWebSignature err := json.Unmarshal([]byte(input), &parsed) if err != nil { return nil, err } - return parsed.sanitized() + return parsed.sanitized(signatureAlgorithms) +} + +func containsSignatureAlgorithm(haystack []SignatureAlgorithm, needle SignatureAlgorithm) bool { + for _, algorithm := range haystack { + if algorithm == needle { + return true + } + } + return false } // sanitized produces a cleaned-up JWS object from the raw JSON. 
-func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { +func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) { + if len(signatureAlgorithms) == 0 { + return nil, errors.New("go-jose/go-jose: no signature algorithms specified") + } if parsed.Payload == nil { return nil, fmt.Errorf("go-jose/go-jose: missing payload in JWS message") } @@ -198,6 +234,12 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { return nil, err } + alg := SignatureAlgorithm(signature.Header.Algorithm) + if !containsSignatureAlgorithm(signatureAlgorithms, alg) { + return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", + alg, signatureAlgorithms) + } + if signature.header != nil { signature.Unprotected, err = signature.header.sanitized() if err != nil { @@ -241,6 +283,12 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { return nil, err } + alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm) + if !containsSignatureAlgorithm(signatureAlgorithms, alg) { + return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", + alg, signatureAlgorithms) + } + if obj.Signatures[i].header != nil { obj.Signatures[i].Unprotected, err = obj.Signatures[i].header.sanitized() if err != nil { @@ -274,7 +322,11 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { } // parseSignedCompact parses a message in compact format. -func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) { +func parseSignedCompact( + input string, + payload []byte, + signatureAlgorithms []SignatureAlgorithm, +) (*JSONWebSignature, error) { parts := strings.Split(input, ".") if len(parts) != 3 { return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") @@ -284,19 +336,19 @@ func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) return nil, fmt.Errorf("go-jose/go-jose: payload is not detached") } - rawProtected, err := base64URLDecode(parts[0]) + rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) if err != nil { return nil, err } if payload == nil { - payload, err = base64URLDecode(parts[1]) + payload, err = base64.RawURLEncoding.DecodeString(parts[1]) if err != nil { return nil, err } } - signature, err := base64URLDecode(parts[2]) + signature, err := base64.RawURLEncoding.DecodeString(parts[2]) if err != nil { return nil, err } @@ -306,7 +358,7 @@ func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) Protected: newBuffer(rawProtected), Signature: newBuffer(signature), } - return raw.sanitized() + return raw.sanitized(signatureAlgorithms) } func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) { diff --git a/vendor/github.com/go-jose/go-jose/v3/opaque.go b/vendor/github.com/go-jose/go-jose/v4/opaque.go similarity index 100% rename from vendor/github.com/go-jose/go-jose/v3/opaque.go rename to vendor/github.com/go-jose/go-jose/v4/opaque.go diff --git a/vendor/github.com/go-jose/go-jose/v3/shared.go b/vendor/github.com/go-jose/go-jose/v4/shared.go similarity index 97% rename from vendor/github.com/go-jose/go-jose/v3/shared.go rename to vendor/github.com/go-jose/go-jose/v4/shared.go index 489a04e3..1ec33961 100644 --- a/vendor/github.com/go-jose/go-jose/v3/shared.go +++ b/vendor/github.com/go-jose/go-jose/v4/shared.go @@ -23,7 +23,7 @@ import ( "errors" "fmt" - "github.com/go-jose/go-jose/v3/json" + 
"github.com/go-jose/go-jose/v4/json" ) // KeyAlgorithm represents a key management algorithm. @@ -71,6 +71,12 @@ var ( // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a // nonce header parameter was included in an unprotected header object. ErrUnprotectedNonce = errors.New("go-jose/go-jose: Nonce parameter included in unprotected header") + + // ErrMissingX5cHeader indicates that the JWT header is missing x5c headers. + ErrMissingX5cHeader = errors.New("go-jose/go-jose: no x5c header present in message") + + // ErrUnsupportedEllipticCurve indicates unsupported or unknown elliptic curve has been found. + ErrUnsupportedEllipticCurve = errors.New("go-jose/go-jose: unsupported/unknown elliptic curve") ) // Key management algorithms @@ -199,7 +205,7 @@ type Header struct { // not be validated with the given verify options. func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) { if len(h.certificates) == 0 { - return nil, errors.New("go-jose/go-jose: no x5c header present in message") + return nil, ErrMissingX5cHeader } leaf := h.certificates[0] @@ -501,7 +507,7 @@ func curveName(crv elliptic.Curve) (string, error) { case elliptic.P521(): return "P-521", nil default: - return "", fmt.Errorf("go-jose/go-jose: unsupported/unknown elliptic curve") + return "", ErrUnsupportedEllipticCurve } } diff --git a/vendor/github.com/go-jose/go-jose/v3/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go similarity index 97% rename from vendor/github.com/go-jose/go-jose/v3/signing.go rename to vendor/github.com/go-jose/go-jose/v4/signing.go index 52f3d856..46c9a4d9 100644 --- a/vendor/github.com/go-jose/go-jose/v3/signing.go +++ b/vendor/github.com/go-jose/go-jose/v4/signing.go @@ -25,7 +25,7 @@ import ( "errors" "fmt" - "github.com/go-jose/go-jose/v3/json" + "github.com/go-jose/go-jose/v4/json" ) // NonceSource represents a source of random nonces to go into JWS objects @@ -49,6 +49,11 @@ type Signer interface { // - JSONWebKey // - []byte (an HMAC key) // - Any type that satisfies the OpaqueSigner interface +// +// If the key is an HMAC key, it must have at least as many bytes as the relevant hash output: +// - HS256: 32 bytes +// - HS384: 48 bytes +// - HS512: 64 bytes type SigningKey struct { Algorithm SignatureAlgorithm Key interface{} @@ -353,8 +358,15 @@ func (ctx *genericSigner) Options() SignerOptions { // - *rsa.PublicKey // - *JSONWebKey // - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet // - []byte (an HMAC key) // - Any type that implements the OpaqueVerifier interface. +// +// If the key is an HMAC key, it must have at least as many bytes as the relevant hash output: +// - HS256: 32 bytes +// - HS384: 48 bytes +// - HS512: 64 bytes func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) { err := obj.DetachedVerify(obj.payload, verificationKey) if err != nil { diff --git a/vendor/github.com/go-jose/go-jose/v3/symmetric.go b/vendor/github.com/go-jose/go-jose/v4/symmetric.go similarity index 96% rename from vendor/github.com/go-jose/go-jose/v3/symmetric.go rename to vendor/github.com/go-jose/go-jose/v4/symmetric.go index 10d8e19f..a69103b0 100644 --- a/vendor/github.com/go-jose/go-jose/v3/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric.go @@ -32,7 +32,7 @@ import ( "golang.org/x/crypto/pbkdf2" - josecipher "github.com/go-jose/go-jose/v3/cipher" + josecipher "github.com/go-jose/go-jose/v4/cipher" ) // RandReader is a cryptographically secure random number generator (stubbed out in tests). 
@@ -454,7 +454,7 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { mac, err := ctx.hmac(payload, alg) if err != nil { - return Signature{}, errors.New("go-jose/go-jose: failed to compute hmac") + return Signature{}, err } return Signature{ @@ -486,12 +486,24 @@ func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureA func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) { var hash func() hash.Hash + // https://datatracker.ietf.org/doc/html/rfc7518#section-3.2 + // A key of the same size as the hash output (for instance, 256 bits for + // "HS256") or larger MUST be used switch alg { case HS256: + if len(ctx.key)*8 < 256 { + return nil, ErrInvalidKeySize + } hash = sha256.New case HS384: + if len(ctx.key)*8 < 384 { + return nil, ErrInvalidKeySize + } hash = sha512.New384 case HS512: + if len(ctx.key)*8 < 512 { + return nil, ErrInvalidKeySize + } hash = sha512.New default: return nil, ErrUnsupportedAlgorithm diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go index c049c1ef..28f6967b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -17,6 +17,7 @@ package name import ( // nolint: depguard _ "crypto/sha256" // Recommended by go-digest. + "encoding/json" "strings" "github.com/opencontainers/go-digest" @@ -59,6 +60,25 @@ func (d Digest) String() string { return d.original } +// MarshalJSON formats the digest into a string for JSON serialization. +func (d Digest) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// UnmarshalJSON parses a JSON string into a Digest. +func (d *Digest) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewDigest(s) + if err != nil { + return err + } + *d = n + return nil +} + // NewDigest returns a new Digest representing the given name. 
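[Editor's note] The digest.go hunk below gives go-containerregistry's name.Digest JSON marshaling that round-trips through its string form and re-validates on the way back in. A sketch; the image reference is hypothetical (the digest shown is the sha256 of the empty string):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/google/go-containerregistry/pkg/name"
    )

    func main() {
        d, err := name.NewDigest("gcr.io/example/app@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
        if err != nil {
            panic(err)
        }

        b, _ := json.Marshal(d) // serializes via d.String()
        var back name.Digest
        if err := json.Unmarshal(b, &back); err != nil { // parses and validates via NewDigest
            panic(err)
        }
        fmt.Println(back.String() == d.String()) // true
    }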
func NewDigest(name string, opts ...Option) (Digest, error) { // Split on "@" diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe..b7b83164 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. + if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e08..ae7d4d32 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. 
- MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go index 1d7e2408..d5e7a872 100644 --- a/vendor/github.com/letsencrypt/boulder/core/challenges.go +++ b/vendor/github.com/letsencrypt/boulder/core/challenges.go @@ -10,27 +10,23 @@ func newChallenge(challengeType AcmeChallenge, token string) Challenge { } } -// HTTPChallenge01 constructs a random http-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. +// HTTPChallenge01 constructs a http-01 challenge. func HTTPChallenge01(token string) Challenge { return newChallenge(ChallengeTypeHTTP01, token) } -// DNSChallenge01 constructs a random dns-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. +// DNSChallenge01 constructs a dns-01 challenge. func DNSChallenge01(token string) Challenge { return newChallenge(ChallengeTypeDNS01, token) } -// TLSALPNChallenge01 constructs a random tls-alpn-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. +// TLSALPNChallenge01 constructs a tls-alpn-01 challenge. func TLSALPNChallenge01(token string) Challenge { return newChallenge(ChallengeTypeTLSALPN01, token) } -// NewChallenge constructs a random challenge of the given kind. It returns an -// error if the challenge type is unrecognized. If token is empty a random token -// will be generated, otherwise the provided token is used. +// NewChallenge constructs a challenge of the given kind. It returns an +// error if the challenge type is unrecognized. func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) { switch kind { case ChallengeTypeHTTP01: diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go index 003329c3..59b55a3f 100644 --- a/vendor/github.com/letsencrypt/boulder/core/interfaces.go +++ b/vendor/github.com/letsencrypt/boulder/core/interfaces.go @@ -7,7 +7,7 @@ import ( // PolicyAuthority defines the public interface for the Boulder PA // TODO(#5891): Move this interface to a more appropriate location. 
type PolicyAuthority interface { - WillingToIssueWildcards([]identifier.ACMEIdentifier) error + WillingToIssue([]string) error ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error) ChallengeTypeEnabled(AcmeChallenge) bool CheckAuthz(*Authorization) error diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go index b52f0f5e..64df4a8d 100644 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ b/vendor/github.com/letsencrypt/boulder/core/objects.go @@ -10,8 +10,8 @@ import ( "strings" "time" + "github.com/go-jose/go-jose/v4" "golang.org/x/crypto/ocsp" - "gopkg.in/go-jose/go-jose.v2" "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/probs" @@ -119,7 +119,7 @@ type Registration struct { } // ValidationRecord represents a validation attempt against a specific URL/hostname -// and the IP addresses that were resolved and used +// and the IP addresses that were resolved and used. type ValidationRecord struct { // SimpleHTTP only URL string `json:"url,omitempty"` @@ -144,6 +144,17 @@ type ValidationRecord struct { // ... // } AddressesTried []net.IP `json:"addressesTried,omitempty"` + // ResolverAddrs is the host:port of the DNS resolver(s) that fulfilled the + // lookup for AddressUsed. During recursive A and AAAA lookups, a record may + // instead look like A:host:port or AAAA:host:port + ResolverAddrs []string `json:"resolverAddrs,omitempty"` + // UsedRSAKEX is a *temporary* addition to the validation record, so we can + // see how many servers that we reach out to during HTTP-01 and TLS-ALPN-01 + // validation are only willing to negotiate RSA key exchange mechanisms. The + // field is not included in the serialized json to avoid cluttering the + // database and log lines. + // TODO(#7321): Remove this when we have collected sufficient data. + UsedRSAKEX bool `json:"-"` } func looksLikeKeyAuthorization(str string) error { @@ -225,6 +236,8 @@ func (ch Challenge) RecordsSane() bool { switch ch.Type { case ChallengeTypeHTTP01: for _, rec := range ch.ValidationRecord { + // TODO(#7140): Add a check for ResolverAddress == "" only after the + // core.proto change has been deployed. if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || rec.AddressUsed == nil || len(rec.AddressesResolved) == 0 { return false @@ -237,6 +250,8 @@ func (ch Challenge) RecordsSane() bool { if ch.ValidationRecord[0].URL != "" { return false } + // TODO(#7140): Add a check for ResolverAddress == "" only after the + // core.proto change has been deployed. if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" || ch.ValidationRecord[0].AddressUsed == nil || len(ch.ValidationRecord[0].AddressesResolved) == 0 { return false @@ -245,6 +260,8 @@ func (ch Challenge) RecordsSane() bool { if len(ch.ValidationRecord) > 1 { return false } + // TODO(#7140): Add a check for ResolverAddress == "" only after the + // core.proto change has been deployed. if ch.ValidationRecord[0].Hostname == "" { return false } @@ -483,6 +500,12 @@ type SuggestedWindow struct { End time.Time `json:"end"` } +// IsWithin returns true if the given time is within the suggested window, +// inclusive of the start time and exclusive of the end time. +func (window SuggestedWindow) IsWithin(now time.Time) bool { + return !now.Before(window.Start) && now.Before(window.End) +} + // RenewalInfo is a type which is exposed to clients which query the renewalInfo // endpoint specified in draft-aaron-ari. 
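[Editor's note] The new SuggestedWindow.IsWithin above is inclusive of Start and exclusive of End, which matters when comparing against the window boundaries themselves. A quick sketch with made-up times:

    package main

    import (
        "fmt"
        "time"

        "github.com/letsencrypt/boulder/core"
    )

    func main() {
        start := time.Date(2024, 7, 1, 0, 0, 0, 0, time.UTC)
        w := core.SuggestedWindow{Start: start, End: start.Add(24 * time.Hour)}

        fmt.Println(w.IsWithin(start))                     // true: start is inclusive
        fmt.Println(w.IsWithin(start.Add(12 * time.Hour))) // true: inside the window
        fmt.Println(w.IsWithin(w.End))                     // false: end is exclusive
    }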
type RenewalInfo struct { diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index d7fe0266..31f6d2fc 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -25,7 +25,9 @@ import ( "time" "unicode" - "gopkg.in/go-jose/go-jose.v2" + "github.com/go-jose/go-jose/v4" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) const Unspecified = "Unspecified" @@ -92,8 +94,7 @@ func Fingerprint256(data []byte) string { type Sha256Digest [sha256.Size]byte -// KeyDigest produces a Base64-encoded SHA256 digest of a -// provided public key. +// KeyDigest produces the SHA256 digest of a provided public key. func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) { switch t := key.(type) { case *jose.JSONWebKey: @@ -212,10 +213,83 @@ func IsAnyNilOrZero(vals ...interface{}) bool { switch v := val.(type) { case nil: return true + case bool: + if !v { + return true + } + case string: + if v == "" { + return true + } + case []string: + if len(v) == 0 { + return true + } + case byte: + // Byte is an alias for uint8 and will cover that case. + if v == 0 { + return true + } case []byte: if len(v) == 0 { return true } + case int: + if v == 0 { + return true + } + case int8: + if v == 0 { + return true + } + case int16: + if v == 0 { + return true + } + case int32: + if v == 0 { + return true + } + case int64: + if v == 0 { + return true + } + case uint: + if v == 0 { + return true + } + case uint16: + if v == 0 { + return true + } + case uint32: + if v == 0 { + return true + } + case uint64: + if v == 0 { + return true + } + case float32: + if v == 0 { + return true + } + case float64: + if v == 0 { + return true + } + case time.Time: + if v.IsZero() { + return true + } + case *timestamppb.Timestamp: + if v == nil || v.AsTime().IsZero() { + return true + } + case *durationpb.Duration: + if v == nil || v.AsDuration() == time.Duration(0) { + return true + } default: if reflect.ValueOf(v).IsZero() { return true diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go index 2cc76623..ec6c272a 100644 --- a/vendor/github.com/letsencrypt/boulder/probs/probs.go +++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go @@ -20,6 +20,8 @@ const ( BadRevocationReasonProblem = ProblemType("badRevocationReason") BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") CAAProblem = ProblemType("caa") + // ConflictProblem is a problem type that is not defined in RFC8555. + ConflictProblem = ProblemType("conflict") ConnectionProblem = ProblemType("connection") DNSProblem = ProblemType("dns") InvalidContactProblem = ProblemType("invalidContact") @@ -290,11 +292,11 @@ func Canceled(detail string, a ...any) *ProblemDetails { } } -// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict +// Conflict returns a ProblemDetails with a ConflictProblem and a 409 Conflict // status code. 
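[Editor's note] The util.go hunk above teaches IsAnyNilOrZero to fast-path many concrete types (bools, strings, all the numeric widths, time.Time, and the protobuf timestamp/duration wrappers) before falling back to reflection, with explicit nil handling for the pointer wrappers. A sketch of the observable behavior:

    package main

    import (
        "fmt"
        "time"

        "github.com/letsencrypt/boulder/core"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        fmt.Println(core.IsAnyNilOrZero(1, "x", true)) // false: nothing is zero
        fmt.Println(core.IsAnyNilOrZero(1, ""))        // true: empty string
        fmt.Println(core.IsAnyNilOrZero(time.Time{}))  // true: zero time

        fmt.Println(core.IsAnyNilOrZero(durationpb.New(0))) // true: zero duration wrapper
        var d *durationpb.Duration
        fmt.Println(core.IsAnyNilOrZero(d)) // true: typed nil pointer is caught explicitly
    }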
func Conflict(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: ConflictProblem, Detail: detail, HTTPStatus: http.StatusConflict, } diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go index e78e7261..58f13c26 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go @@ -51,7 +51,7 @@ func mountedByOpenat2(path string) (bool, error) { Resolve: unix.RESOLVE_NO_XDEV, }) _ = unix.Close(dirfd) - switch err { //nolint:errorlint // unix errors are bare + switch err { case nil: // definitely not a mount _ = unix.Close(fd) return false, nil diff --git a/vendor/github.com/moby/sys/user/user.go b/vendor/github.com/moby/sys/user/user.go index 984466d1..198c4936 100644 --- a/vendor/github.com/moby/sys/user/user.go +++ b/vendor/github.com/moby/sys/user/user.go @@ -197,7 +197,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { for { var line []byte line, isPrefix, err = rd.ReadLine() - if err != nil { // We should return no error if EOF is reached // without a match. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go index 6c79f899..72c9cd70 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go @@ -35,15 +35,31 @@ func (s *CpuGroup) Apply(path string, r *configs.Resources, pid int) error { } func (s *CpuGroup) SetRtSched(path string, r *configs.Resources) error { + var period string if r.CpuRtPeriod != 0 { - if err := cgroups.WriteFile(path, "cpu.rt_period_us", strconv.FormatUint(r.CpuRtPeriod, 10)); err != nil { - return err + period = strconv.FormatUint(r.CpuRtPeriod, 10) + if err := cgroups.WriteFile(path, "cpu.rt_period_us", period); err != nil { + // The values of cpu.rt_period_us and cpu.rt_runtime_us + // are inter-dependent and need to be set in a proper order. + // If the kernel rejects the new period value with EINVAL + // and the new runtime value is also being set, let's + // ignore the error for now and retry later. + if !errors.Is(err, unix.EINVAL) || r.CpuRtRuntime == 0 { + return err + } + } else { + period = "" } } if r.CpuRtRuntime != 0 { if err := cgroups.WriteFile(path, "cpu.rt_runtime_us", strconv.FormatInt(r.CpuRtRuntime, 10)); err != nil { return err } + if period != "" { + if err := cgroups.WriteFile(path, "cpu.rt_period_us", period); err != nil { + return err + } + } } return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 984466d1..198c4936 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -197,7 +197,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { for { var line []byte line, isPrefix, err = rd.ReadLine() - if err != nil { // We should return no error if EOF is reached // without a match. 
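[Editor's note] The runc SetRtSched hunk above encodes an ordering dance: cpu.rt_period_us and cpu.rt_runtime_us constrain each other, so an EINVAL when writing the new period is deferred and retried after the runtime has been written. A generic sketch of that retry pattern under stated assumptions; writeFile is a hypothetical stand-in for cgroups.WriteFile, and the cgroup path is made up:

    package main

    import (
        "errors"
        "os"
        "path/filepath"

        "golang.org/x/sys/unix"
    )

    func writeFile(dir, file, data string) error {
        return os.WriteFile(filepath.Join(dir, file), []byte(data), 0o600)
    }

    func setRtSched(path, period, runtime string) error {
        retryPeriod := false
        if period != "" {
            if err := writeFile(path, "cpu.rt_period_us", period); err != nil {
                // Only defer the failure if the runtime is also being changed;
                // otherwise the EINVAL is final.
                if !errors.Is(err, unix.EINVAL) || runtime == "" {
                    return err
                }
                retryPeriod = true
            }
        }
        if runtime != "" {
            if err := writeFile(path, "cpu.rt_runtime_us", runtime); err != nil {
                return err
            }
            if retryPeriod {
                // The runtime no longer conflicts; retry the period write.
                return writeFile(path, "cpu.rt_period_us", period)
            }
        }
        return nil
    }

    func main() { _ = setRtSched("/sys/fs/cgroup/cpu/test", "1000000", "950000") }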
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md deleted file mode 100644 index 7e78dce0..00000000 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md +++ /dev/null @@ -1,48 +0,0 @@ -## pwalk: parallel implementation of filepath.Walk - -This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk) -which may speed it up by calling multiple callback functions (WalkFunc) in parallel, -utilizing goroutines. - -By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks. -This can be changed by using WalkN function which has the additional -parameter, specifying the number of goroutines (concurrency). - -### pwalk vs pwalkdir - -This package is deprecated in favor of -[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir), -which is faster, but requires at least Go 1.16. - -### Caveats - -Please note the following limitations of this code: - -* Unlike filepath.Walk, the order of calls is non-deterministic; - -* Only primitive error handling is supported: - - * filepath.SkipDir is not supported; - - * no errors are ever passed to WalkFunc; - - * once any error is returned from any WalkFunc instance, no more new calls - to WalkFunc are made, and the error is returned to the caller of Walk; - - * if more than one walkFunc instance will return an error, only one - of such errors will be propagated and returned by Walk, others - will be silently discarded. - -### Documentation - -For the official documentation, see -https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc - -### Benchmarks - -For a WalkFunc that consists solely of the return statement, this -implementation is about 10% slower than the standard library's -filepath.Walk. - -Otherwise (if a WalkFunc is doing something) this is usually faster, -except when the WalkN(..., 1) is used. diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go deleted file mode 100644 index a28b4c4b..00000000 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go +++ /dev/null @@ -1,123 +0,0 @@ -package pwalk - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "sync" -) - -// WalkFunc is the type of the function called by Walk to visit each -// file or directory. It is an alias for [filepath.WalkFunc]. -// -// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir] and [fs.WalkDirFunc]. -type WalkFunc = filepath.WalkFunc - -// Walk is a wrapper for filepath.Walk which can call multiple walkFn -// in parallel, allowing to handle each item concurrently. A maximum of -// twice the runtime.NumCPU() walkFn will be called at any one time. -// If you want to change the maximum, use WalkN instead. -// -// The order of calls is non-deterministic. -// -// Note that this implementation only supports primitive error handling: -// -// - no errors are ever passed to walkFn; -// -// - once a walkFn returns any error, all further processing stops -// and the error is returned to the caller of Walk; -// -// - filepath.SkipDir is not supported; -// -// - if more than one walkFn instance will return an error, only one -// of such errors will be propagated and returned by Walk, others -// will be silently discarded. 
-// -// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir.Walk] -func Walk(root string, walkFn WalkFunc) error { - return WalkN(root, walkFn, runtime.NumCPU()*2) -} - -// WalkN is a wrapper for filepath.Walk which can call multiple walkFn -// in parallel, allowing to handle each item concurrently. A maximum of -// num walkFn will be called at any one time. -// -// Please see Walk documentation for caveats of using this function. -// -// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir.WalkN] -func WalkN(root string, walkFn WalkFunc, num int) error { - // make sure limit is sensible - if num < 1 { - return fmt.Errorf("walk(%q): num must be > 0", root) - } - - files := make(chan *walkArgs, 2*num) - errCh := make(chan error, 1) // get the first error, ignore others - - // Start walking a tree asap - var ( - err error - wg sync.WaitGroup - - rootLen = len(root) - rootEntry *walkArgs - ) - wg.Add(1) - go func() { - err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error { - if err != nil { - close(files) - return err - } - if len(p) == rootLen { - // Root entry is processed separately below. - rootEntry = &walkArgs{path: p, info: &info} - return nil - } - // add a file to the queue unless a callback sent an error - select { - case e := <-errCh: - close(files) - return e - default: - files <- &walkArgs{path: p, info: &info} - return nil - } - }) - if err == nil { - close(files) - } - wg.Done() - }() - - wg.Add(num) - for i := 0; i < num; i++ { - go func() { - for file := range files { - if e := walkFn(file.path, *file.info, nil); e != nil { - select { - case errCh <- e: // sent ok - default: // buffer full - } - } - } - wg.Done() - }() - } - - wg.Wait() - - if err == nil { - err = walkFn(rootEntry.path, *rootEntry.info, nil) - } - - return err -} - -// walkArgs holds the arguments that were passed to the Walk or WalkN -// functions. -type walkArgs struct { - info *os.FileInfo - path string -} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index a618ec24..2c8f4808 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -26,33 +26,28 @@ linters: - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck + - staticcheck #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - #- stylecheck + - stylecheck #- typecheck - unconvert #- unparam diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 5f965e05..25c30e3c 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,21 +17,17 @@ package cobra import ( "fmt" "os" - "regexp" - "strings" ) const ( activeHelpMarker = "_activeHelp_ " // The below values should not be changed: programs will be using them explicitly // in their user documentation, and users will be using them explicitly. 
- activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix activeHelpGlobalDisable = "0" ) -var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) - // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. // Such strings will be processed by the completion script and will be shown as ActiveHelp // to the user. @@ -60,8 +56,5 @@ func GetActiveHelpConfig(cmd *Command) string { // variable. It has the format _ACTIVE_HELP where is the name of the // root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly. - activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") - return activeHelpEnvVar + return configEnvVar(name, activeHelpEnvVarSuffix) } diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index e79ec33a..ed1e70ce 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -52,9 +52,9 @@ func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description is following a tab character. - var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } for _, v := range args { if !stringInSlice(v, validArgs) { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 8a531518..f4d198cb 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -597,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -621,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. 
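[Editor's note] The active_help.go hunk above only refactors how the environment variable name is built (it is now shared with other config vars via configEnvVar); the user-facing names <PROGRAM>_ACTIVE_HELP and COBRA_ACTIVE_HELP are unchanged, as is the API for emitting active help. A sketch of the emitting side, with a hypothetical command name:

    package main

    import "github.com/spf13/cobra"

    func main() {
        cmd := &cobra.Command{
            Use: "deploy",
            ValidArgsFunction: func(c *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
                var comps []string
                // Shown to the user during shell completion unless disabled via
                // DEPLOY_ACTIVE_HELP=0 (or COBRA_ACTIVE_HELP=0 globally).
                comps = cobra.AppendActiveHelp(comps, "You must specify an environment name")
                return comps, cobra.ShellCompDirectiveNoFileComp
            },
            Run: func(c *cobra.Command, args []string) {},
        }
        _ = cmd.Execute()
    }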
- value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index a6b160ce..e0b0947b 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -193,8 +193,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 2fbe6c13..54748fc6 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -154,8 +154,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. + // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. parentsPflags *flag.FlagSet @@ -706,7 +708,7 @@ Loop: // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, // return the args, excluding the one at this position. if s == x { - ret := []string{} + ret := make([]string, 0, len(args)-1) ret = append(ret, args[:pos]...) ret = append(ret, args[pos+1:]...) return ret @@ -754,14 +756,14 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { @@ -873,7 +875,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -1187,10 +1189,11 @@ func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() if c.Flags().Lookup("help") == nil { usage := "help for " - if c.Name() == "" { + name := c.displayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } c.Flags().BoolP("help", "h", false, usage) _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) @@ -1236,7 +1239,7 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. 
-Simply type ` + c.Name() + ` help [path to command] for full details.`, +Simply type ` + c.displayName() + ` help [path to command] for full details.`, ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { var completions []string cmd, _, e := c.Root().Find(args) @@ -1427,6 +1430,10 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.displayName() +} + +func (c *Command) displayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1436,10 +1443,11 @@ func (c *Command) CommandPath() string { // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline @@ -1452,7 +1460,6 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist. -// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -1642,7 +1649,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1653,10 +1660,11 @@ func (c *Command) Flags() *flag.FlagSet { } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1666,11 +1674,12 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1693,11 +1702,12 @@ func (c *Command) LocalFlags() *flag.FlagSet { } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. 
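[Editor's note] The displayName() refactor above threads the CommandDisplayNameAnnotation through help text, usage lines, and the names of the lazily created flag sets, where previously only CommandPath honored it. A sketch of opting in, using a hypothetical plugin-style binary name:

    package main

    import "github.com/spf13/cobra"

    func main() {
        root := &cobra.Command{
            Use: "kubectl-plugin",
            Annotations: map[string]string{
                cobra.CommandDisplayNameAnnotation: "kubectl plugin",
            },
            Run: func(cmd *cobra.Command, args []string) {},
        }
        // Help output and usage lines now render "kubectl plugin" via
        // displayName() instead of the raw Name() "kubectl-plugin".
        _ = root.Execute()
    }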
func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1718,6 +1728,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1725,7 +1736,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1738,9 +1749,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1857,7 +1868,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index b60f6b20..c0c08b05 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -17,6 +17,8 @@ package cobra import ( "fmt" "os" + "regexp" + "strconv" "strings" "sync" @@ -211,24 +213,29 @@ func (c *Command) initCompleteCmd(args []string) { // 2- Even without completions, we need to print the directive } - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd + if !noDescriptions { + if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil { + noDescriptions = !doDescriptions + } + } + noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable + out := finalCmd.OutOrStdout() for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } + if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) { + // Remove all activeHelp entries if it's disabled. + continue } if noDescriptions { // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] + comp = strings.SplitN(comp, "\t", 2)[0] } // Make sure we only write the first line to the output. // This is needed if a description contains a linebreak. 
// Otherwise the shell scripts will interpret the other lines as new flags // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] + comp = strings.SplitN(comp, "\n", 2)[0] // Finally trim the completion. This is especially important to get rid // of a trailing tab when there are no description following it. @@ -237,14 +244,14 @@ func (c *Command) initCompleteCmd(args []string) { // although there is no description). comp = strings.TrimSpace(comp) - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) + // Print each possible completion to the output for the completion script to consume. + fmt.Fprintln(out, comp) } // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). // The completion script expects : - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + fmt.Fprintf(out, ":%d\n", directive) // Print some helpful info to stderr for the user to understand. // Output from stderr must be ignored by the completion script. @@ -291,7 +298,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command. E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -899,3 +906,34 @@ func CompError(msg string) { func CompErrorln(msg string) { CompError(fmt.Sprintf("%s\n", msg)) } + +// These values should not be changed: users will be using them explicitly. +const ( + configEnvVarGlobalPrefix = "COBRA" + configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS" +) + +var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// configEnvVar returns the name of the program-specific configuration environment +// variable. It has the format _ where is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func configEnvVar(name, suffix string) string { + // This format should not be changed: users will be using it explicitly. + v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix)) + v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_") + return v +} + +// getEnvConfig returns the value of the configuration environment variable +// _ where is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// If the value is empty or not set, the value of the environment variable +// COBRA_ is returned instead. 
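[Editor's note] The completions.go changes above add a user-facing toggle: getEnvConfig consults <PROGRAM>_COMPLETION_DESCRIPTIONS first and falls back to COBRA_COMPLETION_DESCRIPTIONS, and a falsy value strips the tab-separated descriptions from completion output. A sketch under stated assumptions; the program name "myapp" (and hence the MYAPP_ prefix) is hypothetical, and os.Setenv stands in for the user exporting the variable in their shell:

    package main

    import (
        "os"

        "github.com/spf13/cobra"
    )

    func main() {
        // Equivalent to a user running: MYAPP_COMPLETION_DESCRIPTIONS=0 myapp __complete ""
        os.Setenv("MYAPP_COMPLETION_DESCRIPTIONS", "0")

        root := &cobra.Command{Use: "myapp", Run: func(*cobra.Command, []string) {}}
        root.SetArgs([]string{cobra.ShellCompRequestCmd, ""})
        _ = root.Execute() // completions print without their descriptions
    }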
+func getEnvConfig(cmd *Command, suffix string) string { + v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix)) + if v == "" { + v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix)) + } + return v +} diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 0671ec5f..560612fd 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -23,9 +23,9 @@ import ( ) const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - oneRequired = "cobra_annotation_one_required" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" + requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set" + oneRequiredAnnotation = "cobra_annotation_one_required" + mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive" ) // MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors @@ -37,7 +37,7 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -53,7 +53,7 @@ func (c *Command) MarkFlagsOneRequired(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) } - if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -70,7 +70,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) } // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. 
- if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { panic(err) } } @@ -91,9 +91,9 @@ func (c *Command) ValidateFlagGroups() error { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { @@ -130,7 +130,7 @@ func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annota continue } - groupStatus[group] = map[string]bool{} + groupStatus[group] = make(map[string]bool, len(flagnames)) for _, name := range flagnames { groupStatus[group][name] = false } @@ -232,9 +232,9 @@ func (c *Command) enforceFlagGroupsForCompletion() { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) // If a flag that is part of a group is present, we make all the other flags @@ -253,17 +253,17 @@ func (c *Command) enforceFlagGroupsForCompletion() { // If none of the flags of a one-required group are present, we make all the flags // of that group required so that the shell completion suggests them automatically for flagList, flagnameAndStatus := range oneRequiredGroupStatus { - set := 0 + isSet := false - for _, isSet := range flagnameAndStatus { + for _, isSet = range flagnameAndStatus { if isSet { - set++ + break } } // None of the flags of the group are set, mark all flags in the group // as required - if set == 0 { + if !isSet { for _, fName := range strings.Split(flagList, " ") { _ = c.MarkFlagRequired(fName) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 55195193..a830b7bc 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -28,8 +28,8 @@ import ( func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = 
strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/add.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/add.go new file mode 100644 index 00000000..db1bd6b4 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/add.go @@ -0,0 +1,81 @@ +// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "fmt" + "time" +) + +// addOpts accumulates object add options. +type addOpts struct { + t time.Time +} + +// AddOpt are used to specify object add options. +type AddOpt func(*addOpts) error + +// OptAddDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptAddDeterministic() AddOpt { + return func(ao *addOpts) error { + ao.t = time.Time{} + return nil + } +} + +// OptAddWithTime specifies t as the image modification time. +func OptAddWithTime(t time.Time) AddOpt { + return func(ao *addOpts) error { + ao.t = t + return nil + } +} + +// AddObject adds a new data object and its descriptor into the specified SIF file. +// +// By default, the image modification time is set to the current time for non-deterministic images, +// and unset otherwise. To override this, consider using OptAddDeterministic or OptAddWithTime. +func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error { + ao := addOpts{} + + if !f.isDeterministic() { + ao.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&ao); err != nil { + return fmt.Errorf("%w", err) + } + } + + // Find an unused descriptor. + i := 0 + for _, rd := range f.rds { + if !rd.Used { + break + } + i++ + } + + if err := f.writeDataObject(i, di, ao.t); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = ao.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index be3f6dc1..0eb1e1d1 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. +// Copyright (c) 2018-2024, Sylabs Inc. All rights reserved. // Copyright (c) 2017, SingularityWare, LLC. All rights reserved. // Copyright (c) 2017, Yannick Cote All rights reserved. // This software is licensed under a 3-clause BSD license. Please consult the @@ -8,7 +8,6 @@ package sif import ( - "encoding" "encoding/binary" "errors" "fmt" @@ -56,6 +55,20 @@ func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorIn return nil } +// calculatedDataSize calculates the size of the data section based on the in-use descriptors. 
+func (f *FileImage) calculatedDataSize() int64 { + dataEnd := f.DataOffset() + + f.WithDescriptors(func(d Descriptor) bool { + if objectEnd := d.Offset() + d.Size(); dataEnd < objectEnd { + dataEnd = objectEnd + } + return false + }) + + return dataEnd - f.DataOffset() +} + var ( errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image") errPrimaryPartition = errors.New("image already contains a primary partition") @@ -81,6 +94,8 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro d := &f.rds[i] d.ID = uint32(i) + 1 + f.h.DataSize = f.calculatedDataSize() + if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil { return err } @@ -321,378 +336,3 @@ func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) { f.closeOnUnload = true return f, nil } - -// addOpts accumulates object add options. -type addOpts struct { - t time.Time -} - -// AddOpt are used to specify object add options. -type AddOpt func(*addOpts) error - -// OptAddDeterministic sets header/descriptor fields to values that support deterministic -// modification of images. -func OptAddDeterministic() AddOpt { - return func(ao *addOpts) error { - ao.t = time.Time{} - return nil - } -} - -// OptAddWithTime specifies t as the image modification time. -func OptAddWithTime(t time.Time) AddOpt { - return func(ao *addOpts) error { - ao.t = t - return nil - } -} - -// AddObject adds a new data object and its descriptor into the specified SIF file. -// -// By default, the image modification time is set to the current time for non-deterministic images, -// and unset otherwise. To override this, consider using OptAddDeterministic or OptAddWithTime. -func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error { - ao := addOpts{} - - if !f.isDeterministic() { - ao.t = time.Now() - } - - for _, opt := range opts { - if err := opt(&ao); err != nil { - return fmt.Errorf("%w", err) - } - } - - // Find an unused descriptor. - i := 0 - for _, rd := range f.rds { - if !rd.Used { - break - } - i++ - } - - if err := f.writeDataObject(i, di, ao.t); err != nil { - return fmt.Errorf("%w", err) - } - - if err := f.writeDescriptors(); err != nil { - return fmt.Errorf("%w", err) - } - - f.h.ModifiedAt = ao.t.Unix() - - if err := f.writeHeader(); err != nil { - return fmt.Errorf("%w", err) - } - - return nil -} - -// isLast return true if the data object associated with d is the last in f. -func (f *FileImage) isLast(d *rawDescriptor) bool { - isLast := true - - end := d.Offset + d.Size - f.WithDescriptors(func(d Descriptor) bool { - isLast = d.Offset()+d.Size() <= end - return !isLast - }) - - return isLast -} - -// zeroReader is an io.Reader that returns a stream of zero-bytes. -type zeroReader struct{} - -func (zeroReader) Read(b []byte) (int, error) { - for i := range b { - b[i] = 0 - } - return len(b), nil -} - -// zero overwrites the data object described by d with a stream of zero bytes. -func (f *FileImage) zero(d *rawDescriptor) error { - if _, err := f.rw.Seek(d.Offset, io.SeekStart); err != nil { - return err - } - - _, err := io.CopyN(f.rw, zeroReader{}, d.Size) - return err -} - -// truncateAt truncates f at the start of the padded data object described by d. -func (f *FileImage) truncateAt(d *rawDescriptor) error { - start := d.Offset + d.Size - d.SizeWithPadding - - return f.rw.Truncate(start) -} - -// deleteOpts accumulates object deletion options. 
-type deleteOpts struct { - zero bool - compact bool - t time.Time -} - -// DeleteOpt are used to specify object deletion options. -type DeleteOpt func(*deleteOpts) error - -// OptDeleteZero specifies whether the deleted object should be zeroed. -func OptDeleteZero(b bool) DeleteOpt { - return func(do *deleteOpts) error { - do.zero = b - return nil - } -} - -// OptDeleteCompact specifies whether the image should be compacted following object deletion. -func OptDeleteCompact(b bool) DeleteOpt { - return func(do *deleteOpts) error { - do.compact = b - return nil - } -} - -// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic -// modification of images. -func OptDeleteDeterministic() DeleteOpt { - return func(do *deleteOpts) error { - do.t = time.Time{} - return nil - } -} - -// OptDeleteWithTime specifies t as the image modification time. -func OptDeleteWithTime(t time.Time) DeleteOpt { - return func(do *deleteOpts) error { - do.t = t - return nil - } -} - -var errCompactNotImplemented = errors.New("compact not implemented for non-last object") - -// DeleteObject deletes the data object with id, according to opts. -// -// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following -// object deletion, use OptDeleteCompact. -// -// By default, the image modification time is set to the current time for non-deterministic images, -// and unset otherwise. To override this, consider using OptDeleteDeterministic or -// OptDeleteWithTime. -func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error { - do := deleteOpts{} - - if !f.isDeterministic() { - do.t = time.Now() - } - - for _, opt := range opts { - if err := opt(&do); err != nil { - return fmt.Errorf("%w", err) - } - } - - d, err := f.getDescriptor(WithID(id)) - if err != nil { - return fmt.Errorf("%w", err) - } - - if do.compact && !f.isLast(d) { - return fmt.Errorf("%w", errCompactNotImplemented) - } - - if do.zero { - if err := f.zero(d); err != nil { - return fmt.Errorf("%w", err) - } - } - - if do.compact { - if err := f.truncateAt(d); err != nil { - return fmt.Errorf("%w", err) - } - - f.h.DataSize -= d.SizeWithPadding - } - - f.h.DescriptorsFree++ - f.h.ModifiedAt = do.t.Unix() - - // If we remove the primary partition, set the global header Arch field to HdrArchUnknown - // to indicate that the SIF file doesn't include a primary partition and no dependency - // on any architecture exists. - if d.isPartitionOfType(PartPrimSys) { - f.h.Arch = hdrArchUnknown - } - - // Reset rawDescripter with empty struct - *d = rawDescriptor{} - - if err := f.writeDescriptors(); err != nil { - return fmt.Errorf("%w", err) - } - - if err := f.writeHeader(); err != nil { - return fmt.Errorf("%w", err) - } - - return nil -} - -// setOpts accumulates object set options. -type setOpts struct { - t time.Time -} - -// SetOpt are used to specify object set options. -type SetOpt func(*setOpts) error - -// OptSetDeterministic sets header/descriptor fields to values that support deterministic -// modification of images. -func OptSetDeterministic() SetOpt { - return func(so *setOpts) error { - so.t = time.Time{} - return nil - } -} - -// OptSetWithTime specifies t as the image/object modification time. 
-func OptSetWithTime(t time.Time) SetOpt { - return func(so *setOpts) error { - so.t = t - return nil - } -} - -var ( - errNotPartition = errors.New("data object not a partition") - errNotSystem = errors.New("data object not a system partition") -) - -// SetPrimPart sets the specified system partition to be the primary one. -// -// By default, the image/object modification times are set to the current time for -// non-deterministic images, and unset otherwise. To override this, consider using -// OptSetDeterministic or OptSetWithTime. -func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { - so := setOpts{} - - if !f.isDeterministic() { - so.t = time.Now() - } - - for _, opt := range opts { - if err := opt(&so); err != nil { - return fmt.Errorf("%w", err) - } - } - - descr, err := f.getDescriptor(WithID(id)) - if err != nil { - return fmt.Errorf("%w", err) - } - - if descr.DataType != DataPartition { - return fmt.Errorf("%w", errNotPartition) - } - - var p partition - if err := descr.getExtra(binaryUnmarshaler{&p}); err != nil { - return fmt.Errorf("%w", err) - } - - // if already primary system partition, nothing to do - if p.Parttype == PartPrimSys { - return nil - } - - if p.Parttype != PartSystem { - return fmt.Errorf("%w", errNotSystem) - } - - // If there is currently a primary system partition, update it. - if d, err := f.getDescriptor(WithPartitionType(PartPrimSys)); err == nil { - var p partition - if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { - return fmt.Errorf("%w", err) - } - - p.Parttype = PartSystem - - if err := d.setExtra(p); err != nil { - return fmt.Errorf("%w", err) - } - - d.ModifiedAt = so.t.Unix() - } else if !errors.Is(err, ErrObjectNotFound) { - return fmt.Errorf("%w", err) - } - - // Update the descriptor of the new primary system partition. - p.Parttype = PartPrimSys - - if err := descr.setExtra(p); err != nil { - return fmt.Errorf("%w", err) - } - - descr.ModifiedAt = so.t.Unix() - - if err := f.writeDescriptors(); err != nil { - return fmt.Errorf("%w", err) - } - - f.h.Arch = p.Arch - f.h.ModifiedAt = so.t.Unix() - - if err := f.writeHeader(); err != nil { - return fmt.Errorf("%w", err) - } - - return nil -} - -// SetMetadata sets the metadata of the data object with id to md, according to opts. -// -// By default, the image/object modification times are set to the current time for -// non-deterministic images, and unset otherwise. To override this, consider using -// OptSetDeterministic or OptSetWithTime. -func (f *FileImage) SetMetadata(id uint32, md encoding.BinaryMarshaler, opts ...SetOpt) error { - so := setOpts{} - - if !f.isDeterministic() { - so.t = time.Now() - } - - for _, opt := range opts { - if err := opt(&so); err != nil { - return fmt.Errorf("%w", err) - } - } - - rd, err := f.getDescriptor(WithID(id)) - if err != nil { - return fmt.Errorf("%w", err) - } - - if err := rd.setExtra(md); err != nil { - return fmt.Errorf("%w", err) - } - - rd.ModifiedAt = so.t.Unix() - - if err := f.writeDescriptors(); err != nil { - return fmt.Errorf("%w", err) - } - - f.h.ModifiedAt = so.t.Unix() - - if err := f.writeHeader(); err != nil { - return fmt.Errorf("%w", err) - } - - return nil -} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/delete.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/delete.go new file mode 100644 index 00000000..23d9e7d9 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/delete.go @@ -0,0 +1,163 @@ +// Copyright (c) 2018-2024, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. 
All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "fmt" + "io" + "time" +) + +// zeroReader is an io.Reader that returns a stream of zero-bytes. +type zeroReader struct{} + +func (zeroReader) Read(b []byte) (int, error) { + clear(b) + return len(b), nil +} + +// zero overwrites the data object described by d with a stream of zero bytes. +func (f *FileImage) zero(d *rawDescriptor) error { + if _, err := f.rw.Seek(d.Offset, io.SeekStart); err != nil { + return err + } + + _, err := io.CopyN(f.rw, zeroReader{}, d.Size) + return err +} + +// deleteOpts accumulates object deletion options. +type deleteOpts struct { + zero bool + compact bool + t time.Time +} + +// DeleteOpt are used to specify object deletion options. +type DeleteOpt func(*deleteOpts) error + +// OptDeleteZero specifies whether the deleted object should be zeroed. +func OptDeleteZero(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.zero = b + return nil + } +} + +// OptDeleteCompact specifies whether the image should be compacted following object deletion. +func OptDeleteCompact(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.compact = b + return nil + } +} + +// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptDeleteDeterministic() DeleteOpt { + return func(do *deleteOpts) error { + do.t = time.Time{} + return nil + } +} + +// OptDeleteWithTime specifies t as the image modification time. +func OptDeleteWithTime(t time.Time) DeleteOpt { + return func(do *deleteOpts) error { + do.t = t + return nil + } +} + +// DeleteObject deletes the data object with id, according to opts. If no matching descriptor is +// found, an error wrapping ErrObjectNotFound is returned. +// +// To zero the data region of the deleted object, use OptDeleteZero. To remove unused space at the +// end of the FileImage following object deletion, use OptDeleteCompact. +// +// By default, the image modification time is set to the current time for non-deterministic images, +// and unset otherwise. To override this, consider using OptDeleteDeterministic or +// OptDeleteWithTime. +func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error { + return f.DeleteObjects(WithID(id), opts...) +} + +// DeleteObjects deletes the data objects selected by fn, according to opts. If no descriptors are +// selected by fns, an error wrapping ErrObjectNotFound is returned. +// +// To zero the data region of the deleted object, use OptDeleteZero. To remove unused space at the +// end of the FileImage following object deletion, use OptDeleteCompact. +// +// By default, the image modification time is set to the current time for non-deterministic images, +// and unset otherwise. To override this, consider using OptDeleteDeterministic or +// OptDeleteWithTime. 
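A hedged usage sketch for the selector-based deletion described above; WithID comes from select.go in this package, and imports plus FileImage setup are elided.

// Remove object 2, scrub its data region, and return the freed tail space.
// Compaction only reclaims unused space at the end of the data section.
func pruneObject(f *sif.FileImage) error {
	return f.DeleteObjects(sif.WithID(2),
		sif.OptDeleteZero(true),
		sif.OptDeleteCompact(true),
	)
}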
+func (f *FileImage) DeleteObjects(fn DescriptorSelectorFunc, opts ...DeleteOpt) error { + do := deleteOpts{} + + if !f.isDeterministic() { + do.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&do); err != nil { + return fmt.Errorf("%w", err) + } + } + + var selected bool + + if err := f.withDescriptors(fn, func(d *rawDescriptor) error { + selected = true + + if do.zero { + if err := f.zero(d); err != nil { + return fmt.Errorf("%w", err) + } + } + + f.h.DescriptorsFree++ + + // If we remove the primary partition, set the global header Arch field to HdrArchUnknown + // to indicate that the SIF file doesn't include a primary partition and no dependency + // on any architecture exists. + if d.isPartitionOfType(PartPrimSys) { + f.h.Arch = hdrArchUnknown + } + + // Reset rawDescripter with empty struct + *d = rawDescriptor{} + + return nil + }); err != nil { + return fmt.Errorf("%w", err) + } + + if !selected { + return fmt.Errorf("%w", ErrObjectNotFound) + } + + f.h.ModifiedAt = do.t.Unix() + + if do.compact { + f.h.DataSize = f.calculatedDataSize() + + if err := f.rw.Truncate(f.h.DataOffset + f.h.DataSize); err != nil { + return fmt.Errorf("%w", err) + } + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go index 3b7a4776..bb191aee 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. +// Copyright (c) 2018-2024, Sylabs Inc. All rights reserved. // Copyright (c) 2017, SingularityWare, LLC. All rights reserved. // Copyright (c) 2017, Yannick Cote All rights reserved. // This software is licensed under a 3-clause BSD license. Please consult the @@ -92,7 +92,9 @@ func newOCIBlobDigest() *ociBlob { // MarshalBinary encodes ob into binary format. func (ob *ociBlob) MarshalBinary() ([]byte, error) { - ob.digest.Hex = hex.EncodeToString(ob.hasher.Sum(nil)) + if ob.digest.Hex == "" { + ob.digest.Hex = hex.EncodeToString(ob.hasher.Sum(nil)) + } return ob.digest.MarshalText() } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go index ee7892f7..fd29a409 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021-2023, Sylabs Inc. All rights reserved. +// Copyright (c) 2021-2024, Sylabs Inc. All rights reserved. // This software is licensed under a 3-clause BSD license. Please consult the // LICENSE file distributed with the sources of this project regarding your // rights to use or distribute this software. @@ -184,10 +184,16 @@ func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc { } } +var errNilSelectFunc = errors.New("descriptor selector func must not be nil") + // withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns // true. If selectFn or onMatchFn return a non-nil error, the iteration halts, and the error is // returned to the caller. 
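For context, a sketch of how callers pair selectors with the exported lookup API. GetDescriptor and the Descriptor accessors are assumed from elsewhere in the package (only the selectors and the new nil-selector guard appear in these hunks); imports are elided.

// Locate the primary system partition and report its placement.
func describePrimary(f *sif.FileImage) error {
	d, err := f.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
	if err != nil {
		return err // wraps ErrObjectNotFound when nothing matches
	}
	fmt.Printf("primary partition: id=%d offset=%d size=%d\n", d.ID(), d.Offset(), d.Size())
	return nil
}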
func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error { + if selectFn == nil { + return errNilSelectFunc + } + for i, d := range f.rds { if !d.Used { continue diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/set.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/set.go new file mode 100644 index 00000000..0750bd22 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/set.go @@ -0,0 +1,220 @@ +// Copyright (c) 2018-2024, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding" + "errors" + "fmt" + "time" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// setOpts accumulates object set options. +type setOpts struct { + t time.Time +} + +// SetOpt are used to specify object set options. +type SetOpt func(*setOpts) error + +// OptSetDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptSetDeterministic() SetOpt { + return func(so *setOpts) error { + so.t = time.Time{} + return nil + } +} + +// OptSetWithTime specifies t as the image/object modification time. +func OptSetWithTime(t time.Time) SetOpt { + return func(so *setOpts) error { + so.t = t + return nil + } +} + +var ( + errNotPartition = errors.New("data object not a partition") + errNotSystem = errors.New("data object not a system partition") +) + +// SetPrimPart sets the specified system partition to be the primary one. +// +// By default, the image/object modification times are set to the current time for +// non-deterministic images, and unset otherwise. To override this, consider using +// OptSetDeterministic or OptSetWithTime. +func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { + so := setOpts{} + + if !f.isDeterministic() { + so.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + descr, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if descr.DataType != DataPartition { + return fmt.Errorf("%w", errNotPartition) + } + + var p partition + if err := descr.getExtra(binaryUnmarshaler{&p}); err != nil { + return fmt.Errorf("%w", err) + } + + // if already primary system partition, nothing to do + if p.Parttype == PartPrimSys { + return nil + } + + if p.Parttype != PartSystem { + return fmt.Errorf("%w", errNotSystem) + } + + // If there is currently a primary system partition, update it. + if d, err := f.getDescriptor(WithPartitionType(PartPrimSys)); err == nil { + var p partition + if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { + return fmt.Errorf("%w", err) + } + + p.Parttype = PartSystem + + if err := d.setExtra(p); err != nil { + return fmt.Errorf("%w", err) + } + + d.ModifiedAt = so.t.Unix() + } else if !errors.Is(err, ErrObjectNotFound) { + return fmt.Errorf("%w", err) + } + + // Update the descriptor of the new primary system partition. 
+ p.Parttype = PartPrimSys + + if err := descr.setExtra(p); err != nil { + return fmt.Errorf("%w", err) + } + + descr.ModifiedAt = so.t.Unix() + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.Arch = p.Arch + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// SetMetadata sets the metadata of the data object with id to md, according to opts. +// +// By default, the image/object modification times are set to the current time for +// non-deterministic images, and unset otherwise. To override this, consider using +// OptSetDeterministic or OptSetWithTime. +func (f *FileImage) SetMetadata(id uint32, md encoding.BinaryMarshaler, opts ...SetOpt) error { + so := setOpts{} + + if !f.isDeterministic() { + so.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + rd, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if err := rd.setExtra(md); err != nil { + return fmt.Errorf("%w", err) + } + + rd.ModifiedAt = so.t.Unix() + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// SetOCIBlobDigest updates the digest of the OCI blob object with id to h, according to opts. +// +// By default, the image/object modification times are set to the current time for +// non-deterministic images, and unset otherwise. To override this, consider using +// OptSetDeterministic or OptSetWithTime. +func (f *FileImage) SetOCIBlobDigest(id uint32, h v1.Hash, opts ...SetOpt) error { + rd, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if got := rd.DataType; got != DataOCIRootIndex && got != DataOCIBlob { + return &unexpectedDataTypeError{got, []DataType{DataOCIRootIndex, DataOCIBlob}} + } + + so := setOpts{} + + if !f.isDeterministic() { + so.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + md := &ociBlob{ + digest: h, + } + if err := rd.setExtra(md); err != nil { + return fmt.Errorf("%w", err) + } + + rd.ModifiedAt = so.t.Unix() + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go index 5d7837ad..3860d2f6 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go @@ -233,7 +233,7 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { var tip component var refilling, filling, padding []byte var fillCount int - curWidth := int(internal.PercentageRound(stat.Total, stat.Current, uint(width))) + curWidth := int(internal.PercentageRound(stat.Total, stat.Current, int64(width))) if curWidth != 0 { if !stat.Completed || s.tipOnComplete { @@ -241,20 +241,19 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { s.tip.count++ fillCount += tip.width } - if stat.Refill != 0 { - refWidth := int(internal.PercentageRound(stat.Total, stat.Refill, uint(width))) + switch refWidth := 0; { + case stat.Refill != 0: + refWidth = 
int(internal.PercentageRound(stat.Total, stat.Refill, int64(width))) curWidth -= refWidth refWidth += curWidth + fallthrough + default: for w := s.components[iFiller].width; curWidth-fillCount >= w; fillCount += w { filling = append(filling, s.components[iFiller].bytes...) } for w := s.components[iRefiller].width; refWidth-fillCount >= w; fillCount += w { refilling = append(refilling, s.components[iRefiller].bytes...) } - } else { - for w := s.components[iFiller].width; curWidth-fillCount >= w; fillCount += w { - filling = append(filling, s.components[iFiller].bytes...) - } } } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index c4cb2a14..cbe657f1 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -81,15 +81,15 @@ func (d *movingAverageETA) Decor(s Statistics) (string, int) { func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { if n <= 0 { d.zDur += dur - } else { - durPerItem := float64(d.zDur+dur) / float64(n) - if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { - d.zDur += dur - return - } - d.zDur = 0 - d.average.Add(durPerItem) + return + } + durPerItem := float64(d.zDur+dur) / float64(n) + if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { + d.zDur += dur + return } + d.zDur = 0 + d.average.Add(durPerItem) } // AverageETA decorator. It's wrapper of NewAverageETA. diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go index 9709c196..298fc1a1 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go @@ -61,7 +61,7 @@ func NewPercentage(format string, wcc ...WC) Decorator { format = "% d" } f := func(s Statistics) string { - p := internal.Percentage(s.Total, s.Current, 100) + p := internal.PercentageRound(s.Total, s.Current, 100) return fmt.Sprintf(format, percentageType(p)) } return Any(f, wcc...) diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go index a90a8edd..50415fd9 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -96,15 +96,15 @@ func (d *movingAverageSpeed) Decor(_ Statistics) (string, int) { func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { if n <= 0 { d.zDur += dur - } else { - durPerByte := float64(d.zDur+dur) / float64(n) - if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) { - d.zDur += dur - return - } - d.zDur = 0 - d.average.Add(durPerByte) + return + } + durPerByte := float64(d.zDur+dur) / float64(n) + if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) { + d.zDur += dur + return } + d.zDur = 0 + d.average.Add(durPerByte) } // AverageSpeed decorator with dynamic unit measure adjustment. It's diff --git a/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go index 4bc36f5b..988227d3 100644 --- a/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go @@ -3,17 +3,20 @@ package internal import "math" // Percentage is a helper function, to calculate percentage. 
-func Percentage(total, current int64, width uint) float64 { - if total <= 0 { +func Percentage(total, current, width uint) float64 { + if total == 0 { return 0 } if current >= total { return float64(width) } - return float64(int64(width)*current) / float64(total) + return float64(width*current) / float64(total) } // PercentageRound same as Percentage but with math.Round. -func PercentageRound(total, current int64, width uint) float64 { - return math.Round(Percentage(total, current, width)) +func PercentageRound(total, current, width int64) float64 { + if total < 0 || current < 0 || width < 0 { + return 0 + } + return math.Round(Percentage(uint(total), uint(current), uint(width))) } diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go index 213bf204..08989568 100644 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" +package blowfish // The code is a port of Bruce Schneier's C implementation. // See https://www.schneier.com/blowfish.html. diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go index 425e8eec..016e9021 100644 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package cast5 // import "golang.org/x/crypto/cast5" +package cast5 import ( "errors" diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index 00f963ea..21ca3b2e 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -6,9 +6,11 @@ // performs scalar multiplication on the elliptic curve known as Curve25519. // See RFC 7748. // -// Starting in Go 1.20, this package is a wrapper for the X25519 implementation +// This package is a wrapper for the X25519 implementation // in the crypto/ecdh package. -package curve25519 // import "golang.org/x/crypto/curve25519" +package curve25519 + +import "crypto/ecdh" // ScalarMult sets dst to the product scalar * point. // @@ -16,7 +18,13 @@ package curve25519 // import "golang.org/x/crypto/curve25519" // zeroes, irrespective of the scalar. Instead, use the X25519 function, which // will return an error. func ScalarMult(dst, scalar, point *[32]byte) { - scalarMult(dst, scalar, point) + if _, err := x25519(dst, scalar[:], point[:]); err != nil { + // The only error condition for x25519 when the inputs are 32 bytes long + // is if the output would have been the all-zero value. + for i := range dst { + dst[i] = 0 + } + } } // ScalarBaseMult sets dst to the product scalar * base where base is the @@ -25,7 +33,12 @@ func ScalarMult(dst, scalar, point *[32]byte) { // It is recommended to use the X25519 function with Basepoint instead, as // copying into fixed size arrays can lead to unexpected bugs. 
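The deprecation comments above steer callers toward the error-returning X25519 entry point; a minimal sketch of that usage (key material is generated inline purely for illustration):

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var scalar [32]byte
	if _, err := rand.Read(scalar[:]); err != nil {
		log.Fatal(err)
	}

	// Public key: scalar multiplication against the canonical base point.
	pub, err := curve25519.X25519(scalar[:], curve25519.Basepoint)
	if err != nil {
		log.Fatal(err)
	}

	// In a real exchange the peer's public key goes here. Unlike ScalarMult,
	// X25519 reports low-order inputs as an error instead of all-zero output.
	shared, err := curve25519.X25519(scalar[:], pub)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("shared: %x\n", shared)
}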
func ScalarBaseMult(dst, scalar *[32]byte) { - scalarBaseMult(dst, scalar) + curve := ecdh.X25519() + priv, err := curve.NewPrivateKey(scalar[:]) + if err != nil { + panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + } + copy(dst[:], priv.PublicKey().Bytes()) } const ( @@ -57,3 +70,21 @@ func X25519(scalar, point []byte) ([]byte, error) { var dst [32]byte return x25519(&dst, scalar, point) } + +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + curve := ecdh.X25519() + pub, err := curve.NewPublicKey(point) + if err != nil { + return nil, err + } + priv, err := curve.NewPrivateKey(scalar) + if err != nil { + return nil, err + } + out, err := priv.ECDH(pub) + if err != nil { + return nil, err + } + copy(dst[:], out) + return dst[:], nil +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go b/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go deleted file mode 100644 index ba647e8d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package curve25519 - -import ( - "crypto/subtle" - "errors" - "strconv" - - "golang.org/x/crypto/curve25519/internal/field" -) - -func scalarMult(dst, scalar, point *[32]byte) { - var e [32]byte - - copy(e[:], scalar[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element - x1.SetBytes(point[:]) - x2.One() - x3.Set(&x1) - z3.One() - - swap := 0 - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int(b) - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - swap = int(b) - - tmp0.Subtract(&x3, &z3) - tmp1.Subtract(&x2, &z2) - x2.Add(&x2, &z2) - z2.Add(&x3, &z3) - z3.Multiply(&tmp0, &x2) - z2.Multiply(&z2, &tmp1) - tmp0.Square(&tmp1) - tmp1.Square(&x2) - x3.Add(&z3, &z2) - z2.Subtract(&z3, &z2) - x2.Multiply(&tmp1, &tmp0) - tmp1.Subtract(&tmp1, &tmp0) - z2.Square(&z2) - - z3.Mult32(&tmp1, 121666) - x3.Square(&x3) - tmp0.Add(&tmp0, &z3) - z3.Multiply(&x1, &z2) - z2.Multiply(&tmp1, &tmp0) - } - - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - - z2.Invert(&z2) - x2.Multiply(&x2, &z2) - copy(dst[:], x2.Bytes()) -} - -func scalarBaseMult(dst, scalar *[32]byte) { - checkBasepoint() - scalarMult(dst, scalar, &basePoint) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := len(scalar); l != 32 { - return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") - } - if l := len(point); l != 32 { - return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - scalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - scalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, errors.New("bad input point: low order point") - } - } - return dst[:], nil -} - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go 
b/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go deleted file mode 100644 index 627df497..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package curve25519 - -import "crypto/ecdh" - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - curve := ecdh.X25519() - pub, err := curve.NewPublicKey(point) - if err != nil { - return nil, err - } - priv, err := curve.NewPrivateKey(scalar) - if err != nil { - return nil, err - } - out, err := priv.ECDH(pub) - if err != nil { - return nil, err - } - copy(dst[:], out) - return dst[:], nil -} - -func scalarMult(dst, scalar, point *[32]byte) { - if _, err := x25519(dst, scalar[:], point[:]); err != nil { - // The only error condition for x25519 when the inputs are 32 bytes long - // is if the output would have been the all-zero value. - for i := range dst { - dst[i] = 0 - } - } -} - -func scalarBaseMult(dst, scalar *[32]byte) { - curve := ecdh.X25519() - priv, err := curve.NewPrivateKey(scalar[:]) - if err != nil { - panic("curve25519: internal error: scalarBaseMult was not 32 bytes") - } - copy(dst[:], priv.PublicKey().Bytes()) -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/README b/vendor/golang.org/x/crypto/curve25519/internal/field/README deleted file mode 100644 index e25bca7d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/README +++ /dev/null @@ -1,7 +0,0 @@ -This package is kept in sync with crypto/ed25519/internal/edwards25519/field in -the standard library. - -If there are any changes in the standard library that need to be synced to this -package, run sync.sh. It will not overwrite any local changes made since the -previous sync, so it's ok to land changes in this package first, and then sync -to the standard library later. diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go deleted file mode 100644 index ca841ad9..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package field implements fast arithmetic modulo 2^255-19. -package field - -import ( - "crypto/subtle" - "encoding/binary" - "math/bits" -) - -// Element represents an element of the field GF(2^255-19). Note that this -// is not a cryptographically secure group, and should only be used to interact -// with edwards25519.Point coordinates. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is a valid zero element. -type Element struct { - // An element t represents the integer - // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 - // - // Between operations, all limbs are expected to be lower than 2^52. - l0 uint64 - l1 uint64 - l2 uint64 - l3 uint64 - l4 uint64 -} - -const maskLow51Bits uint64 = (1 << 51) - 1 - -var feZero = &Element{0, 0, 0, 0, 0} - -// Zero sets v = 0, and returns v. -func (v *Element) Zero() *Element { - *v = *feZero - return v -} - -var feOne = &Element{1, 0, 0, 0, 0} - -// One sets v = 1, and returns v. 
-func (v *Element) One() *Element { - *v = *feOne - return v -} - -// reduce reduces v modulo 2^255 - 19 and returns it. -func (v *Element) reduce() *Element { - v.carryPropagate() - - // After the light reduction we now have a field element representation - // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. - - // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, - // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. - c := (v.l0 + 19) >> 51 - c = (v.l1 + c) >> 51 - c = (v.l2 + c) >> 51 - c = (v.l3 + c) >> 51 - c = (v.l4 + c) >> 51 - - // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's - // effectively applying the reduction identity to the carry. - v.l0 += 19 * c - - v.l1 += v.l0 >> 51 - v.l0 = v.l0 & maskLow51Bits - v.l2 += v.l1 >> 51 - v.l1 = v.l1 & maskLow51Bits - v.l3 += v.l2 >> 51 - v.l2 = v.l2 & maskLow51Bits - v.l4 += v.l3 >> 51 - v.l3 = v.l3 & maskLow51Bits - // no additional carry - v.l4 = v.l4 & maskLow51Bits - - return v -} - -// Add sets v = a + b, and returns v. -func (v *Element) Add(a, b *Element) *Element { - v.l0 = a.l0 + b.l0 - v.l1 = a.l1 + b.l1 - v.l2 = a.l2 + b.l2 - v.l3 = a.l3 + b.l3 - v.l4 = a.l4 + b.l4 - // Using the generic implementation here is actually faster than the - // assembly. Probably because the body of this function is so simple that - // the compiler can figure out better optimizations by inlining the carry - // propagation. TODO - return v.carryPropagateGeneric() -} - -// Subtract sets v = a - b, and returns v. -func (v *Element) Subtract(a, b *Element) *Element { - // We first add 2 * p, to guarantee the subtraction won't underflow, and - // then subtract b (which can be up to 2^255 + 2^13 * 19). - v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 - v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 - v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 - v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 - v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 - return v.carryPropagate() -} - -// Negate sets v = -a, and returns v. -func (v *Element) Negate(a *Element) *Element { - return v.Subtract(feZero, a) -} - -// Invert sets v = 1/z mod p, and returns v. -// -// If z == 0, Invert returns v = 0. -func (v *Element) Invert(z *Element) *Element { - // Inversion is implemented as exponentiation with exponent p − 2. It uses the - // same sequence of 255 squarings and 11 multiplications as [Curve25519]. 
-	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
-
-	z2.Square(z) // 2
-	t.Square(&z2) // 4
-	t.Square(&t) // 8
-	z9.Multiply(&t, z) // 9
-	z11.Multiply(&z9, &z2) // 11
-	t.Square(&z11) // 22
-	z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
-
-	t.Square(&z2_5_0) // 2^6 - 2^1
-	for i := 0; i < 4; i++ {
-		t.Square(&t) // 2^10 - 2^5
-	}
-	z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
-
-	t.Square(&z2_10_0) // 2^11 - 2^1
-	for i := 0; i < 9; i++ {
-		t.Square(&t) // 2^20 - 2^10
-	}
-	z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
-
-	t.Square(&z2_20_0) // 2^21 - 2^1
-	for i := 0; i < 19; i++ {
-		t.Square(&t) // 2^40 - 2^20
-	}
-	t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
-
-	t.Square(&t) // 2^41 - 2^1
-	for i := 0; i < 9; i++ {
-		t.Square(&t) // 2^50 - 2^10
-	}
-	z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
-
-	t.Square(&z2_50_0) // 2^51 - 2^1
-	for i := 0; i < 49; i++ {
-		t.Square(&t) // 2^100 - 2^50
-	}
-	z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
-
-	t.Square(&z2_100_0) // 2^101 - 2^1
-	for i := 0; i < 99; i++ {
-		t.Square(&t) // 2^200 - 2^100
-	}
-	t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
-
-	t.Square(&t) // 2^201 - 2^1
-	for i := 0; i < 49; i++ {
-		t.Square(&t) // 2^250 - 2^50
-	}
-	t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
-
-	t.Square(&t) // 2^251 - 2^1
-	t.Square(&t) // 2^252 - 2^2
-	t.Square(&t) // 2^253 - 2^3
-	t.Square(&t) // 2^254 - 2^4
-	t.Square(&t) // 2^255 - 2^5
-
-	return v.Multiply(&t, &z11) // 2^255 - 21
-}
-
-// Set sets v = a, and returns v.
-func (v *Element) Set(a *Element) *Element {
-	*v = *a
-	return v
-}
-
-// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
-//
-// Consistent with RFC 7748, the most significant bit (the high bit of the
-// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032.
-func (v *Element) SetBytes(x []byte) *Element {
-	if len(x) != 32 {
-		panic("edwards25519: invalid field element input size")
-	}
-
-	// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
-	v.l0 = binary.LittleEndian.Uint64(x[0:8])
-	v.l0 &= maskLow51Bits
-	// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
-	v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
-	v.l1 &= maskLow51Bits
-	// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
-	v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
-	v.l2 &= maskLow51Bits
-	// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
-	v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
-	v.l3 &= maskLow51Bits
-	// Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
-	// Note: not bytes 25:33, shift 4, to avoid overread.
-	v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
-	v.l4 &= maskLow51Bits
-
-	return v
-}
-
-// Bytes returns the canonical 32-byte little-endian encoding of v.
-func (v *Element) Bytes() []byte {
-	// This function is outlined to make the allocations inline in the caller
-	// rather than happen on the heap.
-	var out [32]byte
-	return v.bytes(&out)
-}
-
-func (v *Element) bytes(out *[32]byte) []byte {
-	t := *v
-	t.reduce()
-
-	var buf [8]byte
-	for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
-		bitsOffset := i * 51
-		binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
-		for i, bb := range buf {
-			off := bitsOffset/8 + i
-			if off >= len(out) {
-				break
-			}
-			out[off] |= bb
-		}
-	}
-
-	return out[:]
-}
-
-// Equal returns 1 if v and u are equal, and 0 otherwise.
-func (v *Element) Equal(u *Element) int {
-	sa, sv := u.Bytes(), v.Bytes()
-	return subtle.ConstantTimeCompare(sa, sv)
-}
-
-// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
-func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
-
-// Select sets v to a if cond == 1, and to b if cond == 0.
-func (v *Element) Select(a, b *Element, cond int) *Element {
-	m := mask64Bits(cond)
-	v.l0 = (m & a.l0) | (^m & b.l0)
-	v.l1 = (m & a.l1) | (^m & b.l1)
-	v.l2 = (m & a.l2) | (^m & b.l2)
-	v.l3 = (m & a.l3) | (^m & b.l3)
-	v.l4 = (m & a.l4) | (^m & b.l4)
-	return v
-}
-
-// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
-func (v *Element) Swap(u *Element, cond int) {
-	m := mask64Bits(cond)
-	t := m & (v.l0 ^ u.l0)
-	v.l0 ^= t
-	u.l0 ^= t
-	t = m & (v.l1 ^ u.l1)
-	v.l1 ^= t
-	u.l1 ^= t
-	t = m & (v.l2 ^ u.l2)
-	v.l2 ^= t
-	u.l2 ^= t
-	t = m & (v.l3 ^ u.l3)
-	v.l3 ^= t
-	u.l3 ^= t
-	t = m & (v.l4 ^ u.l4)
-	v.l4 ^= t
-	u.l4 ^= t
-}
-
-// IsNegative returns 1 if v is negative, and 0 otherwise.
-func (v *Element) IsNegative() int {
-	return int(v.Bytes()[0] & 1)
-}
-
-// Absolute sets v to |u|, and returns v.
-func (v *Element) Absolute(u *Element) *Element {
-	return v.Select(new(Element).Negate(u), u, u.IsNegative())
-}
-
-// Multiply sets v = x * y, and returns v.
-func (v *Element) Multiply(x, y *Element) *Element {
-	feMul(v, x, y)
-	return v
-}
-
-// Square sets v = x * x, and returns v.
-func (v *Element) Square(x *Element) *Element {
-	feSquare(v, x)
-	return v
-}
-
-// Mult32 sets v = x * y, and returns v.
-func (v *Element) Mult32(x *Element, y uint32) *Element {
-	x0lo, x0hi := mul51(x.l0, y)
-	x1lo, x1hi := mul51(x.l1, y)
-	x2lo, x2hi := mul51(x.l2, y)
-	x3lo, x3hi := mul51(x.l3, y)
-	x4lo, x4hi := mul51(x.l4, y)
-	v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
-	v.l1 = x1lo + x0hi
-	v.l2 = x2lo + x1hi
-	v.l3 = x3lo + x2hi
-	v.l4 = x4lo + x3hi
-	// The hi portions are going to be only 32 bits, plus any previous excess,
-	// so we can skip the carry propagation.
-	return v
-}
-
-// mul51 returns lo + hi * 2⁵¹ = a * b.
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
-	mh, ml := bits.Mul64(a, uint64(b))
-	lo = ml & maskLow51Bits
-	hi = (mh << 13) | (ml >> 51)
-	return
-}
-
-// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
-func (v *Element) Pow22523(x *Element) *Element { - var t0, t1, t2 Element - - t0.Square(x) // x^2 - t1.Square(&t0) // x^4 - t1.Square(&t1) // x^8 - t1.Multiply(x, &t1) // x^9 - t0.Multiply(&t0, &t1) // x^11 - t0.Square(&t0) // x^22 - t0.Multiply(&t1, &t0) // x^31 - t1.Square(&t0) // x^62 - for i := 1; i < 5; i++ { // x^992 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 - t1.Square(&t0) // 2^11 - 2 - for i := 1; i < 10; i++ { // 2^20 - 2^10 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^20 - 1 - t2.Square(&t1) // 2^21 - 2 - for i := 1; i < 20; i++ { // 2^40 - 2^20 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^40 - 1 - t1.Square(&t1) // 2^41 - 2 - for i := 1; i < 10; i++ { // 2^50 - 2^10 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^50 - 1 - t1.Square(&t0) // 2^51 - 2 - for i := 1; i < 50; i++ { // 2^100 - 2^50 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^100 - 1 - t2.Square(&t1) // 2^101 - 2 - for i := 1; i < 100; i++ { // 2^200 - 2^100 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^200 - 1 - t1.Square(&t1) // 2^201 - 2 - for i := 1; i < 50; i++ { // 2^250 - 2^50 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^250 - 1 - t0.Square(&t0) // 2^251 - 2 - t0.Square(&t0) // 2^252 - 4 - return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) -} - -// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. -var sqrtM1 = &Element{1718705420411056, 234908883556509, - 2233514472574048, 2117202627021982, 765476049583133} - -// SqrtRatio sets r to the non-negative square root of the ratio of u and v. -// -// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio -// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, -// and returns r and 0. -func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { - var a, b Element - - // r = (u * v3) * (u * v7)^((p-5)/8) - v2 := a.Square(v) - uv3 := b.Multiply(u, b.Multiply(v2, v)) - uv7 := a.Multiply(uv3, a.Square(v2)) - r.Multiply(uv3, r.Pow22523(uv7)) - - check := a.Multiply(v, a.Square(r)) // check = v * r^2 - - uNeg := b.Negate(u) - correctSignSqrt := check.Equal(u) - flippedSignSqrt := check.Equal(uNeg) - flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) - - rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r - // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) - r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) - - r.Absolute(r) // Choose the nonnegative square root. - return r, correctSignSqrt | flippedSignSqrt -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go deleted file mode 100644 index 70c54169..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ /dev/null @@ -1,15 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego - -package field - -// feMul sets out = a * b. It works like feMulGeneric. -// -//go:noescape -func feMul(out *Element, a *Element, b *Element) - -// feSquare sets out = a * a. It works like feSquareGeneric. 
-// -//go:noescape -func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s deleted file mode 100644 index 60817acc..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s +++ /dev/null @@ -1,378 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego - -#include "textflag.h" - -// func feMul(out *Element, a *Element, b *Element) -TEXT ·feMul(SB), NOSPLIT, $0-24 - MOVQ a+8(FP), CX - MOVQ b+16(FP), BX - - // r0 = a0×b0 - MOVQ (CX), AX - MULQ (BX) - MOVQ AX, DI - MOVQ DX, SI - - // r0 += 19×a1×b4 - MOVQ 8(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a2×b3 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a3×b2 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a4×b1 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 8(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r1 = a0×b1 - MOVQ (CX), AX - MULQ 8(BX) - MOVQ AX, R9 - MOVQ DX, R8 - - // r1 += a1×b0 - MOVQ 8(CX), AX - MULQ (BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a2×b4 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a3×b3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a4×b2 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r2 = a0×b2 - MOVQ (CX), AX - MULQ 16(BX) - MOVQ AX, R11 - MOVQ DX, R10 - - // r2 += a1×b1 - MOVQ 8(CX), AX - MULQ 8(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += a2×b0 - MOVQ 16(CX), AX - MULQ (BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a3×b4 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a4×b3 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r3 = a0×b3 - MOVQ (CX), AX - MULQ 24(BX) - MOVQ AX, R13 - MOVQ DX, R12 - - // r3 += a1×b2 - MOVQ 8(CX), AX - MULQ 16(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a2×b1 - MOVQ 16(CX), AX - MULQ 8(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a3×b0 - MOVQ 24(CX), AX - MULQ (BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += 19×a4×b4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r4 = a0×b4 - MOVQ (CX), AX - MULQ 32(BX) - MOVQ AX, R15 - MOVQ DX, R14 - - // r4 += a1×b3 - MOVQ 8(CX), AX - MULQ 24(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a2×b2 - MOVQ 16(CX), AX - MULQ 16(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a3×b1 - MOVQ 24(CX), AX - MULQ 8(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a4×b0 - MOVQ 32(CX), AX - MULQ (BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, DI, SI - SHLQ $0x0d, R9, R8 - SHLQ $0x0d, R11, R10 - SHLQ $0x0d, R13, R12 - SHLQ $0x0d, R15, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Second reduction chain (carryPropagate) - MOVQ DI, SI - SHRQ $0x33, SI - MOVQ R9, R8 - SHRQ $0x33, R8 - MOVQ R11, R10 - SHRQ $0x33, R10 - MOVQ R13, R12 - SHRQ $0x33, R12 - MOVQ R15, R14 - SHRQ $0x33, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ 
R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Store output - MOVQ out+0(FP), AX - MOVQ DI, (AX) - MOVQ R9, 8(AX) - MOVQ R11, 16(AX) - MOVQ R13, 24(AX) - MOVQ R15, 32(AX) - RET - -// func feSquare(out *Element, a *Element) -TEXT ·feSquare(SB), NOSPLIT, $0-16 - MOVQ a+8(FP), CX - - // r0 = l0×l0 - MOVQ (CX), AX - MULQ (CX) - MOVQ AX, SI - MOVQ DX, BX - - // r0 += 38×l1×l4 - MOVQ 8(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r0 += 38×l2×l3 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 24(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r1 = 2×l0×l1 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 8(CX) - MOVQ AX, R8 - MOVQ DX, DI - - // r1 += 38×l2×l4 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r1 += 19×l3×l3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r2 = 2×l0×l2 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 16(CX) - MOVQ AX, R10 - MOVQ DX, R9 - - // r2 += l1×l1 - MOVQ 8(CX), AX - MULQ 8(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r2 += 38×l3×l4 - MOVQ 24(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r3 = 2×l0×l3 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 24(CX) - MOVQ AX, R12 - MOVQ DX, R11 - - // r3 += 2×l1×l2 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 16(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r3 += 19×l4×l4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r4 = 2×l0×l4 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 32(CX) - MOVQ AX, R14 - MOVQ DX, R13 - - // r4 += 2×l1×l3 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 24(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // r4 += l2×l2 - MOVQ 16(CX), AX - MULQ 16(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, SI, BX - SHLQ $0x0d, R8, DI - SHLQ $0x0d, R10, R9 - SHLQ $0x0d, R12, R11 - SHLQ $0x0d, R14, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Second reduction chain (carryPropagate) - MOVQ SI, BX - SHRQ $0x33, BX - MOVQ R8, DI - SHRQ $0x33, DI - MOVQ R10, R9 - SHRQ $0x33, R9 - MOVQ R12, R11 - SHRQ $0x33, R11 - MOVQ R14, R13 - SHRQ $0x33, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Store output - MOVQ out+0(FP), AX - MOVQ SI, (AX) - MOVQ R8, 8(AX) - MOVQ R10, 16(AX) - MOVQ R12, 24(AX) - MOVQ R14, 32(AX) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go deleted file mode 100644 index 9da280d1..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !amd64 || !gc || purego - -package field - -func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } - -func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go deleted file mode 100644 index 075fe9b9..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego - -package field - -//go:noescape -func carryPropagate(v *Element) - -func (v *Element) carryPropagate() *Element { - carryPropagate(v) - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s deleted file mode 100644 index 3126a434..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego - -#include "textflag.h" - -// carryPropagate works exactly like carryPropagateGeneric and uses the -// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but -// avoids loading R0-R4 twice and uses LDP and STP. -// -// See https://golang.org/issues/43145 for the main compiler issue. -// -// func carryPropagate(v *Element) -TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 - MOVD v+0(FP), R20 - - LDP 0(R20), (R0, R1) - LDP 16(R20), (R2, R3) - MOVD 32(R20), R4 - - AND $0x7ffffffffffff, R0, R10 - AND $0x7ffffffffffff, R1, R11 - AND $0x7ffffffffffff, R2, R12 - AND $0x7ffffffffffff, R3, R13 - AND $0x7ffffffffffff, R4, R14 - - ADD R0>>51, R11, R11 - ADD R1>>51, R12, R12 - ADD R2>>51, R13, R13 - ADD R3>>51, R14, R14 - // R4>>51 * 19 + R10 -> R10 - LSR $51, R4, R21 - MOVD $19, R22 - MADD R22, R10, R21, R10 - - STP (R10, R11), 0(R20) - STP (R12, R13), 16(R20) - MOVD R14, 32(R20) - - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go deleted file mode 100644 index fc029ac1..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !arm64 || !gc || purego - -package field - -func (v *Element) carryPropagate() *Element { - return v.carryPropagateGeneric() -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go deleted file mode 100644 index 2671217d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package field - -import "math/bits" - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -// mul64 returns a * b. 
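The uint128 helpers in the file deleted below wrap the math/bits widening intrinsics; the pattern is small enough to show standalone (an illustration of the technique with hypothetical names, not vendored code; the only import is math/bits).

// addMul returns acc + a*b over 128 bits, the same shape as addMul64 below.
func addMul(accLo, accHi, a, b uint64) (lo, hi uint64) {
	mHi, mLo := bits.Mul64(a, b)           // 64x64 -> 128-bit product
	lo, carry := bits.Add64(mLo, accLo, 0) // add low halves, capturing the carry
	hi, _ = bits.Add64(mHi, accHi, carry)  // add high halves plus carry
	return lo, hi
}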
-func mul64(a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - return uint128{lo, hi} -} - -// addMul64 returns v + a * b. -func addMul64(v uint128, a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - lo, c := bits.Add64(lo, v.lo, 0) - hi, _ = bits.Add64(hi, v.hi, c) - return uint128{lo, hi} -} - -// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. -func shiftRightBy51(a uint128) uint64 { - return (a.hi << (64 - 51)) | (a.lo >> 51) -} - -func feMulGeneric(v, a, b *Element) { - a0 := a.l0 - a1 := a.l1 - a2 := a.l2 - a3 := a.l3 - a4 := a.l4 - - b0 := b.l0 - b1 := b.l1 - b2 := b.l2 - b3 := b.l3 - b4 := b.l4 - - // Limb multiplication works like pen-and-paper columnar multiplication, but - // with 51-bit limbs instead of digits. - // - // a4 a3 a2 a1 a0 x - // b4 b3 b2 b1 b0 = - // ------------------------ - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a4b1 a3b1 a2b1 a1b1 a0b1 + - // a4b2 a3b2 a2b2 a1b2 a0b2 + - // a4b3 a3b3 a2b3 a1b3 a0b3 + - // a4b4 a3b4 a2b4 a1b4 a0b4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // We can then use the reduction identity (a * 2²âµâµ + b = a * 19 + b) to - // reduce the limbs that would overflow 255 bits. r5 * 2²âµâµ becomes 19 * r5, - // r6 * 2³â°â¶ becomes 19 * r6 * 2âµÂ¹, etc. - // - // Reduction can be carried out simultaneously to multiplication. For - // example, we do not compute r5: whenever the result of a multiplication - // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. - // - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a3b1 a2b1 a1b1 a0b1 19×a4b1 + - // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + - // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + - // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // Finally we add up the columns into wide, overlapping limbs. - - a1_19 := a1 * 19 - a2_19 := a2 * 19 - a3_19 := a3 * 19 - a4_19 := a4 * 19 - - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - r0 := mul64(a0, b0) - r0 = addMul64(r0, a1_19, b4) - r0 = addMul64(r0, a2_19, b3) - r0 = addMul64(r0, a3_19, b2) - r0 = addMul64(r0, a4_19, b1) - - // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) - r1 := mul64(a0, b1) - r1 = addMul64(r1, a1, b0) - r1 = addMul64(r1, a2_19, b4) - r1 = addMul64(r1, a3_19, b3) - r1 = addMul64(r1, a4_19, b2) - - // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) - r2 := mul64(a0, b2) - r2 = addMul64(r2, a1, b1) - r2 = addMul64(r2, a2, b0) - r2 = addMul64(r2, a3_19, b4) - r2 = addMul64(r2, a4_19, b3) - - // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 - r3 := mul64(a0, b3) - r3 = addMul64(r3, a1, b2) - r3 = addMul64(r3, a2, b1) - r3 = addMul64(r3, a3, b0) - r3 = addMul64(r3, a4_19, b4) - - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - r4 := mul64(a0, b4) - r4 = addMul64(r4, a1, b3) - r4 = addMul64(r4, a2, b2) - r4 = addMul64(r4, a3, b1) - r4 = addMul64(r4, a4, b0) - - // After the multiplication, we need to reduce (carry) the five coefficients - // to obtain a result with limbs that are at most slightly larger than 2âµÂ¹, - // to respect the Element invariant. - // - // Overall, the reduction works the same as carryPropagate, except with - // wider inputs: we take the carry for each coefficient by shifting it right - // by 51, and add it to the limb above it. The top carry is multiplied by 19 - // according to the reduction identity and added to the lowest limb. 
-	//
-	// The largest coefficient (r0) will be at most 111 bits, which guarantees
-	// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
-	//
-	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
-	// r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
-	// r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
-	// r0 < 2⁷ × 2⁵² × 2⁵²
-	// r0 < 2¹¹¹
-	//
-	// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
-	// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
-	// allows us to easily apply the reduction identity.
-	//
-	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
-	// r4 < 5 × 2⁵² × 2⁵²
-	// r4 < 2¹⁰⁷
-	//
-
-	c0 := shiftRightBy51(r0)
-	c1 := shiftRightBy51(r1)
-	c2 := shiftRightBy51(r2)
-	c3 := shiftRightBy51(r3)
-	c4 := shiftRightBy51(r4)
-
-	rr0 := r0.lo&maskLow51Bits + c4*19
-	rr1 := r1.lo&maskLow51Bits + c0
-	rr2 := r2.lo&maskLow51Bits + c1
-	rr3 := r3.lo&maskLow51Bits + c2
-	rr4 := r4.lo&maskLow51Bits + c3
-
-	// Now all coefficients fit into 64-bit registers but are still too large to
-	// be passed around as a Element. We therefore do one last carry chain,
-	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
-	*v = Element{rr0, rr1, rr2, rr3, rr4}
-	v.carryPropagate()
-}
-
-func feSquareGeneric(v, a *Element) {
-	l0 := a.l0
-	l1 := a.l1
-	l2 := a.l2
-	l3 := a.l3
-	l4 := a.l4
-
-	// Squaring works precisely like multiplication above, but thanks to its
-	// symmetry we get to group a few terms together.
-	//
-	//                          l4   l3   l2   l1   l0  x
-	//                          l4   l3   l2   l1   l0  =
-	//                         ------------------------
-	//                        l4l0 l3l0 l2l0 l1l0 l0l0  +
-	//                   l4l1 l3l1 l2l1 l1l1 l0l1       +
-	//              l4l2 l3l2 l2l2 l1l2 l0l2            +
-	//         l4l3 l3l3 l2l3 l1l3 l0l3                 +
-	//    l4l4 l3l4 l2l4 l1l4 l0l4                      =
-	//   ----------------------------------------------
-	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
-	//
-	//            l4l0    l3l0    l2l0    l1l0    l0l0  +
-	//            l3l1    l2l1    l1l1    l0l1 19×l4l1  +
-	//            l2l2    l1l2    l0l2 19×l4l2 19×l3l2  +
-	//            l1l3    l0l3 19×l4l3 19×l3l3 19×l2l3  +
-	//            l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4  =
-	//           --------------------------------------
-	//              r4      r3      r2      r1      r0
-	//
-	// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
-	// only three Mul64 and four Add64, instead of five and eight.
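An aside for readers auditing the bounds above: the reduction identity a·2²⁵⁵ + b ≡ a·19 + b (mod 2²⁵⁵ − 19) that these comments rely on is easy to spot-check with math/big. A standalone sketch, not part of the patch, with arbitrary values:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the field prime used by curve25519.
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))

	a, b := big.NewInt(123456789), big.NewInt(987654321)

	// lhs = (a*2^255 + b) mod p
	lhs := new(big.Int).Lsh(a, 255)
	lhs.Add(lhs, b).Mod(lhs, p)

	// rhs = (a*19 + b) mod p
	rhs := new(big.Int).Mul(a, big.NewInt(19))
	rhs.Add(rhs, b).Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // true: folding the top carry with ×19 agrees mod p
}
```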
-
-	l0_2 := l0 * 2
-	l1_2 := l1 * 2
-
-	l1_38 := l1 * 38
-	l2_38 := l2 * 38
-	l3_38 := l3 * 38
-
-	l3_19 := l3 * 19
-	l4_19 := l4 * 19
-
-	// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
-	r0 := mul64(l0, l0)
-	r0 = addMul64(r0, l1_38, l4)
-	r0 = addMul64(r0, l2_38, l3)
-
-	// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
-	r1 := mul64(l0_2, l1)
-	r1 = addMul64(r1, l2_38, l4)
-	r1 = addMul64(r1, l3_19, l3)
-
-	// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
-	r2 := mul64(l0_2, l2)
-	r2 = addMul64(r2, l1, l1)
-	r2 = addMul64(r2, l3_38, l4)
-
-	// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
-	r3 := mul64(l0_2, l3)
-	r3 = addMul64(r3, l1_2, l2)
-	r3 = addMul64(r3, l4_19, l4)
-
-	// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
-	r4 := mul64(l0_2, l4)
-	r4 = addMul64(r4, l1_2, l3)
-	r4 = addMul64(r4, l2, l2)
-
-	c0 := shiftRightBy51(r0)
-	c1 := shiftRightBy51(r1)
-	c2 := shiftRightBy51(r2)
-	c3 := shiftRightBy51(r3)
-	c4 := shiftRightBy51(r4)
-
-	rr0 := r0.lo&maskLow51Bits + c4*19
-	rr1 := r1.lo&maskLow51Bits + c0
-	rr2 := r2.lo&maskLow51Bits + c1
-	rr3 := r3.lo&maskLow51Bits + c2
-	rr4 := r4.lo&maskLow51Bits + c3
-
-	*v = Element{rr0, rr1, rr2, rr3, rr4}
-	v.carryPropagate()
-}
-
-// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
-// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
-func (v *Element) carryPropagateGeneric() *Element {
-	c0 := v.l0 >> 51
-	c1 := v.l1 >> 51
-	c2 := v.l2 >> 51
-	c3 := v.l3 >> 51
-	c4 := v.l4 >> 51
-
-	v.l0 = v.l0&maskLow51Bits + c4*19
-	v.l1 = v.l1&maskLow51Bits + c0
-	v.l2 = v.l2&maskLow51Bits + c1
-	v.l3 = v.l3&maskLow51Bits + c2
-	v.l4 = v.l4&maskLow51Bits + c3
-
-	return v
-}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint
deleted file mode 100644
index e3685f95..00000000
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint
+++ /dev/null
@@ -1 +0,0 @@
-b0c49ae9f59d233526f8934262c5bbbe14d4358d
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh
deleted file mode 100644
index 1ba22a8b..00000000
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#! /bin/bash
-set -euo pipefail
-
-cd "$(git rev-parse --show-toplevel)"
-
-STD_PATH=src/crypto/ed25519/internal/edwards25519/field
-LOCAL_PATH=curve25519/internal/field
-LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint)
-
-git fetch https://go.googlesource.com/go master
-
-if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then
-	echo "No changes."
-else
-	NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint)
-	echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..."
-	git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \
-		git apply -3 --directory=$LOCAL_PATH
-fi
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
deleted file mode 100644
index a7828345..00000000
--- a/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
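The ed25519.go deletion that begins here removes a thin compatibility wrapper whose entire body (shown below) forwards to the standard library's crypto/ed25519. For context, a minimal sketch of the equivalent standard-library calls, illustrative only:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	// crypto/ed25519 also accepts a nil reader and defaults to crypto/rand.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("attested message")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}
```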
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-//
-// Beginning with Go 1.13, the functionality of this package was moved to the
-// standard library as crypto/ed25519. This package only acts as a compatibility
-// wrapper.
-package ed25519
-
-import (
-	"crypto/ed25519"
-	"io"
-)
-
-const (
-	// PublicKeySize is the size, in bytes, of public keys as used in this package.
-	PublicKeySize = 32
-	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
-	PrivateKeySize = 64
-	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
-	SignatureSize = 64
-	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
-	SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-//
-// This type is an alias for crypto/ed25519's PublicKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PublicKey = ed25519.PublicKey
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-//
-// This type is an alias for crypto/ed25519's PrivateKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PrivateKey = ed25519.PrivateKey
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
-	return ed25519.GenerateKey(rand)
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
-	return ed25519.NewKeyFromSeed(seed)
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
-	return ed25519.Sign(privateKey, message)
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
-	return ed25519.Verify(publicKey, message, sig)
-}
diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
index f3c3242a..1fe600ad 100644
--- a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
+++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -32,7 +32,7 @@ chunk size.
 This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
 */
-package secretbox // import "golang.org/x/crypto/nacl/secretbox"
+package secretbox
 
 import (
 	"golang.org/x/crypto/internal/alias"
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
index bf225953..e6c645e7 100644
--- a/vendor/golang.org/x/crypto/ocsp/ocsp.go
+++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
@@ -5,7 +5,7 @@
 // Package ocsp parses OCSP responses as specified in RFC 2560.
OCSP responses // are signed messages attesting to the validity of a certificate for a small // period of time. This is used to manage revocation for X.509 certificates. -package ocsp // import "golang.org/x/crypto/ocsp" +package ocsp import ( "crypto" diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go index 8907183e..e664d127 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -10,14 +10,15 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package armor // import "golang.org/x/crypto/openpgp/armor" +package armor import ( "bufio" "bytes" "encoding/base64" - "golang.org/x/crypto/openpgp/errors" "io" + + "golang.org/x/crypto/openpgp/errors" ) // A Block represents an OpenPGP armored structure. diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go index 743b35a1..f922bdbc 100644 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -16,7 +16,7 @@ // https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has // compatibility and security issues (see https://eprint.iacr.org/2021/923). // Moreover, this package doesn't protect against side-channel attacks. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" +package elgamal import ( "crypto/rand" diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go index 1d7a0ea0..a3287494 100644 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go @@ -9,7 +9,7 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package errors // import "golang.org/x/crypto/openpgp/errors" +package errors import ( "strconv" diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go index 0a19794a..a84a1a21 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -10,7 +10,7 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package packet // import "golang.org/x/crypto/openpgp/packet" +package packet import ( "bufio" diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go index 48a89314..cff3db91 100644 --- a/vendor/golang.org/x/crypto/openpgp/read.go +++ b/vendor/golang.org/x/crypto/openpgp/read.go @@ -9,7 +9,7 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package openpgp // import "golang.org/x/crypto/openpgp" +package openpgp import ( "crypto" diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go index f53244a1..fa1a9190 100644 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -10,7 +10,7 @@ // for their specific task. 
If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package s2k // import "golang.org/x/crypto/openpgp/s2k" +package s2k import ( "crypto" diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 904b57e0..28cd99c7 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To choose, you can pass the `New` functions from the different SHA packages to pbkdf2.Key. */ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" +package pbkdf2 import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go index 3fd05b27..3685b344 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package salsa provides low-level access to functions in the Salsa family. -package salsa // import "golang.org/x/crypto/salsa20/salsa" +package salsa import "math/bits" diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go index c971a99f..76fa40fb 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -5,7 +5,7 @@ // Package scrypt implements the scrypt key derivation function as defined in // Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard // Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt // import "golang.org/x/crypto/scrypt" +package scrypt import ( "crypto/sha256" diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index decd8cf9..7e023090 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -59,4 +59,4 @@ // They produce output of the same length, with the same security strengths // against all attacks. This means, in particular, that SHA3-256 only has // 128-bit collision resistance, because its output length is 32 bytes. -package sha3 // import "golang.org/x/crypto/sha3" +package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index 0d8043fd..c544b29e 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -9,6 +9,7 @@ package sha3 // bytes. import ( + "crypto" "hash" ) @@ -16,39 +17,50 @@ import ( // Its generic security strength is 224 bits against preimage attacks, // and 112 bits against collision attacks. func New224() hash.Hash { - if h := new224Asm(); h != nil { - return h - } - return &state{rate: 144, outputLen: 28, dsbyte: 0x06} + return new224() } // New256 creates a new SHA3-256 hash. // Its generic security strength is 256 bits against preimage attacks, // and 128 bits against collision attacks. func New256() hash.Hash { - if h := new256Asm(); h != nil { - return h - } - return &state{rate: 136, outputLen: 32, dsbyte: 0x06} + return new256() } // New384 creates a new SHA3-384 hash. // Its generic security strength is 384 bits against preimage attacks, // and 192 bits against collision attacks. 
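The pbkdf2 hunk above only drops the import comment, but the doc text it keeps describes the API: a SHA constructor such as sha256.New is passed as the final argument of pbkdf2.Key. A brief usage sketch; the password, salt, and iteration count are illustrative:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// Derive a 32-byte key from a password; the hash constructor is
	// passed in, exactly as the package doc describes.
	key := pbkdf2.Key([]byte("password"), []byte("random-salt"), 4096, 32, sha256.New)
	fmt.Printf("%x\n", key)
}
```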
func New384() hash.Hash { - if h := new384Asm(); h != nil { - return h - } - return &state{rate: 104, outputLen: 48, dsbyte: 0x06} + return new384() } // New512 creates a new SHA3-512 hash. // Its generic security strength is 512 bits against preimage attacks, // and 256 bits against collision attacks. func New512() hash.Hash { - if h := new512Asm(); h != nil { - return h - } + return new512() +} + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + +func new224Generic() *state { + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +func new256Generic() *state { + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +func new384Generic() *state { + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +func new512Generic() *state { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go deleted file mode 100644 index fe8c8479..00000000 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -import ( - "hash" -) - -// new224Asm returns an assembly implementation of SHA3-224 if available, -// otherwise it returns nil. -func new224Asm() hash.Hash { return nil } - -// new256Asm returns an assembly implementation of SHA3-256 if available, -// otherwise it returns nil. -func new256Asm() hash.Hash { return nil } - -// new384Asm returns an assembly implementation of SHA3-384 if available, -// otherwise it returns nil. -func new384Asm() hash.Hash { return nil } - -// new512Asm returns an assembly implementation of SHA3-512 if available, -// otherwise it returns nil. -func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go new file mode 100644 index 00000000..9d85fb62 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func new224() *state { + return new224Generic() +} + +func new256() *state { + return new256Generic() +} + +func new384() *state { + return new384Generic() +} + +func new512() *state { + return new512Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go deleted file mode 100644 index addfd504..00000000 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
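One consequence of the init block moved into hashes.go above: importing the package, even with a blank import, registers the SHA-3 hashes with the standard crypto package, so crypto.SHA3_256.New works. A short sketch, illustrative only:

```go
package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/sha3" // blank import runs init, registering SHA3-224..512
)

func main() {
	h := crypto.SHA3_256.New() // resolved via the registration above
	h.Write([]byte("abc"))
	fmt.Printf("%x\n", h.Sum(nil))
}
```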
- -//go:build go1.4 - -package sha3 - -import ( - "crypto" -) - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index 4884d172..afedde5a 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -23,7 +23,6 @@ const ( type state struct { // Generic sponge components. a [25]uint64 // main state of the hash - buf []byte // points into storage rate int // the number of bytes of state to use // dsbyte contains the "domain separation" bits and the first bit of @@ -40,7 +39,8 @@ type state struct { // Extendable-Output Functions (May 2014)" dsbyte byte - storage storageBuf + i, n int // storage[i:n] is the buffer, i is only used while squeezing + storage [maxRate]byte // Specific to SHA-3 and SHAKE. outputLen int // the default output size in bytes @@ -54,24 +54,18 @@ func (d *state) BlockSize() int { return d.rate } func (d *state) Size() int { return d.outputLen } // Reset clears the internal state by zeroing the sponge state and -// the byte buffer, and setting Sponge.state to absorbing. +// the buffer indexes, and setting Sponge.state to absorbing. func (d *state) Reset() { // Zero the permutation's state. for i := range d.a { d.a[i] = 0 } d.state = spongeAbsorbing - d.buf = d.storage.asBytes()[:0] + d.i, d.n = 0, 0 } func (d *state) clone() *state { ret := *d - if ret.state == spongeAbsorbing { - ret.buf = ret.storage.asBytes()[:len(ret.buf)] - } else { - ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] - } - return &ret } @@ -82,43 +76,40 @@ func (d *state) permute() { case spongeAbsorbing: // If we're absorbing, we need to xor the input into the state // before applying the permutation. - xorIn(d, d.buf) - d.buf = d.storage.asBytes()[:0] + xorIn(d, d.storage[:d.rate]) + d.n = 0 keccakF1600(&d.a) case spongeSqueezing: // If we're squeezing, we need to apply the permutation before // copying more output. keccakF1600(&d.a) - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) + d.i = 0 + copyOut(d, d.storage[:d.rate]) } } // pads appends the domain separation bits in dsbyte, applies // the multi-bitrate 10..1 padding rule, and permutes the state. -func (d *state) padAndPermute(dsbyte byte) { - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } +func (d *state) padAndPermute() { // Pad with this instance's domain-separator bits. We know that there's // at least one byte of space in d.buf because, if it were full, // permute would have been called to empty it. dsbyte also contains the // first one bit for the padding. See the comment in the state struct. - d.buf = append(d.buf, dsbyte) - zerosStart := len(d.buf) - d.buf = d.storage.asBytes()[:d.rate] - for i := zerosStart; i < d.rate; i++ { - d.buf[i] = 0 + d.storage[d.n] = d.dsbyte + d.n++ + for d.n < d.rate { + d.storage[d.n] = 0 + d.n++ } // This adds the final one bit for the padding. Because of the way that // bits are numbered from the LSB upwards, the final bit is the MSB of // the last byte. - d.buf[d.rate-1] ^= 0x80 + d.storage[d.rate-1] ^= 0x80 // Apply the permutation d.permute() d.state = spongeSqueezing - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) + d.n = d.rate + copyOut(d, d.storage[:d.rate]) } // Write absorbs more data into the hash's state. 
It panics if any @@ -127,28 +118,25 @@ func (d *state) Write(p []byte) (written int, err error) { if d.state != spongeAbsorbing { panic("sha3: Write after Read") } - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } written = len(p) for len(p) > 0 { - if len(d.buf) == 0 && len(p) >= d.rate { + if d.n == 0 && len(p) >= d.rate { // The fast path; absorb a full "rate" bytes of input and apply the permutation. xorIn(d, p[:d.rate]) p = p[d.rate:] keccakF1600(&d.a) } else { // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - len(d.buf) + todo := d.rate - d.n if todo > len(p) { todo = len(p) } - d.buf = append(d.buf, p[:todo]...) + d.n += copy(d.storage[d.n:], p[:todo]) p = p[todo:] // If the sponge is full, apply the permutation. - if len(d.buf) == d.rate { + if d.n == d.rate { d.permute() } } @@ -161,19 +149,19 @@ func (d *state) Write(p []byte) (written int, err error) { func (d *state) Read(out []byte) (n int, err error) { // If we're still absorbing, pad and apply the permutation. if d.state == spongeAbsorbing { - d.padAndPermute(d.dsbyte) + d.padAndPermute() } n = len(out) // Now, do the squeezing. for len(out) > 0 { - n := copy(out, d.buf) - d.buf = d.buf[n:] + n := copy(out, d.storage[d.i:d.n]) + d.i += n out = out[n:] // Apply the permutation if we've squeezed the sponge dry. - if len(d.buf) == 0 { + if d.i == d.rate { d.permute() } } diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index b4fbbf86..00d8034a 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -248,56 +248,56 @@ func (s *asmState) Clone() ShakeHash { return s.clone() } -// new224Asm returns an assembly implementation of SHA3-224 if available, -// otherwise it returns nil. -func new224Asm() hash.Hash { +// new224 returns an assembly implementation of SHA3-224 if available, +// otherwise it returns a generic implementation. +func new224() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_224) } - return nil + return new224Generic() } -// new256Asm returns an assembly implementation of SHA3-256 if available, -// otherwise it returns nil. -func new256Asm() hash.Hash { +// new256 returns an assembly implementation of SHA3-256 if available, +// otherwise it returns a generic implementation. +func new256() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_256) } - return nil + return new256Generic() } -// new384Asm returns an assembly implementation of SHA3-384 if available, -// otherwise it returns nil. -func new384Asm() hash.Hash { +// new384 returns an assembly implementation of SHA3-384 if available, +// otherwise it returns a generic implementation. +func new384() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_384) } - return nil + return new384Generic() } -// new512Asm returns an assembly implementation of SHA3-512 if available, -// otherwise it returns nil. -func new512Asm() hash.Hash { +// new512 returns an assembly implementation of SHA3-512 if available, +// otherwise it returns a generic implementation. +func new512() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_512) } - return nil + return new512Generic() } -// newShake128Asm returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns nil. -func newShake128Asm() ShakeHash { +// newShake128 returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns a generic implementation. 
+func newShake128() ShakeHash { if cpu.S390X.HasSHA3 { return newAsmState(shake_128) } - return nil + return newShake128Generic() } -// newShake256Asm returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns nil. -func newShake256Asm() ShakeHash { +// newShake256 returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns a generic implementation. +func newShake256() ShakeHash { if cpu.S390X.HasSHA3 { return newAsmState(shake_256) } - return nil + return newShake256Generic() } diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index bb699840..1ea9275b 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -115,19 +115,21 @@ func (c *state) Clone() ShakeHash { // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. func NewShake128() ShakeHash { - if h := newShake128Asm(); h != nil { - return h - } - return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} + return newShake128() } // NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used. func NewShake256() ShakeHash { - if h := newShake256Asm(); h != nil { - return h - } + return newShake256() +} + +func newShake128Generic() *state { + return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} +} + +func newShake256Generic() *state { return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} } diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go deleted file mode 100644 index 8d31cf5b..00000000 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -// newShake128Asm returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns nil. -func newShake128Asm() ShakeHash { - return nil -} - -// newShake256Asm returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns nil. -func newShake256Asm() ShakeHash { - return nil -} diff --git a/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/vendor/golang.org/x/crypto/sha3/shake_noasm.go new file mode 100644 index 00000000..4276ba4a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func newShake128() *state { + return newShake128Generic() +} + +func newShake256() *state { + return newShake256Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index 7337cca8..6ada5c95 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -2,22 +2,39 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !386 && !ppc64le) || purego - package sha3 -// A storageBuf is an aligned array of maxRate bytes. 
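Unlike the fixed-output constructors, the SHAKE constructors touched above return a ShakeHash whose output length is chosen by the caller through Read. A minimal usage sketch, illustrative only:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.NewShake256()
	h.Write([]byte("input"))

	out := make([]byte, 64) // the caller picks the output length
	h.Read(out)             // squeeze 64 bytes from the sponge
	fmt.Printf("%x\n", out)
}
```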
-type storageBuf [maxRate]byte - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(b) -} +import ( + "crypto/subtle" + "encoding/binary" + "unsafe" -var ( - xorIn = xorInGeneric - copyOut = copyOutGeneric - xorInUnaligned = xorInGeneric - copyOutUnaligned = copyOutGeneric + "golang.org/x/sys/cpu" ) -const xorImplementationUnaligned = "generic" +// xorIn xors the bytes in buf into the state. +func xorIn(d *state, buf []byte) { + if cpu.IsBigEndian { + for i := 0; len(buf) >= 8; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + subtle.XORBytes(ab[:], ab[:], buf) + } +} + +// copyOut copies uint64s to a byte buffer. +func copyOut(d *state, b []byte) { + if cpu.IsBigEndian { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + copy(b, ab[:]) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go deleted file mode 100644 index 8d947711..00000000 --- a/vendor/golang.org/x/crypto/sha3/xor_generic.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -import "encoding/binary" - -// xorInGeneric xors the bytes in buf into the state; it -// makes no non-portable assumptions about memory layout -// or alignment. -func xorInGeneric(d *state, buf []byte) { - n := len(buf) / 8 - - for i := 0; i < n; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } -} - -// copyOutGeneric copies uint64s to a byte buffer. -func copyOutGeneric(d *state, b []byte) { - for i := 0; len(b) >= 8; i++ { - binary.LittleEndian.PutUint64(b, d.a[i]) - b = b[8:] - } -} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go deleted file mode 100644 index 870e2d16..00000000 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (amd64 || 386 || ppc64le) && !purego - -package sha3 - -import "unsafe" - -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate / 8]uint64 - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(unsafe.Pointer(b)) -} - -// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a -// XOR buf. 
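The rewritten xorIn above delegates to crypto/subtle.XORBytes (added in Go 1.20) on little-endian machines; in isolation its behavior looks like this, as a standalone sketch:

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	x := []byte{0x0f, 0xf0}
	y := []byte{0xff, 0xff}
	dst := make([]byte, 2)

	n := subtle.XORBytes(dst, x, y) // dst[i] = x[i] ^ y[i], returns bytes written
	fmt.Println(n, dst)             // 2 [240 15]
}
```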
-func xorInUnaligned(d *state, buf []byte) { - n := len(buf) - bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] - if n >= 72 { - d.a[0] ^= bw[0] - d.a[1] ^= bw[1] - d.a[2] ^= bw[2] - d.a[3] ^= bw[3] - d.a[4] ^= bw[4] - d.a[5] ^= bw[5] - d.a[6] ^= bw[6] - d.a[7] ^= bw[7] - d.a[8] ^= bw[8] - } - if n >= 104 { - d.a[9] ^= bw[9] - d.a[10] ^= bw[10] - d.a[11] ^= bw[11] - d.a[12] ^= bw[12] - } - if n >= 136 { - d.a[13] ^= bw[13] - d.a[14] ^= bw[14] - d.a[15] ^= bw[15] - d.a[16] ^= bw[16] - } - if n >= 144 { - d.a[17] ^= bw[17] - } - if n >= 168 { - d.a[18] ^= bw[18] - d.a[19] ^= bw[19] - d.a[20] ^= bw[20] - } -} - -func copyOutUnaligned(d *state, buf []byte) { - ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) - copy(buf, ab[:]) -} - -var ( - xorIn = xorInUnaligned - copyOut = copyOutUnaligned -) - -const xorImplementationUnaligned = "unaligned" diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index fecba8eb..106708d2 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -10,7 +10,7 @@ // References: // // [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 -package agent // import "golang.org/x/crypto/ssh/agent" +package agent import ( "bytes" diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index 9486c598..b9396101 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -71,6 +71,10 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { for auth := AuthMethod(new(noneAuth)); auth != nil; { ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { + // On disconnect, return error immediately + if _, ok := err.(*disconnectMsg); ok { + return err + } // We return the error later if there is no other method left to // try. ok = authFailure diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index edbe6334..f5d352fe 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -20,4 +20,4 @@ References: This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. */ -package ssh // import "golang.org/x/crypto/ssh" +package ssh diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index df4ebdad..7967665f 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -904,6 +904,10 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { return errors.New("ssh: signature did not verify") } +func (k *skECDSAPublicKey) CryptoPublicKey() crypto.PublicKey { + return &k.PublicKey +} + type skEd25519PublicKey struct { // application is a URL-like string, typically "ssh:" for SSH. // see openssh/PROTOCOL.u2f for details. @@ -1000,6 +1004,10 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return nil } +func (k *skEd25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + // NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, // *ecdsa.PrivateKey or any other crypto.Signer and returns a // corresponding Signer instance. 
ECDSA keys must use P-256, P-384 or diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index e2ae4f89..3ca9e89e 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -462,6 +462,24 @@ func (p *PartialSuccessError) Error() string { // It is returned in ServerAuthError.Errors from NewServerConn. var ErrNoAuth = errors.New("ssh: no auth passed yet") +// BannerError is an error that can be returned by authentication handlers in +// ServerConfig to send a banner message to the client. +type BannerError struct { + Err error + Message string +} + +func (b *BannerError) Unwrap() error { + return b.Err +} + +func (b *BannerError) Error() string { + if b.Err == nil { + return b.Message + } + return b.Err.Error() +} + func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { sessionID := s.transport.getSessionID() var cache pubKeyCache @@ -734,6 +752,18 @@ userAuthLoop: config.AuthLogCallback(s, userAuthReq.Method, authErr) } + var bannerErr *BannerError + if errors.As(authErr, &bannerErr) { + if bannerErr.Message != "" { + bannerMsg := &userAuthBannerMsg{ + Message: bannerErr.Message, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + return nil, err + } + } + } + if authErr == nil { break userAuthLoop } diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index b67897f7..f58bbc7b 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -22,10 +22,12 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // SortFunc sorts the slice x in ascending order as determined by the cmp // function. This sort is not guaranteed to be stable. // cmp(a, b) should return a negative number when a < b, a positive number when -// a > b and zero when a == b. +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. // // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df281..003e649f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" "fmt" "io" @@ -26,6 +27,7 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -210,12 +212,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -383,3 +379,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. 
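The BannerError type added to server.go above lets any authentication callback surface a banner to the client before the failure is reported. A minimal server-config sketch; the password check and message are hypothetical:

```go
package main

import (
	"errors"

	"golang.org/x/crypto/ssh"
)

func newServerConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
			if string(password) != "s3cret" { // hypothetical check
				// Message is written to the client as an SSH banner;
				// Err is what server-side error handling sees via Unwrap.
				return nil, &ssh.BannerError{
					Err:     errors.New("invalid password"),
					Message: "authentication failed; contact your administrator\n",
				}
			}
			return nil, nil
		},
	}
}

func main() { _ = newServerConfig() }
```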
+type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index c5d08108..6c349f3e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -154,6 +154,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState + + // Synchronization group used for testing. + // Outside of tests, this is nil. + group synctestGroupInterface +} + +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() + } +} + +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() + } + return time.Now() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} } func (s *Server) initialConnRecvWindowSize() int32 { @@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() @@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -599,8 +639,8 @@ type serverConn struct { inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -649,12 +689,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -811,8 +851,9 @@ type readFrameResult struct { // consumer is done 
with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -843,6 +884,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -922,13 +964,13 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() loopNum := 0 @@ -1057,10 +1099,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1425,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1639,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1661,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -2021,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2119,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2343,6 +2386,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2639,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2761,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2777,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2787,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2803,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd1..00000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. -// -// The testSyncHooks also manage timers and synthetic time in tests. 
-// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. - // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. -type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. - blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? -// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. 
-func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. -func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. -type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. 
-type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 00000000..0b1c17b8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 2fa49490..98a49c6b 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -185,7 +185,45 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - syncHooks *testSyncHooks + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() + } +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -352,60 +390,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. 
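The new timer.go above exists because time.Timer's C is a struct field, not an interface method, so real timers cannot be swapped for synthetic ones directly. The adapter pattern it uses can be sketched standalone; this mirrors, but is not, the http2 internals:

```go
package main

import (
	"fmt"
	"time"
)

// timer is the method set that code under test needs from *time.Timer.
type timer interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

// timeTimer adapts *time.Timer: embedding supplies Reset and Stop,
// and C() re-exposes the channel field as a method.
type timeTimer struct{ *time.Timer }

func (t timeTimer) C() <-chan time.Time { return t.Timer.C }

func main() {
	var t timer = timeTimer{time.NewTimer(10 * time.Millisecond)}
	<-t.C()
	fmt.Println("timer fired")
}
```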
-// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { @@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + tm := t.newTimer(d) select { case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) @@ -725,8 +696,8 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse, nil) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t 
*Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives(), nil) + return t.newClientConn(c, t.disableKeepAlives()) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, } - if hooks != nil { - hooks.newclientconn(cc) + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) c = cc.tconn } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) - } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } @@ -893,7 +860,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo return nil, cc.werr } - cc.goRun(cc.readLoop) + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() return cc, nil } @@ -901,7 +874,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1144,7 +1117,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1156,9 +1130,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1168,7 +1142,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1206,7 +1180,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1321,23 +1295,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. 
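One behavioral detail in the hunk above: newClientConn now arms the idle timer only after the settings handshake has been written, so an idle timeout can no longer fire against a half-initialized connection. A toy sketch of the same arm-callbacks-last ordering (all names illustrative, not from the diff):

package main

import (
	"fmt"
	"sync"
	"time"
)

type conn struct {
	mu        sync.Mutex
	ready     bool
	idleTimer *time.Timer
}

func (c *conn) onIdle() {
	c.mu.Lock()
	defer c.mu.Unlock()
	fmt.Println("idle; ready =", c.ready) // always true: timer armed last
}

func newConn() *conn {
	c := &conn{}
	// ... handshake / initialization would happen here ...
	c.ready = true
	// Arm callbacks only once the struct is fully initialized,
	// mirroring how newClientConn now starts the idle timer last.
	c.idleTimer = time.AfterFunc(time.Millisecond, c.onIdle)
	return c
}

func main() {
	_ = newConn()
	time.Sleep(5 * time.Millisecond)
}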
+ // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1398,24 +1379,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1445,8 +1409,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1457,7 +1422,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. -func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1471,21 +1436,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false - } - return true - }) - } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1510,28 +1460,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
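The transparent-gzip block that moved into roundTrip encodes three safety rules: never override a caller-supplied Accept-Encoding, skip HEAD requests (the nginx bug referenced above), and skip Range requests, since a decoded slice of a gzip stream is unusable. The same predicate as a standalone sketch:

package main

import (
	"fmt"
	"net/http"
)

// shouldRequestGzip mirrors the conditions in the hunk above:
// transparent gzip is only safe when the client controls decoding
// end to end and the response body is the whole document.
func shouldRequestGzip(req *http.Request, compressionDisabled bool) bool {
	return !compressionDisabled &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != "HEAD"
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	fmt.Println(shouldRequestGzip(req, false)) // true

	req.Header.Set("Range", "bytes=0-99")
	fmt.Println(shouldRequestGzip(req, false)) // false: partial gzip is useless
}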
See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1594,7 +1524,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv @@ -1603,21 +1533,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1766,7 +1681,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2028,7 +1943,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() + cc.cond.Wait() } } @@ -2311,7 +2226,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2333,6 +2248,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
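The response-header timeout above relies on a Go select idiom: when no timeout is configured, respHeaderTimer stays a nil channel, and a nil channel never becomes ready in a select, so the timeout case silently disables itself. A self-contained sketch of that idiom:

package main

import (
	"fmt"
	"time"
)

func wait(headerTimeout time.Duration, gotHeaders <-chan struct{}) error {
	// A nil channel blocks forever in select, so leaving timeoutC nil
	// when no timeout is configured disables that case for free.
	var timeoutC <-chan time.Time
	if headerTimeout != 0 {
		t := time.NewTimer(headerTimeout)
		defer t.Stop()
		timeoutC = t.C
	}
	select {
	case <-gotHeaders:
		return nil
	case <-timeoutC:
		return fmt.Errorf("timeout awaiting response headers")
	}
}

func main() {
	got := make(chan struct{})
	go func() { time.Sleep(time.Millisecond); close(got) }()
	fmt.Println(wait(time.Second, got)) // <nil>
}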
func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2399,7 +2315,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() } @@ -2436,7 +2352,7 @@ func (rl *clientConnReadLoop) run() error { readIdleTimeout := cc.t.ReadIdleTimeout var t timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -3034,7 +2950,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -3089,7 +3005,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } @@ -3133,7 +3049,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } var pingError error errc := make(chan struct{}) - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3144,20 +3061,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { close(errc) return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c66..f6783339 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa974..4ed2e488 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -263,6 +263,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -549,6 +550,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe52..3a5e776f 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + 
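The writesched_priority.go hunk above fixes a classic mutate-while-iterating bug: setParent unlinks each child from n's kid list and splices it into the new parent's list, so advancing with k.next after the call no longer walks n's children. Draining from the head until the list is empty sidesteps the problem. A simplified standalone illustration of the hazard:

package main

import "fmt"

// node is a child in a singly linked sibling list, like priorityNode.
type node struct {
	name string
	next *node
}

var kids *node // head of the parent's child list

// reparent detaches the current head, the way setParent first unlinks
// a node from its old parent before inserting it elsewhere.
func reparent() *node {
	n := kids
	kids = n.next
	n.next = nil // n now belongs to another list; its old next is gone
	return n
}

func main() {
	kids = &node{"a", &node{"b", &node{"c", nil}}}

	// Buggy shape: `for k := kids; k != nil; k = k.next { reparent() }`
	// reads k.next after reparent() already rewired it, so only "a"
	// would be visited.

	// Fixed shape, as in the hunk above: drain from the head.
	for kids != nil {
		fmt.Println("reparented", reparent().name) // a, b, c
	}
}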
return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a89..4cc7b005 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8..4e92e5aa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 93a38a97..877a62b4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -502,6 +502,7 @@ const ( BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -657,6 +658,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -1339,6 +1343,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW = 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1627,6 +1632,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1653,6 +1659,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -2169,7 +2176,7 @@ const ( NFT_SECMARK_CTX_MAXLEN = 0x100 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_F_MASK = 0x7 NFT_TABLE_MAXNAMELEN = 0x100 NFT_TRACETYPE_MAX = 0x3 NFT_TUNNEL_F_MASK = 0x7 @@ -2403,6 +2410,7 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e @@ -2896,8 +2904,9 @@ const ( RWF_APPEND = 0x10 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x3f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -2918,7 +2927,9 @@ const ( SCHED_RESET_ON_FORK = 0x40000000 SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + 
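MmapPtr and MunmapPtr (added above) are the unsafe.Pointer counterparts of Mmap/Munmap for callers that cannot round-trip through a []byte, e.g. fixed-address or device mappings. A small usage sketch, assuming a Linux target; the signatures come straight from the hunk above:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Map one anonymous page via the pointer-based API: no []byte
	// round trip, fd -1 and MAP_ANONYMOUS as usual.
	p, err := unix.MmapPtr(-1, 0, nil, 4096,
		unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
	if err != nil {
		panic(err)
	}
	*(*byte)(p) = 42
	fmt.Println("first byte:", *(*byte)(p))
	if err := unix.MunmapPtr(p, 4096); err != nil {
		panic(err)
	}
}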
SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 SECCOMP_ADDFD_FLAG_SEND = 0x2 @@ -3051,6 +3062,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -3071,6 +3084,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -3260,6 +3275,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3c..e4bc0bd5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca43600..689317af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d1..14270508 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -87,6 +87,7 @@ const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f24..07642c30 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, 
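PthreadChdir and PthreadFchdir (wrappers declared earlier, generated darwin bindings above) change the working directory of the calling thread only, via the non-portable pthread_chdir_np/pthread_fchdir_np libc calls. That makes runtime.LockOSThread mandatory around their use. A hedged usage sketch for macOS:

//go:build darwin

package main

import (
	"fmt"
	"os"
	"runtime"

	"golang.org/x/sys/unix"
)

func main() {
	// pthread_chdir_np affects only the calling OS thread, so the
	// goroutine must stay pinned for the relative path to resolve
	// against /tmp.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := unix.PthreadChdir("/tmp"); err != nil {
		panic(err)
	}
	f, err := os.Create("pthread-chdir-demo.txt") // relative to /tmp on this thread
	if err != nil {
		panic(err)
	}
	f.Close()
	fmt.Println("created /tmp/pthread-chdir-demo.txt")
}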
uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb284..923e08cb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997..7d73dda6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1a..05770011 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT 
libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 0036746e..4740b834 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4605,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x149 + NL80211_ATTR_MAX = 0x14a NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1f + NL80211_FREQUENCY_ATTR_MAX = 0x20 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5703,7 +5703,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -6001,3 +6001,34 @@ type CachestatRange struct { Off uint64 Len uint64 } + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 26be94a8..97651b5b 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -68,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder @@ -893,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1086,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header 
+type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1157,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 5c6035dd..eba76101 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -401,6 +402,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -1223,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { + r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r0 == 0 { + ret = GetLastError() + } + return +} + func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { @@ -3486,6 +3496,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), 
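With AceCount exported and GetAce wired up above, a discretionary ACL can now be walked entry by entry. A sketch that lists access-allowed entries on a directory; error handling is minimal, and the SidStart cast is the usual Win32 idiom rather than anything defined in this diff:

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	sd, err := windows.GetNamedSecurityInfo(`C:\Windows`,
		windows.SE_FILE_OBJECT, windows.DACL_SECURITY_INFORMATION)
	if err != nil {
		panic(err)
	}
	dacl, _, err := sd.DACL()
	if err != nil || dacl == nil {
		fmt.Println("no DACL:", err)
		return
	}
	// AceCount is exported now, so the DACL can be walked with GetAce.
	for i := uint32(0); i < uint32(dacl.AceCount); i++ {
		var ace *windows.ACCESS_ALLOWED_ACE
		if err := windows.GetAce(dacl, i, &ace); err != nil {
			continue
		}
		if ace.Header.AceType != windows.ACCESS_ALLOWED_ACE_TYPE {
			continue // the typed struct only fits allow ACEs; skip others
		}
		sid := (*windows.SID)(unsafe.Pointer(&ace.SidStart))
		fmt.Printf("allow mask %#x for %s\n", ace.Mask, sid)
	}
}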
uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 608aa6e1..0854d298 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `./scripts/vet.sh` to catch vet errors - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index c6672c0a..6a8a0778 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,6 +9,7 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [atollena](https://github.com/atollena), Datadog, Inc. - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 1f896092..be38384f 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -30,17 +30,20 @@ testdeps: GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps - ./vet.sh + ./scripts/vet.sh vetdeps: - ./vet.sh -install + ./scripts/vet.sh -install .PHONY: \ all \ build \ clean \ + deps \ proto \ test \ + testsubmodule \ testrace \ + testdeps \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index d79560a2..f391744f 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -54,13 +54,14 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { - if strings.ToLower(b.Name()) != b.Name() { + name := strings.ToLower(b.Name()) + if name != b.Name() { // TODO: Skip the use of strings.ToLower() to index the map after v1.59 // is released to switch to case sensitive balancer registry. Also, // remove this warning and update the docstrings for Register and Get. logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) } - m[strings.ToLower(b.Name())] = b + m[name] = b } // unregisterForTesting deletes the balancer with the given name from the @@ -232,8 +233,8 @@ type BuildOptions struct { // implementations which do not communicate with a remote load balancer // server can ignore this field. Authority string - // ChannelzParentID is the parent ClientConn's channelz ID. 
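NetUserEnum (added above) enumerates accounts into an API-allocated buffer that the caller must release with NetApiBufferFree. The sketch below walks level-0 entries; note that userInfo0, maxPreferredLength, and the normal-account filter value are local stand-ins for the standard Win32 definitions, not part of the diff:

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

// userInfo0 mirrors the Win32 USER_INFO_0 layout; the constants below
// are the documented MAX_PREFERRED_LENGTH and FILTER_NORMAL_ACCOUNT
// values, defined locally here as assumptions.
type userInfo0 struct{ Name *uint16 }

const (
	maxPreferredLength  = 0xFFFFFFFF
	filterNormalAccount = 0x2
)

func main() {
	var buf *byte
	var read, total, resume uint32
	err := windows.NetUserEnum(nil, 0, filterNormalAccount, &buf,
		maxPreferredLength, &read, &total, &resume)
	if err != nil {
		panic(err)
	}
	defer windows.NetApiBufferFree(buf)

	// A production caller would loop while ERROR_MORE_DATA is returned,
	// passing resume back in; one call suffices for a sketch.
	entries := unsafe.Slice((*userInfo0)(unsafe.Pointer(buf)), read)
	for _, e := range entries {
		fmt.Println(windows.UTF16PtrToString(e.Name))
	}
}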
- ChannelzParentID *channelz.Identifier + // ChannelzParent is the parent ClientConn's channelz channel. + ChannelzParent channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index b5e30cff..af39b8a4 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -21,7 +21,6 @@ package grpc import ( "context" "fmt" - "strings" "sync" "google.golang.org/grpc/balancer" @@ -66,19 +65,20 @@ type ccBalancerWrapper struct { } // newCCBalancerWrapper creates a new balancer wrapper in idle state. The -// underlying balancer is not created until the switchTo() method is invoked. +// underlying balancer is not created until the updateClientConnState() method +// is invoked. func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ cc: cc, opts: balancer.BuildOptions{ - DialCreds: cc.dopts.copts.TransportCredentials, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParent: cc.channelz, + Target: cc.parsedTarget, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -97,6 +97,11 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat if ctx.Err() != nil || ccb.balancer == nil { return } + name := gracefulswitch.ChildName(ccs.BalancerConfig) + if ccb.curBalancerName != name { + ccb.curBalancerName = name + channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name) + } err := ccb.balancer.UpdateClientConnState(*ccs) if logger.V(2) && err != nil { logger.Infof("error from balancer.UpdateClientConnState: %v", err) @@ -120,54 +125,6 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { }) } -// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the -// LB policy identified by name. -// -// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the -// first good update from the name resolver, it determines the LB policy to use -// and invokes the switchTo() method. Upon receipt of every subsequent update -// from the name resolver, it invokes this method. -// -// the ccBalancerWrapper keeps track of the current LB policy name, and skips -// the graceful balancer switching process if the name does not change. -func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - // TODO: Other languages use case-sensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - ccb.buildLoadBalancingPolicy(name) - }) -} - -// buildLoadBalancingPolicy performs the following: -// - retrieve a balancer builder for the given name. Use the default LB -// policy, pick_first, if no LB policy with name is found in the registry. 
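The balancer.Register change above lowercases the name once and keys the registry with it, so mixed-case registrations still collide case-insensitively (with a warning) until the planned switch to a case-sensitive registry. A sketch of how that surfaces through the public API, using the base package to assemble a trivial balancer:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// A toy picker builder; base.NewBalancerBuilder wraps it into a
// balancer.Builder so Register's naming rule can be demonstrated.
type pickerBuilder struct{}

func (pickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	return base.NewErrPicker(fmt.Errorf("no ready SubConns"))
}

func main() {
	// The name is lowered on registration (see the hunk above), so
	// "MyPolicy" and "mypolicy" refer to the same registry entry today.
	balancer.Register(base.NewBalancerBuilder("MyPolicy", pickerBuilder{}, base.Config{}))
	fmt.Println(balancer.Get("mypolicy") != nil) // true
}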
-// - instruct the gracefulswitch balancer to switch to the above builder. This -// will actually build the new balancer. -// - update the `curBalancerName` field -// -// Must be called from a serializer callback. -func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } - - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) - return - } - ccb.curBalancerName = builder.Name() -} - // close initiates async shutdown of the wrapper. cc.mu must be held when // calling this function. To determine the wrapper has finished shutting down, // the channel should block on ccb.serializer.Done() without cc.mu held. @@ -175,7 +132,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.mu.Lock() ccb.closed = true ccb.mu.Unlock() - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") ccb.serializer.Schedule(func(context.Context) { if ccb.balancer == nil { return @@ -212,7 +169,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { - channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ @@ -304,7 +261,7 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { } func (acbw *acBalancerWrapper) String() string { - return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID) } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 856c75dd..1afb1e84 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f0b7f320..2359f94b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -37,7 +37,6 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" - "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -67,7 +66,7 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. 
errConnClosing = errors.New("grpc: the connection is closing") - // errConnIdling indicates the the connection is being closed as the channel + // errConnIdling indicates the connection is being closed as the channel // is moving to an idle mode due to inactivity. errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default @@ -101,11 +100,6 @@ const ( defaultReadBufSize = 32 * 1024 ) -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) -} - type defaultConfigSelector struct { sc *ServiceConfig } @@ -117,13 +111,23 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// newClient returns a new client in idle mode. -func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { +// NewClient creates a new gRPC "channel" for the target URI provided. No I/O +// is performed. Use of the ClientConn for RPCs will automatically cause it to +// connect. Connect may be used to manually create a connection, but for most +// users this is unnecessary. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns +// resolver, a "dns:///" prefix should be applied to the target. +// +// The DialOptions returned by WithBlock, WithTimeout, +// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this +// function. +func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), - czData: new(channelzData), } cc.retryThrottler.Store((*retryThrottler)(nil)) @@ -175,15 +179,15 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) // Determine the resolver to use. if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelzID) + channelz.RemoveEntry(cc.channelz.ID) return nil, err } if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelzID) + channelz.RemoveEntry(cc.channelz.ID) return nil, err } - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. @@ -191,39 +195,36 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) return cc, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. +// Dial calls DialContext(context.Background(), target, opts...). // -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. +// Deprecated: use NewClient instead. Will be supported throughout 1.x. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext calls NewClient and then exits idle mode. 
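The promotion of newClient to the exported NewClient above is the headline change in this grpc bump: channel construction performs no I/O, and Dial/DialContext become thin deprecated wrappers. A minimal usage sketch:

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// No I/O happens here; the channel starts idle and connects on the
	// first RPC, or eagerly via Connect.
	cc, err := grpc.NewClient("dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	cc.Connect() // optional: kick off connecting without issuing an RPC
	fmt.Println("state:", cc.GetState())
}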
If WithBlock(true) is +// used, it calls Connect and WaitForStateChange until either the context +// expires or the state of the ClientConn is Ready. // -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. +// One subtle difference between NewClient and Dial and DialContext is that the +// former uses "dns" as the default name resolver, while the latter use +// "passthrough" for backward compatibility. This distinction should not matter +// to most users, but could matter to legacy users that specify a custom dialer +// and expect it to receive the target string directly. // -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +// Deprecated: use NewClient instead. Will be supported throughout 1.x. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc, err := newClient(target, opts...) + // At the end of this method, we kick the channel out of idle, rather than + // waiting for the first rpc. + opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + cc, err := NewClient(target, opts...) if err != nil { return nil, err } // We start the channel off in idle mode, but kick it out of idle now, - // instead of waiting for the first RPC. Other gRPC implementations do wait - // for the first RPC to kick the channel out of idle. But doing so would be - // a major behavior change for our users who are used to seeing the channel - // active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, i.e. by making newClient exported. - + // instead of waiting for the first RPC. This is the legacy behavior of + // Dial. defer func() { if err != nil { cc.Close() @@ -291,17 +292,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // addTraceEvent is a helper method to add a trace event on the channel. If the // channel is a nested one, the same event is also added on the parent channel. func (cc *ClientConn) addTraceEvent(msg string) { - ted := &channelz.TraceEventDesc{ + ted := &channelz.TraceEvent{ Desc: fmt.Sprintf("Channel %s", msg), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + if cc.dopts.channelzParent != nil { + ted.Parent = &channelz.TraceEvent{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg), Severity: channelz.CtInfo, } } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + channelz.AddTraceEvent(logger, cc.channelz, 0, ted) } type idler ClientConn @@ -418,14 +419,15 @@ func (cc *ClientConn) validateTransportCredentials() error { } // channelzRegistration registers the newly created ClientConn with channelz and -// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. -// A channelz trace event is emitted for ClientConn creation. 
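As the comment above notes, NewClient defaults to the "dns" resolver while Dial kept "passthrough"; code that relies on a custom dialer receiving the raw target string can keep the old behavior by saying so in the target URI. A short sketch:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	creds := grpc.WithTransportCredentials(insecure.NewCredentials())

	// NewClient resolves this through the "dns" scheme by default.
	cc1, err := grpc.NewClient("example.com:443", creds)
	if err != nil {
		panic(err)
	}
	defer cc1.Close()

	// Opting back into the legacy Dial behavior: the dialer sees the
	// target string verbatim.
	cc2, err := grpc.NewClient("passthrough:///example.com:443", creds)
	if err != nil {
		panic(err)
	}
	defer cc2.Close()
}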
If the newly -// created ClientConn is a nested one, i.e a valid parent ClientConn ID is -// specified via a dial option, the trace event is also added to the parent. +// stores the returned identifier in `cc.channelz`. A channelz trace event is +// emitted for ClientConn creation. If the newly created ClientConn is a nested +// one, i.e a valid parent ClientConn ID is specified via a dial option, the +// trace event is also added to the parent. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. func (cc *ClientConn) channelzRegistration(target string) { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) + cc.channelz = channelz.RegisterChannel(parentChannel, target) cc.addTraceEvent("created") } @@ -492,11 +494,11 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } // newConnectivityStateManager creates an connectivityStateManager with -// the specified id. -func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { +// the specified channel. +func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager { return &connectivityStateManager{ - channelzID: id, - pubSub: grpcsync.NewPubSub(ctx), + channelz: channel, + pubSub: grpcsync.NewPubSub(ctx), } } @@ -510,7 +512,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID *channelz.Identifier + channelz *channelz.Channel pubSub *grpcsync.PubSub } @@ -527,9 +529,10 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.channelz.ChannelMetrics.State.Store(&state) csm.pubSub.Publish(state) - channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -583,12 +586,12 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - channelzID *channelz.Identifier // Channelz identifier for the channel. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't @@ -596,7 +599,6 @@ type ClientConn struct { csMgr *connectivityStateManager pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector - czData *channelzData retryThrottler atomic.Value // Updated from service config. // mu protects the following fields. 
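The connectivity state manager now publishes each transition into the channelz Channel as well; from the caller's side the states remain observable through GetState and WaitForStateChange. A small watcher sketch (target and timeout are arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

// watch logs state transitions until the context expires or the
// channel shuts down, the same transitions the manager above records.
func watch(ctx context.Context, cc *grpc.ClientConn) {
	for {
		s := cc.GetState()
		fmt.Println("state:", s)
		if s == connectivity.Shutdown || !cc.WaitForStateChange(ctx, s) {
			return
		}
	}
}

func main() {
	cc, err := grpc.NewClient("dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	cc.Connect()
	watch(ctx, cc)
}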
@@ -690,6 +692,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { + balancer.Register(pickfirstBuilder{}) cfg := parseServiceConfig("{}") if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) @@ -707,15 +710,15 @@ func init() { } } -func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { +func (cc *ClientConn) maybeApplyDefaultServiceConfig() { if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + cc.applyServiceConfigAndBalancer(cc.sc, nil) return } if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}) } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}) } } @@ -733,7 +736,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) // May need to apply the initial service config in case the resolver // doesn't support service configs, or doesn't provide a service config // with the new addresses. - cc.maybeApplyDefaultServiceConfig(nil) + cc.maybeApplyDefaultServiceConfig() cc.balancerWrapper.resolverError(err) @@ -744,10 +747,10 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) var ret error if cc.dopts.disableServiceConfig { - channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) - cc.maybeApplyDefaultServiceConfig(s.Addresses) + channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig() } else if s.ServiceConfig == nil { - cc.maybeApplyDefaultServiceConfig(s.Addresses) + cc.maybeApplyDefaultServiceConfig() // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? } else { @@ -755,12 +758,12 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) configSelector := iresolver.GetConfigSelector(s) if configSelector != nil { if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { - channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector") } } else { configSelector = &defaultConfigSelector{sc} } - cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + cc.applyServiceConfigAndBalancer(sc, configSelector) } else { ret = balancer.ErrBadResolverState if cc.sc == nil { @@ -775,7 +778,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) var balCfg serviceconfig.LoadBalancingConfig if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig.cfg + balCfg = cc.sc.lbConfig } bw := cc.balancerWrapper cc.mu.Unlock() @@ -834,22 +837,20 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. 
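Two changes meet in the hunk above: pick_first is now registered from init() like any other LB policy, and maybeApplyDefaultServiceConfig no longer takes addresses. The default service config itself is still supplied at channel creation, e.g.:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// When the resolver supplies no service config, this default is
	// applied (maybeApplyDefaultServiceConfig above); here it selects
	// round_robin instead of the implicit pick_first.
	cc, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
	)
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}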
addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, - czData: new(channelzData), + channelz: channelz.RegisterSubChannel(cc.channelz, ""), resetBackoff: make(chan struct{}), stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Start with our address set to the first address; this may be updated if + // we connect to different addresses. + ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr) - var err error - ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") - if err != nil { - return nil, err - } - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel created", Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID), Severity: channelz.CtInfo, }, }) @@ -872,38 +873,27 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { ac.tearDown(err) } -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { - return &channelz.ChannelInternalMetric{ - State: cc.GetState(), - Target: cc.target, - CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), - } -} - // Target returns the target string of the ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) Target() string { return cc.target } +// CanonicalTarget returns the canonical target string of the ClientConn. +func (cc *ClientConn) CanonicalTarget() string { + return cc.parsedTarget.String() +} + func (cc *ClientConn) incrCallsStarted() { - atomic.AddInt64(&cc.czData.callsStarted, 1) - atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) + cc.channelz.ChannelMetrics.CallsStarted.Add(1) + cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (cc *ClientConn) incrCallsSucceeded() { - atomic.AddInt64(&cc.czData.callsSucceeded, 1) + cc.channelz.ChannelMetrics.CallsSucceeded.Add(1) } func (cc *ClientConn) incrCallsFailed() { - atomic.AddInt64(&cc.czData.callsFailed, 1) + cc.channelz.ChannelMetrics.CallsFailed.Add(1) } // connect starts creating a transport. @@ -946,10 +936,14 @@ func equalAddresses(a, b []resolver.Address) bool { // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. 
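Target() loses its experimental notice above and gains CanonicalTarget(), which returns the parsed target after default-scheme handling. A short usage sketch, assuming a grpc version that has NewClient (consistent with the deprecation notes later in this diff):

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()
	fmt.Println(cc.Target())          // localhost:50051 (as dialed)
	fmt.Println(cc.CanonicalTarget()) // dns:///localhost:50051 (parsed form)
}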
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) - addrs = copyAddressesWithoutBalancerAttributes(addrs) + limit := len(addrs) + if limit > 5 { + limit = 5 + } + channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) + + ac.mu.Lock() if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1067,7 +1061,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st }) } -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. return @@ -1088,17 +1082,6 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } else { cc.retryThrottler.Store((*retryThrottler)(nil)) } - - var newBalancerName string - if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { - // No service config or no LB policy specified in config. - newBalancerName = PickFirstBalancerName - } else if cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { // cc.sc.LB != nil - newBalancerName = *cc.sc.LB - } - cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1174,7 +1157,7 @@ func (cc *ClientConn) Close() error { // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. - channelz.RemoveEntry(cc.channelzID) + channelz.RemoveEntry(cc.channelz.ID) return nil } @@ -1195,6 +1178,10 @@ type addrConn struct { // is received, transport is closed, ac has been torn down). transport transport.ClientTransport // The current transport. + // This mutex is used on the RPC path, so its usage should be minimized as + // much as possible. + // TODO: Find a lock-free way to retrieve the transport and state from the + // addrConn. mu sync.Mutex curAddr resolver.Address // The current address. addrs []resolver.Address // All addresses that the resolver resolved to. @@ -1206,8 +1193,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.SubChannel } // Note: this requires a lock on ac.mu. 
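updateAddrs previously pretty-printed the whole address list as JSON; it now logs at most the first five entries. The capping logic as a standalone sketch:

package main

import "fmt"

// logAddrs mirrors the new updateAddrs logging: cap the printed prefix so
// a large resolver update cannot flood the log.
func logAddrs(addrs []string) {
	limit := len(addrs)
	if limit > 5 {
		limit = 5
	}
	fmt.Printf("addrConn: updateAddrs addrs (%d of %d): %v\n",
		limit, len(addrs), addrs[:limit])
}

func main() {
	logAddrs([]string{"10.0.0.1:443", "10.0.0.2:443", "10.0.0.3:443",
		"10.0.0.4:443", "10.0.0.5:443", "10.0.0.6:443"})
	// prints: addrConn: updateAddrs addrs (5 of 6): [10.0.0.1:443 ... 10.0.0.5:443]
}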
@@ -1219,10 +1205,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) close(ac.stateChan) ac.stateChan = make(chan struct{}) ac.state = s + ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s) } else { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } ac.acbw.updateState(s, lastErr) } @@ -1320,6 +1307,7 @@ func (ac *addrConn) resetTransport() { func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { + ac.channelz.ChannelMetrics.Target.Store(&addr.Addr) if ctx.Err() != nil { return errConnClosing } @@ -1335,7 +1323,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c } ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr) err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { @@ -1388,7 +1376,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() - copts.ChannelzParentID = ac.channelzID + copts.ChannelzParent = ac.channelz newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { @@ -1397,7 +1385,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, } // newTr is either nil, or closed. hcancel() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } @@ -1469,7 +1457,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
- channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.") return } @@ -1499,9 +1487,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) + channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err) } } }() @@ -1566,18 +1554,18 @@ func (ac *addrConn) tearDown(err error) { ac.cancel() ac.curAddr = resolver.Address{} - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID), Severity: channelz.CtInfo, }, }) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from // being deleted right away. - channelz.RemoveEntry(ac.channelzID) + channelz.RemoveEntry(ac.channelz.ID) ac.mu.Unlock() // We have to release the lock before the call to GracefulClose/Close here @@ -1604,39 +1592,6 @@ func (ac *addrConn) tearDown(err error) { } } -func (ac *addrConn) getState() connectivity.State { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { - ac.mu.Lock() - addr := ac.curAddr.Addr - ac.mu.Unlock() - return &channelz.ChannelInternalMetric{ - State: ac.getState(), - Target: addr, - CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), - } -} - -func (ac *addrConn) incrCallsStarted() { - atomic.AddInt64(&ac.czData.callsStarted, 1) - atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (ac *addrConn) incrCallsSucceeded() { - atomic.AddInt64(&ac.czData.callsSucceeded, 1) -} - -func (ac *addrConn) incrCallsFailed() { - atomic.AddInt64(&ac.czData.callsFailed, 1) -} - type retryThrottler struct { max float64 thresh float64 @@ -1674,12 +1629,17 @@ func (rt *retryThrottler) successfulRPC() { } } -type channelzChannel struct { - cc *ClientConn +func (ac *addrConn) incrCallsStarted() { + ac.channelz.ChannelMetrics.CallsStarted.Add(1) + ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { - return c.cc.channelzMetric() +func (ac *addrConn) incrCallsSucceeded() { + ac.channelz.ChannelMetrics.CallsSucceeded.Add(1) +} + +func (ac *addrConn) incrCallsFailed() { + ac.channelz.ChannelMetrics.CallsFailed.Add(1) } // ErrClientConnTimeout indicates that the ClientConn cannot establish the @@ 
-1721,14 +1681,14 @@ func (cc *ClientConn) connectionError() error { // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1740,17 +1700,22 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { // We are here because the user's dial target did not contain a scheme or // specified an unregistered scheme. We should fallback to the default // scheme, except when a custom dialer is specified in which case, we should - // always use passthrough scheme. - defScheme := resolver.GetDefaultScheme() - channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + // always use passthrough scheme. For either case, we need to respect any overridden + // global defaults set by the user. + defScheme := cc.dopts.defaultScheme + if internal.UserSetDefaultScheme { + defScheme = resolver.GetDefaultScheme() + } + + channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1790,7 +1755,7 @@ func encodeAuthority(authority string) string { return false case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters return false - case ':', '[', ']', '@': // Authority related delimeters + case ':', '[', ']', '@': // Authority related delimiters return false } // Everything else must be escaped. @@ -1873,6 +1838,6 @@ func (cc *ClientConn) determineAuthority() error { } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) return nil } diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index 4cdc6ba7..00000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. 
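The scheme fallback above now respects a per-client default ("dns" for NewClient, "passthrough" for Dial, via withDefaultScheme added later in this diff) unless the user overrode the global default. A sketch of the canonicalization step, using net/url in place of grpc's internal parseTarget:

package main

import (
	"fmt"
	"net/url"
)

// canonicalize sketches the fallback in parseTargetAndFindResolver: when
// the dial target has no scheme (or an unregistered one), prepend the
// default scheme and re-parse. grpc uses its own parseTarget; net/url is
// close enough to show the shape of the result.
func canonicalize(target, defScheme string) (*url.URL, error) {
	return url.Parse(defScheme + ":///" + target)
}

func main() {
	u, err := canonicalize("localhost:50051", "dns")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Path) // dns /localhost:50051
}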
-# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 08476ad1..0b42c302 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -235,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) + return fmt.Errorf("invalid code: %d", ci) } *c = Code(ci) diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 5feac3aa..665e790b 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -28,9 +28,9 @@ import ( "fmt" "net" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/protobuf/proto" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { } // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. -// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index ba242618..00273702 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -68,7 +68,7 @@ type dialOptions struct { binaryLogger binarylog.Logger copts transport.ConnectOptions callOptions []CallOption - channelzParentID *channelz.Identifier + channelzParent channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -79,6 +79,7 @@ type dialOptions struct { resolvers []resolver.Builder idleTimeout time.Duration recvBufferPool SharedBufferPool + defaultScheme string } // DialOption configures how we set up the connection. @@ -154,9 +155,7 @@ func WithSharedWriteBuffer(val bool) DialOption { } // WithWriteBufferSize determines how much data can be batched before doing a -// write on the wire. The corresponding memory allocation for this buffer will -// be twice the size to keep syscalls low. The default value for this buffer is -// 32KB. +// write on the wire. The default value for this buffer is 32KB. // // Zero or negative values will disable the write buffer such that each write // will be on underlying connection. 
Note: A Send call may not directly @@ -301,6 +300,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -315,10 +317,8 @@ func WithBlock() DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithReturnConnectionError() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -388,8 +388,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext instead of Dial and context.WithTimeout -// instead. Will be supported throughout 1.x. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -471,9 +471,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// Deprecated: this DialOption is not supported by NewClient. +// This API may be changed or removed in a // later release. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { @@ -555,9 +554,9 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id *channelz.Identifier) DialOption { +func WithChannelzParentID(c channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.channelzParentID = id + o.channelzParent = c }) } @@ -602,12 +601,22 @@ func WithDisableRetry() DialOption { }) } +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +type MaxHeaderListSizeDialOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) { + do.copts.MaxHeaderListSize = &o.MaxHeaderListSize +} + // WithMaxHeaderListSize returns a DialOption that specifies the maximum // (uncompressed) size of header list that the client is prepared to accept. 
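The blocking-related DialOptions above are re-documented as deprecated because NewClient never connects eagerly. A hedged sketch of the usual replacement, using the experimental connectivity API to wait for READY explicitly:

package main

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	cc, err := grpc.NewClient("dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cc.Connect() // kick the channel out of IDLE, as WithBlock used to
	for s := cc.GetState(); s != connectivity.Ready; s = cc.GetState() {
		if !cc.WaitForStateChange(ctx, s) {
			return // context expired before the channel became READY
		}
	}
}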
func WithMaxHeaderListSize(s uint32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.MaxHeaderListSize = &s - }) + return MaxHeaderListSizeDialOption{ + MaxHeaderListSize: s, + } } // WithDisableHealthCheck disables the LB channel health checking for all @@ -645,10 +654,11 @@ func defaultDialOptions() dialOptions { healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, recvBufferPool: nopBufferPool{}, + defaultScheme: "dns", } } -// withGetMinConnectDeadline specifies the function that clientconn uses to +// withMinConnectDeadline specifies the function that clientconn uses to // get minConnectDeadline. This can be used to make connection attempts happen // faster/slower. // @@ -659,6 +669,14 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } +// withDefaultScheme is used to allow Dial to use "passthrough" as the default +// name resolver, while NewClient uses "dns" otherwise. +func withDefaultScheme(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultScheme = s + }) +} + // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go new file mode 100644 index 00000000..13821a92 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package gracefulswitch + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + childBuilder balancer.Builder + childConfig serviceconfig.LoadBalancingConfig +} + +func ChildName(l serviceconfig.LoadBalancingConfig) string { + return l.(*lbConfig).childBuilder.Name() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. 
+func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg []map[string]json.RawMessage + if err := json.Unmarshal(cfg, &lbCfg); err != nil { + return nil, err + } + for i, e := range lbCfg { + if len(e) != 1 { + return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i) + } + + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range e { + } + + builder := balancer.Get(name) + if builder == nil { + // Skip unregistered balancer names. + continue + } + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // This is a valid child with no config. + return &lbConfig{childBuilder: builder}, nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) + } + return &lbConfig{childBuilder: builder, childConfig: cfg}, nil + } + + return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg)) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 3c594e6e..73bb4c4e 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -94,14 +94,23 @@ func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { // process is not complete when this method returns. This method must be called // synchronously alongside the rest of the balancer.Balancer methods this // Graceful Switch Balancer implements. +// +// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState +// to cause the Balancer to automatically change to the new child when necessary. func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + _, err := gsb.switchTo(builder) + return err +} + +func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) { gsb.mu.Lock() if gsb.closed { gsb.mu.Unlock() - return errBalancerClosed + return nil, errBalancerClosed } bw := &balancerWrapper{ - gsb: gsb, + builder: builder, + gsb: gsb, lastState: balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), @@ -129,7 +138,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { gsb.balancerCurrent = nil } gsb.mu.Unlock() - return balancer.ErrBadResolverState + return nil, balancer.ErrBadResolverState } // This write doesn't need to take gsb.mu because this field never gets read @@ -138,7 +147,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { // bw.Balancer field will never be forwarded to until this SwitchTo() // function returns. bw.Balancer = newBalancer - return nil + return bw, nil } // Returns nil if the graceful switch balancer is closed. @@ -152,12 +161,32 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper { } // UpdateClientConnState forwards the update to the latest balancer created. +// +// If the state's BalancerConfig is the config returned by a call to +// gracefulswitch.ParseConfig, then this function will automatically SwitchTo +// the balancer indicated by the config before forwarding its config to it, if +// necessary. func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { // The resolver data is only relevant to the most recent LB Policy. 
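ParseConfig above scans the "loadBalancingConfig" JSON list and selects the first entry whose policy name is registered; every entry must be a single-key object. The selection rule restated as a runnable sketch:

package main

import (
	"encoding/json"
	"fmt"
)

// firstSupported mirrors ParseConfig's name selection: entries are
// single-key objects, unregistered policy names are skipped, and the first
// registered name wins.
func firstSupported(cfg json.RawMessage, registered map[string]bool) (string, error) {
	var list []map[string]json.RawMessage
	if err := json.Unmarshal(cfg, &list); err != nil {
		return "", err
	}
	for i, e := range list {
		if len(e) != 1 {
			return "", fmt.Errorf("entry %d must have exactly one policy", i)
		}
		for name := range e {
			if registered[name] {
				return name, nil
			}
		}
	}
	return "", fmt.Errorf("no supported policies found")
}

func main() {
	cfg := json.RawMessage(`[{"made_up_policy":{}},{"round_robin":{}}]`)
	name, err := firstSupported(cfg, map[string]bool{"round_robin": true, "pick_first": true})
	fmt.Println(name, err) // round_robin <nil>
}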
balToUpdate := gsb.latestBalancer() + gsbCfg, ok := state.BalancerConfig.(*lbConfig) + if ok { + // Switch to the child in the config unless it is already active. + if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() { + var err error + balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder) + if err != nil { + return fmt.Errorf("could not switch to new child balancer: %w", err) + } + } + // Unwrap the child balancer's config. + state.BalancerConfig = gsbCfg.childConfig + } + if balToUpdate == nil { return errBalancerClosed } + // Perform this call without gsb.mu to prevent deadlocks if the child calls // back into the channel. The latest balancer can never be closed during a // call from the channel, even without gsb.mu held. @@ -169,6 +198,10 @@ func (gsb *Balancer) ResolverError(err error) { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() if balToUpdate == nil { + gsb.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) return } // Perform this call without gsb.mu to prevent deadlocks if the child calls @@ -261,7 +294,8 @@ func (gsb *Balancer) Close() { // graceful switch logic. type balancerWrapper struct { balancer.Balancer - gsb *Balancer + gsb *Balancer + builder balancer.Builder lastState balancer.State subconns map[balancer.SubConn]bool // subconns created by this balancer diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index e8456a77..aa4505a8 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -65,7 +65,7 @@ type TruncatingMethodLogger struct { callID uint64 idWithinCallGen *callIDGenerator - sink Sink // TODO(blog): make this plugable. + sink Sink // TODO(blog): make this pluggable. } // NewTruncatingMethodLogger returns a new truncating method logger. @@ -80,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, - sink: DefaultSink, // TODO(blog): make it plugable. + sink: DefaultSink, // TODO(blog): make it pluggable. } } @@ -397,7 +397,7 @@ func metadataKeyOmit(key string) bool { switch key { case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users. return false } return strings.HasPrefix(key, "grpc-") diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go new file mode 100644 index 00000000..d7e9e1d5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" + + "google.golang.org/grpc/connectivity" +) + +// Channel represents a channel within channelz, which includes metrics and +// internal channelz data, such as channelz id, child list, etc. +type Channel struct { + Entity + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + Parent *Channel + trace *ChannelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +// Implemented to make Channel implement the Identifier interface used for +// nesting. +func (c *Channel) channelzIdentifier() {} + +func (c *Channel) String() string { + if c.Parent == nil { + return fmt.Sprintf("Channel #%d", c.ID) + } + return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID) +} + +func (c *Channel) id() int64 { + return c.ID +} + +func (c *Channel) SubChans() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(c.subChans) +} + +func (c *Channel) NestedChans() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(c.nestedChans) +} + +func (c *Channel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return c.trace.copy() +} + +type ChannelMetrics struct { + // The current connectivity state of the channel. + State atomic.Pointer[connectivity.State] + // The target this channel originally tried to connect to. May be absent + Target atomic.Pointer[string] + // The number of calls started on the channel. + CallsStarted atomic.Int64 + // The number of calls that have completed with an OK status. + CallsSucceeded atomic.Int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed atomic.Int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp atomic.Int64 +} + +// CopyFrom copies the metrics in o to c. For testing only. +func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) { + c.State.Store(o.State.Load()) + c.Target.Store(o.Target.Load()) + c.CallsStarted.Store(o.CallsStarted.Load()) + c.CallsSucceeded.Store(o.CallsSucceeded.Load()) + c.CallsFailed.Store(o.CallsFailed.Load()) + c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// Equal returns true iff the metrics of c are the same as the metrics of o. +// For testing only. 
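ChannelMetrics keeps the connectivity state behind an atomic.Pointer, which is what the updateState hunks earlier in this diff store into, so readers such as the channelz service never contend with the RPC path. Minimal sketch:

package main

import (
	"fmt"
	"sync/atomic"

	"google.golang.org/grpc/connectivity"
)

func main() {
	var state atomic.Pointer[connectivity.State]

	s := connectivity.Connecting
	state.Store(&s) // what updateState now does via ChannelMetrics.State

	if p := state.Load(); p != nil {
		fmt.Println("current state:", *p) // current state: CONNECTING
	}
}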
+func (c *ChannelMetrics) Equal(o any) bool { + oc, ok := o.(*ChannelMetrics) + if !ok { + return false + } + if (c.State.Load() == nil) != (oc.State.Load() == nil) { + return false + } + if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() { + return false + } + if (c.Target.Load() == nil) != (oc.Target.Load() == nil) { + return false + } + if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() { + return false + } + return c.CallsStarted.Load() == oc.CallsStarted.Load() && + c.CallsFailed.Load() == oc.CallsFailed.Load() && + c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() && + c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load() +} + +func strFromPointer(s *string) string { + if s == nil { + return "" + } + return *s +} + +func (c *ChannelMetrics) String() string { + return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", + c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), + ) +} + +func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { + c := &ChannelMetrics{} + c.State.Store(&state) + c.Target.Store(&target) + c.CallsStarted.Store(started) + c.CallsSucceeded.Store(succeeded) + c.CallsFailed.Store(failed) + c.LastCallStartedTimestamp.Store(timestamp) + return c +} + +func (c *Channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *SubChannel: + c.subChans[id] = v.RefName + case *Channel: + c.nestedChans[id] = v.RefName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *Channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *Channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *Channel) getParentID() int64 { + if c.Parent == nil { + return -1 + } + return c.Parent.ID +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *Channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.Parent != nil { + c.Parent.deleteChild(c.ID) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. 
+// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *Channel) deleteSelfFromMap() (delete bool) { + return c.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. +func (c *Channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + db.deleteEntry(c.ID) + c.trace.clear() +} + +func (c *Channel) getChannelTrace() *ChannelTrace { + return c.trace +} + +func (c *Channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *Channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *Channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *Channel) getRefName() string { + return c.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go new file mode 100644 index 00000000..dfe18b08 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -0,0 +1,402 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sort" + "sync" + "time" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if + // child list is not empty, then deletion from the database is on hold until + // the last child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, + // and whether child list is now empty. If both conditions are met, then + // delete self from database. + deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 + Entity +} + +// channelMap is the storage data structure for channelz. +// +// Methods of channelMap can be divided in two two categories with respect to +// locking. +// +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// +// A second type of method need always to be called inside a first type of method. 
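deleteSelfIfReady formalizes a two-phase teardown: an entity leaves its parent's tree only after triggerDelete has been called and it has no children, and leaves the map only once its trace reference count drops to zero. The gating conditions, sketched with a hypothetical node type:

package main

import "fmt"

// node is a hypothetical mirror of the channelz two-phase delete: leave
// the parent tree only once close was requested and no children remain,
// and leave the map only once no trace event still references the entity.
type node struct {
	closeCalled bool
	children    int
	traceRefs   int
}

func (n *node) deleteSelfIfReady() (fromTree, fromMap bool) {
	if !n.closeCalled || n.children != 0 {
		return false, false
	}
	return true, n.traceRefs == 0
}

func main() {
	n := &node{closeCalled: true, children: 0, traceRefs: 1}
	fmt.Println(n.deleteSelfIfReady()) // true false: kept alive by a trace ref
}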
+type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + channels map[int64]*Channel + subChannels map[int64]*SubChannel + sockets map[int64]*Socket + servers map[int64]*Server +} + +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*Channel), + subChannels: make(map[int64]*SubChannel), + sockets: make(map[int64]*Socket), + servers: make(map[int64]*Server), + } +} + +func (c *channelMap) addServer(id int64, s *Server) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.servers[id] = s +} + +func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else if p := c.channels[pid]; p != nil { + p.addChild(id, cn) + } else { + logger.Infof("channel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + sc.trace.cm = c + c.subChannels[id] = sc + if p := c.channels[pid]; p != nil { + p.addChild(id, sc) + } else { + logger.Infof("subchannel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSocket(s *Socket) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.sockets[s.ID] = s + if s.Parent == nil { + logger.Infof("normal socket %d has no parent", s.ID) + } + s.Parent.(entry).addChild(s.ID, s) +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the +// entry, if it has to wait on the deletion of its children and until no other +// entity's channel trace references it. It may lead to a chain of entry +// deletion. For example, deleting the last socket of a gracefully shutting down +// server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + defer c.mu.Unlock() + c.findEntry(id).triggerDelete() +} + +// tracedChannel represents tracing operations which are present on both +// channels and subChannels. +type tracedChannel interface { + getChannelTrace() *ChannelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + if v, ok := c.channels[id]; ok { + return v + } + if v, ok := c.subChannels[id]; ok { + return v + } + if v, ok := c.servers[id]; ok { + return v + } + if v, ok := c.sockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// +// deleteEntry deletes an entry from the channelMap. Before calling this method, +// caller must check this entry is ready to be deleted, i.e removeEntry() has +// been called on it, and no children still exist. 
+func (c *channelMap) deleteEntry(id int64) entry { + if v, ok := c.sockets[id]; ok { + delete(c.sockets, id) + return v + } + if v, ok := c.subChannels[id]; ok { + delete(c.subChannels, id) + return v + } + if v, ok := c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return v + } + if v, ok := c.servers[id]; ok { + delete(c.servers, id) + return v + } + return &dummyEntry{idNotFound: id} +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEvent) { + c.mu.Lock() + defer c.mu.Unlock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + return + } + childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *Channel: + chanType = RefChannel + case *SubChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&traceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var t []*Channel + for _, v := range ids[idx:] { + if len(t) == maxResults { + end = false + break + } + if cn, ok := c.channels[v]; ok { + t = append(t, cn) + } + } + return t, end +} + +func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + ids := make([]int64, 0, len(c.servers)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var s []*Server + for _, v := range ids[idx:] { + if len(s) == maxResults { + end = false + break + } + if svr, ok := c.servers[v]; ok { + s = append(s, svr) + } + } + return s, end +} + +func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + svr, ok := c.servers[id] + if !ok { + // server with id doesn't exist. 
+ return nil, true + } + svrskts := svr.sockets + ids := make([]int64, 0, len(svrskts)) + sks := make([]*Socket, 0, min(len(svrskts), maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + end := true + for _, v := range ids[idx:] { + if len(sks) == maxResults { + end = false + break + } + if ns, ok := c.sockets[v]; ok { + sks = append(sks, ns) + } + } + return sks, end +} + +func (c *channelMap) getChannel(id int64) *Channel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.channels[id] +} + +func (c *channelMap) getSubChannel(id int64) *SubChannel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.subChannels[id] +} + +func (c *channelMap) getSocket(id int64) *Socket { + c.mu.RLock() + defer c.mu.RUnlock() + return c.sockets[id] +} + +func (c *channelMap) getServer(id int64) *Server { + c.mu.RLock() + defer c.mu.RUnlock() + return c.servers[id] +} + +type dummyEntry struct { + // dummyEntry is a fake entry to handle entry not found case. + idNotFound int64 + Entity +} + +func (d *dummyEntry) String() string { + return fmt.Sprintf("non-existent entity #%d", d.idNotFound) +} + +func (d *dummyEntry) ID() int64 { return d.idNotFound } + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race + // condition. For example, there could be a race between ClientConn.Close() + // info being propagated to addrConn and http2Client. ClientConn.Close() + // cancel the context and result in http2Client to error. The error info is + // then caught by transport monitor and before addrConn.tearDown() is called + // in side ClientConn.Close(). Therefore, the addrConn will create a new + // transport. And when registering the new transport in channelz, its parent + // addrConn could have already been torn down and deleted from channelz + // tracking, and thus reach the code here. + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// Entity is implemented by all channelz types. +type Entity interface { + isEntity() + fmt.Stringer + id() int64 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index fc094f34..03e24e15 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -16,47 +16,32 @@ * */ -// Package channelz defines APIs for enabling channelz service, entry +// Package channelz defines internal APIs for enabling channelz service, entry // registration/deletion, and accessing channelz data. It also defines channelz // metric struct formats. -// -// All APIs in this package are experimental. 
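The paginated getters above (getTopChannels, getServers, getServerSockets) share one pattern: sort the ids, binary-search for the first id at or above the caller's cursor, stop at maxResults, and report whether the listing is complete. The pattern in isolation:

package main

import (
	"fmt"
	"sort"
)

// page mirrors the channelz pagination: collect and sort ids, search for
// the first id >= start, cap the result at max, and report "end" so the
// caller knows whether to page again from the last id + 1.
func page(ids []int64, start int64, max int) ([]int64, bool) {
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= start })
	end := true
	var out []int64
	for _, id := range ids[idx:] {
		if len(out) == max {
			end = false
			break
		}
		out = append(out, id)
	}
	return out, end
}

func main() {
	ids := []int64{9, 3, 5, 1, 7}
	fmt.Println(page(ids, 4, 2)) // [5 7] false
}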
package channelz import ( - "errors" - "sort" - "sync" "sync/atomic" "time" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" ) -const ( - defaultMaxTraceEntry int32 = 30 -) - var ( // IDGen is the global channelz entity ID generator. It should not be used // outside this package except by tests. IDGen IDGenerator - db dbWrapper - // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = int64(50) - curState int32 - maxTraceEntry = defaultMaxTraceEntry + db *channelMap = newChannelMap() + // EntriesPerPage defines the number of channelz entries to be shown on a web page. + EntriesPerPage = 50 + curState int32 ) // TurnOn turns on channelz data collection. func TurnOn() { - if !IsOn() { - db.set(newChannelMap()) - IDGen.Reset() - atomic.StoreInt32(&curState, 1) - } + atomic.StoreInt32(&curState, 1) } func init() { @@ -70,49 +55,15 @@ func IsOn() bool { return atomic.LoadInt32(&curState) == 1 } -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). -// Setting it to 0 will disable channel tracing. -func SetMaxTraceEntry(i int32) { - atomic.StoreInt32(&maxTraceEntry, i) -} - -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. -func ResetMaxTraceEntryToDefault() { - atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) -} - -func getMaxTraceEntry() int { - i := atomic.LoadInt32(&maxTraceEntry) - return int(i) -} - -// dbWarpper wraps around a reference to internal channelz data storage, and -// provide synchronized functionality to set and get the reference. -type dbWrapper struct { - mu sync.RWMutex - DB *channelMap -} - -func (d *dbWrapper) set(db *channelMap) { - d.mu.Lock() - d.DB = db - d.mu.Unlock() -} - -func (d *dbWrapper) get() *channelMap { - d.mu.RLock() - defer d.mu.RUnlock() - return d.DB -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // -// The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id, maxResults) +// The arg id specifies that only top channel with id at or above it will be +// included in the result. The returned slice is up to a length of the arg +// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending +// id order. +func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) { + return db.getTopChannels(id, maxResults) } // GetServers returns a slice of server's ServerMetric, along with a @@ -120,73 +71,69 @@ func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { // // The arg id specifies that only server with id at or above it will be included // in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id, maxResults) +// EntriesPerPage if maxResults is zero, and is sorted in ascending id order. 
+func GetServers(id int64, maxResults int) ([]*Server, bool) { + return db.getServers(id, maxResults) } // GetServerSockets returns a slice of server's (identified by id) normal socket's -// SocketMetric, along with a boolean indicating whether there's more sockets to +// SocketMetrics, along with a boolean indicating whether there's more sockets to // be queried for. // // The arg startID specifies that only sockets with id at or above it will be // included in the result. The returned slice is up to a length of the arg maxResults -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID, maxResults) +// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + return db.getServerSockets(id, startID, maxResults) } -// GetChannel returns the ChannelMetric for the channel (identified by id). -func GetChannel(id int64) *ChannelMetric { - return db.get().GetChannel(id) +// GetChannel returns the Channel for the channel (identified by id). +func GetChannel(id int64) *Channel { + return db.getChannel(id) } -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). -func GetSubChannel(id int64) *SubChannelMetric { - return db.get().GetSubChannel(id) +// GetSubChannel returns the SubChannel for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannel { + return db.getSubChannel(id) } -// GetSocket returns the SocketInternalMetric for the socket (identified by id). -func GetSocket(id int64) *SocketMetric { - return db.get().GetSocket(id) +// GetSocket returns the Socket for the socket (identified by id). +func GetSocket(id int64) *Socket { + return db.getSocket(id) } // GetServer returns the ServerMetric for the server (identified by id). -func GetServer(id int64) *ServerMetric { - return db.get().GetServer(id) +func GetServer(id int64) *Server { + return db.getServer(id) } // RegisterChannel registers the given channel c in the channelz database with -// ref as its reference name, and adds it to the child list of its parent -// (identified by pid). pid == nil means no parent. +// target as its target and reference name, and adds it to the child list of its +// parent. parent == nil means no parent. // // Returns a unique channelz identifier assigned to this channel. // // If channelz is not turned ON, the channelz database is not mutated. 
-func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { +func RegisterChannel(parent *Channel, target string) *Channel { id := IDGen.genID() - var parent int64 - isTopChannel := true - if pid != nil { - isTopChannel = false - parent = pid.Int() - } if !IsOn() { - return newIdentifer(RefChannel, id, pid) + return &Channel{ID: id} } - cn := &channel{ - refName: ref, - c: c, - subChans: make(map[int64]string), + isTopChannel := parent == nil + + cn := &Channel{ + ID: id, + RefName: target, nestedChans: make(map[int64]string), - id: id, - pid: parent, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + subChans: make(map[int64]string), + Parent: parent, + trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, } - db.get().addChannel(id, cn, isTopChannel, parent) - return newIdentifer(RefChannel, id, pid) + cn.ChannelMetrics.Target.Store(&target) + db.addChannel(id, cn, isTopChannel, cn.getParentID()) + return cn } // RegisterSubChannel registers the given subChannel c in the channelz database @@ -196,555 +143,67 @@ func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { // Returns a unique channelz identifier assigned to this subChannel. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a SubChannel's parent id cannot be nil") - } +func RegisterSubChannel(parent *Channel, ref string) *SubChannel { id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefSubChannel, id, pid), nil + sc := &SubChannel{ + ID: id, + RefName: ref, + parent: parent, } - sc := &subChannel{ - refName: ref, - c: c, - sockets: make(map[int64]string), - id: id, - pid: pid.Int(), - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + if !IsOn() { + return sc } - db.get().addSubChannel(id, sc, pid.Int()) - return newIdentifer(RefSubChannel, id, pid), nil + + sc.sockets = make(map[int64]string) + sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())} + db.addSubChannel(id, sc, parent.ID) + return sc } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterServer(s Server, ref string) *Identifier { +func RegisterServer(ref string) *Server { id := IDGen.genID() if !IsOn() { - return newIdentifer(RefServer, id, nil) + return &Server{ID: id} } - svr := &server{ - refName: ref, - s: s, + svr := &Server{ + RefName: ref, sockets: make(map[int64]string), listenSockets: make(map[int64]string), - id: id, - } - db.get().addServer(id, svr) - return newIdentifer(RefServer, id, nil) -} - -// RegisterListenSocket registers the given listen socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this listen socket. -// -// If channelz is not turned ON, the channelz database is not mutated. 
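The registration functions now return the entity itself, and subchannel registration no longer returns an error. A fragment showing the new flow as grpc itself uses it; the channelz package is internal, so this compiles only inside the grpc module:

package sketch

// Compiles only inside the grpc module: this package is internal.
import "google.golang.org/grpc/internal/channelz"

func register(target string) {
	ch := channelz.RegisterChannel(nil, target) // nil parent => top-level channel
	sc := channelz.RegisterSubChannel(ch, "")

	ch.ChannelMetrics.CallsStarted.Add(1) // metrics live on the entity now

	// Deletion order mirrors tearDown: children first, by plain int64 ID.
	channelz.RemoveEntry(sc.ID)
	channelz.RemoveEntry(ch.ID)
}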
-func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a ListenSocket's parent id cannot be 0") + ID: id, } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefListenSocket, id, pid), nil - } - - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addListenSocket(id, ls, pid.Int()) - return newIdentifer(RefListenSocket, id, pid), nil + db.addServer(id, svr) + return svr } -// RegisterNormalSocket registers the given normal socket s in channelz database +// RegisterSocket registers the given normal socket s in channelz database // with ref as its reference name, and adds it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this normal socket. +// (identified by skt.Parent, which must be set). It returns the unique channelz +// tracking id assigned to this normal socket. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a NormalSocket's parent id cannot be 0") - } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefNormalSocket, id, pid), nil +func RegisterSocket(skt *Socket) *Socket { + skt.ID = IDGen.genID() + if IsOn() { + db.addSocket(skt) } - - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addNormalSocket(id, ns, pid.Int()) - return newIdentifer(RefNormalSocket, id, pid), nil + return skt } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. // // If channelz is not turned ON, this function is a no-op. -func RemoveEntry(id *Identifier) { +func RemoveEntry(id int64) { if !IsOn() { return } - db.get().removeEntry(id.Int()) -} - -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe -// the event to be added to the channel trace. -// -// The Parent field is optional. It is used for an event that will be recorded -// in the entity's parent trace. -type TraceEventDesc struct { - Desc string - Severity Severity - Parent *TraceEventDesc -} - -// AddTraceEvent adds trace related to the entity with specified id, using the -// provided TraceEventDesc. -// -// If channelz is not turned ON, this will simply log the event descriptions. -func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { - // Log only the trace description associated with the bottom most entity. - switch desc.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, withParens(id)+desc.Desc) - case CtWarning: - l.WarningDepth(depth+1, withParens(id)+desc.Desc) - case CtError: - l.ErrorDepth(depth+1, withParens(id)+desc.Desc) - } - - if getMaxTraceEntry() == 0 { - return - } - if IsOn() { - db.get().traceEvent(id.Int(), desc) - } -} - -// channelMap is the storage data structure for channelz. -// Methods of channelMap can be divided in two two categories with respect to locking. -// 1. Methods acquire the global lock. -// 2. Methods that can only be called when global lock is held. -// A second type of method need always to be called inside a first type of method. 
-type channelMap struct { - mu sync.RWMutex - topLevelChannels map[int64]struct{} - servers map[int64]*server - channels map[int64]*channel - subChannels map[int64]*subChannel - listenSockets map[int64]*listenSocket - normalSockets map[int64]*normalSocket -} - -func newChannelMap() *channelMap { - return &channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - } -} - -func (c *channelMap) addServer(id int64, s *server) { - c.mu.Lock() - s.cm = c - c.servers[id] = s - c.mu.Unlock() -} - -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { - c.mu.Lock() - cn.cm = c - cn.trace.cm = c - c.channels[id] = cn - if isTopChannel { - c.topLevelChannels[id] = struct{}{} - } else { - c.findEntry(pid).addChild(id, cn) - } - c.mu.Unlock() -} - -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { - c.mu.Lock() - sc.cm = c - sc.trace.cm = c - c.subChannels[id] = sc - c.findEntry(pid).addChild(id, sc) - c.mu.Unlock() -} - -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { - c.mu.Lock() - ls.cm = c - c.listenSockets[id] = ls - c.findEntry(pid).addChild(id, ls) - c.mu.Unlock() -} - -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { - c.mu.Lock() - ns.cm = c - c.normalSockets[id] = ns - c.findEntry(pid).addChild(id, ns) - c.mu.Unlock() -} - -// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to -// wait on the deletion of its children and until no other entity's channel trace references it. -// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully -// shutting down server will lead to the server being also deleted. -func (c *channelMap) removeEntry(id int64) { - c.mu.Lock() - c.findEntry(id).triggerDelete() - c.mu.Unlock() -} - -// c.mu must be held by the caller -func (c *channelMap) decrTraceRefCount(id int64) { - e := c.findEntry(id) - if v, ok := e.(tracedChannel); ok { - v.decrTraceRefCount() - e.deleteSelfIfReady() - } -} - -// c.mu must be held by the caller. -func (c *channelMap) findEntry(id int64) entry { - var v entry - var ok bool - if v, ok = c.channels[id]; ok { - return v - } - if v, ok = c.subChannels[id]; ok { - return v - } - if v, ok = c.servers[id]; ok { - return v - } - if v, ok = c.listenSockets[id]; ok { - return v - } - if v, ok = c.normalSockets[id]; ok { - return v - } - return &dummyEntry{idNotFound: id} -} - -// c.mu must be held by the caller -// deleteEntry simply deletes an entry from the channelMap. Before calling this -// method, caller must check this entry is ready to be deleted, i.e removeEntry() -// has been called on it, and no children still exist. -// Conditionals are ordered by the expected frequency of deletion of each entity -// type, in order to optimize performance. 
-func (c *channelMap) deleteEntry(id int64) { - var ok bool - if _, ok = c.normalSockets[id]; ok { - delete(c.normalSockets, id) - return - } - if _, ok = c.subChannels[id]; ok { - delete(c.subChannels, id) - return - } - if _, ok = c.channels[id]; ok { - delete(c.channels, id) - delete(c.topLevelChannels, id) - return - } - if _, ok = c.listenSockets[id]; ok { - delete(c.listenSockets, id) - return - } - if _, ok = c.servers[id]; ok { - delete(c.servers, id) - return - } -} - -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { - c.mu.Lock() - child := c.findEntry(id) - childTC, ok := child.(tracedChannel) - if !ok { - c.mu.Unlock() - return - } - childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) - if desc.Parent != nil { - parent := c.findEntry(child.getParentID()) - var chanType RefChannelType - switch child.(type) { - case *channel: - chanType = RefChannel - case *subChannel: - chanType = RefSubChannel - } - if parentTC, ok := parent.(tracedChannel); ok { - parentTC.getChannelTrace().append(&TraceEvent{ - Desc: desc.Parent.Desc, - Severity: desc.Parent.Severity, - Timestamp: time.Now(), - RefID: id, - RefName: childTC.getRefName(), - RefType: chanType, - }) - childTC.incrTraceRefCount() - } - } - c.mu.Unlock() -} - -type int64Slice []int64 - -func (s int64Slice) Len() int { return len(s) } -func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } - -func copyMap(m map[int64]string) map[int64]string { - n := make(map[int64]string) - for k, v := range m { - n[k] = v - } - return n -} - -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} - -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.topLevelChannels)) - ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, maxResults)) - - for k := range c.topLevelChannels { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var t []*ChannelMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if cn, ok := c.channels[v]; ok { - cns = append(cns, cn) - t = append(t, &ChannelMetric{ - NestedChans: copyMap(cn.nestedChans), - SubChans: copyMap(cn.subChans), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, cn := range cns { - t[i].ChannelData = cn.c.ChannelzMetric() - t[i].ID = cn.id - t[i].RefName = cn.refName - t[i].Trace = cn.trace.dumpData() - } - return t, end -} - -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.servers)) - ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, maxResults)) - for k := range c.servers { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var s []*ServerMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if svr, ok := c.servers[v]; ok { - ss = append(ss, svr) - s = append(s, &ServerMetric{ - ListenSockets: copyMap(svr.listenSockets), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() 
- if count == 0 { - end = true - } - - for i, svr := range ss { - s[i].ServerData = svr.s.ChannelzMetric() - s[i].ID = svr.id - s[i].RefName = svr.refName - } - return s, end -} - -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - // server with id doesn't exist. - c.mu.RUnlock() - return nil, true - } - svrskts := svr.sockets - l := int64(len(svrskts)) - ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, maxResults)) - for k := range svrskts { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) - count := int64(0) - var end bool - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if ns, ok := c.normalSockets[v]; ok { - sks = append(sks, ns) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - s := make([]*SocketMetric, 0, len(sks)) - for _, ns := range sks { - sm := &SocketMetric{} - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - s = append(s, sm) - } - return s, end -} - -func (c *channelMap) GetChannel(id int64) *ChannelMetric { - cm := &ChannelMetric{} - var cn *channel - var ok bool - c.mu.RLock() - if cn, ok = c.channels[id]; !ok { - // channel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.NestedChans = copyMap(cn.nestedChans) - cm.SubChans = copyMap(cn.subChans) - // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when - // holding the lock to prevent potential data race. - chanCopy := cn.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = cn.id - cm.RefName = cn.refName - cm.Trace = cn.trace.dumpData() - return cm -} - -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { - cm := &SubChannelMetric{} - var sc *subChannel - var ok bool - c.mu.RLock() - if sc, ok = c.subChannels[id]; !ok { - // subchannel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.Sockets = copyMap(sc.sockets) - // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when - // holding the lock to prevent potential data race. - chanCopy := sc.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = sc.id - cm.RefName = sc.refName - cm.Trace = sc.trace.dumpData() - return cm -} - -func (c *channelMap) GetSocket(id int64) *SocketMetric { - sm := &SocketMetric{} - c.mu.RLock() - if ls, ok := c.listenSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ls.s.ChannelzMetric() - sm.ID = ls.id - sm.RefName = ls.refName - return sm - } - if ns, ok := c.normalSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - return sm - } - c.mu.RUnlock() - return nil -} - -func (c *channelMap) GetServer(id int64) *ServerMetric { - sm := &ServerMetric{} - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - c.mu.RUnlock() - return nil - } - sm.ListenSockets = copyMap(svr.listenSockets) - c.mu.RUnlock() - sm.ID = svr.id - sm.RefName = svr.refName - sm.ServerData = svr.s.ChannelzMetric() - return sm + db.removeEntry(id) } // IDGenerator is an incrementing atomic that tracks IDs for channelz entities. 
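On the server side the pattern is the same: listen and normal sockets collapse into a single Socket struct distinguished by SocketType, and lookup and teardown are keyed by plain int64 IDs. A hedged sketch using only names from this diff (the ref strings and address are invented, and the snippet again assumes it runs inside the grpc module):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	srv := channelz.RegisterServer("grpc-server-0")

	// One Socket type covers both kinds; SocketType picks the child list
	// (listenSockets vs. sockets) the server files the entry under.
	ls := channelz.RegisterSocket(&channelz.Socket{
		SocketType: channelz.SocketTypeListen,
		Parent:     srv,
		RefName:    "tcp-[::]:50051",
	})
	conn := channelz.RegisterSocket(&channelz.Socket{
		SocketType: channelz.SocketTypeNormal,
		Parent:     srv,
		RefName:    "conn-1",
	})

	// Pagination returns entities directly; maxResults == 0 falls back to
	// EntriesPerPage, and the bool reports whether the listing is complete.
	sockets, end := channelz.GetServerSockets(srv.ID, 0, 0)
	fmt.Println(len(sockets), end) // normal sockets only; listen sockets are tracked separately

	// RemoveEntry is int64-keyed. Calling it on the server only marks it
	// closed; the entry is reaped once its last child socket is removed.
	channelz.RemoveEntry(srv.ID)
	channelz.RemoveEntry(conn.ID)
	channelz.RemoveEntry(ls.ID)
}
```
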
@@ -761,3 +220,11 @@ func (i *IDGenerator) Reset() { func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } + +// Identifier is an opaque channelz identifier used to expose channelz symbols +// outside of grpc. Currently only implemented by Channel since no other +// types require exposure outside grpc. +type Identifier interface { + Entity + channelzIdentifier() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go deleted file mode 100644 index c9a27acd..00000000 --- a/vendor/google.golang.org/grpc/internal/channelz/id.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import "fmt" - -// Identifier is an opaque identifier which uniquely identifies an entity in the -// channelz database. -type Identifier struct { - typ RefChannelType - id int64 - str string - pid *Identifier -} - -// Type returns the entity type corresponding to id. -func (id *Identifier) Type() RefChannelType { - return id.typ -} - -// Int returns the integer identifier corresponding to id. -func (id *Identifier) Int() int64 { - return id.id -} - -// String returns a string representation of the entity corresponding to id. -// -// This includes some information about the parent as well. Examples: -// Top-level channel: [Channel #channel-number] -// Nested channel: [Channel #parent-channel-number Channel #channel-number] -// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] -func (id *Identifier) String() string { - return id.str -} - -// Equal returns true if other is the same as id. -func (id *Identifier) Equal(other *Identifier) bool { - if (id != nil) != (other != nil) { - return false - } - if id == nil && other == nil { - return true - } - return id.typ == other.typ && id.id == other.id && id.pid == other.pid -} - -// NewIdentifierForTesting returns a new opaque identifier to be used only for -// testing purposes. -func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { - return newIdentifer(typ, id, pid) -} - -func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { - str := fmt.Sprintf("%s #%d", typ, id) - if pid != nil { - str = fmt.Sprintf("%s %s", pid, str) - } - return &Identifier{typ: typ, id: id, str: str, pid: pid} -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index f89e6f77..ee4d7212 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,53 +26,49 @@ import ( var logger = grpclog.Component("channelz") -func withParens(id *Identifier) string { - return "[" + id.String() + "] " -} - // Info logs and adds a trace event if channelz is on. 
-func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
-	AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
 		Desc:     fmt.Sprint(args...),
 		Severity: CtInfo,
 	})
 }
 
 // Infof logs and adds a trace event if channelz is on.
-func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
-	AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
 		Desc:     fmt.Sprintf(format, args...),
 		Severity: CtInfo,
 	})
 }
 
 // Warning logs and adds a trace event if channelz is on.
-func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
-	AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
 		Desc:     fmt.Sprint(args...),
 		Severity: CtWarning,
 	})
 }
 
 // Warningf logs and adds a trace event if channelz is on.
-func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
-	AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
 		Desc:     fmt.Sprintf(format, args...),
 		Severity: CtWarning,
 	})
 }
 
 // Error logs and adds a trace event if channelz is on.
-func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
-	AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
 		Desc:     fmt.Sprint(args...),
 		Severity: CtError,
 	})
 }
 
 // Errorf logs and adds a trace event if channelz is on.
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
-	AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
 		Desc:     fmt.Sprintf(format, args...),
 		Severity: CtError,
 	})
diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go
new file mode 100644
index 00000000..cdfc49d6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/server.go
@@ -0,0 +1,119 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync/atomic"
+)
+
+// Server is the channelz representation of a server.
+type Server struct {
+	Entity
+	ID      int64
+	RefName string
+
+	ServerMetrics ServerMetrics
+
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	cm            *channelMap
+}
+
+// ServerMetrics defines a struct containing metrics for servers.
+type ServerMetrics struct {
+	// The number of incoming calls started on the server.
+	CallsStarted atomic.Int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of incoming calls that have completed with a non-OK status.
+ CallsFailed atomic.Int64 + // The last time a call was started on the server. + LastCallStartedTimestamp atomic.Int64 +} + +// NewServerMetricsForTesting returns an initialized ServerMetrics. +func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics { + sm := &ServerMetrics{} + sm.CallsStarted.Store(started) + sm.CallsSucceeded.Store(succeeded) + sm.CallsFailed.Store(failed) + sm.LastCallStartedTimestamp.Store(timestamp) + return sm +} + +func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { + sm.CallsStarted.Store(o.CallsStarted.Load()) + sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) + sm.CallsFailed.Store(o.CallsFailed.Load()) + sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// ListenSockets returns the listening sockets for s. +func (s *Server) ListenSockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(s.listenSockets) +} + +// String returns a printable description of s. +func (s *Server) String() string { + return fmt.Sprintf("Server #%d", s.ID) +} + +func (s *Server) id() int64 { + return s.ID +} + +func (s *Server) addChild(id int64, e entry) { + switch v := e.(type) { + case *Socket: + switch v.SocketType { + case SocketTypeNormal: + s.sockets[id] = v.RefName + case SocketTypeListen: + s.listenSockets[id] = v.RefName + } + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *Server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *Server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *Server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.ID) +} + +func (s *Server) getParentID() int64 { + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go new file mode 100644 index 00000000..fa64834b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/credentials" +) + +// SocketMetrics defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketMetrics struct { + // The number of streams that have been started. + StreamsStarted atomic.Int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded atomic.Int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. 
+ StreamsFailed atomic.Int64 + // The number of messages successfully sent on this socket. + MessagesSent atomic.Int64 + MessagesReceived atomic.Int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent atomic.Int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp atomic.Int64 + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp atomic.Int64 + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp atomic.Int64 + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp atomic.Int64 +} + +// EphemeralSocketMetrics are metrics that change rapidly and are tracked +// outside of channelz. +type EphemeralSocketMetrics struct { + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 +} + +type SocketType string + +const ( + SocketTypeNormal = "NormalSocket" + SocketTypeListen = "ListenSocket" +) + +type Socket struct { + Entity + SocketType SocketType + ID int64 + Parent Entity + cm *channelMap + SocketMetrics SocketMetrics + EphemeralMetrics func() *EphemeralSocketMetrics + + RefName string + // The locally bound address. Immutable. + LocalAddr net.Addr + // The remote bound address. May be absent. Immutable. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. Immutable. + RemoteName string + // Immutable. + SocketOptions *SocketOptionData + // Immutable. + Security credentials.ChannelzSecurityValue +} + +func (ls *Socket) String() string { + return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) +} + +func (ls *Socket) id() int64 { + return ls.ID +} + +func (ls *Socket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *Socket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *Socket) triggerDelete() { + ls.cm.deleteEntry(ls.ID) + ls.Parent.(entry).deleteChild(ls.ID) +} + +func (ls *Socket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *Socket) getParentID() int64 { + return ls.Parent.id() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go new file mode 100644 index 00000000..3b88e4cb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -0,0 +1,151 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// SubChannel is the channelz representation of a subchannel. +type SubChannel struct { + Entity + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + closeCalled bool + sockets map[int64]string + parent *Channel + trace *ChannelTrace + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +func (sc *SubChannel) String() string { + return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID) +} + +func (sc *SubChannel) id() int64 { + return sc.ID +} + +func (sc *SubChannel) Sockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(sc.sockets) +} + +func (sc *SubChannel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return sc.trace.copy() +} + +func (sc *SubChannel) addChild(id int64, e entry) { + if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal { + sc.sockets[id] = v.RefName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *SubChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) getParentID() int64 { + return sc.parent.ID +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *SubChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.parent.deleteChild(sc.ID) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *SubChannel) deleteSelfFromMap() (delete bool) { + return sc.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. 
delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *SubChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + db.deleteEntry(sc.ID) + sc.trace.clear() +} + +func (sc *SubChannel) getChannelTrace() *ChannelTrace { + return sc.trace +} + +func (sc *SubChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *SubChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *SubChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *SubChannel) getRefName() string { + return sc.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go similarity index 83% rename from vendor/google.golang.org/grpc/internal/channelz/types_linux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go index 1b1c4cce..5ac73ff8 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go @@ -49,3 +49,17 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { s.TCPInfo = v } } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go similarity index 90% rename from vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index 8b06eed1..d1ed8df6 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* * @@ -41,3 +40,8 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c any) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go new file mode 100644 index 00000000..36b86740 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package channelz + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var maxTraceEntry = defaultMaxTraceEntry + +// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e. +// channel/subchannel). Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per +// entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// traceEvent is an internal representation of a single trace event +type traceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// TraceEvent is what the caller of AddTraceEvent should provide to describe the +// event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEvent struct { + Desc string + Severity Severity + Parent *TraceEvent +} + +type ChannelTrace struct { + cm *channelMap + clearCalled bool + CreationTime time.Time + EventNum int64 + mu sync.Mutex + Events []*traceEvent +} + +func (c *ChannelTrace) copy() *ChannelTrace { + return &ChannelTrace{ + CreationTime: c.CreationTime, + EventNum: c.EventNum, + Events: append(([]*traceEvent)(nil), c.Events...), + } +} + +func (c *ChannelTrace) append(e *traceEvent) { + c.mu.Lock() + if len(c.Events) == getMaxTraceEntry() { + del := c.Events[0] + c.Events = c.Events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.Events = append(c.Events, e) + c.EventNum++ + c.mu.Unlock() +} + +func (c *ChannelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.Events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. 
+ CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { + // Log only the trace description associated with the bottom most entity. + d := fmt.Sprintf("[%s]%s", e, desc.Desc) + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d) + case CtWarning: + l.WarningDepth(depth+1, d) + case CtError: + l.ErrorDepth(depth+1, d) + } + + if getMaxTraceEntry() == 0 { + return + } + if IsOn() { + db.traceEvent(e.id(), desc) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go deleted file mode 100644 index 1d4020f5..00000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ /dev/null @@ -1,727 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "net" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" -) - -// entry represents a node in the channelz database. -type entry interface { - // addChild adds a child e, whose channelz id is id to child list - addChild(id int64, e entry) - // deleteChild deletes a child with channelz id to be id from child list - deleteChild(id int64) - // triggerDelete tries to delete self from channelz database. However, if child - // list is not empty, then deletion from the database is on hold until the last - // child is deleted from database. - triggerDelete() - // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child - // list is now empty. If both conditions are met, then delete self from database. - deleteSelfIfReady() - // getParentID returns parent ID of the entry. 0 value parent ID means no parent. 
- getParentID() int64 -} - -// dummyEntry is a fake entry to handle entry not found case. -type dummyEntry struct { - idNotFound int64 -} - -func (d *dummyEntry) addChild(id int64, e entry) { - // Note: It is possible for a normal program to reach here under race condition. - // For example, there could be a race between ClientConn.Close() info being propagated - // to addrConn and http2Client. ClientConn.Close() cancel the context and result - // in http2Client to error. The error info is then caught by transport monitor - // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, - // the addrConn will create a new transport. And when registering the new transport in - // channelz, its parent addrConn could have already been torn down and deleted - // from channelz tracking, and thus reach the code here. - logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) -} - -func (d *dummyEntry) deleteChild(id int64) { - // It is possible for a normal program to reach here under race condition. - // Refer to the example described in addChild(). - logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) -} - -func (d *dummyEntry) triggerDelete() { - logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) -} - -func (*dummyEntry) deleteSelfIfReady() { - // code should not reach here. deleteSelfIfReady is always called on an existing entry. -} - -func (*dummyEntry) getParentID() int64 { - return 0 -} - -// ChannelMetric defines the info channelz provides for a specific Channel, which -// includes ChannelInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ChannelMetric struct { - // ID is the channelz id of this channel. - ID int64 - // RefName is the human readable reference string of this channel. - RefName string - // ChannelData contains channel internal metric reported by the channel through - // ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this channel in the format of - // a map from nested channel channelz id to corresponding reference string. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this channel in the format of a - // map from subchannel channelz id to corresponding reference string. - SubChans map[int64]string - // Sockets tracks the socket type children of this channel in the format of a map - // from socket channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow channel having sockets directly, - // therefore, this is field is unused. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// SubChannelMetric defines the info channelz provides for a specific SubChannel, -// which includes ChannelInternalMetric and channelz-specific data, such as -// channelz id, child list, etc. -type SubChannelMetric struct { - // ID is the channelz id of this subchannel. - ID int64 - // RefName is the human readable reference string of this subchannel. - RefName string - // ChannelData contains subchannel internal metric reported by the subchannel - // through ChannelzMetric(). 
- ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this subchannel in the format of - // a map from nested channel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have nested channels - // as children, therefore, this field is unused. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this subchannel in the format of a - // map from subchannel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have subchannels - // as children, therefore, this field is unused. - SubChans map[int64]string - // Sockets tracks the socket type children of this subchannel in the format of a map - // from socket channelz id to corresponding reference string. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// ChannelInternalMetric defines the struct that the implementor of Channel interface -// should return from ChannelzMetric(). -type ChannelInternalMetric struct { - // current connectivity state of the channel. - State connectivity.State - // The target this channel originally tried to connect to. May be absent - Target string - // The number of calls started on the channel. - CallsStarted int64 - // The number of calls that have completed with an OK status. - CallsSucceeded int64 - // The number of calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the channel. - LastCallStartedTimestamp time.Time -} - -// ChannelTrace stores traced events on a channel/subchannel and related info. -type ChannelTrace struct { - // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) - EventNum int64 - // CreationTime is the creation time of the trace. - CreationTime time.Time - // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the - // oldest one) - Events []*TraceEvent -} - -// TraceEvent represent a single trace event -type TraceEvent struct { - // Desc is a simple description of the trace event. - Desc string - // Severity states the severity of this trace event. - Severity Severity - // Timestamp is the event time. - Timestamp time.Time - // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is - // involved in this event. - // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) - RefID int64 - // RefName is the reference name for the entity that gets referenced in the event. - RefName string - // RefType indicates the referenced entity type, i.e Channel or SubChannel. - RefType RefChannelType -} - -// Channel is the interface that should be satisfied in order to be tracked by -// channelz as Channel or SubChannel. -type Channel interface { - ChannelzMetric() *ChannelInternalMetric -} - -type dummyChannel struct{} - -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { - return &ChannelInternalMetric{} -} - -type channel struct { - refName string - c Channel - closeCalled bool - nestedChans map[int64]string - subChans map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - // traceRefCount is the number of trace events that reference this channel. - // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
- traceRefCount int32 -} - -func (c *channel) addChild(id int64, e entry) { - switch v := e.(type) { - case *subChannel: - c.subChans[id] = v.refName - case *channel: - c.nestedChans[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) - } -} - -func (c *channel) deleteChild(id int64) { - delete(c.subChans, id) - delete(c.nestedChans, id) - c.deleteSelfIfReady() -} - -func (c *channel) triggerDelete() { - c.closeCalled = true - c.deleteSelfIfReady() -} - -func (c *channel) getParentID() int64 { - return c.pid -} - -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means -// deleting the channel reference from its parent's child list. -// -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the -// corresponding grpc object has been invoked, and the channel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (c *channel) deleteSelfFromTree() (deleted bool) { - if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return false - } - // not top channel - if c.pid != 0 { - c.cm.findEntry(c.pid).deleteChild(c.id) - } - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the -// channel, and its memory will be garbage collected. -// -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (c *channel) deleteSelfFromMap() (delete bool) { - if c.getTraceRefCount() != 0 { - c.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the channel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. 
-func (c *channel) deleteSelfIfReady() { - if !c.deleteSelfFromTree() { - return - } - if !c.deleteSelfFromMap() { - return - } - c.cm.deleteEntry(c.id) - c.trace.clear() -} - -func (c *channel) getChannelTrace() *channelTrace { - return c.trace -} - -func (c *channel) incrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, 1) -} - -func (c *channel) decrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, -1) -} - -func (c *channel) getTraceRefCount() int { - i := atomic.LoadInt32(&c.traceRefCount) - return int(i) -} - -func (c *channel) getRefName() string { - return c.refName -} - -type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - traceRefCount int32 -} - -func (sc *subChannel) addChild(id int64, e entry) { - if v, ok := e.(*normalSocket); ok { - sc.sockets[id] = v.refName - } else { - logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) - } -} - -func (sc *subChannel) deleteChild(id int64) { - delete(sc.sockets, id) - sc.deleteSelfIfReady() -} - -func (sc *subChannel) triggerDelete() { - sc.closeCalled = true - sc.deleteSelfIfReady() -} - -func (sc *subChannel) getParentID() int64 { - return sc.pid -} - -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which -// means deleting the subchannel reference from its parent's child list. -// -// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of -// the corresponding grpc object has been invoked, and the subchannel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (sc *subChannel) deleteSelfFromTree() (deleted bool) { - if !sc.closeCalled || len(sc.sockets) != 0 { - return false - } - sc.cm.findEntry(sc.pid).deleteChild(sc.id) - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query -// the subchannel, and its memory will be garbage collected. -// -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (sc *subChannel) deleteSelfFromMap() (delete bool) { - if sc.getTraceRefCount() != 0 { - // free the grpc struct (i.e. addrConn) - sc.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. 
-func (sc *subChannel) deleteSelfIfReady() { - if !sc.deleteSelfFromTree() { - return - } - if !sc.deleteSelfFromMap() { - return - } - sc.cm.deleteEntry(sc.id) - sc.trace.clear() -} - -func (sc *subChannel) getChannelTrace() *channelTrace { - return sc.trace -} - -func (sc *subChannel) incrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, 1) -} - -func (sc *subChannel) decrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, -1) -} - -func (sc *subChannel) getTraceRefCount() int { - i := atomic.LoadInt32(&sc.traceRefCount) - return int(i) -} - -func (sc *subChannel) getRefName() string { - return sc.refName -} - -// SocketMetric defines the info channelz provides for a specific Socket, which -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. -type SocketMetric struct { - // ID is the channelz id of this socket. - ID int64 - // RefName is the human readable reference string of this socket. - RefName string - // SocketData contains socket internal metric reported by the socket through - // ChannelzMetric(). - SocketData *SocketInternalMetric -} - -// SocketInternalMetric defines the struct that the implementor of Socket interface -// should return from ChannelzMetric(). -type SocketInternalMetric struct { - // The number of streams that have been started. - StreamsStarted int64 - // The number of streams that have ended successfully: - // On client side, receiving frame with eos bit set. - // On server side, sending frame with eos bit set. - StreamsSucceeded int64 - // The number of streams that have ended unsuccessfully: - // On client side, termination without receiving frame with eos bit set. - // On server side, termination without sending frame with eos bit set. - StreamsFailed int64 - // The number of messages successfully sent on this socket. - MessagesSent int64 - MessagesReceived int64 - // The number of keep alives sent. This is typically implemented with HTTP/2 - // ping messages. - KeepAlivesSent int64 - // The last time a stream was created by this endpoint. Usually unset for - // servers. - LastLocalStreamCreatedTimestamp time.Time - // The last time a stream was created by the remote endpoint. Usually unset - // for clients. - LastRemoteStreamCreatedTimestamp time.Time - // The last time a message was sent by this endpoint. - LastMessageSentTimestamp time.Time - // The last time a message was received by this endpoint. - LastMessageReceivedTimestamp time.Time - // The amount of window, granted to the local endpoint by the remote endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - LocalFlowControlWindow int64 - // The amount of window, granted to the remote endpoint by the local endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - RemoteFlowControlWindow int64 - // The locally bound address. - LocalAddr net.Addr - // The remote bound address. May be absent. - RemoteAddr net.Addr - // Optional, represents the name of the remote endpoint, if different than - // the original target name. - RemoteName string - SocketOptions *SocketOptionData - Security credentials.ChannelzSecurityValue -} - -// Socket is the interface that should be satisfied in order to be tracked by -// channelz as Socket. 
-type Socket interface { - ChannelzMetric() *SocketInternalMetric -} - -type listenSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ls *listenSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) -} - -func (ls *listenSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) -} - -func (ls *listenSocket) triggerDelete() { - ls.cm.deleteEntry(ls.id) - ls.cm.findEntry(ls.pid).deleteChild(ls.id) -} - -func (ls *listenSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a listen socket") -} - -func (ls *listenSocket) getParentID() int64 { - return ls.pid -} - -type normalSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ns *normalSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) -} - -func (ns *normalSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) -} - -func (ns *normalSocket) triggerDelete() { - ns.cm.deleteEntry(ns.id) - ns.cm.findEntry(ns.pid).deleteChild(ns.id) -} - -func (ns *normalSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a normal socket") -} - -func (ns *normalSocket) getParentID() int64 { - return ns.pid -} - -// ServerMetric defines the info channelz provides for a specific Server, which -// includes ServerInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ServerMetric struct { - // ID is the channelz id of this server. - ID int64 - // RefName is the human readable reference string of this server. - RefName string - // ServerData contains server internal metric reported by the server through - // ChannelzMetric(). - ServerData *ServerInternalMetric - // ListenSockets tracks the listener socket type children of this server in the - // format of a map from socket channelz id to corresponding reference string. - ListenSockets map[int64]string -} - -// ServerInternalMetric defines the struct that the implementor of Server interface -// should return from ChannelzMetric(). -type ServerInternalMetric struct { - // The number of incoming calls started on the server. - CallsStarted int64 - // The number of incoming calls that have completed with an OK status. - CallsSucceeded int64 - // The number of incoming calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the server. - LastCallStartedTimestamp time.Time -} - -// Server is the interface to be satisfied in order to be tracked by channelz as -// Server. 
-type Server interface { - ChannelzMetric() *ServerInternalMetric -} - -type server struct { - refName string - s Server - closeCalled bool - sockets map[int64]string - listenSockets map[int64]string - id int64 - cm *channelMap -} - -func (s *server) addChild(id int64, e entry) { - switch v := e.(type) { - case *normalSocket: - s.sockets[id] = v.refName - case *listenSocket: - s.listenSockets[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) - } -} - -func (s *server) deleteChild(id int64) { - delete(s.sockets, id) - delete(s.listenSockets, id) - s.deleteSelfIfReady() -} - -func (s *server) triggerDelete() { - s.closeCalled = true - s.deleteSelfIfReady() -} - -func (s *server) deleteSelfIfReady() { - if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { - return - } - s.cm.deleteEntry(s.id) -} - -func (s *server) getParentID() int64 { - return 0 -} - -type tracedChannel interface { - getChannelTrace() *channelTrace - incrTraceRefCount() - decrTraceRefCount() - getRefName() string -} - -type channelTrace struct { - cm *channelMap - clearCalled bool - createdTime time.Time - eventCount int64 - mu sync.Mutex - events []*TraceEvent -} - -func (c *channelTrace) append(e *TraceEvent) { - c.mu.Lock() - if len(c.events) == getMaxTraceEntry() { - del := c.events[0] - c.events = c.events[1:] - if del.RefID != 0 { - // start recursive cleanup in a goroutine to not block the call originated from grpc. - go func() { - // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. - c.cm.mu.Lock() - c.cm.decrTraceRefCount(del.RefID) - c.cm.mu.Unlock() - }() - } - } - e.Timestamp = time.Now() - c.events = append(c.events, e) - c.eventCount++ - c.mu.Unlock() -} - -func (c *channelTrace) clear() { - if c.clearCalled { - return - } - c.clearCalled = true - c.mu.Lock() - for _, e := range c.events { - if e.RefID != 0 { - // caller should have already held the c.cm.mu lock. - c.cm.decrTraceRefCount(e.RefID) - } - } - c.mu.Unlock() -} - -// Severity is the severity level of a trace event. -// The canonical enumeration of all valid values is here: -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. -type Severity int - -const ( - // CtUnknown indicates unknown severity of a trace event. - CtUnknown Severity = iota - // CtInfo indicates info level severity of a trace event. - CtInfo - // CtWarning indicates warning level severity of a trace event. - CtWarning - // CtError indicates error level severity of a trace event. - CtError -) - -// RefChannelType is the type of the entity being referenced in a trace event. -type RefChannelType int - -const ( - // RefUnknown indicates an unknown entity type, the zero value for this type. - RefUnknown RefChannelType = iota - // RefChannel indicates the referenced entity is a Channel. - RefChannel - // RefSubChannel indicates the referenced entity is a SubChannel. - RefSubChannel - // RefServer indicates the referenced entity is a Server. - RefServer - // RefListenSocket indicates the referenced entity is a ListenSocket. - RefListenSocket - // RefNormalSocket indicates the referenced entity is a NormalSocket. 
- RefNormalSocket -) - -var refChannelTypeToString = map[RefChannelType]string{ - RefUnknown: "Unknown", - RefChannel: "Channel", - RefSubChannel: "SubChannel", - RefServer: "Server", - RefListenSocket: "ListenSocket", - RefNormalSocket: "NormalSocket", -} - -func (r RefChannelType) String() string { - return refChannelTypeToString[r] -} - -func (c *channelTrace) dumpData() *ChannelTrace { - c.mu.Lock() - ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} - ct.Events = c.events[:len(c.events)] - c.mu.Unlock() - return ct -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go deleted file mode 100644 index 98288c3f..00000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "syscall" -) - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket any) *SocketOptionData { - c, ok := socket.(syscall.Conn) - if !ok { - return nil - } - data := &SocketOptionData{} - if rawConn, err := c.SyscallConn(); err == nil { - rawConn.Control(data.Getsockopt) - return data - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go deleted file mode 100644 index b5568b22..00000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build !linux -// +build !linux - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 685a3cb4..9c915d9e 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -28,9 +28,6 @@ import ( var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). 
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) - // AdvertiseCompressors is set if registered compressor should be advertised - // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go index 9f409096..e8d86698 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -20,8 +20,6 @@ package grpcutil import ( "strings" - - "google.golang.org/grpc/internal/envconfig" ) // RegisteredCompressorNames holds names of the registered compressors. @@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool { // RegisteredCompressors returns a string of registered compressor names // separated by comma. func RegisteredCompressors() string { - if !envconfig.AdvertiseCompressors { - return "" - } return strings.Join(RegisteredCompressorNames, ",") } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 6c7ea6a5..48d24bdb 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -190,12 +190,16 @@ var ( // function makes events more predictable than relying on timer events. TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error - // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton - // to invoke resource not found for a resource type name and resource name. + // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client + // singleton to invoke resource not found for a resource type name and + // resource name. TriggerXDSResourceNameNotFoundClient any // func(string, string) error // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) + + // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme. + UserSetDefaultScheme bool = false ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 52cfab1b..dbee7a60 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -24,9 +24,8 @@ import ( "encoding/json" "fmt" - protov1 "github.com/golang/protobuf/proto" "google.golang.org/protobuf/encoding/protojson" - protov2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) const jsonIndent = " " @@ -35,21 +34,14 @@ const jsonIndent = " " // // If marshal fails, it falls back to fmt.Sprintf("%+v"). func ToJSON(e any) string { - switch ee := e.(type) { - case protov1.Message: - mm := protojson.MarshalOptions{Indent: jsonIndent} - ret, err := mm.Marshal(protov1.MessageV2(ee)) - if err != nil { - // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 - // messages are not imported, and this will fail because the message - // is not found. 
- return fmt.Sprintf("%+v", ee) - } - return string(ret) - case protov2.Message: + if ee, ok := e.(protoadapt.MessageV1); ok { + e = protoadapt.MessageV2Of(ee) + } + + if ee, ok := e.(protoadapt.MessageV2); ok { mm := protojson.MarshalOptions{ - Multiline: true, Indent: jsonIndent, + Multiline: true, } ret, err := mm.Marshal(ee) if err != nil { @@ -59,13 +51,13 @@ func ToJSON(e any) string { return fmt.Sprintf("%+v", ee) } return string(ret) - default: - ret, err := json.MarshalIndent(ee, "", jsonIndent) - if err != nil { - return fmt.Sprintf("%+v", ee) - } - return string(ret) } + + ret, err := json.MarshalIndent(e, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", e) + } + return string(ret) } // FormatJSON formats the input json bytes with indentation. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index b66dcb21..f3f52a59 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -41,11 +41,24 @@ import ( "google.golang.org/grpc/serviceconfig" ) -// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB -// addresses from SRV records. Must not be changed after init time. -var EnableSRVLookups = false - -var logger = grpclog.Component("dns") +var ( + // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB + // addresses from SRV records. Must not be changed after init time. + EnableSRVLookups = false + + // MinResolutionInterval is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionInterval = 30 * time.Second + + // ResolvingTimeout specifies the maximum duration for a DNS resolution request. + // If the timeout expires before a response is received, the request will be canceled. + // + // It is recommended to set this value at application startup. Avoid modifying this variable + // after initialization as it's not thread-safe for concurrent modification. + ResolvingTimeout = 30 * time.Second + + logger = grpclog.Component("dns") +) func init() { resolver.Register(NewBuilder()) @@ -201,7 +214,7 @@ func (d *dnsResolver) watcher() { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. 
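The pretty.ToJSON rewrite above collapses the former three-way type switch into a single protojson path by first upgrading legacy messages through protoadapt. A standalone sketch of the same shape (the toJSON name and the map input in main are illustrative, not part of this diff):

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/protoadapt"
)

// toJSON mirrors the post-refactor flow: upgrade v1 messages once, marshal
// any proto message with protojson, and fall back to encoding/json.
func toJSON(e any) string {
	if m1, ok := e.(protoadapt.MessageV1); ok {
		e = protoadapt.MessageV2Of(m1) // one-time upgrade to the v2 API
	}
	if m2, ok := e.(protoadapt.MessageV2); ok {
		b, err := protojson.MarshalOptions{Indent: "  ", Multiline: true}.Marshal(m2)
		if err != nil {
			return fmt.Sprintf("%+v", m2)
		}
		return string(b)
	}
	b, err := json.MarshalIndent(e, "", "  ")
	if err != nil {
		return fmt.Sprintf("%+v", e)
	}
	return string(b)
}

func main() {
	fmt.Println(toJSON(map[string]int{"answer": 42})) // exercises the fallback path
}

The design point is that protoadapt.MessageV2Of is a cheap wrapper, so callers no longer need parallel protov1/protov2 marshal branches.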
backoffIndex = 1 - waitTime = internal.MinResolutionRate + waitTime = MinResolutionInterval select { case <-d.ctx.Done(): return @@ -221,18 +234,18 @@ func (d *dnsResolver) watcher() { } } -func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { +func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { if !EnableSRVLookups { return nil, nil } var newAddrs []resolver.Address - _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host) if err != nil { err = handleDNSError(err, "SRV") // may become nil return nil, err } for _, s := range srvs { - lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + lbAddrs, err := d.resolver.LookupHost(ctx, s.Target) if err != nil { err = handleDNSError(err, "A") // may become nil if err == nil { @@ -269,8 +282,8 @@ func handleDNSError(err error, lookupType string) error { return err } -func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { - ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) +func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host) if err != nil { if envconfig.TXTErrIgnore { return nil @@ -297,8 +310,8 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { return d.cc.ParseServiceConfig(sc) } -func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - addrs, err := d.resolver.LookupHost(d.ctx, d.host) +func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err @@ -316,8 +329,10 @@ func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { } func (d *dnsResolver) lookup() (*resolver.State, error) { - srv, srvErr := d.lookupSRV() - addrs, hostErr := d.lookupHost() + ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout) + defer cancel() + srv, srvErr := d.lookupSRV(ctx) + addrs, hostErr := d.lookupHost(ctx) if hostErr != nil && (srvErr != nil || len(srv) == 0) { return nil, hostErr } @@ -327,7 +342,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } if !d.disableServiceConfig { - state.ServiceConfig = d.lookupTXT() + state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil } diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index c7fc557d..a7ecaf8d 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -28,7 +28,7 @@ import ( // NetResolver groups the methods on net.Resolver that are used by the DNS // resolver implementation. This allows the default net.Resolver instance to be -// overidden from tests. +// overridden from tests. type NetResolver interface { LookupHost(ctx context.Context, host string) (addrs []string, err error) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) @@ -50,10 +50,6 @@ var ( // The following vars are overridden from tests. var ( - // MinResolutionRate is the minimum rate at which re-resolutions are - // allowed. This helps to prevent excessive re-resolution. 
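The lookup refactor above threads one context through lookupSRV, lookupTXT, and lookupHost so a single ResolvingTimeout bounds the whole resolution pass. The underlying pattern, as a runnable sketch (the host name is illustrative):

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// Derive a bounded context per resolution attempt, as lookup() now does;
	// a slow DNS server can no longer stall the watcher indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	addrs, err := net.DefaultResolver.LookupHost(ctx, "grpc.io")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(addrs)
}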
- MinResolutionRate = 30 * time.Second - // TimeAfterFunc is used by the DNS resolver to wait for the given duration // to elapse. In non-test code, this is implemented by time.After. In test // code, this can be used to control the amount of time the resolver is diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 83c38298..3deadfb4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -193,7 +193,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn error // if set, loopyWriter will exit, resulting in conn closure + closeConn error // if set, loopyWriter will exit with this error } func (*goAway) isTransportResponseFrame() bool { return false } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -344,7 +344,7 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err return false, c.err } if f != nil { - if !f(it) { // f wasn't successful + if !f() { // f wasn't successful c.mu.Unlock() return false, nil } @@ -495,21 +495,22 @@ type loopyWriter struct { ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ - side: s, - cbuf: cbuf, - sendQuota: defaultWindowSize, - oiws: defaultWindowSize, - estdStreams: make(map[uint32]*outStream), - activeStreams: newOutStreamList(), - framer: fr, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - bdpEst: bdpEst, - conn: conn, - logger: logger, + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + conn: conn, + logger: logger, + ssGoAwayHandler: goAwayHandler, } return l } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index bd39ff9a..4a3ddce2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -51,14 +51,10 @@ import ( // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { - if r.ProtoMajor != 2 { - msg := "gRPC requires HTTP/2" - http.Error(w, msg, http.StatusBadRequest) - return nil, errors.New(msg) - } - if r.Method != "POST" { + if r.Method != http.MethodPost { + w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) - http.Error(w, msg, http.StatusBadRequest) + http.Error(w, msg, http.StatusMethodNotAllowed) return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") @@ -69,6 +65,11 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s http.Error(w, msg, http.StatusUnsupportedMediaType) return nil, errors.New(msg) } + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusHTTPVersionNotSupported) + return nil, errors.New(msg) + } if _, ok := w.(http.Flusher); !ok { msg := "gRPC requires a ResponseWriter supporting http.Flusher" http.Error(w, msg, http.StatusInternalServerError) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index eff87996..3c63c706 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -114,11 +114,11 @@ type http2Client struct { streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 - nextID uint32 registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables + nextID uint32 state transportState activeStreams map[uint32]*Stream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. @@ -140,9 +140,7 @@ type http2Client struct { // variable. kpDormant bool - // Fields below are for channelz metric collection. - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Socket onClose func(GoAwayReason) @@ -319,6 +317,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.MaxHeaderListSize != nil { maxHeaderListSize = *opts.MaxHeaderListSize } + t := &http2Client{ ctx: ctx, ctxDone: ctx.Done(), // Cache Done chan. @@ -346,11 +345,25 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), - czData: new(channelzData), keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), onClose: onClose, } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: opts.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }) t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. 
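The channelz.RegisterSocket call above replaces the old RegisterNormalSocket/czData pairing: cumulative counters become atomics that are bumped inline on the hot path, while gauge-like values are produced by the t.socketMetrics callback only when channelz is queried. A self-contained sketch of that split (type and field names are stand-ins, not the channelz API):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// socketMetrics stands in for channelz.SocketMetrics: cumulative counters
// and nanosecond timestamps as atomics, cheap to update inline.
type socketMetrics struct {
	StreamsStarted           atomic.Int64
	LastLocalStreamCreatedTs atomic.Int64 // unix nanos
}

// ephemeralMetrics stands in for the gauges (flow-control windows) that are
// only computed when an inspection actually asks for them.
type ephemeralMetrics struct{ LocalFlowControlWindow int64 }

type socket struct {
	metrics   socketMetrics
	ephemeral func() *ephemeralMetrics // called lazily on inspection
}

func main() {
	s := &socket{ephemeral: func() *ephemeralMetrics {
		return &ephemeralMetrics{LocalFlowControlWindow: 65535}
	}}
	s.metrics.StreamsStarted.Add(1)
	s.metrics.LastLocalStreamCreatedTs.Store(time.Now().UnixNano())
	fmt.Println(s.metrics.StreamsStarted.Load(), s.ephemeral().LocalFlowControlWindow)
}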
t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -381,10 +394,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } sh.HandleConn(t.ctx, connBegin) } - t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) - if err != nil { - return nil, err - } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() @@ -399,10 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerErrCh := make(chan error, 1) go t.reader(readerErrCh) defer func() { - if err == nil { - err = <-readerErrCh - } if err != nil { + // writerDone should be closed since the loopy goroutine + // wouldn't have started in the case this function returns an error. + close(t.writerDone) t.Close(err) } }() @@ -449,8 +458,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := t.framer.writer.Flush(); err != nil { return nil, err } + // Block until the server preface is received successfully or an error occurs. + if err = <-readerErrCh; err != nil { + return nil, err + } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -508,6 +521,17 @@ func (t *http2Client) getPeer() *peer.Peer { } } +// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway +// to be the last frame loopy writes to the transport. +func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + return false, g.closeConn +} + func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) ri := credentials.RequestInfo{ @@ -756,8 +780,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return ErrConnClosing } if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano()) } // If the keepalive goroutine has gone dormant, wake it up. if t.kpDormant { @@ -772,7 +796,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it any) bool { + checkForStreamQuota := func() bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -784,23 +808,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, t.waitingStreams-- } t.streamQuota-- - h := it.(*headerFrame) - h.streamID = t.nextID - t.nextID += 2 - // Drain client transport if nextID > MaxStreamID which signals gRPC that - // the connection is closed and a new one must be created for subsequent RPCs. 
- transportDrainRequired = t.nextID > MaxStreamID - - s.id = h.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } + + hdr.streamID = t.nextID + t.nextID += 2 + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + + s.id = hdr.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -810,13 +835,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it any) bool { + checkForHeaderListSize := func() bool { if t.maxSendHeaderListSize == nil { return true } - hdrFrame := it.(*headerFrame) var sz int64 - for _, f := range hdrFrame.hf { + for _, f := range hdr.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) return false @@ -825,8 +849,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it any) bool { - return checkForHeaderListSize(it) && checkForStreamQuota(it) + success, err := t.controlBuf.executeAndPut(func() bool { + return checkForHeaderListSize() && checkForStreamQuota() }, hdr) if err != nil { // Connection closed. @@ -928,16 +952,16 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.mu.Unlock() if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } }, rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(any) bool { + addBackStreamQuota := func() bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -957,7 +981,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be -// accessed any more. +// accessed anymore. func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only close once. @@ -982,10 +1006,13 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() - t.controlBuf.finish() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) + <-t.writerDone t.cancel() t.conn.Close() - channelz.RemoveEntry(t.channelzID) + channelz.RemoveEntry(t.channelz.ID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. 
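A pattern worth noting in these hunks: controlBuffer.executeAndPut now takes func() bool instead of func(it any) bool, because checkForStreamQuota and checkForHeaderListSize are closures that already capture the hdr frame built in NewStream; the old code passed the frame back in and type-asserted it. A minimal illustration (executeAndPut here is a stand-in, not the controlbuf implementation):

package main

import "fmt"

type headerFrame struct{ fields int }

// executeAndPut runs the check and, on success, enqueues the item; the
// check no longer receives the item as an argument.
func executeAndPut(check func() bool, it *headerFrame, q *[]*headerFrame) bool {
	if check != nil && !check() {
		return false
	}
	*q = append(*q, it)
	return true
}

func main() {
	var q []*headerFrame
	hdr := &headerFrame{fields: 3}
	maxFields := 8
	// The closure captures hdr directly; previously the callback took
	// `it any` and had to assert it back to *headerFrame.
	ok := executeAndPut(func() bool { return hdr.fields <= maxFields }, hdr, &q)
	fmt.Println(ok, len(q))
}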
_, goAwayDebugMessage := t.GetGoAwayReason() @@ -1090,7 +1117,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(any) bool { + updateIWS := func() bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1243,7 +1270,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -1708,7 +1735,7 @@ func (t *http2Client) keepalive() { // keepalive timer expired. In both cases, we need to send a ping. if !outstandingPing { if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } t.controlBuf.put(p) timeoutLeft = t.kp.Timeout @@ -1738,40 +1765,23 @@ func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } -func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s +func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } } func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } func (t *http2Client) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3839c1ad..cab0e2d3 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -118,8 +118,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. 
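The client Close path above now enqueues a final GOAWAY and blocks on writerDone before closing the socket, honoring the HTTP/2 requirement that GOAWAY precede connection teardown. The ordering, sketched with plain channels (all names illustrative):

package main

import "fmt"

func main() {
	frames := make(chan string, 1)
	writerDone := make(chan struct{})

	// Writer goroutine: flush queued frames, then signal completion.
	go func() {
		defer close(writerDone)
		for f := range frames {
			fmt.Println("wrote frame:", f)
		}
	}()

	// Shutdown path: the GOAWAY is the last frame the writer emits.
	frames <- "GOAWAY (client transport shutdown)"
	close(frames)
	<-writerDone // only now is it safe to close the TCP connection
	fmt.Println("conn closed")
}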
- channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Socket bufferPool *bufferPool connectionID uint64 @@ -262,9 +261,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - czData: new(channelzData), bufferPool: newBufferPool(), } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: config.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }, + ) t.logger = prefixLoggerForServerTransport(t) t.controlBuf = newControlBuffer(t.done) @@ -274,10 +288,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) - if err != nil { - return nil, err - } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) t.framer.writer.Flush() @@ -320,8 +330,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -334,9 +343,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // closed, would lead to a TCP RST instead of FIN, and the client // encountering errors. For more info: // https://github.com/grpc/grpc-go/issues/5358 + timer := time.NewTimer(time.Second) + defer timer.Stop() select { case <-t.readerDone: - case <-time.After(time.Second): + case <-timer.C: } t.conn.Close() } @@ -592,8 +603,8 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.mu.Unlock() if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) @@ -658,8 +669,14 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: if err := t.operateHeaders(ctx, frame, handle); err != nil { - t.Close(err) - break + // Any error processing client headers, e.g. invalid stream ID, + // is considered a protocol violation. 
+ t.controlBuf.put(&goAway{ + code: http2.ErrCodeProtocol, + debugData: []byte(err.Error()), + closeConn: err, + }) + continue } case *http2.DataFrame: t.handleData(frame) @@ -842,7 +859,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -996,12 +1013,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + hf := &headerFrame{ streamID: s.id, hf: headerFields, endStream: false, onWrite: t.setResetPingStrikes, - }) + } + success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf) if !success { if err != nil { return err @@ -1190,12 +1208,12 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout)) return } if !outstandingPing { if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } t.controlBuf.put(p) kpTimeoutLeft = t.kp.Timeout @@ -1235,7 +1253,7 @@ func (t *http2Server) Close(err error) { if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } - channelz.RemoveEntry(t.channelzID) + channelz.RemoveEntry(t.channelz.ID) // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1256,9 +1274,9 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } } @@ -1375,38 +1393,21 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, nil } -func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.peer.LocalAddr, - RemoteAddr: t.peer.Addr, - // RemoteName : - } - if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s +func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: 
t.getOutFlowWindow(),
+	}
 }
 
 func (t *http2Server) IncrMsgSent() {
-	atomic.AddInt64(&t.czData.msgSent, 1)
-	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+	t.channelz.SocketMetrics.MessagesSent.Add(1)
+	t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
 }
 
 func (t *http2Server) IncrMsgRecv() {
-	atomic.AddInt64(&t.czData.msgRecv, 1)
-	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+	t.channelz.SocketMetrics.MessagesReceived.Add(1)
+	t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
 }
 
 func (t *http2Server) getOutFlowWindow() int64 {
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index dc29d590..39cef3bd 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -418,10 +418,9 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
 	return f
 }
 
-func getWriteBufferPool(writeBufferSize int) *sync.Pool {
+func getWriteBufferPool(size int) *sync.Pool {
 	writeBufferMutex.Lock()
 	defer writeBufferMutex.Unlock()
-	size := writeBufferSize * 2
 	pool, ok := writeBufferPoolMap[size]
 	if ok {
 		return pool
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index d3796c25..4b39c0ad 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -304,7 +304,7 @@ func (s *Stream) isHeaderSent() bool {
 }
 
 // updateHeaderSent updates headerSent and returns true
-// if it was alreay set. It is valid only on server-side.
+// if it was already set. It is valid only on server-side.
 func (s *Stream) updateHeaderSent() bool {
 	return atomic.SwapUint32(&s.headerSent, 1) == 1
 }
@@ -571,7 +571,7 @@ type ServerConfig struct {
 	WriteBufferSize   int
 	ReadBufferSize    int
 	SharedWriteBuffer bool
-	ChannelzParentID  *channelz.Identifier
+	ChannelzParent    *channelz.Server
 	MaxHeaderListSize *uint32
 	HeaderTableSize   *uint32
 }
@@ -606,8 +606,8 @@ type ConnectOptions struct {
 	ReadBufferSize int
 	// SharedWriteBuffer indicates whether connections should reuse write buffer
 	SharedWriteBuffer bool
-	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
-	ChannelzParentID *channelz.Identifier
+	// ChannelzParent sets the addrConn id which initiated the creation of this client transport.
+	ChannelzParent *channelz.SubChannel
 	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
 	MaxHeaderListSize *uint32
 	// UseProxy specifies if a proxy should be used.
@@ -820,30 +820,6 @@ const (
 	GoAwayTooManyPings GoAwayReason = 2
 )
 
-// channelzData is used to store channelz related data for http2Client and http2Server.
-// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
-// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
-	kpCount int64
-	// The number of streams that have started, including already finished ones.
-	streamsStarted int64
-	// Client side: The number of streams that have ended successfully by receiving
-	// EoS bit set frame from server.
-	// Server side: The number of streams that have ended successfully by sending
-	// frame with EoS bit set.
-	streamsSucceeded int64
-	streamsFailed    int64
-	// lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
-	// instead of time.Time since it's more costly to atomically update time.Time variable than int64
-	// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
-	lastStreamCreatedTime int64
-	msgSent               int64
-	msgRecv               int64
-	lastMsgSentTime       int64
-	lastMsgRecvTime       int64
-}
-
 // ContextErr converts the error from context package into a status error.
 func ContextErr(err error) error {
 	switch err {
diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
deleted file mode 100644
index e8b49277..00000000
--- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package internal
-
-import (
-	"google.golang.org/grpc/attributes"
-	"google.golang.org/grpc/resolver"
-)
-
-// handshakeClusterNameKey is the type used as the key to store cluster name in
-// the Attributes field of resolver.Address.
-type handshakeClusterNameKey struct{}
-
-// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
-// is updated with the cluster name.
-func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
-	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
-	return addr
-}
-
-// GetXDSHandshakeClusterName returns cluster name stored in attr.
-func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
-	v := attr.Value(handshakeClusterNameKey{})
-	name, ok := v.(string)
-	return name, ok
-}
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index a821ff9b..499a49c8 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -22,7 +22,9 @@ package peer
 
 import (
 	"context"
+	"fmt"
 	"net"
+	"strings"
 
 	"google.golang.org/grpc/credentials"
 )
@@ -39,6 +41,34 @@ type Peer struct {
 	AuthInfo credentials.AuthInfo
 }
 
+// String ensures the Peer type implements the Stringer interface, allowing a
+// context that carries a peerKey value to be printed effectively.
+func (p *Peer) String() string {
+	if p == nil {
+		return "Peer<nil>"
+	}
+	sb := &strings.Builder{}
+	sb.WriteString("Peer{")
+	if p.Addr != nil {
+		fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+	} else {
+		fmt.Fprintf(sb, "Addr: <nil>, ")
+	}
+	if p.LocalAddr != nil {
+		fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+	} else {
+		fmt.Fprintf(sb, "LocalAddr: <nil>, ")
+	}
+	if p.AuthInfo != nil {
+		fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+	} else {
+		fmt.Fprintf(sb, "AuthInfo: <nil>")
+	}
+	sb.WriteString("}")
+
+	return sb.String()
+}
+
 type peerKey struct{}
 
 // NewContext creates a new context with peer information attached.
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index bf56faa7..56e8aba7 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,6 +20,7 @@ package grpc
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"sync"
@@ -117,7 +118,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 			if lastPickErr != nil {
 				errStr = "latest balancer error: " + lastPickErr.Error()
 			} else {
-				errStr = ctx.Err().Error()
+				errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error())
 			}
 			switch ctx.Err() {
 			case context.DeadlineExceeded:
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index 5128f936..88536266 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -38,19 +38,15 @@ const (
 	logPrefix = "[pick-first-lb %p] "
 )
 
-func newPickfirstBuilder() balancer.Builder {
-	return &pickfirstBuilder{}
-}
-
 type pickfirstBuilder struct{}
 
-func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
 	b := &pickfirstBalancer{cc: cc}
 	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
 	return b
 }
 
-func (*pickfirstBuilder) Name() string {
+func (pickfirstBuilder) Name() string {
 	return PickFirstBalancerName
 }
 
@@ -58,12 +54,12 @@ type pfConfig struct {
 	serviceconfig.LoadBalancingConfig `json:"-"`
 
 	// If set to true, instructs the LB policy to shuffle the order of the list
-	// of addresses received from the name resolver before attempting to
+	// of endpoints received from the name resolver before attempting to
 	// connect to them.
 	ShuffleAddressList bool `json:"shuffleAddressList"`
 }
 
-func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
 	var cfg pfConfig
 	if err := json.Unmarshal(js, &cfg); err != nil {
 		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
@@ -98,8 +94,7 @@ func (b *pickfirstBalancer) ResolverError(err error) {
 }
 
 func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
-	addrs := state.ResolverState.Addresses
-	if len(addrs) == 0 {
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
 		// The resolver reported an empty address list. Treat it like an error by
 		// calling b.ResolverError.
 		if b.subConn != nil {
@@ -111,22 +106,49 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
 		b.ResolverError(errors.New("produced zero addresses"))
 		return balancer.ErrBadResolverState
 	}
-
 	// We don't have to guard this block with the env var because ParseConfig
 	// already does so.
 	cfg, ok := state.BalancerConfig.(pfConfig)
 	if state.BalancerConfig != nil && !ok {
 		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
 	}
-	if cfg.ShuffleAddressList {
-		addrs = append([]resolver.Address{}, addrs...)
-		grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
-	}
 
 	if b.logger.V(2) {
 		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
 	}
 
+	var addrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling will
+		// change the order of endpoints but not touch the order of the addresses
+		// within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for each
+		// of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			// "In the flattened list, interleave addresses from the two address
+			// families, as per RFC-8304 section 4." - A61
+			// TODO: support the above language.
+			addrs = append(addrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints not set, process addresses until we migrate resolver
+		// emissions fully to Endpoints. The top channel does wrap emitted
+		// addresses with endpoints, however some balancers such as weighted
+		// target do not forward the corresponding correct endpoints down/split
+		// endpoints properly. Once all balancers correctly forward endpoints
+		// down, this else conditional can be deleted.
+		addrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			addrs = append([]resolver.Address{}, addrs...)
+ grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil @@ -243,7 +265,3 @@ func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } - -func init() { - balancer.Register(newPickfirstBuilder()) -} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index a6f26c8a..3edca296 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -63,7 +63,7 @@ LEGACY_SOURCES=( # Generates only the new gRPC Service symbols SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto @@ -93,7 +93,7 @@ Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ @@ -118,6 +118,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # grpc_testing_not_regenerate/*.pb.go are not re-generated, # see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 14aa6f20..ef3d6ed6 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -18,19 +18,43 @@ // Package dns implements a dns resolver to be installed as the default resolver // in grpc. -// -// Deprecated: this package is imported by grpc and should not need to be -// imported directly by users. package dns import ( + "time" + "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" ) +// SetResolvingTimeout sets the maximum duration for DNS resolution requests. +// +// This function affects the global timeout used by all channels using the DNS +// name resolver scheme. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +// +// The default value is 30 seconds. Setting the timeout too low may result in +// premature timeouts during resolution, while setting it too high may lead to +// unnecessary delays in service discovery. Choose a value appropriate for your +// specific needs and network environment. +func SetResolvingTimeout(timeout time.Duration) { + dns.ResolvingTimeout = timeout +} + // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. // // Deprecated: import grpc and use resolver.Get("dns") instead. 
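The pickfirst change above implements the gRFC A61/A62 flattening: shuffle whole endpoints if requested, never the addresses within one, then concatenate the per-endpoint address lists in order. A runnable sketch against the public resolver types (the sample addresses are made up):

package main

import (
	"fmt"
	"math/rand"

	"google.golang.org/grpc/resolver"
)

// flatten mirrors the logic above: optionally shuffle the endpoints, keeping
// each endpoint's internal address order, then concatenate the lists.
func flatten(endpoints []resolver.Endpoint, shuffle bool) []resolver.Address {
	if shuffle {
		endpoints = append([]resolver.Endpoint{}, endpoints...)
		rand.Shuffle(len(endpoints), func(i, j int) {
			endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
		})
	}
	var addrs []resolver.Address
	for _, ep := range endpoints {
		addrs = append(addrs, ep.Addresses...)
	}
	return addrs
}

func main() {
	eps := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}},
		{Addresses: []resolver.Address{{Addr: "10.0.1.1:443"}}},
	}
	for _, a := range flatten(eps, true) {
		fmt.Println(a.Addr)
	}
}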
func NewBuilder() resolver.Builder { return dns.NewBuilder() } + +// SetMinResolutionInterval sets the default minimum interval at which DNS +// re-resolutions are allowed. This helps to prevent excessive re-resolution. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +func SetMinResolutionInterval(d time.Duration) { + dns.MinResolutionInterval = d +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index d72f21c1..20285451 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -63,16 +64,18 @@ func Get(scheme string) Builder { } // SetDefaultScheme sets the default scheme that will be used. The default -// default scheme is "passthrough". +// scheme is initially set to "passthrough". // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. The scheme set last overrides // previously set values. func SetDefaultScheme(scheme string) { defaultScheme = scheme + internal.UserSetDefaultScheme = true } -// GetDefaultScheme gets the default scheme that will be used. +// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If +// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead. func GetDefaultScheme() string { return defaultScheme } @@ -284,9 +287,9 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } -// String returns a string representation of Target. +// String returns the canonical string representation of Target. func (t Target) String() string { - return t.URL.String() + return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint() } // Builder creates a resolver that will be used to watch name resolution updates. diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index f845ac95..9dcc9780 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -97,7 +97,7 @@ func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { // finished shutting down, the channel should block on ccr.serializer.Done() // without cc.mu held. 
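Taken together, the new DNS knobs and the existing default-scheme setter are all process-global, init-time configuration. A minimal sketch of wiring them up, assuming a client binary that dials a DNS-resolved target; the target and durations are illustrative:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/dns"
)

func init() {
	// Both setters are documented above as init-time only; they are not
	// thread-safe once RPCs are in flight.
	dns.SetResolvingTimeout(10 * time.Second)
	dns.SetMinResolutionInterval(30 * time.Second)

	// Make grpc.Dial behave like grpc.NewClient, which defaults to "dns"
	// when SetDefaultScheme is never called.
	resolver.SetDefaultScheme("dns")
}

func main() {
	conn, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
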
func (ccr *ccResolverWrapper) close() { - channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver") ccr.mu.Lock() ccr.closed = true ccr.mu.Unlock() @@ -147,7 +147,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { return } ccr.mu.Unlock() - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err) ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) } @@ -194,5 +194,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 82493d23..fdd49e6e 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -962,22 +962,9 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// channelzData is used to store channelz related data for ClientConn, addrConn and Server. -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of - // time.Time since it's more costly to atomically update time.Time variable than int64 variable. - lastCallStartedTime int64 -} - // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 7. +// support package version is 9. // // Older versions are kept for compatibility. // @@ -989,6 +976,7 @@ const ( SupportPackageIsVersion6 = true SupportPackageIsVersion7 = true SupportPackageIsVersion8 = true + SupportPackageIsVersion9 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index a6a11704..89f8e479 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -137,8 +137,7 @@ type Server struct { serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop handlersWG sync.WaitGroup // counts active method handler goroutines - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Server serverWorkerChannel chan func() serverWorkerChannelClose func() @@ -249,11 +248,9 @@ func SharedWriteBuffer(val bool) ServerOption { } // WriteBufferSize determines how much data can be batched before doing a write -// on the wire. The corresponding memory allocation for this buffer will be -// twice the size to keep syscalls low. The default value for this buffer is -// 32KB. Zero or negative values will disable the write buffer such that each -// write will be on underlying connection. 
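The channelz changes in these hunks swap the old Identifier/czData plumbing for typed channelz entities; nothing changes for applications unless they expose the channelz introspection service. A minimal sketch of doing so with the long-standing public registration helper, which is not part of this diff:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	channelzservice "google.golang.org/grpc/channelz/service"
)

func main() {
	s := grpc.NewServer()
	// Serves the channelz API alongside application services, exposing the
	// server and socket metrics maintained by the code in this diff.
	channelzservice.RegisterChannelzServiceToServer(s)

	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(s.Serve(lis))
}
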
-// Note: A Send call may not directly translate to a write. +// on the wire. The default value for this buffer is 32KB. Zero or negative +// values will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.writeBufferSize = s @@ -530,12 +527,22 @@ func ConnectionTimeout(d time.Duration) ServerOption { }) } +// MaxHeaderListSizeServerOption is a ServerOption that sets the max +// (uncompressed) size of header list that the server is prepared to accept. +type MaxHeaderListSizeServerOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) { + so.maxHeaderListSize = &o.MaxHeaderListSize +} + // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // of header list that the server is prepared to accept. func MaxHeaderListSize(s uint32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.maxHeaderListSize = &s - }) + return MaxHeaderListSizeServerOption{ + MaxHeaderListSize: s, + } } // HeaderTableSize returns a ServerOption that sets the size of dynamic @@ -661,7 +668,7 @@ func NewServer(opt ...ServerOption) *Server { services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - czData: new(channelzData), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -675,8 +682,7 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - channelz.Info(logger, s.channelzID, "Server created") + channelz.Info(logger, s.channelz, "Server created") return s } @@ -802,20 +808,13 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID *channelz.Identifier -} - -func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { - return &channelz.SocketInternalMetric{ - SocketOptions: channelz.GetSocketOption(l.Listener), - LocalAddr: l.Listener.Addr(), - } + channelz *channelz.Socket } func (l *listenSocket) Close() error { err := l.Listener.Close() - channelz.RemoveEntry(l.channelzID) - channelz.Info(logger, l.channelzID, "ListenSocket deleted") + channelz.RemoveEntry(l.channelz.ID) + channelz.Info(logger, l.channelz, "ListenSocket deleted") return err } @@ -857,7 +856,16 @@ func (s *Server) Serve(lis net.Listener) error { } }() - ls := &listenSocket{Listener: lis} + ls := &listenSocket{ + Listener: lis, + channelz: channelz.RegisterSocket(&channelz.Socket{ + SocketType: channelz.SocketTypeListen, + Parent: s.channelz, + RefName: lis.Addr().String(), + LocalAddr: lis.Addr(), + SocketOptions: channelz.GetSocketOption(lis)}, + ), + } s.lis[ls] = true defer func() { @@ -869,14 +877,8 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var err error - ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - if err != nil { - s.mu.Unlock() - return err - } s.mu.Unlock() - channelz.Info(logger, ls.channelzID, "ListenSocket created") + channelz.Info(logger, ls.channelz, "ListenSocket created") var tempDelay time.Duration // how long to sleep on accept failure for { @@ -975,7 +977,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: 
s.opts.readBufferSize, SharedWriteBuffer: s.opts.sharedWriteBuffer, - ChannelzParentID: s.channelzID, + ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } @@ -989,7 +991,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. if err != io.EOF { - channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -1121,37 +1123,28 @@ func (s *Server) removeConn(addr string, st transport.ServerTransport) { } } -func (s *Server) channelzMetric() *channelz.ServerInternalMetric { - return &channelz.ServerInternalMetric{ - CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), - } -} - func (s *Server) incrCallsStarted() { - atomic.AddInt64(&s.czData.callsStarted, 1) - atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) + s.channelz.ServerMetrics.CallsStarted.Add(1) + s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (s *Server) incrCallsSucceeded() { - atomic.AddInt64(&s.czData.callsSucceeded, 1) + s.channelz.ServerMetrics.CallsSucceeded.Add(1) } func (s *Server) incrCallsFailed() { - atomic.AddInt64(&s.czData.callsFailed, 1) + s.channelz.ServerMetrics.CallsFailed.Add(1) } func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -1346,7 +1339,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1397,7 +1390,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { @@ -1437,7 +1430,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor } 
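The newly exported MaxHeaderListSizeServerOption struct above changes how the option is represented internally, but the MaxHeaderListSize constructor keeps its signature, so existing callers are unaffected. A minimal usage sketch; the 1 MiB limit is illustrative:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	// Reject requests whose (uncompressed) header list exceeds 1 MiB.
	s := grpc.NewServer(grpc.MaxHeaderListSize(1 << 20))

	lis, err := net.Listen("tcp", "localhost:50052")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(s.Serve(lis))
}
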
if sts, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, sts); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1765,7 +1758,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } if ti != nil { ti.tr.Finish() @@ -1822,7 +1815,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } if ti != nil { ti.tr.Finish() @@ -1894,8 +1887,7 @@ func (s *Server) stop(graceful bool) { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) }) s.mu.Lock() s.closeListenersLocked() // Wait for serving threads to be ready to exit. Only then can we be sure no @@ -2150,14 +2142,6 @@ func Method(ctx context.Context) (string, bool) { return s.Method(), true } -type channelzServer struct { - s *Server -} - -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() -} - // validateSendCompressor returns an error when given compressor name cannot be // handled by the server or the client based on the advertised compressors. func validateSendCompressor(name string, clientCompressors []string) error { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 0df11fc0..9da8fc80 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -25,8 +25,10 @@ import ( "reflect" "time" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -41,11 +43,6 @@ const maxInt = int(^uint(0) >> 1) // https://github.com/grpc/grpc/blob/master/doc/service_config.md type MethodConfig = internalserviceconfig.MethodConfig -type lbConfig struct { - name string - cfg serviceconfig.LoadBalancingConfig -} - // ServiceConfig is provided by the service provider and contains parameters for how // clients that connect to the service should behave. // @@ -55,14 +52,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. This is - // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, - // lbConfig will be used. - LB *string - // lbConfig is the service config's load balancing configuration. If // lbConfig and LB are both present, lbConfig will be used. - lbConfig *lbConfig + lbConfig serviceconfig.LoadBalancingConfig // Methods contains a map for the methods in this service. If there is an // exact match for a method (i.e. 
/service/method) in the map, use the @@ -164,7 +156,7 @@ type jsonMC struct { // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *internalserviceconfig.BalancerConfig + LoadBalancingConfig *json.RawMessage MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig @@ -180,22 +172,37 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, Methods: make(map[string]MethodConfig), retryThrottling: rsc.RetryThrottling, healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if c := rsc.LoadBalancingConfig; c != nil { - sc.lbConfig = &lbConfig{ - name: c.Name, - cfg: c.Config, + c := rsc.LoadBalancingConfig + if c == nil { + name := PickFirstBalancerName + if rsc.LoadBalancingPolicy != nil { + name = *rsc.LoadBalancingPolicy + } + if balancer.Get(name) == nil { + name = PickFirstBalancerName } + cfg := []map[string]any{{name: struct{}{}}} + strCfg, err := json.Marshal(cfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)} + } + r := json.RawMessage(strCfg) + c = &r + } + cfg, err := gracefulswitch.ParseConfig(*c) + if err != nil { + return &serviceconfig.ParseResult{Err: err} } + sc.lbConfig = cfg if rsc.MethodConfig == nil { return &serviceconfig.ParseResult{Config: &sc} @@ -212,7 +219,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -232,13 +239,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 4ab70e2d..fdb0bd65 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -73,9 +73,12 @@ func (*PickerUpdated) isRPCStats() {} type InPayload struct { // Client is true if this InPayload is from client side. Client bool - // Payload is the payload with original type. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the InPayload returns and must be + // copied if needed later. 
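With the parsing rewrite above, a missing loadBalancingConfig now yields an explicit pick_first config (also the fallback when the named loadBalancingPolicy is not registered), and the resulting config is validated through gracefulswitch.ParseConfig. A sketch of exercising this path from the client side, assuming the round_robin policy that ships with grpc-go; the target is illustrative:

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The JSON below has the same shape parseServiceConfig hands to
	// gracefulswitch.ParseConfig; omit it and pick_first is synthesized.
	conn, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
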
Payload any // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. Data []byte // Length is the size of the uncompressed payload data. Does not include any @@ -143,9 +146,12 @@ func (s *InTrailer) isRPCStats() {} type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool - // Payload is the payload with original type. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the OutPayload returns and must be + // copied if needed later. Payload any // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 814e9983..b54563e8 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -516,6 +516,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s + a.ctx = s.Context() a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -655,13 +656,13 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return false, err } hasPushback = true } else if len(sps) > 1 { - channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return false, err } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go new file mode 100644 index 00000000..8b813529 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// ServerStreamingClient represents the client side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingClient[Res any] interface { + Recv() (*Res, error) + ClientStream +} + +// ServerStreamingServer represents the server side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. 
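The doc changes above tighten the InPayload/OutPayload contract: Payload may be reused once HandleRPC returns, and the raw Data field is deprecated. A stats handler should therefore log scalar fields (or copy the payload before returning) rather than retain it. A minimal sketch; the handler name is illustrative:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type payloadSizeHandler struct{}

var _ stats.Handler = payloadSizeHandler{}

func (payloadSizeHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}

func (payloadSizeHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	// Read scalar fields only: per the docs above, Payload must be copied
	// before HandleRPC returns if it is needed later, and Data is deprecated.
	switch p := s.(type) {
	case *stats.InPayload:
		log.Printf("received %d uncompressed bytes", p.Length)
	case *stats.OutPayload:
		log.Printf("sent %d uncompressed bytes", p.Length)
	}
}

func (payloadSizeHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (payloadSizeHandler) HandleConn(context.Context, stats.ConnStats) {}

func main() {
	_ = grpc.NewServer(grpc.StatsHandler(payloadSizeHandler{}))
}
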
+type ServerStreamingServer[Res any] interface { + Send(*Res) error + ServerStream +} + +// ClientStreamingClient represents the client side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingClient[Req any, Res any] interface { + Send(*Req) error + CloseAndRecv() (*Res, error) + ClientStream +} + +// ClientStreamingServer represents the server side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + SendAndClose(*Res) error + ServerStream +} + +// BidiStreamingClient represents the client side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingClient[Req any, Res any] interface { + Send(*Req) error + Recv() (*Res, error) + ClientStream +} + +// BidiStreamingServer represents the server side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + Send(*Res) error + ServerStream +} + +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient, +// and BidiStreamingClient interfaces. It is used in generated code. +type GenericClientStream[Req any, Res any] struct { + ClientStream +} + +var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil) +var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) +var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) + +// Send pushes one message into the stream of requests to be consumed by the +// server. The type of message which can be sent is determined by the Req type +// parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Send(m *Req) error { + return x.ClientStream.SendMsg(m) +} + +// Recv reads one message from the stream of responses generated by the server. +// The type of the message returned is determined by the Res type parameter +// of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) { + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloseAndRecv closes the sending side of the stream, then receives the unary +// response from the server. The type of message which it returns is determined +// by the Res type parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer, +// and BidiStreamingServer interfaces. It is used in generated code. 
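These generic interfaces are what generated code produces when regenerate.sh passes use_generic_streams_experimental=true (see the protoc change earlier in this diff). Handwritten code can program against them too; a sketch with a hypothetical message type standing in for a generated protobuf:

package echostream

import (
	"errors"
	"fmt"
	"io"

	"google.golang.org/grpc"
)

// EchoResponse is a hypothetical message type standing in for a generated
// protobuf response.
type EchoResponse struct{ Msg string }

// drain consumes a server-streaming RPC until EOF. It only needs Recv, so it
// accepts any ServerStreamingClient[EchoResponse], including the
// *grpc.GenericClientStream values that generated stubs return.
func drain(s grpc.ServerStreamingClient[EchoResponse]) error {
	for {
		res, err := s.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(res.Msg)
	}
}
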
+type GenericServerStream[Req any, Res any] struct { + ServerStream +} + +var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil) +var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) +var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) + +// Send pushes one message into the stream of responses to be consumed by the +// client. The type of message which can be sent is determined by the Res +// type parameter of the serverStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Send(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// SendAndClose pushes the unary response to the client. The type of message +// which can be sent is determined by the Res type parameter of the +// clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// Recv reads one message from the stream of requests generated by the client. +// The type of the message returned is determined by the Req type parameter +// of the clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) { + m := new(Req) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 46ad8113..a0b78289 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.62.1" +const Version = "1.64.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100644 index 7a33c215..00000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,190 +0,0 @@ -#!/bin/bash - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -# not makes sure the command passed to it does not exit with a return code of 0. -not() { - # This is required instead of the earlier (! $COMMAND) because subshells and - # pipefail don't work the same on Darwin as in Linux. - ! "$@" -} - -die() { - echo "$@" >&2 - exit 1 -} - -fail_on_output() { - tee /dev/stderr | not read -} - -# Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" -go version - -if [[ "$1" = "-install" ]]; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=25.2 # a.k.a. v4.22.0 in pb.go files. - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/runner/go - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif not which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -# - Check that generated proto files are up to date. 
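On the server side the same scheme applies: a generated server-streaming handler receives a ServerStreamingServer[Res], which GenericServerStream implements. A sketch with hypothetical message types; a generated service would define the real handler signature:

package echostream

import "google.golang.org/grpc"

// Hypothetical message types standing in for generated protobufs.
type EchoRequest struct{ Msg string }
type EchoReply struct{ Msg string }

// Echo streams the request back three times. Send's parameter type is pinned
// by the Res type parameter, so sending the wrong message no longer compiles.
func Echo(req *EchoRequest, stream grpc.ServerStreamingServer[EchoReply]) error {
	for i := 0; i < 3; i++ {
		if err := stream.Send(&EchoReply{Msg: req.Msg}); err != nil {
			return err
		}
	}
	return nil
}
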
-if [[ -z "${VET_SKIP_PROTO}" ]]; then - make proto && git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -if [[ -n "${VET_ONLY_PROTO}" ]]; then - exit 0 -fi - -# - Ensure all source files contain a copyright message. -# (Done in two parts because Darwin "git grep" has broken support for compound -# exclusion matches.) -(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output - -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -not grep 'func Test[^(]' *_test.go -not grep 'func Test[^(]' test/*.go - -# - Check for typos in test function names -git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' -git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' - -# - Do not import x/net/context. -not git grep -l 'x/net/context' -- "*.go" - -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' - -# - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' - -# - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' - -# - Ensure all ptypes proto packages are renamed when importing. -not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" - -# - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" - -# - Ensure all xds proto imports are renamed to *pb or *grpc. -git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' - -misspell -error . - -# - gofmt, goimports, go vet, go mod tidy. -# Perform these checks on each module inside gRPC. -for MOD_FILE in $(find . -name 'go.mod'); do - MOD_DIR=$(dirname ${MOD_FILE}) - pushd ${MOD_DIR} - go vet -all ./... | fail_on_output - gofmt -s -d -l . 2>&1 | fail_on_output - goimports -l . 2>&1 | not grep -vE "\.pb\.go" - - go mod tidy -compat=1.19 - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) - popd -done - -# - Collection of static analysis checks -SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true - -# Error for anything other than checks that need exclusions. -grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" - -# Exclude underscore checks for generated code. -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' - -# Error for duplicate imports not including grpc protos. -grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -channelz/grpc_channelz_v1" -go-control-plane/envoy -grpclb/grpc_lb_v1" -health/grpc_health_v1" -interop/grpc_testing" -orca/v3" -proto/grpc_gcp" -proto/grpc_lookup_v1" -reflection/grpc_reflection_v1" -reflection/grpc_reflection_v1alpha" -XXXXX PleaseIgnoreUnused' - -# Error for any package comments not in generated code. 
-grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" - -# Only ignore the following deprecated types/fields/functions and exclude -# generated code. -grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -XXXXX Protobuf related deprecation errors: -"github.com/golang/protobuf -.pb.go: -grpc_testing_not_regenerate -: ptypes. -proto.RegisterType -XXXXX gRPC internal usage deprecation errors: -"google.golang.org/grpc -: grpc. -: v1alpha. -: v1alphareflectionpb. -BalancerAttributes is deprecated: -CredsBundle is deprecated: -Metadata is deprecated: use Attributes instead. -NewSubConn is deprecated: -OverrideServerName is deprecated: -RemoveSubConn is deprecated: -SecurityVersion is deprecated: -Target is deprecated: Use the Target field in the BuildOptions instead. -UpdateAddresses is deprecated: -UpdateSubConnState is deprecated: -balancer.ErrTransientFailure is deprecated: -grpc/reflection/v1alpha/reflection.proto -XXXXX xDS deprecated fields we support -.ExactMatch -.PrefixMatch -.SafeRegexMatch -.SuffixMatch -GetContainsMatch -GetExactMatch -GetMatchSubjectAltNames -GetPrefixMatch -GetSafeRegexMatch -GetSuffixMatch -GetTlsCertificateCertificateProviderInstance -GetValidationContextCertificateProviderInstance -XXXXX PleaseIgnoreUnused' - -echo SUCCESS diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc deleted file mode 100644 index 730e569b..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc +++ /dev/null @@ -1 +0,0 @@ -'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!­nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿­]xÝ“Ó2l§G•|qRÞ¯ ö2 5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3 ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ 7 \ No newline at end of file diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore b/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore deleted file mode 100644 index 95a85158..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*~ -.*.swp -*.out -*.test -*.pem -*.cov -jose-util/jose-util -jose-util.t.err \ No newline at end of file diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml b/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml deleted file mode 100644 index 391b99a4..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml +++ /dev/null @@ -1,45 +0,0 @@ -language: go - -sudo: false - -matrix: - fast_finish: true - allow_failures: - - go: tip - -go: -- '1.14.x' -- '1.15.x' -- tip - -go_import_path: gopkg.in/square/go-jose.v2 - -before_script: -- export PATH=$HOME/.local/bin:$PATH - -before_install: -# Install encrypted gitcookies to get around bandwidth-limits -# that is causing Travis-CI builds to fail. For more info, see -# https://github.com/golang/go/issues/12933 -- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true -- bash .gitcookies.sh || true -- go get github.com/wadey/gocovmerge -- go get github.com/mattn/goveralls -- go get github.com/stretchr/testify/assert -- go get github.com/stretchr/testify/require -- go get github.com/google/go-cmp/cmp -- go get golang.org/x/tools/cmd/cover || true -- go get code.google.com/p/go.tools/cmd/cover || true -- pip install cram --user - -script: -- go test . 
-v -covermode=count -coverprofile=profile.cov -- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov -- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov -- go test ./json -v # no coverage for forked encoding/json package -- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util -- cd .. - -after_success: -- gocovmerge *.cov */*.cov > merged.coverprofile -- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md b/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md deleted file mode 100644 index 8e6e9132..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md +++ /dev/null @@ -1,84 +0,0 @@ -# v4.0.1 - -## Fixed - - - An attacker could send a JWE containing compressed data that used large - amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. - Those functions now return an error if the decompressed data would exceed - 250kB or 10x the compressed size (whichever is larger). Thanks to - Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) - for reporting. - -# v4.0.0 - -This release makes some breaking changes in order to more thoroughly -address the vulnerabilities discussed in [Three New Attacks Against JSON Web -Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot -token". - -## Changed - - - Limit JWT encryption types (exclude password or public key types) (#78) - - Enforce minimum length for HMAC keys (#85) - - jwt: match any audience in a list, rather than requiring all audiences (#81) - - jwt: accept only Compact Serialization (#75) - - jws: Add expected algorithms for signatures (#74) - - Require specifying expected algorithms for ParseEncrypted, - ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, - jwt.ParseSignedAndEncrypted (#69, #74) - - Usually there is a small, known set of appropriate algorithms for a program - to use and it's a mistake to allow unexpected algorithms. For instance the - "billion hash attack" relies in part on programs accepting the PBES2 - encryption algorithm and doing the necessary work even if they weren't - specifically configured to allow PBES2. - - Revert "Strip padding off base64 strings" (#82) - - The specs require base64url encoding without padding. - - Minimum supported Go version is now 1.21 - -## Added - - - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. - - These allow parsing a specific serialization, as opposed to ParseSigned and - ParseEncrypted, which try to automatically detect which serialization was - provided. It's common to require a specific serialization for a specific - protocol - for instance JWT requires Compact serialization. - -[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf - -# v3.0.3 - -## Fixed - - - Limit decompression output size to prevent a DoS. Backport from v4.0.1. - -# v3.0.2 - -## Fixed - - - DecryptMulti: handle decompression error (#19) - -## Changed - - - jwe/CompactSerialize: improve performance (#67) - - Increase the default number of PBKDF2 iterations to 600k (#48) - - Return the proper algorithm for ECDSA keys (#45) - -## Added - - - Add Thumbprint support for opaque signers (#38) - -# v3.0.1 - -## Fixed - - - Security issue: an attacker specifying a large "p2c" value can cause - JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large - amounts of CPU, causing a DoS. 
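With this vendored v2 copy removed, consumers move to the supported go-jose/v4 line; per the changelog being deleted here, v4's headline change is that parsers require an explicit allow-list of algorithms. A hedged sketch of the v4 call shape (the token is an illustrative placeholder and will not verify):

package main

import (
	"fmt"
	"log"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	token := "eyJhbGciOiJSUzI1NiJ9..." // illustrative compact JWS

	// v4 refuses to parse unless the caller names the algorithms it expects,
	// which forecloses the sign/encrypt confusion and PBES2-style DoS inputs
	// described in the changelog above.
	jws, err := jose.ParseSigned(token, []jose.SignatureAlgorithm{jose.RS256})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(jws.Signatures))
}
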
Thanks to Matt Schwager (@mschwager) for the - disclosure and to Tom Tervoort for originally publishing the category of attack. - https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf - -# v2.6.3 - -## Fixed - - - Limit decompression output size to prevent a DoS. Backport from v4.0.1. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md deleted file mode 100644 index 61b18365..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. - - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE b/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/README.md deleted file mode 100644 index b877f412..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# go-jose v2 - -Version 2 of this library is no longer supported. [Please use v4 -instead](https://pkg.go.dev/github.com/go-jose/go-jose/v4). diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go deleted file mode 100644 index 43f9ce2f..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go +++ /dev/null @@ -1,595 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - "golang.org/x/crypto/ed25519" - josecipher "gopkg.in/go-jose/go-jose.v2/cipher" - "gopkg.in/go-jose/go-jose.v2/json" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -type edEncrypterVerifier struct { - publicKey ed25519.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -type edDecrypterSigner struct { - privateKey ed25519.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. -func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { - if sigAlg != EdDSA { - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &edDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. 
-func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. -func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. -func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. - // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. 
- keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here. - _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the - // random parameter is legacy and ignored, and it can be nil. - // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 - out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
- return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - b, err := json.Marshal(&JSONWebKey{ - Key: &priv.PublicKey, - }) - if err != nil { - return nil, nil, err - } - - headers := rawHeader{ - headerEPK: makeRawMessage(b), - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. -func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - epk, err := headers.getEPK() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid epk header") - } - if epk == nil { - return nil, errors.New("go-jose/go-jose: missing epk header") - } - - publicKey, ok := epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("go-jose/go-jose: invalid epk header") - } - - if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("go-jose/go-jose: invalid public key in epk header") - } - - apuData, err := headers.getAPU() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid apu header") - } - apvData, err := headers.getAPV() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid apv header") - } - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) - } - - var keySize int - - algorithm := headers.getAlgorithm() - switch algorithm { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
- return deriveKey(string(headers.getEncryption()), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(string(algorithm), keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - if alg != EdDSA { - return Signature{}, ErrUnsupportedAlgorithm - } - - sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: sig, - protected: &rawHeader{}, - }, nil -} - -func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - if alg != EdDSA { - return ErrUnsupportedAlgorithm - } - ok := ed25519.Verify(ctx.publicKey, payload, signature) - if !ok { - return errors.New("go-jose/go-jose: ed25519 signature failed to verify") - } - return nil -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes++ - } - - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) 
- - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - if len(signature) != 2*keySize { - return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("go-jose/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go deleted file mode 100644 index 87065a5b..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. -func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. - return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. 
- ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures that the given slice has a capacity of at least n bytes. -// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied. 
-func resize(in []byte, n uint64) (head, tail []byte) { - if uint64(cap(in)) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go deleted file mode 100644 index f62c3bdb..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. -func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go deleted file mode 100644 index 093c6467..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go +++ /dev/null @@ -1,86 +0,0 @@ -/*- - * Copyright 2014 Square Inc. 
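The concat_kdf.go file above implemented the Concat KDF that JWE's ECDH-ES modes use to stretch the raw shared secret into key material. A minimal usage sketch against the (still importable) upstream v2 package, with placeholder inputs and the same length-prefixing the deleted DeriveECDHES performs before calling it:

package main

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() works
	"encoding/binary"
	"fmt"
	"io"

	josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
)

// lengthPrefix mirrors the 32-bit length prefix DeriveECDHES applies to the
// algorithm ID and party info before feeding them to the KDF.
func lengthPrefix(data []byte) []byte {
	out := make([]byte, len(data)+4)
	binary.BigEndian.PutUint32(out, uint32(len(data)))
	copy(out[4:], data)
	return out
}

func main() {
	z := []byte("placeholder shared secret") // normally the raw ECDH Z value

	// suppPubInfo encodes the desired key length in bits, big-endian.
	supPubInfo := make([]byte, 4)
	binary.BigEndian.PutUint32(supPubInfo, 32*8)

	kdf := josecipher.NewConcatKDF(crypto.SHA256, z,
		lengthPrefix([]byte("A256GCM")), // algorithm ID
		lengthPrefix(nil),               // PartyUInfo (apu)
		lengthPrefix(nil),               // PartyVInfo (apv)
		supPubInfo, nil)

	key := make([]byte, 32)
	_, _ = io.ReadFull(kdf, key) // the deleted Read implementation never fails
	fmt.Printf("%x\n", key)
}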
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -// It is an error to call this function with a private/public key that are not on the same -// curve. Callers must ensure that the keys are valid before calling this function. Output -// size may be at most 1<<16 bytes (64 KiB). -func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than or equal to 1<<16") - } - - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { - panic("public key not on same curve as private key") - } - - z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - zBytes := z.Bytes() - - // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from - // the returned byte array. This can lead to a problem where zBytes will be - // shorter than expected which breaks the key derivation. Therefore we must pad - // to the full length of the expected coordinate here before calling the KDF. - octSize := dSize(priv.Curve) - if len(zBytes) != octSize { - zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) - } - - reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - - return key -} - -// dSize returns the size in octets for a coordinate on a elliptic curve. -func dSize(curve elliptic.Curve) int { - order := curve.Params().P - bitLen := order.BitLen() - size := bitLen / 8 - if bitLen%8 != 0 { - size++ - } - return size -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go deleted file mode 100644 index 668358f9..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
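DeriveECDHES, deleted above, is symmetric: each party combines its own private key with the peer's public key, and both must pass identical alg, apu, apv, and size inputs. A small sketch demonstrating the agreement with throwaway P-256 keys and no JWE framing:

package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
)

func main() {
	alice, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	bob, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// Each side derives from its own private key and the peer's public key;
	// the alg string, party info, and output size must match on both ends.
	k1 := josecipher.DeriveECDHES("A128GCM", nil, nil, alice, &bob.PublicKey, 16)
	k2 := josecipher.DeriveECDHES("A128GCM", nil, nil, bob, &alice.PublicKey, 16)

	fmt.Println(bytes.Equal(k1, k2)) // true
}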
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. -func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("go-jose/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go deleted file mode 100644 index 0ae2e5eb..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go +++ /dev/null @@ -1,548 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
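The key_wrap.go file above carried the RFC 3394 AES key wrap used by the AxxxKW and ECDH-ES+AxxxKW modes. A short check against the RFC's section 4.1 test vector; the expected ciphertext in the comment is quoted from the RFC and worth re-verifying there:

package main

import (
	"crypto/aes"
	"encoding/hex"
	"fmt"

	josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
)

func main() {
	// 128-bit KEK and 128-bit key data from RFC 3394 section 4.1.
	kek, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f")
	cek, _ := hex.DecodeString("00112233445566778899aabbccddeeff")

	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}

	wrapped, err := josecipher.KeyWrap(block, cek)
	if err != nil {
		panic(err)
	}
	// The RFC lists 1fa68b0a8112b447aef34bd8fb5a7b829d3e862371d2cfe5 as the
	// expected output for this vector.
	fmt.Printf("%x\n", wrapped)

	unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", unwrapped) // round-trips back to the original CEK
}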
- */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "errors" - "fmt" - "reflect" - - "gopkg.in/go-jose/go-jose.v2/json" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. -type Encrypter interface { - Encrypt(plaintext []byte) (*JSONWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) - Options() EncrypterOptions -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator - extraHeaders map[HeaderKey]interface{} -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// EncrypterOptions represents options that can be set on new encrypters. -type EncrypterOptions struct { - Compression CompressionAlgorithm - - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. - ExtraHeaders map[HeaderKey]interface{} -} - -// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. -func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { - if eo.ExtraHeaders == nil { - eo.ExtraHeaders = map[HeaderKey]interface{}{} - } - eo.ExtraHeaders[k] = v - return eo -} - -// WithContentType adds a content type ("cty") header and returns the updated -// EncrypterOptions. -func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderContentType, contentType) -} - -// WithType adds a type ("typ") header and returns the updated EncrypterOptions. -func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderType, typ) -} - -// Recipient represents an algorithm/key to encrypt messages to. -// -// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used -// on the password-based encryption algorithms PBES2-HS256+A128KW, -// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe -// default of 100000 will be used for the count and a 128-bit random salt will -// be generated. 
-type Recipient struct { - Algorithm KeyAlgorithm - Key interface{} - KeyID string - PBES2Count int - PBES2Salt []byte -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := rcpt.Key.(type) { - case JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case *JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case OpaqueKeyEncrypter: - keyID, rawKey = encryptionKey.KeyID(), encryptionKey - default: - rawKey = encryptionKey - } - - switch rcpt.Algorithm { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - if encrypter.cipher.keySize() != len(rawKey.([]byte)) { - return nil, ErrInvalidKeySize - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.addRecipient(rcpt) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - if rcpts == nil || len(rcpts) == 0 { - return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - for _, recipient := range rcpts { - err := encrypter.addRecipient(recipient) - if err != nil { - return nil, err - } - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { - var recipientInfo recipientKeyInfo - - switch recipient.Algorithm { - case DIRECT, ECDH_ES: - return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) 
- } - - recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) - if recipient.KeyID != "" { - recipientInfo.keyID = recipient.KeyID - } - - switch recipient.Algorithm { - case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: - if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { - sr.p2c = recipient.PBES2Count - sr.p2s = recipient.PBES2Salt - } - } - - if err == nil { - ctx.recipients = append(ctx.recipients, recipientInfo) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case string: - return newSymmetricRecipient(alg, []byte(encryptionKey)) - case *JSONWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - recipient.keyID = encryptionKey.KeyID - return recipient, err - } - if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { - return newOpaqueKeyEncrypter(alg, encrypter) - } - return recipientKeyInfo{}, ErrUnsupportedKeyType -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case string: - return &symmetricKeyCipher{ - key: []byte(decryptionKey), - }, nil - case JSONWebKey: - return newDecrypter(decryptionKey.Key) - case *JSONWebKey: - return newDecrypter(decryptionKey.Key) - } - if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { - return &opaqueKeyDecrypter{decrypter: okd}, nil - } - return nil, ErrUnsupportedKeyType -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { - obj := &JSONWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{} - err := obj.protected.set(headerEncryption, ctx.contentAlg) - if err != nil { - return nil, err - } - - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - err = recipient.header.set(headerAlgorithm, info.keyAlg) - if err != nil { - return nil, err - } - - if info.keyID != "" { - err = recipient.header.set(headerKeyID, info.keyID) - if err != nil { - return nil, err - } - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. 
- obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - err = obj.protected.set(headerCompression, ctx.compressionAlg) - if err != nil { - return nil, err - } - } - - for k, v := range ctx.extraHeaders { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - (*obj.protected)[k] = makeRawMessage(b) - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -func (ctx *genericEncrypter) Options() EncrypterOptions { - return EncrypterOptions{ - Compression: ctx.compressionAlg, - ExtraHeaders: ctx.extraHeaders, - } -} - -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient -// decryption use DecryptMulti instead. -// -// Automatically decompresses plaintext, but returns an error if the decompressed -// data would be >250kB or >10x the size of the compressed data, whichever is larger. -func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(obj.recipients) > 1 { - return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") - } - - critical, err := headers.getCritical() - if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.getEncryption()) - if cipher == nil { - return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - recipient := obj.recipients[0] - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - } - - return plaintext, err -} - -// DecryptMulti decrypts and validates the object and returns the plaintexts, -// with support for multiple recipients. It returns the index of the recipient -// for which the decryption was successful, the merged headers for that recipient, -// and the plaintext. -// -// Automatically decompresses plaintext, but returns an error if the decompressed -// data would be >250kB or >3x the size of the compressed data, whichever is larger. 
-func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - - critical, err := globalHeaders.getCritical() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return -1, Header{}, nil, err - } - - encryption := globalHeaders.getEncryption() - cipher := getContentCipher(encryption) - if cipher == nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - index := -1 - var plaintext []byte - var headers rawHeader - - for i, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - index = i - headers = recipientHeaders - break - } - } - } - - if plaintext == nil || err != nil { - return -1, Header{}, nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - } - - sanitized, err := headers.sanitized() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) - } - - return index, sanitized, plaintext, err -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/doc.go b/vendor/gopkg.in/go-jose/go-jose.v2/doc.go deleted file mode 100644 index dd1387f3..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. It implements encryption and signing based on -the JSON Web Encryption and JSON Web Signature standards, with optional JSON -Web Token support available in a sub-package. The library supports both the -compact and full serialization formats, and has optional support for multiple -recipients. - -*/ -package jose diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go deleted file mode 100644 index 636f6c8f..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go +++ /dev/null @@ -1,198 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
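The crypter.go listing above is the entire multi-recipient JWE surface this change drops from the vendor tree. A sketch of how a consumer exercised NewMultiEncrypter and DecryptMulti, with throwaway RSA keys and illustrative output values:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	alice, _ := rsa.GenerateKey(rand.Reader, 2048)
	bob, _ := rsa.GenerateKey(rand.Reader, 2048)

	enc, err := jose.NewMultiEncrypter(jose.A128GCM, []jose.Recipient{
		{Algorithm: jose.RSA_OAEP_256, Key: &alice.PublicKey, KeyID: "alice"},
		{Algorithm: jose.RSA_OAEP_256, Key: &bob.PublicKey, KeyID: "bob"},
	}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("attack at dawn"))
	if err != nil {
		panic(err)
	}

	// Multi-recipient JWEs need the full JSON serialization; the compact
	// form only carries a single recipient.
	parsed, err := jose.ParseEncrypted(obj.FullSerialize())
	if err != nil {
		panic(err)
	}

	// DecryptMulti reports which recipient slot matched the supplied key,
	// along with that recipient's merged headers.
	idx, hdr, plaintext, err := parsed.DecryptMulti(bob)
	if err != nil {
		panic(err)
	}
	fmt.Println(idx, hdr.KeyID, string(plaintext)) // 1 bob attack at dawn
}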
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "fmt" - "io" - "math/big" - "strings" - "unicode" - - "gopkg.in/go-jose/go-jose.v2/json" -) - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/go-jose/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - buf := strings.Builder{} - buf.Grow(len(data)) - for _, r := range data { - if !unicode.IsSpace(r) { - buf.WriteRune(r) - } - } - return buf.String() -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// deflate compresses the input. -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// inflate decompresses the input. -// -// Errors if the decompressed data would be >250kB or >10x the size of the -// compressed data, whichever is larger. 
-func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - maxCompressedSize := 10 * int64(len(input)) - if maxCompressedSize < 250000 { - maxCompressedSize = 250000 - } - - limit := maxCompressedSize + 1 - n, err := io.CopyN(output, reader, limit) - if err != nil && err != io.EOF { - return nil, err - } - if n == limit { - return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. -type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := json.Unmarshal(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64.RawURLEncoding.DecodeString(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64.RawURLEncoding.EncodeToString(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE b/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
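The inflate helper above enforces the decompression bound that the Decrypt and DecryptMulti doc comments advertise. A self-contained restatement of the guard using only the standard library, kept deliberately close to the deleted code:

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
)

// boundedInflate mirrors the guard in the deleted inflate(): refuse to emit
// more than max(250000, 10*len(input)) bytes of output, which defeats
// decompression bombs hidden in a JWE "zip" payload.
func boundedInflate(input []byte) ([]byte, error) {
	reader := flate.NewReader(bytes.NewReader(input))
	defer reader.Close()

	limit := 10 * int64(len(input))
	if limit < 250000 {
		limit = 250000
	}

	output := new(bytes.Buffer)
	// Copy one byte past the limit; hitting it means the payload is too big.
	n, err := io.CopyN(output, reader, limit+1)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if n == limit+1 {
		return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", limit)
	}
	return output.Bytes(), nil
}

func main() {
	// Compress a small payload, then inflate it under the size guard.
	var compressed bytes.Buffer
	w, _ := flate.NewWriter(&compressed, 1)
	_, _ = w.Write([]byte("hello, jose"))
	_ = w.Close()

	out, err := boundedInflate(compressed.Bytes())
	fmt.Println(string(out), err) // hello, jose <nil>
}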
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md deleted file mode 100644 index 86de5e55..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go b/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go deleted file mode 100644 index 4dbc4146..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go +++ /dev/null @@ -1,1217 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. 
-// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -type NumberUnmarshalType int - -const ( - // unmarshal a JSON number into an interface{} as a float64 - UnmarshalFloat NumberUnmarshalType = iota - // unmarshal a JSON number into an interface{} as a `json.Number` - UnmarshalJSONNumber - // unmarshal a JSON number into an interface{} as a int64 - // if value is an integer otherwise float64 - UnmarshalIntOrFloat -) - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - numberType NumberUnmarshalType -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. 
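The `Number` type and the `UnmarshalFloat`/`UnmarshalJSONNumber`/`UnmarshalIntOrFloat` modes deleted above control how numbers land in `interface{}` values. A minimal sketch of the same idea, assuming the standard library's `Decoder.UseNumber` as the stdlib analogue of `UnmarshalJSONNumber`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Decoder.UseNumber is the stdlib counterpart of the fork's
	// UnmarshalJSONNumber mode: numbers decoded into interface{} become
	// json.Number literals instead of lossy float64s.
	dec := json.NewDecoder(strings.NewReader(`{"id": 9007199254740993}`))
	dec.UseNumber()

	var m map[string]interface{}
	if err := dec.Decode(&m); err != nil {
		panic(err)
	}
	n := m["id"].(json.Number)
	i, err := n.Int64()
	if err != nil {
		panic(err)
	}
	fmt.Println(i) // 9007199254740993, exact; a float64 would round it
}
```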
-func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). 
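The duplicate-key check in the deleted `object()` above is the fork's second documented safety change: it fails fast with `json: duplicate key ... in object` instead of guessing at the caller's intent. For contrast, a sketch (illustrative, not the fork's code) of the standard library's silent last-key-wins behavior:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The standard library keeps the last value for a repeated key with
	// no error; the deleted fork rejected the document outright.
	var m map[string]interface{}
	if err := json.Unmarshal([]byte(`{"kid":"a","kid":"b"}`), &m); err != nil {
		panic(err)
	}
	fmt.Println(m["kid"]) // "b" under the stdlib's last-key-wins rule
}
```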
-func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64, int64 or a Number -// depending on d.numberDecodeType. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - switch d.numberType { - - case UnmarshalJSONNumber: - return Number(s), nil - case UnmarshalIntOrFloat: - v, err := strconv.ParseInt(s, 10, 64) - if err == nil { - return v, nil - } - - // tries to parse integer number in scientific notation - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - - // if it has no decimal value use int64 - if fi, fd := math.Modf(f); fd == 0.0 { - return int64(fi), nil - } - return f, nil - default: - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil - } - -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), 
int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. 
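The number cases of `literalStore` deleted above save an `UnmarshalTypeError` whenever a literal overflows the target's bit width rather than truncating it. The standard library behaves the same way, which this sketch (with a made-up struct) demonstrates:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A made-up struct whose field is too narrow for the input number.
	var v struct {
		N int8 `json:"n"`
	}
	// 300 overflows int8, so Unmarshal reports an *UnmarshalTypeError
	// rather than silently truncating the value.
	err := json.Unmarshal([]byte(`{"n": 300}`), &v)
	fmt.Println(err) // json: cannot unmarshal number 300 into ... type int8
}
```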
-func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. 
- if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go b/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go deleted file mode 100644 index 1dae8bb7..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go +++ /dev/null @@ -1,1197 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method. -// The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. 
Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. 
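The struct-tag rules documented in the deleted `Marshal` comment above (`-`, `omitempty`, `,string`, custom key names) mirror the standard library's. A short sketch, using a hypothetical `claims` struct, of how each option affects the output:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// claims is a hypothetical struct exercising each documented tag option.
type claims struct {
	Issuer string `json:"iss"`             // renamed key
	Secret string `json:"-"`               // never emitted
	Scope  string `json:"scope,omitempty"` // dropped while empty
	Expiry int64  `json:",string"`         // default key, string-encoded
}

func main() {
	b, err := json.Marshal(claims{Issuer: "macadam", Expiry: 1700000000})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"iss":"macadam","Expiry":"1700000000"}
}
```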
-// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML