From 0bda0171020022c9bcef08f7ea7540be2441942f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miloslav=20Trma=C4=8D?= Date: Thu, 28 Nov 2024 21:39:52 +0100 Subject: [PATCH] Test with current c/storage and c/image MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Miloslav Trmač --- go.mod | 16 +- go.sum | 28 +- .../stargz-snapshotter/estargz/testutil.go | 15 +- .../containers/image/v5/copy/single.go | 24 +- .../image/v5/directory/directory_dest.go | 1 + .../image/v5/docker/distribution_error.go | 8 +- .../image/v5/docker/docker_image_dest.go | 1 + .../image/v5/docker/docker_image_src.go | 4 + .../image/v5/docker/internal/tarfile/dest.go | 1 + .../stubs/original_oci_config.go | 16 + .../v5/internal/imagedestination/wrapper.go | 1 + .../internal/manifest/docker_schema2_list.go | 32 +- .../image/v5/internal/private/private.go | 7 + .../image/v5/oci/archive/oci_dest.go | 9 + .../image/v5/oci/internal/oci_util.go | 2 +- .../image/v5/oci/layout/oci_dest.go | 1 + .../image/v5/openshift/openshift_dest.go | 9 + .../containers/image/v5/pkg/blobcache/dest.go | 9 + .../image/v5/signature/fulcio_cert_stub.go | 2 +- .../v5/signature/internal/rekor_set_stub.go | 2 +- .../image/v5/storage/storage_dest.go | 281 ++++++++++++------ .../image/v5/storage/storage_reference.go | 4 +- .../image/v5/storage/storage_src.go | 22 +- .../containers/image/v5/version/version.go | 4 +- .../github.com/containers/storage/.cirrus.yml | 4 +- vendor/github.com/containers/storage/Makefile | 2 +- vendor/github.com/containers/storage/VERSION | 2 +- .../containers/storage/pkg/archive/archive.go | 14 +- .../storage/pkg/archive/xattr_freebsd.go | 38 +++ .../storage/pkg/archive/xattr_unix.go | 13 + .../storage/pkg/archive/xattr_unsupported.go | 12 + .../storage/pkg/chunked/compression_linux.go | 143 +++++---- .../storage/pkg/chunked/storage_linux.go | 128 ++++++-- .../storage/pkg/system/extattr_freebsd.go | 96 ++++++ .../storage/pkg/system/extattr_unsupported.go | 24 ++ .../storage/pkg/system/stat_netbsd.go | 13 + .../registry/client/auth/challenge/addr.go | 27 -- .../client/auth/challenge/authchallenge.go | 237 --------------- vendor/github.com/proglottis/gpgme/.gitignore | 2 + .../github.com/proglottis/gpgme/callbacks.go | 42 --- vendor/github.com/proglottis/gpgme/data.go | 119 +++++--- vendor/github.com/proglottis/gpgme/go_gpgme.c | 20 +- vendor/github.com/proglottis/gpgme/go_gpgme.h | 4 +- vendor/github.com/proglottis/gpgme/gpgme.go | 78 +++-- .../proglottis/gpgme/unset_agent_info.go | 1 + .../sigstore/pkg/oauthflow/interactive.go | 2 +- .../sylabs/sif/v2/pkg/sif/create.go | 12 +- vendor/golang.org/x/oauth2/README.md | 15 +- vendor/modules.txt | 25 +- 49 files changed, 887 insertions(+), 685 deletions(-) create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/xattr_freebsd.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/xattr_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/xattr_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go create mode 100644 vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_netbsd.go delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go delete mode 100644 
vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go delete mode 100644 vendor/github.com/proglottis/gpgme/callbacks.go diff --git a/go.mod b/go.mod index 2aa308299f..b6707f2654 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/containers/podman/v5 // Warning: if there is a "toolchain" directive anywhere in this file (and most of the // time there shouldn't be), its version must be an exact match to the "go" directive. -go 1.22.6 +go 1.22.8 require ( github.com/BurntSushi/toml v1.4.0 @@ -17,11 +17,11 @@ require ( github.com/containers/common v0.61.1-0.20241125172552-a801fac4edc0 github.com/containers/conmon v2.0.20+incompatible github.com/containers/gvisor-tap-vsock v0.8.0 - github.com/containers/image/v5 v5.33.0 + github.com/containers/image/v5 v5.33.1-0.20241128201909-5263462aa97a github.com/containers/libhvee v0.9.0 github.com/containers/ocicrypt v1.2.0 github.com/containers/psgo v1.9.0 - github.com/containers/storage v1.56.0 + github.com/containers/storage v1.56.1-0.20241127184055-88a8fd8ddb47 github.com/containers/winquit v1.1.0 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 github.com/crc-org/crc/v2 v2.44.0 @@ -102,7 +102,7 @@ require ( github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.2 // indirect github.com/containerd/typeurl/v2 v2.2.0 // indirect github.com/containernetworking/cni v1.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect @@ -188,18 +188,18 @@ require ( github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/proglottis/gpgme v0.1.3 // indirect + github.com/proglottis/gpgme v0.1.4 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sigstore/fulcio v1.6.4 // indirect github.com/sigstore/rekor v1.3.6 // indirect - github.com/sigstore/sigstore v1.8.9 // indirect + github.com/sigstore/sigstore v1.8.10 // indirect github.com/skeema/knownhosts v1.3.0 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect - github.com/sylabs/sif/v2 v2.19.1 // indirect + github.com/sylabs/sif/v2 v2.20.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect @@ -221,7 +221,7 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/arch v0.7.0 // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/go.sum b/go.sum index 9f0d847d68..2c6241141d 100644 --- a/go.sum +++ b/go.sum @@ -71,8 +71,8 @@ github.com/containerd/log v0.1.0 
h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= -github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= +github.com/containerd/stargz-snapshotter/estargz v0.16.2 h1:DMcqm1rd1ak2hFghkyHlquacSo+zRe+cysRR3CmSpGk= +github.com/containerd/stargz-snapshotter/estargz v0.16.2/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM= @@ -87,8 +87,8 @@ github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6J github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/gvisor-tap-vsock v0.8.0 h1:Z8ZEWb+Lio0d+lXexONdUWT4rm9lF91vH0g3ARnMy7o= github.com/containers/gvisor-tap-vsock v0.8.0/go.mod h1:LVwnMiNvhxyGfhaMEQcXKJhNnN4h8woB9U3wf8rYOPc= -github.com/containers/image/v5 v5.33.0 h1:6oPEFwTurf7pDTGw7TghqGs8K0+OvPtY/UyzU0B2DfE= -github.com/containers/image/v5 v5.33.0/go.mod h1:T7HpASmvnp2H1u4cyckMvCzLuYgpD18dSmabSw0AcHk= +github.com/containers/image/v5 v5.33.1-0.20241128201909-5263462aa97a h1:W+jpGG1Rkr6LVqO8/fEZ7fu15U2Kde3EQBAi+ol0+TY= +github.com/containers/image/v5 v5.33.1-0.20241128201909-5263462aa97a/go.mod h1:52xoKScvTXWo0qyt0jWxMYTrOIgWmWBg8noBE0vsFNI= github.com/containers/libhvee v0.9.0 h1:5UxJMka1lDfxTeITA25Pd8QVVttJAG43eQS1Getw1tc= github.com/containers/libhvee v0.9.0/go.mod h1:p44VJd8jMIx3SRN1eM6PxfCEwXQE0lJ0dQppCAlzjPQ= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -99,8 +99,8 @@ github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sir github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U= github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g= github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A= -github.com/containers/storage v1.56.0 h1:DZ9KSkj6M2tvj/4bBoaJu3QDHRl35BwsZ4kmLJS97ZI= -github.com/containers/storage v1.56.0/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk= +github.com/containers/storage v1.56.1-0.20241127184055-88a8fd8ddb47 h1:etz00TnS2HpeTxPz7EzIX1AnKaJHIWxWBZD4d3F5q/4= +github.com/containers/storage v1.56.1-0.20241127184055-88a8fd8ddb47/go.mod h1:OK1gkAT1DIuU68om4cRbLJcr48N3p0T1+wp19F/2XAk= github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE= github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8= github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= @@ -427,8 +427,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= 
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= -github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= +github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -462,8 +462,8 @@ github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs= github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= -github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= +github.com/sigstore/sigstore v1.8.10 h1:r4t+TYzJlG9JdFxMy+um9GZhZ2N1hBTyTex0AHEZxFs= +github.com/sigstore/sigstore v1.8.10/go.mod h1:BekjqxS5ZtHNJC4u3Q3Stvfx2eyisbW/lUZzmPU2u4A= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= @@ -491,8 +491,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0= -github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E= +github.com/sylabs/sif/v2 v2.20.0 h1:RfDHEltUrchZbp/XGcWaw3nRSbufoNWqvwmf91/Q2gI= +github.com/sylabs/sif/v2 v2.20.0/go.mod h1:z6dq3B7QXK0pD71n15kAapven+gE+PZAIPOewBTNDpU= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= @@ -612,8 +612,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 0ca6fd75f2..ba650b4d1d 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -26,12 +26,13 @@ import ( "archive/tar" "bytes" "compress/gzip" + "crypto/rand" "crypto/sha256" "encoding/json" "errors" "fmt" "io" - "math/rand" + "math/big" "os" "path/filepath" "reflect" @@ -45,10 +46,6 @@ import ( digest "github.com/opencontainers/go-digest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression @@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } if sampleEntry == nil { t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target") + return } if targetEntry == nil { t.Fatalf("rewrite target not found") + return } targetEntry.Offset = sampleEntry.Offset }, @@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX func randomContents(n int) string { b := make([]rune, n) for i := range b { - b[i] = runes[rand.Intn(len(runes))] + bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes)))) + if err != nil { + panic(err) + } + b[i] = runes[int(bi.Int64())] } return string(b) } diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index e008c7e86f..7bc44d1f83 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -109,7 +109,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar } } - if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil { + if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil { return copySingleImageResult{}, err } @@ -316,12 +316,15 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar return res, nil } -// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary. -func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { +// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary. +func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error { + ociConfig, configErr := src.OCIConfig(ctx) + // Do not fail on configErr here, this might be an artifact + // and maybe nothing needs this to be a container image and to process the config. 
+ if dest.MustMatchRuntimeOS() { - c, err := src.OCIConfig(ctx) - if err != nil { - return fmt.Errorf("parsing image configuration: %w", err) + if configErr != nil { + return fmt.Errorf("parsing image configuration: %w", configErr) } wantedPlatforms := platform.WantedPlatforms(sys) @@ -331,7 +334,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst // For a transitional period, this might trigger warnings because the Variant // field was added to OCI config only recently. If this turns out to be too noisy, // revert this check to only look for (OS, Architecture). - if platform.MatchesPlatform(c.Platform, wantedPlatform) { + if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) { match = true break } @@ -339,9 +342,14 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst } if !match { logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", - c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", ")) + ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", ")) } } + + if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil { + return err + } + return nil } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 000c8987d9..6e88aa01d1 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don' type dirImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.AlwaysSupportsSignatures diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go index 0a0064576a..622d21fb1c 100644 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -24,7 +24,6 @@ import ( "slices" "github.com/docker/distribution/registry/api/errcode" - dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" ) // errNoErrorsInBody is returned when an HTTP response body parses to an empty @@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error { // UnexpectedHTTPStatusError returned for response code outside of expected // range. 
func handleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { + switch { + case resp.StatusCode == http.StatusUnauthorized: // Check for OAuth errors within the `WWW-Authenticate` header first // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range dockerChallenge.ResponseChallenges(resp) { + for _, c := range parseAuthHeader(resp.Header) { if c.Scheme == "bearer" { var err errcode.Error // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 @@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error { return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) } } + fallthrough + case resp.StatusCode >= 400 && resp.StatusCode < 500: err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index f5e2bb61e8..3ac43cf9fc 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -41,6 +41,7 @@ import ( type dockerImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize ref dockerReference diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 6e44ce0960..41ab9bfd16 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } return } + if parts >= len(chunks) { + errs <- errors.New("too many parts returned by the server") + break + } s := signalCloseReader{ closed: make(chan struct{}), stream: p, diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index f142346807..8f5ba7e36e 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -24,6 +24,7 @@ import ( type Destination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.NoSignaturesInitialize diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go new file mode 100644 index 0000000000..c4536e933b --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go @@ -0,0 +1,16 @@ +package stubs + +import ( + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing. +type IgnoresOriginalOCIConfig struct{} + +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). 
+// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 0fe902e31f..b2462a3bc1 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -14,6 +14,7 @@ import ( // wrapped provides the private.ImageDestination operations // for a destination that only implements types.ImageDestination type wrapped struct { + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize types.ImageDestination diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 07922ceceb..4c1589ef02 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. -func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { +func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { editInstances := []ListEdit{} for i, instance := range updates { editInstances = append(editInstances, ListEdit{ - UpdateOldDigest: index.Manifests[i].Digest, + UpdateOldDigest: list.Manifests[i].Digest, UpdateDigest: instance.Digest, UpdateSize: instance.Size, UpdateMediaType: instance.MediaType, ListOperation: ListOpUpdate}) } - return index.editInstances(editInstances) + return list.editInstances(editInstances) } -func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { +func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error { addedEntries := []Schema2ManifestDescriptor{} for i, editInstance := range editInstances { switch editInstance.ListOperation { @@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { if err := editInstance.UpdateDigest.Validate(); err != nil { return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) } - targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool { + targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool { return m.Digest == editInstance.UpdateOldDigest }) if targetIndex == -1 { return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) } - index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + list.Manifests[targetIndex].Digest = editInstance.UpdateDigest if editInstance.UpdateSize < 0 { return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) } - index.Manifests[targetIndex].Size = editInstance.UpdateSize + list.Manifests[targetIndex].Size = editInstance.UpdateSize if editInstance.UpdateMediaType == "" { - return fmt.Errorf("update %d of %d passed to 
Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType) } - index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType case ListOpAdd: if editInstance.AddPlatform == nil { // Should we create a struct with empty fields instead? @@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { if len(addedEntries) != 0 { // slices.Clone() here to ensure a private backing array; // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. - index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) + list.Manifests = append(slices.Clone(list.Manifests), addedEntries...) } return nil } -func (index *Schema2List) EditInstances(editInstances []ListEdit) error { - return index.editInstances(editInstances) +func (list *Schema2List) EditInstances(editInstances []ListEdit) error { + return list.editInstances(editInstances) } func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { @@ -280,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List { return &Schema2List{*public} } -func (index *Schema2List) CloneInternal() List { - return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic)) +func (list *Schema2List) CloneInternal() List { + return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic)) } -func (index *Schema2List) Clone() ListPublic { - return index.CloneInternal() +func (list *Schema2List) Clone() ListPublic { + return list.CloneInternal() } // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 4247a8db77..afd425483d 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -10,6 +10,7 @@ import ( compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) // ImageSourceInternalOnly is the part of private.ImageSource that is not @@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface { // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures // on unsupported formats. + // NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, + // or an error obtaining that value (e.g. if the image is an artifact and not a container image). + // The destination can use it in its TryReusingBlob/PutBlob implementations + // (otherwise it only obtains the final config after all layers are written). + NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. 
// inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 1d1e26c84f..54b2e50566 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -14,6 +14,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool { return d.unpackedDest.SupportsPutBlobPartial() } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr) +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go index 53827b11af..72af74e251 100644 --- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go +++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go @@ -98,7 +98,7 @@ func ValidateScope(scope string) error { } func validateScopeWindows(scope string) error { - matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope) if !matched { return fmt.Errorf("Invalid scope '%s'. 
Must be an absolute path", scope) } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 85374cecf0..cb2768d745 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -27,6 +27,7 @@ import ( type ociImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.NoSignaturesInitialize diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 61f69e93fd..bd5e77aa8f 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -22,6 +22,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) type openshiftImageDestination struct { @@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool { return d.docker.SupportsPutBlobPartial() } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return d.docker.NoteOriginalOCIConfig(ociConfig, configErr) +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go index f7103d13eb..54e3d294b9 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go @@ -19,6 +19,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -138,6 +139,14 @@ func (d *blobCacheDestination) HasThreadSafePutBlob() bool { return d.destination.HasThreadSafePutBlob() } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (d *blobCacheDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return d.destination.NoteOriginalOCIConfig(ociConfig, configErr) +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. 
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go index c0b48dafa7..c0dc7b232b 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go @@ -20,7 +20,7 @@ func (f *fulcioTrustRoot) validate() error { return errors.New("fulcio disabled at compile-time") } -func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, untrustedPayloadBytes []byte) (crypto.PublicKey, error) { return nil, errors.New("fulcio disabled at compile-time") diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go index 7c121cc2ee..2b20bbed2e 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go @@ -10,6 +10,6 @@ import ( // VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. // Returns bundle upload time on success. -func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { +func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time") } diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 51fac71e44..bad11706b6 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -17,11 +17,13 @@ import ( "sync/atomic" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" + srcImpl "github.com/containers/image/v5/internal/imagesource/impl" + srcStubs "github.com/containers/image/v5/internal/imagesource/stubs" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/manifest" @@ -57,8 +59,9 @@ type storageImageDestination struct { imageRef storageReference directory string // Temporary directory where we store blobs until Commit() time nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - manifestDigest digest.Digest // Valid if len(manifest) != 0 + 
manifest []byte // (Per-instance) manifest contents, or nil if not yet known. + manifestMIMEType string // Valid if manifest != nil + manifestDigest digest.Digest // Valid if manifest != nil untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet signatures []byte // Signature contents, temporary signatureses map[digest.Digest][]byte // Instance signature contents, temporary @@ -121,6 +124,9 @@ type storageImageDestinationLockProtected struct { filenames map[digest.Digest]string // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set. fileSizes map[digest.Digest]int64 + + // Config + configDigest digest.Digest // "" if N/A or not known yet. } // addedLayerInfo records data about a layer to use in this image. @@ -201,6 +207,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1))) } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + if configErr != nil { + return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr) + } + s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig) + return nil +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. @@ -214,7 +232,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream return info, err } - if options.IsConfig || options.LayerIndex == nil { + if options.IsConfig { + s.lock.Lock() + defer s.lock.Unlock() + if s.lockProtected.configDigest != "" { + return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q", + s.lockProtected.configDigest.String(), info.Digest.String()) + } + s.lockProtected.configDigest = info.Digest + return info, nil + } + if options.LayerIndex == nil { return info, nil } @@ -351,35 +379,41 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces blobDigest := srcInfo.Digest s.lock.Lock() - if out.UncompressedDigest != "" { - s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest - if out.TOCDigest != "" { - options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest) - } - // Don’t set indexToTOCDigest on this path: - // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID. - // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. - // That TOC is quite unlikely to match any other TOC value. - - // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is - // responsible for ensuring blobDigest has been validated. 
- if out.CompressedDigest != blobDigest { - return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q", - out.CompressedDigest, blobDigest) - } - // So, record also information about blobDigest, that might benefit reuse. - // We trust PrepareStagedLayer to validate or create both values correctly. - s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest - options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) - } else { - // Use diffID for layer identity if it is known. - if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" { - s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest + if err := func() error { // A scope for defer + defer s.lock.Unlock() + + if out.UncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest + if out.TOCDigest != "" { + options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest) + } + // Don’t set indexToTOCDigest on this path: + // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID. + // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. + // That TOC is quite unlikely to match any other TOC value. + + // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is + // responsible for ensuring blobDigest has been validated. + if out.CompressedDigest != blobDigest { + return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q", + out.CompressedDigest, blobDigest) + } + // So, record also information about blobDigest, that might benefit reuse. + // We trust PrepareStagedLayer to validate or create both values correctly. + s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) + } else { + // Use diffID for layer identity if it is known. + if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest + } + s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest } - s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest + s.lockProtected.diffOutputs[options.LayerIndex] = out + return nil + }(); err != nil { + return private.UploadedBlob{}, err } - s.lockProtected.diffOutputs[options.LayerIndex] = out - s.lock.Unlock() succeeded = true return private.UploadedBlob{ @@ -975,9 +1009,11 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest) } var trustedOriginalDigest digest.Digest // For storage.LayerOptions + var trustedOriginalSize *int64 if gotFilename { // The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest. trustedOriginalDigest = trusted.blobDigest + trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost. } else { // Try to find the layer with contents matching the data we use. 
var layer *storage.Layer // = nil @@ -1032,22 +1068,36 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D if trusted.diffID == "" && layer.UncompressedDigest != "" { trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now. } - // The stream we have is uncompressed, and it matches trusted.diffID (if known). - // - // FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse. - // But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created - // layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream). + + // Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse. + // But we don’t want to just use the size from the manifest if we never saw the compressed blob, + // so that we don’t propagate mistakes / attacks. // - // We can legitimately set storage.LayerOptions.OriginalDigest to "", - // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID. - // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation. - trustedOriginalDigest = trusted.diffID + // s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise we would have found gotFilename. + // So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit + // to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to + // this new layer copy can help.) + if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 { + trustedOriginalDigest = trusted.blobDigest + sizeCopy := layer.CompressedSize + trustedOriginalSize = &sizeCopy + } else { + // The stream we have is uncompressed, and it matches trusted.diffID (if known). + // + // We can legitimately set storage.LayerOptions.OriginalDigest to "", + // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID. + // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation. + trustedOriginalDigest = trusted.diffID + trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost. + } // Allow using the already-collected layer contents without extracting the layer again. // // This only matches against the uncompressed digest. - // We don’t have the original compressed data here to trivially set filenames[layerDigest]. - // In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API. + // If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the + // same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config), + // we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate + // between “original files” and “possibly uncompressed files”. // Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity. 
if trusted.diffID != "" { s.lock.Lock() @@ -1067,6 +1117,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{ OriginalDigest: trustedOriginalDigest, + OriginalSize: trustedOriginalSize, // nil in many cases // This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream. UncompressedDigest: trusted.diffID, }, file) @@ -1076,52 +1127,110 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D return layer, nil } +// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed, +// to allow using image.FromUnparsedImage. +type uncommittedImageSource struct { + srcImpl.Compat + srcImpl.PropertyMethodsInitialize + srcImpl.NoSignatures + srcImpl.DoesNotAffectLayerInfosForCopy + srcStubs.NoGetBlobAtInitialize + + d *storageImageDestination +} + +func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource { + s := &uncommittedImageSource{ + PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{ + HasThreadSafeGetBlob: true, + }), + NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()), + + d: d, + } + s.Compat = srcImpl.AddCompat(s) + return s +} + +func (u *uncommittedImageSource) Reference() types.ImageReference { + return u.d.Reference() +} + +func (u *uncommittedImageSource) Close() error { + return nil +} + +func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + return u.d.manifest, u.d.manifestMIMEType, nil +} + +func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + blob, err := u.d.getConfigBlob(info) + if err != nil { + return nil, -1, err + } + return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil +} + // untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config. // If the value is not yet available (but it can be available after s.manifets is set), it returns ("", nil). // WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication. func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) { - // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and - // nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil. + // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, + // nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set + // by NoteOriginalOCIConfig and are not being updated any more; + // or PutManifest has been called and s.manifest != nil. // Either way this function does not need the protection of s.lock. - if s.manifest == nil { - return "", nil - } if s.untrustedDiffIDValues == nil { - mt := manifest.GuessMIMEType(s.manifest) - if mt != imgspecv1.MediaTypeImageManifest { - // We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage, - // and then use types.Image.OCIConfig so that we can parse the image. 
- // - // In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has annotation), - // while converting to a non-OCI formats, using a manual (skopeo copy) or something similar, not (podman pull). - // So it is not implemented yet. - return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt) + // Typically, we expect untrustedDiffIDValues to be set by the generic copy code + // via NoteOriginalOCIConfig; this is a compatibility fallback for external callers + // of the public types.ImageDestination. + if s.manifest == nil { + return "", nil } - man, err := manifest.FromBlob(s.manifest, mt) + + ctx := context.Background() // This is all happening in memory, no need to worry about cancellation. + unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil) + sourced, err := image.FromUnparsedImage(ctx, nil, unparsed) if err != nil { - return "", fmt.Errorf("parsing manifest: %w", err) + return "", fmt.Errorf("parsing image to be committed: %w", err) } - - cb, err := s.getConfigBlob(man.ConfigInfo()) + configOCI, err := sourced.OCIConfig(ctx) if err != nil { - return "", err + return "", fmt.Errorf("obtaining config of image to be committed: %w", err) } - // retrieve the expected uncompressed digest from the config blob. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return "", err - } - s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs) - if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… - s.untrustedDiffIDValues = []digest.Digest{} - } + s.setUntrustedDiffIDValuesFromOCIConfig(configOCI) } + if layerIndex >= len(s.untrustedDiffIDValues) { return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex) } - return s.untrustedDiffIDValues[layerIndex], nil + res := s.untrustedDiffIDValues[layerIndex] + if res == "" { + // In practice, this should, right now, only matter for pulls of OCI images + // (this code path implies that we did a partial pull because a layer has an annotation), + // So, DiffIDs should always be set. + // + // It is, though, reachable by pulling an OCI image while converting to schema1, + // using a manual (skopeo copy) or something similar, not (podman pull). + // + // Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values. + // The current semantics of this function are that ("", nil) means "try again later", + // which is not what we want to happen; for now, turn that into an explicit error. + return "", fmt.Errorf("DiffID value for layer %d is unknown or explicitly empty", layerIndex) + } + return res, nil +} + +// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDvalues from config. +// The caller must ensure s.lock does not need to be held. +func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) { + s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs) + if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… + s.untrustedDiffIDValues = []digest.Digest{} + } } // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. 
@@ -1131,7 +1240,7 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. - if len(s.manifest) == 0 { + if s.manifest == nil { return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()") } toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx) @@ -1159,7 +1268,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options } } // Find the list of layer blobs. - man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType) if err != nil { return fmt.Errorf("parsing manifest: %w", err) } @@ -1193,29 +1302,21 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options imgOptions.CreationDate = *inspect.Created } - // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. - dataBlobs := set.New[digest.Digest]() - for blob := range s.lockProtected.filenames { - dataBlobs.Add(blob) - } - for _, layerBlob := range layerBlobs { - dataBlobs.Delete(layerBlob.Digest) - } - for _, blob := range dataBlobs.Values() { - v, err := os.ReadFile(s.lockProtected.filenames[blob]) + // Set up to save the config as a data item. Since we only share layers, the config should be in a file. + if s.lockProtected.configDigest != "" { + v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest]) if err != nil { - return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) + return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err) } imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ - Key: blob.String(), + Key: s.lockProtected.configDigest.String(), Data: v, Digest: digest.Canonical.FromBytes(v), }) } // Set up to save the options.UnparsedToplevel's manifest if it differs from // the per-platform one, which is saved below. 
- if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { + if !bytes.Equal(toplevelManifest, s.manifest) { manifestDigest, err := manifest.Digest(toplevelManifest) if err != nil { return fmt.Errorf("digesting top-level manifest: %w", err) @@ -1370,6 +1471,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob return err } s.manifest = bytes.Clone(manifestBlob) + if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil + s.manifest = []byte{} + } + s.manifestMIMEType = manifest.GuessMIMEType(s.manifest) s.manifestDigest = digest return nil } @@ -1392,7 +1497,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s if instanceDigest == nil { s.signatures = sigblob s.metadata.SignatureSizes = sizes - if len(s.manifest) > 0 { + if s.manifest != nil { manifestDigest := s.manifestDigest instanceDigest = &manifestDigest } diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index acc4cb30e8..5775c4acbc 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -153,7 +153,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag } if s.id == "" { logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) - return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage) + // %.0w makes the error visible to error.Unwrap() without including any text. + // ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause. + return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage) } if loadedImage == nil { img, err := s.transport.store.Image(s.id) diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index 55788f8877..ff76b20667 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -35,13 +35,14 @@ type storageImageSource struct { impl.PropertyMethodsInitialize stubs.NoGetBlobAtInitialize - imageRef storageReference - image *storage.Image - systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files - metadata storageImageMetadata - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions - getBlobMutexProtected getBlobMutexProtected + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + metadata storageImageMetadata + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + cachedManifestMIMEType string // Valid if cachedManifest != nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + getBlobMutexProtected getBlobMutexProtected } // getBlobMutexProtected contains storageImageSource data protected by getBlobMutex. 
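The storage_reference.go hunk above leans on a detail of fmt: %w formats like %v, so a zero precision (%.0w) keeps the wrapped error's text out of the rendered message while still making it visible to errors.Is and errors.Unwrap. A standalone sketch, with ErrNoSuchImage redeclared locally for illustration:

package main

import (
	"errors"
	"fmt"
)

var ErrNoSuchImage = errors.New("identifier is not an image")

func main() {
	// %.0w records ErrNoSuchImage for errors.Is/errors.Unwrap, but the zero
	// precision contributes no text to the message itself.
	err := fmt.Errorf("reference %q does not resolve to an image ID%.0w", "example", ErrNoSuchImage)

	fmt.Println(err)                            // reference "example" does not resolve to an image ID
	fmt.Println(errors.Is(err, ErrNoSuchImage)) // true
}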
@@ -247,7 +248,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di } return blob, manifest.GuessMIMEType(blob), err } - if len(s.cachedManifest) == 0 { + if s.cachedManifest == nil { // The manifest is stored as a big data item. // Prefer the manifest corresponding to the user-specified digest, if available. if s.imageRef.named != nil { @@ -267,15 +268,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di } // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). - if len(s.cachedManifest) == 0 { + if s.cachedManifest == nil { cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) if err != nil { return nil, "", err } s.cachedManifest = cachedBlob } + s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest) } - return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err + return s.cachedManifest, s.cachedManifestMIMEType, err } // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 3743721fc3..7a16c8181e 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 33 + VersionMinor = 34 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 0fed08c3b2..344b9a992d 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-39" + FEDORA_NAME: "fedora-41" DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20241010t105554z-f40f39d13" + IMAGE_SUFFIX: "c20241107t210000z-f41f40d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 64c01b67f8..090c9c2b76 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de # N/B: This value is managed by Renovate, manual changes are # possible, as long as they don't disturb the formatting # (i.e. DO NOT ADD A 'v' prefix!) 
-GOLANGCI_LINT_VERSION := 1.61.0 +GOLANGCI_LINT_VERSION := 1.62.2 default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 3ebf789f5a..a38254646f 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.56.0 +1.57.0-dev diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 13a458956c..2324373a87 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -792,19 +792,19 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L if _, found := xattrsToIgnore[xattrKey]; found { continue } - if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil { + if err := setExtendedAttribute(path, xattrKey, []byte(value)); err != nil { if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. We also ignore EPERM errors if we are running in a - // user namespace. + // Ignore specific error cases: + // - ENOTSUP: Expected for graphdrivers lacking extended attribute support: + // - Legacy AUFS versions + // - FreeBSD with unsupported namespaces (trusted, security) + // - EPERM: Expected when operating within a user namespace + // All other errors will cause a failure. errs = append(errs, err.Error()) continue } return err } - } // We defer setting flags on directories until the end of diff --git a/vendor/github.com/containers/storage/pkg/archive/xattr_freebsd.go b/vendor/github.com/containers/storage/pkg/archive/xattr_freebsd.go new file mode 100644 index 0000000000..d4fb5f2ddc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/xattr_freebsd.go @@ -0,0 +1,38 @@ +//go:build freebsd + +package archive + +import ( + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +// setExtendedAttribute sets an extended attribute on a file. On FreeBSD, extended attributes are +// supported via the extattr system calls. 
+func setExtendedAttribute(path string, xattrKey string, value []byte) error { + namespace, attrname, err := xattrToExtattr(xattrKey) + if err != nil { + return err + } + return system.ExtattrSetLink(path, namespace, attrname, value) +} + +func xattrToExtattr(xattrname string) (namespace int, attrname string, err error) { + namespaceMap := map[string]int{ + "user": system.EXTATTR_NAMESPACE_USER, + "system": system.EXTATTR_NAMESPACE_SYSTEM, + } + + namespaceName, attrname, found := strings.Cut(xattrname, ".") + if !found { + return -1, "", syscall.ENOTSUP + } + + namespace, ok := namespaceMap[namespaceName] + if !ok { + return -1, "", syscall.ENOTSUP + } + return namespace, attrname, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/xattr_unix.go b/vendor/github.com/containers/storage/pkg/archive/xattr_unix.go new file mode 100644 index 0000000000..ccb1b610af --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/xattr_unix.go @@ -0,0 +1,13 @@ +//go:build darwin || linux + +package archive + +import ( + "github.com/containers/storage/pkg/system" +) + +// setExtendedAttribute sets an extended attribute on a file. On Linux and Darwin, +// extended attributes are supported via the xattr system calls. +func setExtendedAttribute(path string, xattrKey string, value []byte) error { + return system.Lsetxattr(path, xattrKey, value, 0) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/xattr_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/xattr_unsupported.go new file mode 100644 index 0000000000..60aae2cc35 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/xattr_unsupported.go @@ -0,0 +1,12 @@ +//go:build !darwin && !linux && !freebsd + +package archive + +import ( + "github.com/containers/storage/pkg/system" +) + +// setExtendedAttribute is not supported on this platform. +func setExtendedAttribute(path string, xattrKey string, value []byte) error { + return system.ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 2dac463543..d0c7828463 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -20,6 +20,12 @@ import ( expMaps "golang.org/x/exp/maps" ) +const ( + // maxTocSize is the maximum size of a blob that we will attempt to process. + // It is used to prevent DoS attacks from layers that embed a very large TOC file. 
+ maxTocSize = (1 << 20) * 50 +) + var typesToTar = map[string]byte{ TypeReg: tar.TypeReg, TypeLink: tar.TypeLink, @@ -44,25 +50,21 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, if blobSize <= footerSize { return nil, 0, errors.New("blob too small") } - chunk := ImageSourceChunk{ - Offset: uint64(blobSize - footerSize), - Length: uint64(footerSize), - } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + + footer := make([]byte, footerSize) + streamsOrErrors, err := getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(blobSize - footerSize), Length: uint64(footerSize)}) if err != nil { return nil, 0, err } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, 0, err - } - defer reader.Close() - footer := make([]byte, footerSize) - if _, err := io.ReadFull(reader, footer); err != nil { - return nil, 0, err + + for soe := range streamsOrErrors { + if soe.stream != nil { + _, err = io.ReadFull(soe.stream, footer) + _ = soe.stream.Close() + } + if soe.err != nil && err == nil { + err = soe.err + } } /* Read the ToC offset: @@ -81,48 +83,54 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, size := int64(blobSize - footerSize - tocOffset) // set a reasonable limit - if size > (1<<20)*50 { + if size > maxTocSize { return nil, 0, errors.New("manifest too big") } - chunk = ImageSourceChunk{ - Offset: uint64(tocOffset), - Length: uint64(size), - } - parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, 0, err - } - - var tocReader io.ReadCloser - select { - case r := <-parts: - tocReader = r - case err := <-errs: - return nil, 0, err - } - defer tocReader.Close() - - r, err := pgzip.NewReader(tocReader) + streamsOrErrors, err = getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(tocOffset), Length: uint64(size)}) if err != nil { return nil, 0, err } - defer r.Close() - - aTar := archivetar.NewReader(r) - header, err := aTar.Next() - if err != nil { - return nil, 0, err + var manifestUncompressed []byte + + for soe := range streamsOrErrors { + if soe.stream != nil { + err1 := func() error { + defer soe.stream.Close() + + r, err := pgzip.NewReader(soe.stream) + if err != nil { + return err + } + defer r.Close() + + aTar := archivetar.NewReader(r) + + header, err := aTar.Next() + if err != nil { + return err + } + // set a reasonable limit + if header.Size > maxTocSize { + return errors.New("manifest too big") + } + + manifestUncompressed = make([]byte, header.Size) + if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { + return err + } + return nil + }() + if err == nil { + err = err1 + } + } else if err == nil { + err = soe.err + } } - // set a reasonable limit - if header.Size > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") - } - - manifestUncompressed := make([]byte, header.Size) - if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { - return nil, 0, err + if manifestUncompressed == nil { + return nil, 0, errors.New("manifest not found") } manifestDigester := digest.Canonical.Digester() @@ -140,7 +148,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. // Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset). 
-func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) { +func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *internal.TOC, _ []byte, _ int64, retErr error) { offsetMetadata := annotations[internal.ManifestInfoKey] if offsetMetadata == "" { return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey) @@ -164,10 +172,10 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di } // set a reasonable limit - if manifestChunk.Length > (1<<20)*50 { + if manifestChunk.Length > maxTocSize { return nil, nil, nil, 0, errors.New("manifest too big") } - if manifestLengthUncompressed > (1<<20)*50 { + if manifestLengthUncompressed > maxTocSize { return nil, nil, nil, 0, errors.New("manifest too big") } @@ -175,26 +183,31 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di if tarSplitChunk.Offset > 0 { chunks = append(chunks, tarSplitChunk) } - parts, errs, err := blobStream.GetBlobAt(chunks) + + streamsOrErrors, err := getBlobAt(blobStream, chunks...) if err != nil { return nil, nil, nil, 0, err } + defer func() { + err := ensureAllBlobsDone(streamsOrErrors) + if retErr == nil { + retErr = err + } + }() + readBlob := func(len uint64) ([]byte, error) { - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, err + soe, ok := <-streamsOrErrors + if !ok { + return nil, errors.New("stream closed") + } + if soe.err != nil { + return nil, soe.err } + defer soe.stream.Close() blob := make([]byte, len) - if _, err := io.ReadFull(reader, blob); err != nil { - reader.Close() - return nil, err - } - if err := reader.Close(); err != nil { + if _, err := io.ReadFull(soe.stream, blob); err != nil { return nil, err } return blob, nil diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 8ecbfb9826..6a16d1db23 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -1141,41 +1141,119 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) { return new, nil } -func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { - var payload io.ReadCloser - var streams chan io.ReadCloser - var errs chan error - var err error +type streamOrErr struct { + stream io.ReadCloser + err error +} - chunksToRequest := []ImageSourceChunk{ - { - Offset: 0, - Length: uint64(c.blobSize), - }, +// ensureAllBlobsDone ensures that all blobs are closed and returns the first error encountered. +func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) { + for soe := range streamsOrErrors { + if soe.stream != nil { + _ = soe.stream.Close() + } else if retErr == nil { + retErr = soe.err + } } + return +} - streams, errs, err = c.stream.GetBlobAt(chunksToRequest) - if err != nil { - return "", err +// getBlobAtConverterGoroutine reads from the streams and errs channels, then sends +// either a stream or an error to the stream channel. The streams channel is closed when +// there are no more streams and errors to read. +// It ensures that no more than maxStreams streams are returned, and that every item from the +// streams and errs channels is consumed. 
+func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCloser, errs chan error, maxStreams int) { + tooManyStreams := false + streamsSoFar := 0 + + err := errors.New("Unexpected error in getBlobAtGoroutine") + + defer func() { + if err != nil { + stream <- streamOrErr{err: err} + } + close(stream) + }() + +loop: + for { + select { + case p, ok := <-streams: + if !ok { + streams = nil + break loop + } + if streamsSoFar >= maxStreams { + tooManyStreams = true + _ = p.Close() + continue + } + streamsSoFar++ + stream <- streamOrErr{stream: p} + case err, ok := <-errs: + if !ok { + errs = nil + break loop + } + stream <- streamOrErr{err: err} + } } - select { - case p := <-streams: - payload = p - case err := <-errs: - return "", err + if streams != nil { + for p := range streams { + if streamsSoFar >= maxStreams { + tooManyStreams = true + _ = p.Close() + continue + } + streamsSoFar++ + stream <- streamOrErr{stream: p} + } } - if payload == nil { - return "", errors.New("invalid stream returned") + if errs != nil { + for err := range errs { + stream <- streamOrErr{err: err} + } } - defer payload.Close() + if tooManyStreams { + stream <- streamOrErr{err: fmt.Errorf("too many streams returned, got more than %d", maxStreams)} + } + err = nil +} - originalRawDigester := digest.Canonical.Digester() +// getBlobAt provides a much more convenient way to consume data returned by ImageSourceSeekable.GetBlobAt. +// GetBlobAt returns two channels, forcing a caller to `select` on both of them — and in Go, reading a closed channel +// always succeeds in select. +// Instead, getBlobAt provides a single channel with all events, which can be consumed conveniently using `range`. +func getBlobAt(is ImageSourceSeekable, chunksToRequest ...ImageSourceChunk) (chan streamOrErr, error) { + streams, errs, err := is.GetBlobAt(chunksToRequest) + if err != nil { + return nil, err + } + stream := make(chan streamOrErr) + go getBlobAtConverterGoroutine(stream, streams, errs, len(chunksToRequest)) + return stream, nil +} - r := io.TeeReader(payload, originalRawDigester.Hash()) +func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { + streamsOrErrors, err := getBlobAt(c.stream, ImageSourceChunk{Offset: 0, Length: uint64(c.blobSize)}) + if err != nil { + return "", err + } - // copy the entire tarball and compute its digest - _, err = io.CopyBuffer(destination, r, c.copyBuffer) + originalRawDigester := digest.Canonical.Digester() + for soe := range streamsOrErrors { + if soe.stream != nil { + r := io.TeeReader(soe.stream, originalRawDigester.Hash()) + // copy the entire tarball and compute its digest + _, err = io.CopyBuffer(destination, r, c.copyBuffer) + _ = soe.stream.Close() + } + if soe.err != nil && err == nil { + err = soe.err + } + } return originalRawDigester.Digest(), err } diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go new file mode 100644 index 0000000000..d9a822703d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go @@ -0,0 +1,96 @@ +//go:build freebsd + +package system + +import ( + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + EXTATTR_NAMESPACE_EMPTY = unix.EXTATTR_NAMESPACE_EMPTY + EXTATTR_NAMESPACE_USER = unix.EXTATTR_NAMESPACE_USER + EXTATTR_NAMESPACE_SYSTEM = unix.EXTATTR_NAMESPACE_SYSTEM +) + +// ExtattrGetLink retrieves the value of the extended attribute identified by attrname +// in the given 
namespace and associated with the given path in the file system. +// If the path is a symbolic link, the extended attribute is retrieved from the link itself. +// Returns a []byte slice if the extattr is set and nil otherwise. +func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { + size, errno := unix.ExtattrGetLink(path, attrnamespace, attrname, + uintptr(unsafe.Pointer(nil)), 0) + if errno != nil { + if errno == unix.ENOATTR { + return nil, nil + } + return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno} + } + if size == 0 { + return []byte{}, nil + } + + dest := make([]byte, size) + size, errno = unix.ExtattrGetLink(path, attrnamespace, attrname, + uintptr(unsafe.Pointer(&dest[0])), size) + if errno != nil { + return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno} + } + + return dest[:size], nil +} + +// ExtattrSetLink sets the value of extended attribute identified by attrname +// in the given namespace and associated with the given path in the file system. +// If the path is a symbolic link, the extended attribute is set on the link itself. +func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { + if len(data) == 0 { + data = []byte{} // ensure non-nil for empty data + } + if _, errno := unix.ExtattrSetLink(path, attrnamespace, attrname, + uintptr(unsafe.Pointer(&data[0])), len(data)); errno != nil { + return &os.PathError{Op: "extattr_set_link", Path: path, Err: errno} + } + + return nil +} + +// ExtattrListLink lists extended attributes associated with the given path +// in the specified namespace. If the path is a symbolic link, the attributes +// are listed from the link itself. +func ExtattrListLink(path string, attrnamespace int) ([]string, error) { + size, errno := unix.ExtattrListLink(path, attrnamespace, + uintptr(unsafe.Pointer(nil)), 0) + if errno != nil { + if errno == unix.ENOATTR { + return nil, nil + } + return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} + } + if size == 0 { + return []string{}, nil + } + + dest := make([]byte, size) + size, errno = unix.ExtattrListLink(path, attrnamespace, + uintptr(unsafe.Pointer(&dest[0])), size) + if errno != nil { + return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} + } + + var attrs []string + for i := 0; i < size; { + // Each attribute is preceded by a single byte length + length := int(dest[i]) + i++ + if i+length > size { + break + } + attrs = append(attrs, string(dest[i:i+length])) + i += length + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go new file mode 100644 index 0000000000..07b67357f3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go @@ -0,0 +1,24 @@ +//go:build !freebsd + +package system + +const ( + EXTATTR_NAMESPACE_EMPTY = 0 + EXTATTR_NAMESPACE_USER = 0 + EXTATTR_NAMESPACE_SYSTEM = 0 +) + +// ExtattrGetLink is not supported on platforms other than FreeBSD. +func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// ExtattrSetLink is not supported on platforms other than FreeBSD. +func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { + return ErrNotSupportedPlatform +} + +// ExtattrListLink is not supported on platforms other than FreeBSD. 
+func ExtattrListLink(path string, attrnamespace int) ([]string, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go new file mode 100644 index 0000000000..715f05b938 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go deleted file mode 100644 index 2c3ebe1653..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go +++ /dev/null @@ -1,27 +0,0 @@ -package challenge - -import ( - "net/url" - "strings" -) - -// FROM: https://golang.org/src/net/http/http.go -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. -func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// FROM: http://golang.org/src/net/http/transport.go -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -// FROM: http://golang.org/src/net/http/transport.go -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go deleted file mode 100644 index fe238210cd..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ /dev/null @@ -1,237 +0,0 @@ -package challenge - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "sync" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// Manager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type Manager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenicate headers and added to the - // URL which was produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleManager returns an instance of -// Manger which only maps endpoints to challenges -// based on the responses which have been added the -// manager. The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. 
-func NewSimpleManager() Manager { - return &simpleManager{ - Challenges: make(map[string][]Challenge), - } -} - -type simpleManager struct { - sync.RWMutex - Challenges map[string][]Challenge -} - -func normalizeURL(endpoint *url.URL) { - endpoint.Host = strings.ToLower(endpoint.Host) - endpoint.Host = canonicalAddr(endpoint) -} - -func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - normalizeURL(&endpoint) - - m.RLock() - defer m.RUnlock() - challenges := m.Challenges[endpoint.String()] - return challenges, nil -} - -func (m *simpleManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, - Scheme: resp.Request.URL.Scheme, - } - normalizeURL(&urlCopy) - - m.Lock() - defer m.Unlock() - m.Challenges[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ResponseChallenges returns a list of authorization challenges -// for the given http Response. Challenges are only checked if -// the response status code was a 401. -func ResponseChallenges(resp *http.Response) []Challenge { - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. 
- return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/proglottis/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore index 0210b26e03..bc485bfc5b 100644 --- a/vendor/github.com/proglottis/gpgme/.gitignore +++ b/vendor/github.com/proglottis/gpgme/.gitignore @@ -1 +1,3 @@ testdata/gpghome/random_seed +testdata/gpghome/.gpg-v21-migrated +testdata/gpghome/private-keys-v1.d/ diff --git a/vendor/github.com/proglottis/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go deleted file mode 100644 index d1dc610d42..0000000000 --- a/vendor/github.com/proglottis/gpgme/callbacks.go +++ /dev/null @@ -1,42 +0,0 @@ -package gpgme - -import ( - "sync" -) - -var callbacks struct { - sync.Mutex - m map[uintptr]interface{} - c uintptr -} - -func callbackAdd(v interface{}) uintptr { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m == nil { - callbacks.m = make(map[uintptr]interface{}) - } - callbacks.c++ - ret := callbacks.c - callbacks.m[ret] = v - return ret -} - -func callbackLookup(c uintptr) interface{} { - callbacks.Lock() - defer callbacks.Unlock() - ret := callbacks.m[c] - if ret == nil { - panic("callback pointer not found") - } - return ret -} - -func callbackDelete(c uintptr) { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m[c] == nil { - panic("callback pointer not found") - } - delete(callbacks.m, c) -} diff --git a/vendor/github.com/proglottis/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go index eee32c0323..0e81c36d66 100644 --- a/vendor/github.com/proglottis/gpgme/data.go +++ b/vendor/github.com/proglottis/gpgme/data.go @@ -10,6 +10,7 @@ import ( "io" "os" "runtime" + "runtime/cgo" "unsafe" ) @@ -19,30 +20,32 @@ const 
( SeekEnd = C.SEEK_END ) +var dataCallbacks = C.struct_gpgme_data_cbs{ + read: C.gpgme_data_read_cb_t(C.gogpgme_readfunc), + write: C.gpgme_data_write_cb_t(C.gogpgme_writefunc), + seek: C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc), +} + //export gogpgme_readfunc func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - n, err := d.r.Read(d.buf[:size]) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.r.Read(unsafe.Slice((*byte)(buffer), size)) if err != nil && err != io.EOF { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } - C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n)) return C.ssize_t(n) } //export gogpgme_writefunc func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size)) - n, err := d.w.Write(d.buf[:size]) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.w.Write(unsafe.Slice((*byte)(buffer), size)) if err != nil && err != io.EOF { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } @@ -51,9 +54,11 @@ func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { //export gogpgme_seekfunc func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t { - d := callbackLookup(uintptr(handle)).(*Data) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) n, err := d.s.Seek(int64(offset), int(whence)) if err != nil { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } @@ -63,12 +68,11 @@ func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) // The Data buffer used to communicate with GPGME type Data struct { dh C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C - buf []byte - cbs C.struct_gpgme_data_cbs r io.Reader w io.Writer s io.Seeker - cbc uintptr // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh) + cbc cgo.Handle // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh) + err error } func newData() *Data { @@ -86,6 +90,7 @@ func NewData() (*Data, error) { // NewDataFile returns a new file based data buffer func NewDataFile(f *os.File) (*Data, error) { d := newData() + d.r = f return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd()))) } @@ -103,20 +108,22 @@ func NewDataBytes(b []byte) (*Data, error) { func NewDataReader(r io.Reader) (*Data, error) { d := newData() d.r = r - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := r.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // NewDataWriter returns a new callback based data buffer func NewDataWriter(w io.Writer) (*Data, error) { d := newData() d.w = w - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := w.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // 
NewDataReadWriter returns a new callback based data buffer @@ -124,11 +131,11 @@ func NewDataReadWriter(rw io.ReadWriter) (*Data, error) { d := newData() d.r = rw d.w = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := rw.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // NewDataReadWriteSeeker returns a new callback based data buffer @@ -137,12 +144,8 @@ func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) { d.r = rw d.w = rw d.s = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // Close releases any resources associated with the data buffer @@ -151,7 +154,7 @@ func (d *Data) Close() error { return nil } if d.cbc > 0 { - callbackDelete(d.cbc) + d.cbc.Delete() } _, err := C.gpgme_data_release(d.dh) runtime.KeepAlive(d) @@ -160,24 +163,42 @@ func (d *Data) Close() error { } func (d *Data) Write(p []byte) (int, error) { - n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) + var buffer *byte + if len(p) > 0 { + buffer = &p[0] + } + + n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(buffer), C.size_t(len(p))) runtime.KeepAlive(d) - if err != nil { + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: return 0, err - } - if n == 0 { + case len(p) > 0 && n == 0: return 0, io.EOF } return int(n), nil } func (d *Data) Read(p []byte) (int, error) { - n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) + var buffer *byte + if len(p) > 0 { + buffer = &p[0] + } + + n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(buffer), C.size_t(len(p))) runtime.KeepAlive(d) - if err != nil { + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: return 0, err - } - if n == 0 { + case len(p) > 0 && n == 0: return 0, io.EOF } return int(n), nil @@ -186,7 +207,15 @@ func (d *Data) Read(p []byte) (int, error) { func (d *Data) Seek(offset int64, whence int) (int64, error) { n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence)) runtime.KeepAlive(d) - return int64(n), err + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: + return 0, err + } + return int64(n), nil } // Name returns the associated filename if any diff --git a/vendor/github.com/proglottis/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c index 00da3ab304..f5b38ce7ed 100644 --- a/vendor/github.com/proglottis/gpgme/go_gpgme.c +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.c @@ -1,13 +1,5 @@ #include "go_gpgme.h" -gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) { - return gpgme_data_new_from_cbs(dh, cbs, (void *)handle); -} - -void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle) { - gpgme_set_passphrase_cb(ctx, cb, (void *)handle); -} 
- gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) { return gpgme_data_seek(dh, offset, whence); } @@ -15,17 +7,17 @@ gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) { gpgme_error_t gogpgme_op_assuan_transact_ext( gpgme_ctx_t ctx, char* cmd, - uintptr_t data_h, - uintptr_t inquiry_h, - uintptr_t status_h, + void* data_h, + void* inquiry_h, + void* status_h, gpgme_error_t *operr ){ return gpgme_op_assuan_transact_ext( ctx, cmd, - (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, (void *)data_h, - (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, (void *)inquiry_h, - (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, (void *)status_h, + (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, data_h, + (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, inquiry_h, + (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, status_h, operr ); } diff --git a/vendor/github.com/proglottis/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h index eb3a4ba88b..8eb5fddd9f 100644 --- a/vendor/github.com/proglottis/gpgme/go_gpgme.h +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h @@ -10,11 +10,9 @@ extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd); -extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle); -extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle); extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence); -extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, uintptr_t data_h, uintptr_t inquiry_h , uintptr_t status_h, gpgme_error_t *operr); +extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, void *data_h, void *inquiry_h , void *status_h, gpgme_error_t *operr); extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen ); extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args); diff --git a/vendor/github.com/proglottis/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go index 8f495ddf5d..15af69c865 100644 --- a/vendor/github.com/proglottis/gpgme/gpgme.go +++ b/vendor/github.com/proglottis/gpgme/gpgme.go @@ -7,11 +7,13 @@ package gpgme // #include // #include "go_gpgme.h" import "C" + import ( "fmt" "io" "os" "runtime" + "runtime/cgo" "time" "unsafe" ) @@ -27,7 +29,8 @@ type Callback func(uidHint string, prevWasBad bool, f *os.File) error //export gogpgme_passfunc func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t { - c := callbackLookup(uintptr(hook)).(*Context) + h := *(*cgo.Handle)(hook) + c := h.Value().(*Context) go_uid_hint := C.GoString(uid_hint) f := os.NewFile(uintptr(fd), go_uid_hint) defer f.Close() @@ -233,6 +236,17 @@ func SetEngineInfo(proto Protocol, fileName, homeDir string) error { return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome)) } +func GetDirInfo(what string) string { + cwhat := C.CString(what) + defer C.free(unsafe.Pointer(cwhat)) + + cdir := C.gpgme_get_dirinfo(cwhat) + if cdir == nil { + return "" + } + return C.GoString(cdir) +} + 
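The gpgme changes above (data.go and gpgme.go) replace the hand-rolled callbacks.go registry with runtime/cgo handles: the Go side stores a cgo.Handle, hands C a pointer to it as the opaque callback argument, and each exported callback recovers the Go value with Handle.Value(). A pure-Go sketch of that round trip, with the C caller simulated and all names illustrative:

package main

import (
	"fmt"
	"runtime/cgo"
	"unsafe"
)

type data struct{ name string }

// readCallback stands in for an //export'ed function that C invokes with the
// opaque pointer it was handed; it recovers the *data via the cgo.Handle.
func readCallback(opaque unsafe.Pointer) {
	h := *(*cgo.Handle)(opaque)
	d := h.Value().(*data)
	fmt.Println("callback for", d.name)
}

func main() {
	d := &data{name: "example"}
	h := cgo.NewHandle(d) // replaces the old callbackAdd registry
	defer h.Delete()      // replaces callbackDelete
	readCallback(unsafe.Pointer(&h))
}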
func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { var keys []*Key ctx, err := New() @@ -243,7 +257,7 @@ func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { if err := ctx.KeyListStart(pattern, secretOnly); err != nil { return keys, err } - defer ctx.KeyListEnd() + defer func() { _ = ctx.KeyListEnd() }() for ctx.KeyListNext() { keys = append(keys, ctx.Key) } @@ -268,8 +282,10 @@ func Decrypt(r io.Reader) (*Data, error) { if err != nil { return nil, err } - err = ctx.Decrypt(cipher, plain) - plain.Seek(0, SeekSet) + if err := ctx.Decrypt(cipher, plain); err != nil { + return nil, err + } + _, err = plain.Seek(0, SeekSet) return plain, err } @@ -278,7 +294,7 @@ type Context struct { KeyError error callback Callback - cbc uintptr // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx) + cbc cgo.Handle // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx) ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C } @@ -295,7 +311,7 @@ func (c *Context) Release() { return } if c.cbc > 0 { - callbackDelete(c.cbc) + c.cbc.Delete() } C.gpgme_release(c.ctx) runtime.KeepAlive(c) @@ -364,15 +380,14 @@ func (c *Context) SetCallback(callback Callback) error { var err error c.callback = callback if c.cbc > 0 { - callbackDelete(c.cbc) + c.cbc.Delete() } if callback != nil { - cbc := callbackAdd(c) - c.cbc = cbc - _, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc)) + c.cbc = cgo.NewHandle(c) + _, err = C.gpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), unsafe.Pointer(&c.cbc)) } else { c.cbc = 0 - _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0) + _, err = C.gpgme_set_passphrase_cb(c.ctx, nil, nil) } runtime.KeepAlive(c) return err @@ -564,9 +579,11 @@ func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error { return err } -type AssuanDataCallback func(data []byte) error -type AssuanInquireCallback func(name, args string) error -type AssuanStatusCallback func(status, args string) error +type ( + AssuanDataCallback func(data []byte) error + AssuanInquireCallback func(name, args string) error + AssuanStatusCallback func(status, args string) error +) // AssuanSend sends a raw Assuan command to gpg-agent func (c *Context) AssuanSend( @@ -577,17 +594,17 @@ func (c *Context) AssuanSend( ) error { var operr C.gpgme_error_t - dataPtr := callbackAdd(&data) - inquiryPtr := callbackAdd(&inquiry) - statusPtr := callbackAdd(&status) + dataPtr := cgo.NewHandle(&data) + inquiryPtr := cgo.NewHandle(&inquiry) + statusPtr := cgo.NewHandle(&status) cmdCStr := C.CString(cmd) defer C.free(unsafe.Pointer(cmdCStr)) err := C.gogpgme_op_assuan_transact_ext( c.ctx, cmdCStr, - C.uintptr_t(dataPtr), - C.uintptr_t(inquiryPtr), - C.uintptr_t(statusPtr), + unsafe.Pointer(&dataPtr), + unsafe.Pointer(&inquiryPtr), + unsafe.Pointer(&statusPtr), &operr, ) runtime.KeepAlive(c) @@ -600,11 +617,14 @@ func (c *Context) AssuanSend( //export gogpgme_assuan_data_callback func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t { - c := callbackLookup(uintptr(handle)).(*AssuanDataCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanDataCallback) if *c == nil { return 0 } - (*c)(C.GoBytes(data, C.int(datalen))) + if err := (*c)(C.GoBytes(data, C.int(datalen))); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 } @@ -612,11 +632,14 @@ func 
gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, da func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) C.gpgme_error_t { name := C.GoString(cName) args := C.GoString(cArgs) - c := callbackLookup(uintptr(handle)).(*AssuanInquireCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanInquireCallback) if *c == nil { return 0 } - (*c)(name, args) + if err := (*c)(name, args); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 } @@ -624,11 +647,14 @@ func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t { status := C.GoString(cStatus) args := C.GoString(cArgs) - c := callbackLookup(uintptr(handle)).(*AssuanStatusCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanStatusCallback) if *c == nil { return 0 } - (*c)(status, args) + if err := (*c)(status, args); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 } diff --git a/vendor/github.com/proglottis/gpgme/unset_agent_info.go b/vendor/github.com/proglottis/gpgme/unset_agent_info.go index 986aca59f6..8add8ec877 100644 --- a/vendor/github.com/proglottis/gpgme/unset_agent_info.go +++ b/vendor/github.com/proglottis/gpgme/unset_agent_info.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package gpgme diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go index dfc1f0c0e8..6714b3488e 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go @@ -134,7 +134,7 @@ func (i *InteractiveIDTokenGetter) doOobFlow(cfg *oauth2.Config, stateToken stri fmt.Fprintln(i.GetOutput(), "Go to the following link in a browser:\n\n\t", authURL) fmt.Fprintf(i.GetOutput(), "Enter verification code: ") var code string - fmt.Fscanf(i.GetInput(), "%s", &code) + _, _ = fmt.Fscanf(i.GetInput(), "%s", &code) // New line in case read input doesn't move cursor to next line. fmt.Fprintln(i.GetOutput()) return code diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index 91dd430c1c..6fc80512fd 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -23,21 +23,19 @@ var errAlignmentOverflow = errors.New("integer overflow when calculating alignme // nextAligned finds the next offset that satisfies alignment. func nextAligned(offset int64, alignment int) (int64, error) { - align64 := uint64(alignment) - offset64 := uint64(offset) + align64 := int64(alignment) - if align64 <= 0 || offset64%align64 == 0 { + if align64 <= 0 || offset%align64 == 0 { return offset, nil } - offset64 += (align64 - offset64%align64) + align64 -= offset % align64 - if offset64 > math.MaxInt64 { + if (math.MaxInt64 - offset) < align64 { return 0, errAlignmentOverflow } - //nolint:gosec // Overflow handled above. 
- return int64(offset64), nil + return offset + align64, nil } // writeDataObjectAt writes the data object described by di to ws, using time t, recording details diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c204..48dbb9d84c 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/vendor/modules.txt b/vendor/modules.txt index a08736dbde..3a41864553 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -121,8 +121,8 @@ github.com/containerd/log # github.com/containerd/platforms v0.2.1 ## explicit; go 1.20 github.com/containerd/platforms -# github.com/containerd/stargz-snapshotter/estargz v0.15.1 -## explicit; go 1.19 +# github.com/containerd/stargz-snapshotter/estargz v0.16.2 +## explicit; go 1.22.0 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containerd/typeurl/v2 v2.2.0 @@ -247,8 +247,8 @@ github.com/containers/conmon/runner/config # github.com/containers/gvisor-tap-vsock v0.8.0 ## explicit; go 1.22.0 github.com/containers/gvisor-tap-vsock/pkg/types -# github.com/containers/image/v5 v5.33.0 -## explicit; go 1.22.6 +# github.com/containers/image/v5 v5.33.1-0.20241128201909-5263462aa97a +## explicit; go 1.22.8 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -358,7 +358,7 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.56.0 +# github.com/containers/storage v1.56.1-0.20241127184055-88a8fd8ddb47 ## explicit; go 1.22.0 github.com/containers/storage github.com/containers/storage/drivers @@ -464,7 +464,6 @@ github.com/distribution/reference ## explicit github.com/docker/distribution/registry/api/errcode github.com/docker/distribution/registry/api/v2 -github.com/docker/distribution/registry/client/auth/challenge # github.com/docker/docker v27.3.1+incompatible ## explicit github.com/docker/docker/api @@ -961,8 +960,8 @@ github.com/pmezard/go-difflib/difflib # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c ## explicit; go 1.14 github.com/power-devops/perfstat -# github.com/proglottis/gpgme v0.1.3 -## explicit; go 1.11 +# github.com/proglottis/gpgme v0.1.4 +## explicit; go 1.17 github.com/proglottis/gpgme # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 @@ -1012,8 +1011,8 @@ 
github.com/sigstore/rekor/pkg/generated/client/pubkey github.com/sigstore/rekor/pkg/generated/client/tlog github.com/sigstore/rekor/pkg/generated/models github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.8.9 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/oauth github.com/sigstore/sigstore/pkg/oauthflow @@ -1044,8 +1043,8 @@ github.com/stefanberger/go-pkcs11uri github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/require -# github.com/sylabs/sif/v2 v2.19.1 -## explicit; go 1.22.5 +# github.com/sylabs/sif/v2 v2.20.0 +## explicit; go 1.22.8 github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit @@ -1222,7 +1221,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.23.0 +# golang.org/x/oauth2 v0.24.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal