From 7ad72a59e36c79e5df09de1777fa60be995a5195 Mon Sep 17 00:00:00 2001 From: Aditya R Date: Wed, 1 Nov 2023 23:30:32 +0530 Subject: [PATCH] vendor: bump c/image to 373c52a9466f [NO NEW TESTS NEEDED] Signed-off-by: Aditya R --- go.mod | 27 +- go.sum | 61 ++- .../stargz-snapshotter/estargz/build.go | 3 +- .../containers/image/v5/copy/encryption.go | 2 +- .../containers/image/v5/copy/multiple.go | 2 +- .../containers/image/v5/copy/single.go | 11 +- .../image/v5/docker/daemon/daemon_dest.go | 33 +- .../image/v5/docker/distribution_error.go | 3 +- .../image/v5/docker/docker_client.go | 5 + .../image/v5/docker/docker_image_dest.go | 65 ++- .../containers/image/v5/image/unparsed.go | 22 + .../image/v5/internal/blobinfocache/types.go | 9 +- .../containers/image/v5/internal/image/oci.go | 57 ++- .../internal/manifest/docker_schema2_list.go | 4 +- .../image/v5/internal/manifest/oci_index.go | 6 +- .../image/v5/oci/archive/oci_src.go | 12 + .../image/v5/oci/archive/oci_transport.go | 19 +- .../image/v5/oci/layout/oci_delete.go | 240 +++++++++++ .../image/v5/oci/layout/oci_dest.go | 15 +- .../containers/image/v5/oci/layout/oci_src.go | 2 +- .../image/v5/oci/layout/oci_transport.go | 50 ++- .../internal/prioritize/prioritize.go | 56 ++- .../v5/pkg/blobinfocache/memory/memory.go | 54 ++- .../v5/pkg/blobinfocache/sqlite/sqlite.go | 48 ++- .../image/v5/pkg/shortnames/shortnames.go | 3 +- .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 2 +- .../image/v5/storage/storage_reference.go | 27 ++ .../image/v5/storage/storage_transport.go | 29 ++ .../containers/image/v5/types/types.go | 4 +- .../containers/image/v5/version/version.go | 4 +- .../github.com/containers/ocicrypt/Makefile | 1 + .../keywrap/pkcs11/keywrapper_pkcs11.go | 6 +- .../containers/ocicrypt/spec/spec.go | 10 +- .../github.com/containers/storage/.cirrus.yml | 6 +- vendor/github.com/containers/storage/VERSION | 2 +- .../containers/storage/drivers/driver.go | 3 + .../storage/drivers/overlay/check.go | 33 ++ .../drivers/overlay/composefs_notsupported.go | 6 +- .../drivers/overlay/composefs_supported.go | 54 ++- .../storage/drivers/overlay/mount.go | 21 +- .../storage/drivers/overlay/overlay.go | 95 +++-- .../github.com/containers/storage/layers.go | 20 +- .../containers/storage/pkg/archive/archive.go | 19 +- .../storage/pkg/chunked/cache_linux.go | 5 + .../storage/pkg/chunked/dump/dump.go | 230 ++++++++++ .../storage/pkg/chunked/storage_linux.go | 22 +- .../storage/pkg/loopback/attach_loopback.go | 18 +- .../containers/storage/pkg/system/rm.go | 6 +- .../containers/storage/storage.conf | 5 +- vendor/github.com/containers/storage/store.go | 25 +- .../containers/storage/types/options.go | 5 +- .../klauspost/compress/.goreleaser.yml | 20 +- .../github.com/klauspost/compress/README.md | 19 + .../klauspost/compress/flate/deflate.go | 29 ++ .../klauspost/compress/flate/fast_encoder.go | 23 - .../klauspost/compress/flate/inflate.go | 66 ++- .../klauspost/compress/flate/inflate_gen.go | 34 +- .../klauspost/compress/flate/level5.go | 398 ++++++++++++++++++ .../compress/flate/matchlen_amd64.go | 16 + .../klauspost/compress/flate/matchlen_amd64.s | 68 +++ .../compress/flate/matchlen_generic.go | 33 ++ .../klauspost/compress/fse/bitwriter.go | 3 +- .../klauspost/compress/fse/compress.go | 3 +- .../klauspost/compress/huff0/bitwriter.go | 3 +- .../klauspost/compress/huff0/compress.go | 20 +- .../klauspost/compress/zstd/bitreader.go | 34 +- .../klauspost/compress/zstd/bitwriter.go | 3 +- .../klauspost/compress/zstd/blockenc.go | 29 +- 
.../klauspost/compress/zstd/dict.go | 379 ++++++++++++++++- .../klauspost/compress/zstd/enc_best.go | 11 +- .../klauspost/compress/zstd/encoder.go | 13 +- .../klauspost/compress/zstd/frameenc.go | 4 +- .../klauspost/compress/zstd/seqdec.go | 17 +- .../klauspost/compress/zstd/seqdec_amd64.s | 128 +++--- .../klauspost/compress/zstd/seqdec_generic.go | 2 +- .../klauspost/compress/zstd/snappy.go | 5 +- .../sigstore/pkg/cryptoutils/privatekey.go | 2 +- .../sigstore/pkg/signature/payload/payload.go | 2 +- .../sylabs/sif/v2/pkg/sif/create.go | 126 +++--- .../theupdateframework/go-tuf/LICENSE | 27 -- .../go-tuf/encrypted/encrypted.go | 226 ---------- vendor/github.com/vbauerster/mpb/v8/bar.go | 33 +- .../vbauerster/mpb/v8/container_option.go | 6 +- .../github.com/vbauerster/mpb/v8/progress.go | 78 ++-- .../x/tools/cmd/stringer/stringer.go | 5 +- vendor/golang.org/x/tools/go/packages/doc.go | 2 +- .../golang.org/x/tools/go/packages/golist.go | 7 +- .../x/tools/go/packages/packages.go | 3 +- .../x/tools/internal/gcimporter/gcimporter.go | 3 +- .../x/tools/internal/typeparams/coretype.go | 8 +- .../x/tools/internal/typeparams/termlist.go | 2 +- .../x/tools/internal/typeparams/typeterm.go | 9 +- vendor/modules.txt | 34 +- 93 files changed, 2475 insertions(+), 925 deletions(-) create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_delete.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/dump/dump.go create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_generic.go delete mode 100644 vendor/github.com/theupdateframework/go-tuf/LICENSE delete mode 100644 vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go diff --git a/go.mod b/go.mod index c9c904176da..f4260b79e18 100644 --- a/go.mod +++ b/go.mod @@ -7,10 +7,10 @@ require ( github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.3.0 github.com/containers/common v0.56.1-0.20231027162335-3c7f68ff6cc0 - github.com/containers/image/v5 v5.28.0 + github.com/containers/image/v5 v5.28.1-0.20231101173728-373c52a9466f github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b - github.com/containers/ocicrypt v1.1.8 - github.com/containers/storage v1.50.2 + github.com/containers/ocicrypt v1.1.9 + github.com/containers/storage v1.50.3-0.20231101112703-6e72f11598fb github.com/cyphar/filepath-securejoin v0.2.4 github.com/docker/distribution v2.8.3+incompatible github.com/docker/docker v24.0.7+incompatible @@ -55,7 +55,7 @@ require ( github.com/container-orchestrated-devices/container-device-interface v0.6.1 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -88,7 +88,7 @@ require ( github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/letsencrypt/boulder 
v0.0.0-20230213213521-fdfea0d469b6 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -113,28 +113,27 @@ require ( github.com/proglottis/gpgme v0.1.3 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect - github.com/sigstore/fulcio v1.4.0 // indirect + github.com/sigstore/fulcio v1.4.3 // indirect github.com/sigstore/rekor v1.2.2 // indirect - github.com/sigstore/sigstore v1.7.3 // indirect + github.com/sigstore/sigstore v1.7.5 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect - github.com/sylabs/sif/v2 v2.13.0 // indirect + github.com/sylabs/sif/v2 v2.15.0 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect - github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/vbatts/tar-split v0.11.5 // indirect - github.com/vbauerster/mpb/v8 v8.6.1 // indirect + github.com/vbauerster/mpb/v8 v8.6.2 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.4 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/mod v0.12.0 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + golang.org/x/tools v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect diff --git a/go.sum b/go.sum index 0f5aaf44f0d..44fb0d6a035 100644 --- a/go.sum +++ b/go.sum @@ -48,24 +48,24 @@ github.com/containerd/containerd v1.7.8 h1:RkwgOW3AVUT3H/dyT0W03Dc8AzlpMG65lX48K github.com/containerd/containerd v1.7.8/go.mod h1:L/Hn9qylJtUFT7cPeM0Sr3fATj+WjHwRQ0lyrYk3OPY= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= -github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= +github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= +github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM= github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0= github.com/containers/common v0.56.1-0.20231027162335-3c7f68ff6cc0 h1:ojuof1NnYcvcj7K3c8J17ov085TG3RoezvVKzfpy8Vw= github.com/containers/common v0.56.1-0.20231027162335-3c7f68ff6cc0/go.mod 
h1:oGJwX+LF9QLUhT5dClaiaFRga1I9g/w/k8+dDrTs0Ws= -github.com/containers/image/v5 v5.28.0 h1:H4cWbdI88UA/mDb6SxMo3IxpmS1BSs/Kifvhwt9g048= -github.com/containers/image/v5 v5.28.0/go.mod h1:9aPnNkwHNHgGl9VlQxXEshvmOJRbdRAc1rNDD6sP2eU= +github.com/containers/image/v5 v5.28.1-0.20231101173728-373c52a9466f h1:x79xiC/Zs7yRzCWCT/fuf8J8LALTzVHzGT9T0HEx9FQ= +github.com/containers/image/v5 v5.28.1-0.20231101173728-373c52a9466f/go.mod h1:7+h9aIQgB6YzWxFzKAAYQ0CQZS0ks/bc+FMZQTJFoN8= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b h1:8XvNAm+g7ivwPUkyiHvBs7z356JWpK9a0FDaek86+sY= github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b/go.mod h1:menB9p4o5HckgcLW6cO0+dl6+axkVmSqKlrNcratsh4= -github.com/containers/ocicrypt v1.1.8 h1:saSBF0/8DyPUjzcxMVzL2OBUWCkvRvqIm75pu0ADSZk= -github.com/containers/ocicrypt v1.1.8/go.mod h1:jM362hyBtbwLMWzXQZTlkjKGAQf/BN/LFMtH0FIRt34= -github.com/containers/storage v1.50.2 h1:Fys4BjFUVNRBEXlO70hFI48VW4EXsgnGisTpk9tTMsE= -github.com/containers/storage v1.50.2/go.mod h1:dpspZsUrcKD8SpTofvKWhwPDHD0MkO4Q7VE+oYdWkiA= +github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM= +github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys= +github.com/containers/storage v1.50.3-0.20231101112703-6e72f11598fb h1:g1IJUHmHZuHa1YPvIiYjWrhysb+qEiiImA8p8mENhiE= +github.com/containers/storage v1.50.3-0.20231101112703-6e72f11598fb/go.mod h1:LpKczONfqahkVHFdZGPUg/xYZVjd/qqisRu0TkO4u8k= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -245,8 +245,8 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -334,6 +334,7 @@ github.com/openshift/imagebuilder v1.2.5 h1:dby0N3FTouXSBgWNf+gfTkj36fAb8g4iL/SR github.com/openshift/imagebuilder v1.2.5/go.mod h1:bF4w79W8nM+jH1QkAiHSUVaqHkMBJGijafZxCJEHH5o= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/pelletier/go-toml v1.7.0/go.mod 
h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -343,11 +344,11 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -362,12 +363,12 @@ github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9g github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sigstore/fulcio v1.4.0 h1:05+k8BFvwTQzfCkVxESWzCN4b70KIRliGYz0Upmdrs8= -github.com/sigstore/fulcio v1.4.0/go.mod h1:wcjlktbhoy6+ZTxO3yXpvqUxsLV+JEH4FF3a5Jz4VPI= +github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= +github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= -github.com/sigstore/sigstore v1.7.3 h1:HVVTfrMezJeLyl2xhJ8edzkrEGBa4KxjQZB4FlQ4JLU= -github.com/sigstore/sigstore v1.7.3/go.mod h1:cl0c7Dtg3MM3c13L8pqqrfrmBa0eM3POcdtBepjylmw= +github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48= +github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -395,14 +396,12 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/sylabs/sif/v2 v2.13.0 
h1:dK/PQ/ohLAA4hptbjNuU0qoqkJ9Kl07hiSHArMNSKsQ= -github.com/sylabs/sif/v2 v2.13.0/go.mod h1:qEFrmE29XNbW2uyBagTsw9dgM82MwsckNYUFPweF2ek= +github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw= +github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA= -github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= @@ -411,8 +410,8 @@ github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vbauerster/mpb/v8 v8.6.1 h1:XbBpIbJxJOO9yMcKPpI4oEFPW6tLAptefNQJNcGWri8= -github.com/vbauerster/mpb/v8 v8.6.1/go.mod h1:S0tuIjikxlLxCeNijNhwAuD/BB3UE/d2nygG8SOldk0= +github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA= +github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -453,15 +452,15 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.12.0 
h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -543,8 +542,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -554,8 +553,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index b071cea51dd..6aba0ef1f69 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -436,9 +436,8 @@ func importTar(in io.ReaderAt) (*tarFile, error) { if err != nil { if err == io.EOF { break - } else { - return nil, fmt.Errorf("failed to parse tar file, %w", err) } + return nil, fmt.Errorf("failed to parse tar file, %w", err) } switch 
cleanEntryName(h.Name) { case PrefetchLandmark, NoPrefetchLandmark: diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index b406b0c316c..1305676d7ad 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -70,7 +70,7 @@ func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypt } } -// bpdData contains data that the copy pipeline needs about the encryption step. +// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step. type bpEncryptionStepData struct { encrypting bool // We are actually encrypting the stream finalizer ocicrypt.EncryptLayerFinalizer diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index 30f6da25112..f252e3476f4 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -340,7 +340,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, if err != nil { return nil, err } - sigs = append(sigs, newSigs...) + sigs = append(slices.Clone(sigs), newSigs...) c.Printf("Storing list signatures\n") if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 37b1bfe987a..67ca43f7bcf 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -277,7 +277,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar if err != nil { return copySingleImageResult{}, err } - sigs = append(sigs, newSigs...) + sigs = append(slices.Clone(sigs), newSigs...) 
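[Note: the slices.Clone() guards added here and in several files below (copy/multiple.go, docker/docker_image_dest.go, internal/manifest, oci/layout) all address the same Go pitfall: append() can write into spare capacity of a backing array that the caller still shares. A minimal standalone sketch of the hazard, with made-up values rather than code from this patch:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	// A caller-provided slice with spare capacity (len 1, cap 2).
	base := make([]string, 1, 2)
	base[0] = "manifest-A"

	// Without cloning, both appends reuse base's backing array:
	// the second append overwrites the element the first one added.
	a := append(base, "sig-1")
	b := append(base, "sig-2")
	fmt.Println(a[1], b[1]) // "sig-2 sig-2" - a[1] was silently clobbered

	// Cloning first forces a private backing array, so the results stay independent.
	c := append(slices.Clone(base), "sig-1")
	d := append(slices.Clone(base), "sig-2")
	fmt.Println(c[1], d[1]) // "sig-1 sig-2"
}

End of note.]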
if len(sigs) > 0 { c.Printf("Storing signatures\n") @@ -380,8 +380,9 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, compressionAlgos := set.New[string]() for _, srcInfo := range ic.src.LayerInfos() { - compression := compressionAlgorithmFromMIMEType(srcInfo) - compressionAlgos.Add(compression.Name()) + if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil { + compressionAlgos.Add(c.Name()) + } } algos, err := algorithmsByNames(compressionAlgos.Values()) @@ -743,7 +744,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { if srcInfo.Size != -1 { - bar.SetRefill(srcInfo.Size - bar.Current()) + refill := srcInfo.Size - bar.Current() + bar.SetCurrent(srcInfo.Size) + bar.SetRefill(refill) } bar.mark100PercentComplete() hideProgressBar = false diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go index 59e02462f0c..55431db13aa 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -2,6 +2,7 @@ package daemon import ( "context" + "encoding/json" "errors" "fmt" "io" @@ -85,12 +86,40 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe } }() + err = imageLoad(ctx, c, reader) +} + +// imageLoad accepts tar stream on reader and sends it to c +func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error { resp, err := c.ImageLoad(ctx, reader, true) if err != nil { - err = fmt.Errorf("saving image to docker engine: %w", err) - return + return fmt.Errorf("starting a load operation in docker engine: %w", err) } defer resp.Body.Close() + + // jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage, + // copied here to minimize dependencies. 
+ type jsonError struct { + Message string `json:"message,omitempty"` + } + type jsonMessage struct { + Error *jsonError `json:"errorDetail,omitempty"` + } + + dec := json.NewDecoder(resp.Body) + for { + var msg jsonMessage + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + return fmt.Errorf("parsing docker load progress: %w", err) + } + if msg.Error != nil { + return fmt.Errorf("docker engine reported: %s", msg.Error.Message) + } + } + return nil // No error reported = success } // DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go index 0fe915249b7..11b42c6e003 100644 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -24,6 +24,7 @@ import ( "github.com/docker/distribution/registry/api/errcode" dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" + "golang.org/x/exp/slices" ) // errNoErrorsInBody is returned when an HTTP response body parses to an empty @@ -105,7 +106,7 @@ func makeErrorList(err error) []error { } func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) + return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...)) } // handleErrorResponse returns error parsed from HTTP response for an diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 288dd1a93f1..6ce8f700838 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -363,6 +363,11 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima hostname := registry if registry == dockerHostname { hostname = dockerV1Hostname + // A search term of library/foo does not find the library/foo image on the docker.io servers, + // which is surprising - and that Docker is modifying the search term client-side this same way, + // and it seems convenient to do the same thing. + // Read more here: https://github.com/containers/image/pull/2133#issue-1928524334 + image = strings.TrimPrefix(image, "library/") } client, err := newDockerClient(sys, hostname, registry) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 0e7b154cc6e..774068c2769 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -137,7 +137,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, // the source blob is uncompressed, and the destination blob is being compressed "on the fly". 
- if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests { + if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests { logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref)) streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo) if err != nil { @@ -341,39 +341,58 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // Then try reusing blobs from other locations. candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute) for _, candidate := range candidates { - candidateRepo, err := parseBICLocationReference(candidate.Location) - if err != nil { - logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) - continue - } + var err error compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) if err != nil { logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err) continue } + var candidateRepo reference.Named + if !candidate.UnknownLocation { + candidateRepo, err = parseBICLocationReference(candidate.Location) + if err != nil { + logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) + continue + } + } if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) { requiredCompression := "nil" if compressionAlgorithm != nil { requiredCompression = compressionAlgorithm.Name() } - logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name()) + if !candidate.UnknownLocation { + logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name()) + } else { + logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression) + } continue } - if candidate.CompressorName != blobinfocache.Uncompressed { - logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + if !candidate.UnknownLocation { + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + } else { + logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name()) + } + // Sanity checks: + if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { + // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo + // (the "from" parameter); in that case we might try to use these candidates as well. + // + // OTOH that would mean we can’t do the “blobExists” check, and if there is no match + // we could get an upload request that we would have to cancel. + logrus.Debugf("... 
Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) + continue + } } else { - logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name()) - } - - // Sanity checks: - if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { - // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo - // (the "from" parameter); in that case we might try to use these candidates as well. - // - // OTOH that would mean we can’t do the “blobExists” check, and if there is no match - // we could get an upload request that we would have to cancel. - logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) - continue + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName) + } else { + logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String()) + } + // This digest is a known variant of this blob but we don’t + // have a recorded location in this registry, let’s try looking + // for it in the current repo. + candidateRepo = reference.TrimNamed(d.ref.ref) } if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest { logrus.Debug("... Already tried the primary destination") @@ -688,6 +707,10 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context. } } + // To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it. + ociManifest.Layers = slices.Clone(ociManifest.Layers) + // We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to + // the slice in the original object (or in a newly allocated object). for _, sig := range signatures { mimeType := sig.UntrustedMIMEType() payloadBlob := sig.UntrustedPayload() diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go index 123f6ce6f1c..f2ebb929a2f 100644 --- a/vendor/github.com/containers/image/v5/image/unparsed.go +++ b/vendor/github.com/containers/image/v5/image/unparsed.go @@ -2,6 +2,8 @@ package image import ( "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/unparsedimage" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) @@ -17,3 +19,23 @@ type UnparsedImage = image.UnparsedImage func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { return image.UnparsedInstance(src, instanceDigest) } + +// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef +type unparsedWithRef struct { + private.UnparsedImage + ref types.ImageReference +} + +func (uwr *unparsedWithRef) Reference() types.ImageReference { + return uwr.ref +} + +// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef. +// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image +// based on a remote-registry policy. 
+func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage { + return &unparsedWithRef{ + UnparsedImage: unparsedimage.FromPublic(wrappedInstance), + ref: replacementRef, + } +} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go index fdd245812bf..429d6826355 100644 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -32,7 +32,7 @@ type BlobInfoCache2 interface { // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) - // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations + // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). // @@ -46,7 +46,8 @@ type BlobInfoCache2 interface { // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression - Location types.BICLocationReference + Digest digest.Digest + CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression + UnknownLocation bool // is true when `Location` for this blob is not set + Location types.BICLocationReference // not set if UnknownLocation is set to `true` } diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go index 6629967be94..df0e8e41716 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -196,14 +196,12 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti return m.convertToManifestSchema2(ctx, options) } -// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions. +// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format. // If not, it returns (nil, nil). // If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos, // and edits *options to not try decryption again. -func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { - if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool { - return info.CryptoOperation == types.Decrypt - }) { +func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { + if options == nil || options.LayerInfos == nil { return nil, nil } @@ -212,19 +210,35 @@ func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.Manife return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos)) } - res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionaly deviate. 
- updatedEdits := slices.Clone(options.LayerInfos) - for i, info := range options.LayerInfos { - if info.CryptoOperation == types.Decrypt { - res[i].CryptoOperation = types.Decrypt - updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. + ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate. + laterEdits := slices.Clone(options.LayerInfos) + needsOCIOnlyEdits := false + for i, edit := range options.LayerInfos { + // Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. + ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal + ociOnlyEdits[i].CompressionAlgorithm = nil + + if edit.CryptoOperation == types.Decrypt { + needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas + ociOnlyEdits[i].CryptoOperation = types.Decrypt + laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. + } + + if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd || + originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas. + ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation + ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm + laterEdits[i].CompressionOperation = types.PreserveOriginal + laterEdits[i].CompressionAlgorithm = nil } - // Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. - res[i].CompressionOperation = types.PreserveOriginal - res[i].CompressionAlgorithm = nil } - options.LayerInfos = updatedEdits - return res, nil + if !needsOCIOnlyEdits { + return nil, nil + } + + options.LayerInfos = laterEdits + return ociOnlyEdits, nil } // convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. @@ -238,15 +252,15 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type // Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits // which remove OCI-specific features, because trying to convert those layers would fail. - // So, do the layer updates for decryption. + // So, do the layer updates for decryption, and for conversions from Zstd. 
ociManifest := m.m - layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options) + ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options) if err != nil { return nil, err } - if layerDecryptEdits != nil { + if ociOnlyEdits != nil { ociManifest = manifest.OCI1Clone(ociManifest) - if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil { + if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil { return nil, err } } @@ -275,9 +289,8 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType case imgspecv1.MediaTypeImageLayerZstd: return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - // FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc, - ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc: + ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc: return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType) default: return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 357e2f3d3e3..7ce5bb0696e 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -133,7 +133,9 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { } } if len(addedEntries) != 0 { - index.Manifests = append(index.Manifests, addedEntries...) + // slices.Clone() here to ensure a private backing array; + // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. + index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) } return nil } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index dcd2646d138..d8d06513b5f 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -167,7 +167,9 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { } } if len(addedEntries) != 0 { - index.Manifests = append(index.Manifests, addedEntries...) + // slices.Clone() here to ensure the slice uses a private backing array; + // an external caller could have manually created OCI1IndexPublic with a slice with extra capacity. + index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) 
} if len(addedEntries) != 0 || updatedAnnotations { slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int { @@ -220,7 +222,7 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip case ic.manifestPosition != other.manifestPosition: return ic.manifestPosition < other.manifestPosition } - panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition. + panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition. } // chooseInstance is a private equivalent to ChooseInstanceByCompression, diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go index 6c9ee334027..ee8409896cb 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go @@ -28,6 +28,18 @@ func (e ImageNotFoundError) Error() string { return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) } +// ArchiveFileNotFoundError occurs when the archive file does not exist. +type ArchiveFileNotFoundError struct { + // ref is the image reference + ref ociArchiveReference + // path is the file path that was not present + path string +} + +func (e ArchiveFileNotFoundError) Error() string { + return fmt.Sprintf("archive file not found: %q", e.path) +} + type ociArchiveImageSource struct { impl.Compat diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 2a03feeeaca..d5fee36310f 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "strings" @@ -171,18 +172,24 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) // creates the temporary directory and copies the tarred content to it func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) { + src := ref.resolvedFile + arch, err := os.Open(src) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return tempDirOCIRef{}, ArchiveFileNotFoundError{ref: ref, path: src} + } else { + return tempDirOCIRef{}, err + } + } + defer arch.Close() + tempDirRef, err := createOCIRef(sys, ref.image) if err != nil { return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err) } - src := ref.resolvedFile dst := tempDirRef.tempDirectory + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
- arch, err := os.Open(src) - if err != nil { - return tempDirOCIRef{}, err - } - defer arch.Close() if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil { if err := tempDirRef.deleteTempDir(); err != nil { return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err) diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go new file mode 100644 index 00000000000..8dd54f255ae --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go @@ -0,0 +1,240 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "os" + + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +// DeleteImage deletes the named image from the directory, if supported. +func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + sharedBlobsDir := "" + if sys != nil && sys.OCISharedBlobDirPath != "" { + sharedBlobsDir = sys.OCISharedBlobDirPath + } + + descriptor, descriptorIndex, err := ref.getManifestDescriptor() + if err != nil { + return err + } + + var blobsUsedByImage map[digest.Digest]int + + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir) + case imgspecv1.MediaTypeImageIndex: + blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir) + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + } + if err != nil { + return err + } + + blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir) + if err != nil { + return err + } + + err = ref.deleteBlobs(blobsToDelete) + if err != nil { + return err + } + + return ref.deleteReferenceFromIndex(descriptorIndex) +} + +func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { + manifest, err := ref.getManifest(descriptor, sharedBlobsDir) + if err != nil { + return nil, err + } + blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest) + blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference + + return blobsUsedInManifest, nil +} + +func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { + blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return nil, err + } + index, err := parseIndex(blobPath) + if err != nil { + return nil, err + } + + blobsUsedInImageRefIndex := make(map[digest.Digest]int) + err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir) + if err != nil { + return nil, err + } + blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference + + return blobsUsedInImageRefIndex, nil +} + +// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map +func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error { + for _, descriptor := range index.Manifests { + 
destination[descriptor.Digest]++ + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + manifest, err := ref.getManifest(&descriptor, sharedBlobsDir) + if err != nil { + return err + } + for digest, count := range ref.getBlobsUsedInManifest(manifest) { + destination[digest] += count + } + case imgspecv1.MediaTypeImageIndex: + blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return err + } + index, err := parseIndex(blobPath) + if err != nil { + return err + } + err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir) + if err != nil { + return err + } + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + } + } + + return nil +} + +func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int { + blobsUsedInManifest := make(map[digest.Digest]int, 0) + + blobsUsedInManifest[manifest.Config.Digest]++ + for _, layer := range manifest.Layers { + blobsUsedInManifest[layer.Digest]++ + } + + return blobsUsedInManifest +} + +// This takes in a map of the digest and their usage count in the manifest to be deleted +// It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted +func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) { + rootIndex, err := ref.getIndex() + if err != nil { + return nil, err + } + blobsUsedInRootIndex := make(map[digest.Digest]int) + err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir) + if err != nil { + return nil, err + } + + blobsToDelete := set.New[digest.Digest]() + + for digest, count := range blobsUsedInRootIndex { + if count-blobsUsedByDescriptorToDelete[digest] == 0 { + blobsToDelete.Add(digest) + } + } + + return blobsToDelete, nil +} + +// This transport never generates layouts where blobs for an image are both in the local blobs directory +// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set. +// +// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what +// the other layouts sharing that directory are, and we might not even have permission to read them), +// so we can’t really delete any blobs in that case. +// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt, +// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently +// check for local blobs (but we should make no noise if the blobs are actually in the shared directory). 
+// +// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set +func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error { + for _, digest := range blobsToDelete.Values() { + blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above + if err != nil { + return err + } + err = deleteBlob(blobPath) + if err != nil { + return err + } + } + + return nil +} + +func deleteBlob(blobPath string) error { + logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath)) + + err := os.Remove(blobPath) + if err != nil && !os.IsNotExist(err) { + return err + } else { + return nil + } +} + +func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error { + index, err := ref.getIndex() + if err != nil { + return err + } + + index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1) + + return saveJSON(ref.indexPath(), index) +} + +func saveJSON(path string, content any) error { + // If the file already exists, get its mode to preserve it + var mode fs.FileMode + existingfi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return err + } else { // File does not exist, use default mode + mode = 0644 + } + } else { + mode = existingfi.Mode() + } + + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + defer file.Close() + + return json.NewEncoder(file).Encode(content) +} + +func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) { + manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return nil, err + } + + manifest, err := parseJSON[imgspecv1.Manifest](manifestPath) + if err != nil { + return nil, err + } + + return manifest, nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 8ff43d4480f..100d16763f4 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -19,6 +19,7 @@ import ( digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) type ociImageDestination struct { @@ -84,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (private.Im // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, // but it MAY be empty (e.g. if we never end up calling PutBlob) // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 - if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil { return nil, err } return d, nil @@ -271,8 +272,8 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { return } } - // It's a new entry to be added to the index. - d.index.Manifests = append(d.index.Manifests, *desc) + // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created. + d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc) } // Commit marks the process of storing the image as successful and asks for the image to be persisted. 
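The append(slices.Clone(...), *desc) in addManifest above (the same pattern appears later in this patch in shortnames.go and tlsclientconfig.go) exists because a plain append can write through spare capacity of a backing array the caller still shares. A minimal standalone Go sketch of that hazard, using only the standard library and golang.org/x/exp/slices; the variable names are illustrative, not from the patch:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	backing := make([]int, 2, 4) // a caller-owned slice with spare capacity
	backing[0], backing[1] = 1, 2
	view := backing[:2] // a second slice aliasing the same backing array

	// Without a clone, append reuses the spare capacity and writes into the
	// shared array: any 3-element reslice of backing now observes the 3.
	grown := append(view, 3)

	// Cloning first forces a fresh backing array, so the caller's slice can
	// never observe the appended element.
	safe := append(slices.Clone(view), 4)

	fmt.Println(grown, safe, backing[:3]) // [1 2 3] [1 2 4] [1 2 3]
}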
@@ -283,7 +284,13 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { - if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { + layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{ + Version: imgspecv1.ImageLayoutVersion, + }) + if err != nil { + return err + } + if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil { return err } indexJSON, err := json.Marshal(d.index) diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index 6b423f3b05d..f5f1debc9fe 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -60,7 +60,7 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSo client := &http.Client{} client.Transport = tr - descriptor, err := ref.getManifestDescriptor() + descriptor, _, err := ref.getManifestDescriptor() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 6586b84402e..1e26dc52443 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -160,48 +160,56 @@ func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) // getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening an index, nil is returned together // with an error.
func (ref ociReference) getIndex() (*imgspecv1.Index, error) { - indexJSON, err := os.Open(ref.indexPath()) + return parseIndex(ref.indexPath()) +} + +func parseIndex(path string) (*imgspecv1.Index, error) { + return parseJSON[imgspecv1.Index](path) +} + +func parseJSON[T any](path string) (*T, error) { + content, err := os.Open(path) if err != nil { return nil, err } - defer indexJSON.Close() + defer content.Close() - index := &imgspecv1.Index{} - if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + obj := new(T) + if err := json.NewDecoder(content).Decode(obj); err != nil { return nil, err } - return index, nil + return obj, nil } -func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) { index, err := ref.getIndex() if err != nil { - return imgspecv1.Descriptor{}, err + return imgspecv1.Descriptor{}, -1, err } if ref.image == "" { // return manifest if only one image is in the oci directory if len(index.Manifests) != 1 { // ask user to choose image when more than one image in the oci directory - return imgspecv1.Descriptor{}, ErrMoreThanOneImage + return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage } - return index.Manifests[0], nil + return index.Manifests[0], 0, nil } else { // if image specified, look through all manifests for a match var unsupportedMIMETypes []string - for _, md := range index.Manifests { + for i, md := range index.Manifests { if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex { - return md, nil + return md, i, nil } unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) } } if len(unsupportedMIMETypes) != 0 { - return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) } } - return imgspecv1.Descriptor{}, ImageNotFoundError{ref} + return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref} } // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name @@ -211,7 +219,8 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, if !ok { return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef") } - return ociRef.getManifestDescriptor() + md, _, err := ociRef.getManifestDescriptor() + return md, err } // NewImageSource returns a types.ImageSource for this reference. @@ -226,19 +235,14 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst return newImageDestination(sys, ref) } -// DeleteImage deletes the named image from the registry, if supported. -func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.New("Deleting images not implemented for oci: images") -} - // ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. func (ref ociReference) ociLayoutPath() string { - return filepath.Join(ref.dir, "oci-layout") + return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile) } // indexPath returns a path for the index.json within a directory using OCI conventions. 
func (ref ociReference) indexPath() string { - return filepath.Join(ref.dir, "index.json") + return filepath.Join(ref.dir, imgspecv1.ImageIndexFile) } // blobPath returns a path for a blob within a directory using OCI image-layout conventions. @@ -246,9 +250,11 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st if err := digest.Validate(); err != nil { return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err) } - blobDir := filepath.Join(ref.dir, "blobs") + var blobDir string if sharedBlobDir != "" { blobDir = sharedBlobDir + } else { + blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir) } return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index bc9315f6ef6..97562687c86 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -10,15 +10,20 @@ import ( "github.com/opencontainers/go-digest" ) -// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, +// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates, // and therefore ultimately by types.BlobInfoCache.CandidateLocations. // This is a heuristic/guess, and could well use a different value. const replacementAttempts = 5 +// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2. +// This is a heuristic/guess, and could well use a different value. +const replacementUnknownLocationAttempts = 2 + // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. type CandidateWithTime struct { Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) + LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) } // candidateSortState is a local state implementing sort.Interface on candidates to prioritize, @@ -77,9 +82,22 @@ func (css *candidateSortState) Swap(i, j int) { css.cs[i], css.cs[j] = css.cs[j], css.cs[i] } -// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the -// number of entries to limit, only to make testing simpler. -func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 { +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the +// number of entries to limit for known and unknown location separately, only to make testing simpler. 
+// TODO: this function is no longer destructive; the prioritized result is actually a copy of the original +// candidate set, so in the future we might want to rename this public API and remove the destructive prefix. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { + // split unknown candidates and known candidates + // and limit them separately. + var knownLocationCandidates []CandidateWithTime + var unknownLocationCandidates []CandidateWithTime // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // compare equal. // FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available. @@ -88,24 +106,34 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest: primaryDigest, uncompressedDigest: uncompressedDigest, }) + for _, candidate := range cs { + if candidate.Candidate.UnknownLocation { + unknownLocationCandidates = append(unknownLocationCandidates, candidate) + } else { + knownLocationCandidates = append(knownLocationCandidates, candidate) + } + } - resLength := len(cs) - if resLength > maxCandidates { - resLength = maxCandidates + knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit) + remainingCapacity := totalLimit - knownLocationCandidatesUsed + unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates))) + res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed) + for i := 0; i < knownLocationCandidatesUsed; i++ { + res[i] = knownLocationCandidates[i].Candidate } - res := make([]blobinfocache.BICReplacementCandidate2, resLength) - for i := range res { - res[i] = cs[i].Candidate + // If candidates with unknown location are found, let's add them to the final list + for i := 0; i < unknownLocationCandidatesUsed; i++ { + res = append(res, unknownLocationCandidates[i].Candidate) } return res } // DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, -// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), -// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. +// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), and returns an +// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. // // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 { - return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) + return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts) } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 9e5c4256ba2..cfad16b2ec4 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -133,24 +133,39 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso mem.compressors[blobDigest] = compressorName } -// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. -func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { +// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory +// with corresponding compression info from mem.compressors, and returns the result of appending +// them to candidates. v2Output allows including candidates with unknown location, and filters out +// candidates with unknown compression. +func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime { + compressorName := blobinfocache.UnknownCompression + if v, ok := mem.compressors[digest]; ok { + compressorName = v + } + if compressorName == blobinfocache.UnknownCompression && v2Output { + return candidates + } locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present - for l, t := range locations { - compressorName, compressorKnown := mem.compressors[digest] - if !compressorKnown { - if requireCompressionInfo { - continue - } - compressorName = blobinfocache.UnknownCompression + if len(locations) > 0 { + for l, t := range locations { + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: l, + }, + LastSeen: t, + }) } + } else if v2Output { candidates = append(candidates, prioritize.CandidateWithTime{ Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressorName, - Location: l, + Digest: digest, + CompressorName: compressorName, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, }, - LastSeen: t, + LastSeen: time.Time{}, }) } return candidates @@ -166,7 +181,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) } -// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// CandidateLocations2 returns a prioritized, limited, number 
of blobs and their locations (if known) that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, @@ -176,23 +191,24 @@ func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope type return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) } -func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { +func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 { mem.mutex.Lock() defer mem.mutex.Unlock() res := []prioritize.CandidateWithTime{} - res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo) + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output) var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { - if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok { + otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map + if otherDigests != nil { for _, d := range otherDigests.Values() { if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) + res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output) } } } if uncompressedDigest != primaryDigest { - res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo) + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output) } } } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go index 276913d6aba..2b446a61c48 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go @@ -57,7 +57,7 @@ type cache struct { // The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool. // That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily - // the case for callers of c/image, where image operations might be a small proportion of hte total runtime, and the cache is fairly + // the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly // incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have // a Close method, so creating a lot of single-use caches could leak data.
// @@ -117,7 +117,7 @@ func (sqc *cache) Open() { if sqc.refCount == 0 { db, err := rawOpen(sqc.path) if err != nil { - logrus.Warnf("Error opening (previously-succesfully-opened) blob info cache at %q: %v", sqc.path, err) + logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err) db = nil // But still increase sqc.refCount, because a .Close() will happen } sqc.db = db @@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) { // dbTransaction calls fn within a read-write transaction in db. func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) { - // Ideally we should be able to distinguish between read-only and read-write transctions, see the _txlock=exclusive dicussion. + // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion. var zeroRes T // A zero value of T @@ -249,7 +249,7 @@ func ensureDBHasCurrentSchema(db *sql.DB) error { // * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests. // // Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra - // join to translate from/to the user-provided digests anyway. If anything, that extra join (potentialy more btree lookups) + // join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups) // is probably costlier than comparing a few more bytes of data. // // Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without @@ -427,11 +427,13 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } -// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. -func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) { +// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), +// and returns the result of appending them to candidates. v2Output allows including candidates with unknown +// location, and filters out candidates with unknown compression. +func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) { var rows *sql.Rows var err error - if requireCompressionInfo { + if v2Output { rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+ "ON KnownLocations.digest = DigestCompressors.digest "+ "WHERE transport = ? AND scope = ?
AND KnownLocations.digest = ?", @@ -448,6 +450,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW } defer rows.Close() + res := []prioritize.CandidateWithTime{} for rows.Next() { var location string var time time.Time @@ -455,7 +458,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW if err := rows.Scan(&location, &time, &compressorName); err != nil { return nil, fmt.Errorf("scanning candidate: %w", err) } - candidates = append(candidates, prioritize.CandidateWithTime{ + res = append(res, prioritize.CandidateWithTime{ Candidate: blobinfocache.BICReplacementCandidate2{ Digest: digest, CompressorName: compressorName, @@ -467,10 +470,29 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterating through locations: %w", err) } + + if len(res) == 0 && v2Output { + compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String()) + if err != nil { + return nil, fmt.Errorf("scanning compressorName: %w", err) + } + if found { + res = append(res, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressor, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + LastSeen: time.Time{}, + }) + } + } + candidates = append(candidates, res...) return candidates, nil } -// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). 
// @@ -483,11 +505,11 @@ func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope type return sqc.candidateLocations(transport, scope, digest, canSubstitute, true) } -func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { +func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 { var uncompressedDigest digest.Digest // = "" res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) { res := []prioritize.CandidateWithTime{} - res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo) + res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output) if err != nil { return nil, err } @@ -516,7 +538,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types return nil, err } if otherDigest != primaryDigest && otherDigest != uncompressedDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo) + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output) if err != nil { return nil, err } @@ -527,7 +549,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types } if uncompressedDigest != primaryDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo) + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index eeb7c1effdb..a15b2b56e19 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -11,6 +11,7 @@ import ( "github.com/containers/image/v5/types" "github.com/manifoldco/promptui" "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" "golang.org/x/term" ) @@ -169,7 +170,7 @@ func (r *Resolved) Description() string { // pull errors must equal the amount of pull candidates. 
func (r *Resolved) FormatPullErrors(pullErrors []error) error { if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) { - pullErrors = append(pullErrors, + pullErrors = append(slices.Clone(pullErrors), fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates", len(r.PullCandidates), len(pullErrors), len(r.PullCandidates))) } diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 56b0d49390a..c6ec84bd5ae 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -66,7 +66,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { if err != nil { return err } - tlsc.Certificates = append(tlsc.Certificates, cert) + tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index 49f7d03c851..ba230d1fddc 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -10,6 +10,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" @@ -283,3 +284,29 @@ func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemC func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { return newImageDestination(sys, s) } + +// ResolveReference finds the underlying storage image for a storage.Transport reference. +// It returns that image, and an updated reference which can be used to refer back to the _same_ +// image again. +// +// This matters if the input reference contains a tagged name; the destination of the tag can +// move in local storage. The updated reference returned by this function contains the resolved +// image ID, so later uses of that updated reference will either continue to refer to the same +// image, or fail. +// +// Note that it _is_ possible for the later uses to fail, either because the image was removed +// completely, or because the name used in the reference was untagged (even if the underlying image +// ID still exists in local storage).
+func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) { + sref, ok := ref.(*storageReference) + if !ok { + return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(), + transports.ImageName(ref)) + } + clone := *sref // A shallow copy we can update + img, err := clone.resolveImage(nil) + if err != nil { + return nil, nil, err + } + return clone, img, nil +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index 58ba3ee651e..e9f42dc0a8c 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -48,9 +48,24 @@ type StoreTransport interface { GetStoreIfSet() storage.Store // GetImage retrieves the image from the transport's store that's named // by the reference. + // Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, + // this ignores that ID; and repeated calls of GetStoreImage with the same named reference + // can return different images, with no way for the caller to "freeze" the storage.Image identity + // without discarding the name entirely. + // + // Use storage.ResolveReference instead. GetImage(types.ImageReference) (*storage.Image, error) // GetStoreImage retrieves the image from a specified store that's named // by the reference. + // + // Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, + // this ignores that ID; and repeated calls of GetStoreImage with the same named reference + // can return different images, with no way for the caller to "freeze" the storage.Image identity + // without discarding the name entirely. + // + // Also, a StoreTransport reference already contains a store, so providing another one is redundant. + // + // Use storage.ResolveReference instead. GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) // ParseStoreReference parses a reference, overriding any store // specification that it may contain. @@ -290,6 +305,14 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc return s.ParseStoreReference(store, reference) } +// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, +// this ignores that ID; and repeated calls of GetStoreImage with the same named reference +// can return different images, with no way for the caller to "freeze" the storage.Image identity +// without discarding the name entirely. +// +// Also, a StoreTransport reference already contains a store, so providing another one is redundant. +// +// Use storage.ResolveReference instead. func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { dref := ref.DockerReference() if dref != nil { @@ -306,6 +329,12 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe return nil, storage.ErrImageUnknown } +// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, +// this ignores that ID; and repeated calls of GetStoreImage with the same named reference +// can return different images, with no way for the caller to "freeze" the storage.Image identity +// without discarding the name entirely. +// +// Use storage.ResolveReference instead. 
func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { store, err := s.GetStore() if err != nil { diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 33adb5f1dfa..7de93bb37fe 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -445,7 +445,7 @@ type ImageCloser interface { Close() error } -// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest +// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage type ManifestUpdateOptions struct { LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. EmbeddedDockerReference reference.Named @@ -457,7 +457,7 @@ type ManifestUpdateOptions struct { // ManifestUpdateInformation is a component of ManifestUpdateOptions, named here // only to make writing struct literals possible. type ManifestUpdateInformation struct { - Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) + Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. } diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 27d034dc55d..e93746e569a 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -8,10 +8,10 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 28 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile index dc9d9853754..97ddeefbb9b 100644 --- a/vendor/github.com/containers/ocicrypt/Makefile +++ b/vendor/github.com/containers/ocicrypt/Makefile @@ -28,6 +28,7 @@ vendor: go mod tidy test: + go clean -testcache go test ./... 
-test.v generate-protobuf: diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go index 236764d2338..b9a83c53606 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go @@ -41,7 +41,11 @@ func NewKeyWrapper() keywrap.KeyWrapper { // WrapKeys wraps the session key for recipients and encrypts the optsData, which // describes the symmetric key used for encrypting the layer func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { - pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...)) + // append({}, ...) allocates a fresh backing array, and that's necessary to guarantee concurrent calls to WrapKeys (as in c/image/copy.Image) + // can't race writing to the same backing array. + pubKeys := append([][]byte{}, ec.Parameters["pkcs11-pubkeys"]...) // In Go 1.21, slices.Clone(ec.Parameters["pkcs11-pubkeys"]) + pubKeys = append(pubKeys, ec.Parameters["pkcs11-yamls"]...) + pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, pubKeys) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go index 8665f6f21c4..c0c171824f7 100644 --- a/vendor/github.com/containers/ocicrypt/spec/spec.go +++ b/vendor/github.com/containers/ocicrypt/spec/spec.go @@ -9,8 +9,12 @@ const ( MediaTypeLayerZstdEnc = "application/vnd.oci.image.layer.v1.tar+zstd+encrypted" // MediaTypeLayerNonDistributableEnc is MIME type used for non distributable encrypted layers. MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted" - // MediaTypeLayerGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers. + // MediaTypeLayerNonDistributableGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers. MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted" - // MediaTypeLayerZstdEnc is MIME type used for non distributable encrypted zstd-compressed layers. - MediaTypeLayerNonDistributableZsdtEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted" + // MediaTypeLayerNonDistributableZstdEnc is MIME type used for non distributable encrypted zstd-compressed layers. + MediaTypeLayerNonDistributableZstdEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted" + // MediaTypeLayerNonDistributableZsdtEnc is MIME type used for non distributable encrypted zstd-compressed layers. + // + // Deprecated: Use [MediaTypeLayerNonDistributableZstdEnc].
+ MediaTypeLayerNonDistributableZsdtEnc = MediaTypeLayerNonDistributableZstdEnc ) diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 00c69d7f128..c41dd5da2c5 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-38" + FEDORA_NAME: "fedora-39" DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20230816t191118z-f38f37d13" + IMAGE_SUFFIX: "c20231004t194547z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -113,8 +113,6 @@ debian_testing_task: &debian_testing TEST_DRIVER: "fuse-overlay-whiteout" - env: TEST_DRIVER: "btrfs" - - env: - TEST_DRIVER: "zfs" lint_task: diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 7ef40656729..c3bcae0809f 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.50.2 +1.50.3-dev diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index 1fb04dc3edb..ab32d652e7d 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -189,6 +189,9 @@ type DriverWithDifferOutput struct { BigData map[string][]byte TarSplit []byte TOCDigest digest.Digest + // Artifacts is a collection of additional artifacts + // generated by the differ that the storage driver can use. + Artifacts map[string]interface{} } type DifferOutputFormat int diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 60980994b2c..d8139f65665 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -275,3 +275,36 @@ func supportsIdmappedLowerLayers(home string) (bool, error) { }() return true, nil } + +// supportsDataOnlyLayers checks if the kernel supports mounting an overlay file system +// that uses data-only layers.
+func supportsDataOnlyLayers(home string) (bool, error) { + layerDir, err := os.MkdirTemp(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerDirDataOnly := filepath.Join(layerDir, "lower-data") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerDirDataOnly, 0o700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0o700, 0, 0) + _ = idtools.MkdirAs(workDir, 0o700, 0, 0) + + opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on", lowerDir, lowerDirDataOnly, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go index 5cdbcff6c46..347e4d35c49 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go @@ -11,7 +11,7 @@ func composeFsSupported() bool { return false } -func generateComposeFsBlob(toc []byte, composefsDir string) error { +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { return fmt.Errorf("composefs is not supported") } @@ -19,6 +19,6 @@ func mountComposefsBlob(dataDir, mountPoint string) error { return fmt.Errorf("composefs is not supported") } -func enableVerityRecursive(path string) error { - return fmt.Errorf("composefs is not supported") +func enableVerityRecursive(path string) (map[string]string, error) { + return nil, fmt.Errorf("composefs is not supported") } diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go index aaf76913172..26dd36866cc 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go @@ -4,7 +4,6 @@ package overlay import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -16,6 +15,7 @@ import ( "syscall" "unsafe" + "github.com/containers/storage/pkg/chunked/dump" "github.com/containers/storage/pkg/loopback" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -29,7 +29,7 @@ var ( func getComposeFsHelper() (string, error) { composeFsHelperOnce.Do(func() { - composeFsHelperPath, composeFsHelperErr = exec.LookPath("composefs-from-json") + composeFsHelperPath, composeFsHelperErr = exec.LookPath("mkcomposefs") }) return composeFsHelperPath, composeFsHelperErr } @@ -53,7 +53,23 @@ func enableVerity(description string, fd int) error { return nil } -func enableVerityRecursive(path string) error { +type verityDigest struct { + Fsv unix.FsverityDigest + Buf [64]byte +} + +func measureVerity(description string, fd int) (string, error) { + var digest verityDigest + digest.Fsv.Size = 64 + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) + if e1 != 0 { + return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) + } + return 
fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil +} + +func enableVerityRecursive(root string) (map[string]string, error) { + digests := make(map[string]string) walkFn := func(path string, d fs.DirEntry, err error) error { if err != nil { return err @@ -71,29 +87,47 @@ func enableVerityRecursive(path string) error { if err := enableVerity(path, int(f.Fd())); err != nil { return err } + + verity, err := measureVerity(path, int(f.Fd())) + if err != nil { + return err + } + + relPath, err := filepath.Rel(root, path) + if err != nil { + return err + } + + digests[relPath] = verity return nil } - return filepath.WalkDir(path, walkFn) + err := filepath.WalkDir(root, walkFn) + return digests, err } func getComposefsBlob(dataDir string) string { return filepath.Join(dataDir, "composefs.blob") } -func generateComposeFsBlob(toc []byte, composefsDir string) error { +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { if err := os.MkdirAll(composefsDir, 0o700); err != nil { return err } + dumpReader, err := dump.GenerateDump(toc, verityDigests) + if err != nil { + return err + } + destFile := getComposefsBlob(composefsDir) writerJson, err := getComposeFsHelper() if err != nil { - return fmt.Errorf("failed to find composefs-from-json: %w", err) + return fmt.Errorf("failed to find mkcomposefs: %w", err) } fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644) if err != nil { - return fmt.Errorf("failed to open output file: %w", err) + return fmt.Errorf("failed to open output file %q: %w", destFile, err) } outFd := os.NewFile(uintptr(fd), "outFd") @@ -109,10 +143,10 @@ func generateComposeFsBlob(toc []byte, composefsDir string) error { // a scope to close outFd before setting fsverity on the read-only fd. defer outFd.Close() - cmd := exec.Command(writerJson, "--format=erofs", "--out=/proc/self/fd/3", "/proc/self/fd/0") + cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3") cmd.ExtraFiles = []*os.File{outFd} cmd.Stderr = os.Stderr - cmd.Stdin = bytes.NewReader(toc) + cmd.Stdin = dumpReader if err := cmd.Run(); err != nil { return fmt.Errorf("failed to convert json to erofs: %w", err) } @@ -166,7 +200,7 @@ func hasACL(path string) (bool, error) { func mountComposefsBlob(dataDir, mountPoint string) error { blobFile := getComposefsBlob(dataDir) - loop, err := loopback.AttachLoopDevice(blobFile) + loop, err := loopback.AttachLoopDeviceRO(blobFile) if err != nil { return err } diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index 33e60b1189a..8829e55e989 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -141,14 +141,27 @@ func mountOverlayFromMain() { // the new value for the list of lowers, because it's shorter. 
if lowerv != "" { lowers := strings.Split(lowerv, ":") - for i := range lowers { - lowerFd, err := unix.Open(lowers[i], unix.O_RDONLY, 0) + var newLowers []string + dataOnly := false + for _, lowerPath := range lowers { + if lowerPath == "" { + dataOnly = true + continue + } + lowerFd, err := unix.Open(lowerPath, unix.O_RDONLY, 0) if err != nil { fatal(err) } - lowers[i] = fmt.Sprintf("%d", lowerFd) + var lower string + if dataOnly { + lower = fmt.Sprintf(":%d", lowerFd) + dataOnly = false + } else { + lower = fmt.Sprintf("%d", lowerFd) + } + newLowers = append(newLowers, lower) } - lowerv = strings.Join(lowers, ":") + lowerv = strings.Join(newLowers, ":") } // Reconstruct the Label field. diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 0f6d74021fb..04ecf871fd0 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -82,7 +82,7 @@ const ( lowerFile = "lower" maxDepth = 500 - zstdChunkedManifest = "zstd-chunked-manifest" + tocArtifact = "toc" // idLength represents the number of random characters // which can be used to create the unique link identifier @@ -1003,8 +1003,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } } if parent != "" { - parentBase, parentImageStore, _ := d.dir2(parent) - if parentImageStore != "" { + parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) + // If parentBase path is additional image store, select the image contained in parentBase. + // See https://github.com/containers/podman/issues/19748 + if parentImageStore != "" && !inAdditionalStore { parentBase = parentImageStore } st, err := system.Stat(filepath.Join(parentBase, "diff")) @@ -1079,12 +1081,13 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } if parent != "" { - parentDir, parentImageStore, _ := d.dir2(parent) - base := parentDir - if parentImageStore != "" { - base = parentImageStore + parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) + // If parentBase path is additional image store, select the image contained in parentBase. + // See https://github.com/containers/podman/issues/19748 + if parentImageStore != "" && !inAdditionalStore { + parentBase = parentImageStore } - st, err := system.Stat(filepath.Join(base, "diff")) + st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err } @@ -1447,7 +1450,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" if len(optsList) == 0 { - optsList = strings.Split(d.options.mountOptions, ",") + if d.options.mountOptions != "" { + optsList = strings.Split(d.options.mountOptions, ",") + } } else { // If metacopy=on is present in d.options.mountOptions it must be present in the mount // options otherwise the kernel refuses to follow the metacopy xattr. 
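In the rewritten loop above, an empty element of the ':'-separated lowerdir list (the result of a "::" in the mount string) flags the next lower as a data-only layer, and the flag is restored by prefixing the reopened entry with ':'. A standalone sketch of that round-trip, assuming the overlayfs "lowerdir=a::b" data-only syntax; the paths are illustrative and stand in for the file descriptors the real code opens:

package main

import (
	"fmt"
	"strings"
)

// rewriteLowers mirrors the shape of the loop in mountOverlayFromMain: an
// empty element marks the following lower as data-only, and that marker is
// reproduced by prefixing the rewritten entry with ':'.
func rewriteLowers(lowerv string) string {
	var newLowers []string
	dataOnly := false
	for _, lowerPath := range strings.Split(lowerv, ":") {
		if lowerPath == "" { // the empty element produced by "::"
			dataOnly = true
			continue
		}
		lower := lowerPath // the real code reopens the path and uses its fd here
		if dataOnly {
			lower = ":" + lower
			dataOnly = false
		}
		newLowers = append(newLowers, lower)
	}
	return strings.Join(newLowers, ":")
}

func main() {
	// "l2" is a data-only lower: it can back metacopy/redirect data but is
	// never merged into the visible tree. Join restores the "::" marker.
	fmt.Println(rewriteLowers("l1::l2:l3")) // l1::l2:l3
}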
@@ -1524,15 +1529,8 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO defer cleanupFunc() } - composefsLayers := filepath.Join(workDirBase, "composefs-layers") - if err := os.MkdirAll(composefsLayers, 0o700); err != nil { - return "", err - } - skipIDMappingLayers := make(map[string]string) - composeFsLayers := []string{} - composefsMounts := []string{} defer func() { for _, m := range composefsMounts { @@ -1540,7 +1538,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } }() - maybeAddComposefsMount := func(lowerID string, i int) (string, error) { + composeFsLayers := []string{} + composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers") + maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) _, err = os.Stat(composefsBlob) if err != nil { @@ -1551,7 +1551,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID) - dest := filepath.Join(composefsLayers, fmt.Sprintf("%d", i)) + if readWrite && i == 0 { + return "", fmt.Errorf("cannot mount a composefs layer as writeable") + } + + dest := filepath.Join(composeFsLayersDir, fmt.Sprintf("%d", i)) if err := os.MkdirAll(dest, 0o700); err != nil { return "", err } @@ -1571,7 +1575,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO diffDir := path.Join(workDirBase, "diff") - if dest, err := maybeAddComposefsMount(id, 0); err != nil { + if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil { return "", err } else if dest != "" { diffDir = dest @@ -1623,7 +1627,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return "", err } lowerID := filepath.Base(filepath.Dir(linkContent)) - composefsMount, err := maybeAddComposefsMount(lowerID, i+1) + composefsMount, err := maybeAddComposefsMount(lowerID, i+1, readWrite) if err != nil { return "", err } @@ -1655,8 +1659,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO optsList = append(optsList, "metacopy=on", "redirect_dir=on") } - absLowers = append(absLowers, composeFsLayers...) - if len(absLowers) == 0 { absLowers = append(absLowers, path.Join(dir, "empty")) } @@ -1750,11 +1752,20 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO absLowers = newAbsDir } + lowerDirs := strings.Join(absLowers, ":") + if len(composeFsLayers) > 0 { + composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::") + lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs + } + // absLowers is not valid anymore now as we have added composeFsLayers to it, so prevent + // its usage. 
+ absLowers = nil //nolint:ineffassign + var opts string if readWrite { - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workdir) } else { - opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs) } if len(optsList) > 0 { opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) @@ -1798,9 +1809,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if readWrite { diffDir := path.Join(id, "diff") workDir := path.Join(id, "work") - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir) + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workDir) } else { - opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs) } if len(optsList) > 0 { opts = strings.Join(append([]string{opts}, optsList...), ",") @@ -2007,11 +2018,34 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { return os.RemoveAll(stagingDirectory) } +func (d *Driver) supportsDataOnlyLayers() (bool, error) { + feature := "dataonly-layers" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that data-only layers for overlay are supported") + return true, nil + } + logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported") + return false, errors.New(overlayCacheText) + } + supportsDataOnly, err := supportsDataOnlyLayers(d.home) + if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil { + return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2) + } + return supportsDataOnly, err +} + func (d *Driver) useComposeFs() bool { if !composeFsSupported() || unshare.IsRootless() { return false } - return true + supportsDataOnlyLayers, err := d.supportsDataOnlyLayers() + if err != nil { + logrus.Debugf("Check for data-only layers failed with: %v", err) + return false + } + return supportsDataOnlyLayers } // ApplyDiff applies the changes in the new layer using the specified function @@ -2074,11 +2108,12 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri if d.useComposeFs() { // FIXME: move this logic into the differ so we don't have to open // the file twice. 
- if err := enableVerityRecursive(stagingDirectory); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + verityDigests, err := enableVerityRecursive(stagingDirectory) + if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { logrus.Warningf("%s", err) } - toc := diffOutput.BigData[zstdChunkedManifest] - if err := generateComposeFsBlob(toc, d.getComposefsData(id)); err != nil { + toc := diffOutput.Artifacts[tocArtifact] + if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil { return err } } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index e5835757f6a..d105e73f6cd 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -1245,8 +1245,8 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount if parentLayer != nil { parent = parentLayer.ID } - var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings var ( + templateIDMappings *idtools.IDMappings templateMetadata string templateCompressedDigest digest.Digest templateCompressedSize int64 @@ -1274,11 +1274,6 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount } else { templateIDMappings = &idtools.IDMappings{} } - if parentLayer != nil { - parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap) - } else { - parentMappings = &idtools.IDMappings{} - } if mountLabel != "" { selinux.ReserveLabel(mountLabel) } @@ -1353,6 +1348,12 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount IDMappings: idMappings, } + var parentMappings, oldMappings *idtools.IDMappings + if parentLayer != nil { + parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap) + } else { + parentMappings = &idtools.IDMappings{} + } if moreOptions.TemplateLayer != "" { if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { cleanupFailureContext = fmt.Sprintf("creating a layer from template layer %q", moreOptions.TemplateLayer) @@ -1371,10 +1372,13 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount return nil, -1, fmt.Errorf("creating read-only layer with ID %q: %w", id, err) } } - oldMappings = parentMappings + if parentLayer != nil { + oldMappings = parentMappings + } } - if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { + if oldMappings != nil && + (!reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs())) { if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { cleanupFailureContext = "in UpdateLayerIDMap" return nil, -1, err diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 29f800b2af5..05d25711827 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -955,14 +955,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err if options.ForceMask != nil { // if ForceMask is in place, make sure lchown is disabled. 
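// The override xattr is now written once, after the archive has been fully
// unpacked (see the end of this function), using the ownership and mode of
// the tar root entry instead of whatever currently owns dest. The value has
// the form "uid:gid:0mode", e.g. "0:0:0755".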
doChown = false - uid, gid, mode, err := GetFileOwner(dest) - if err == nil { - value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) - if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { - return err - } - } } + var rootHdr *tar.Header // Iterate through the files in the archive. loop: @@ -1007,6 +1001,9 @@ loop: if err != nil { return err } + if rel == "." { + rootHdr = hdr + } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } @@ -1080,6 +1077,14 @@ loop: return err } } + + if options.ForceMask != nil && rootHdr != nil { + value := fmt.Sprintf("%d:%d:0%o", rootHdr.Uid, rootHdr.Gid, rootHdr.Mode) + if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 56c30e2675c..5d4befc2348 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -26,6 +26,8 @@ import ( const ( cacheKey = "chunked-manifest-cache" cacheVersion = 1 + + digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) type metadata struct { @@ -650,6 +652,9 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { iter.Skip() } } + if m.Type == TypeReg && m.Size == 0 && m.Digest == "" { + m.Digest = digestSha256Empty + } toc.Entries = append(toc.Entries, m) } break diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go new file mode 100644 index 00000000000..a08928034ad --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -0,0 +1,230 @@ +package dump + +import ( + "bufio" + "fmt" + "io" + "strings" + "time" + "unicode" + + "github.com/containers/storage/pkg/chunked/internal" + "golang.org/x/sys/unix" +) + +const ( + ESCAPE_STANDARD = 0 + NOESCAPE_SPACE = 1 << iota + ESCAPE_EQUAL + ESCAPE_LONE_DASH +) + +func escaped(val string, escape int) string { + noescapeSpace := escape&NOESCAPE_SPACE != 0 + escapeEqual := escape&ESCAPE_EQUAL != 0 + escapeLoneDash := escape&ESCAPE_LONE_DASH != 0 + + length := len(val) + + if escapeLoneDash && val == "-" { + return fmt.Sprintf("\\x%.2x", val[0]) + } + + var result string + for i := 0; i < length; i++ { + c := val[i] + hexEscape := false + var special string + + switch c { + case '\\': + special = "\\\\" + case '\n': + special = "\\n" + case '\r': + special = "\\r" + case '\t': + special = "\\t" + case '=': + hexEscape = escapeEqual + default: + if noescapeSpace { + hexEscape = !unicode.IsPrint(rune(c)) + } else { + hexEscape = !unicode.IsGraphic(rune(c)) + } + } + + if special != "" { + result += special + } else if hexEscape { + result += fmt.Sprintf("\\x%.2x", c) + } else { + result += string(c) + } + } + return result +} + +func escapedOptional(val string, escape int) string { + if val == "" { + return "-" + } + return escaped(val, escape) +} + +func getStMode(mode uint32, typ string) (uint32, error) { + switch typ { + case internal.TypeReg, internal.TypeLink: + mode |= unix.S_IFREG + case internal.TypeChar: + mode |= unix.S_IFCHR + case internal.TypeBlock: + mode |= unix.S_IFBLK + case internal.TypeDir: + mode |= unix.S_IFDIR + case internal.TypeFifo: + mode |= unix.S_IFIFO + case 
internal.TypeSymlink: + mode |= unix.S_IFLNK + default: + return 0, fmt.Errorf("unknown type %s", typ) + } + return mode, nil +} + +func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { + path := entry.Name + if path == "" { + path = "/" + } else if path[0] != '/' { + path = "/" + path + } + + if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil { + return err + } + + nlinks := links[entry.Name] + links[entry.Linkname] + 1 + link := "" + if entry.Type == internal.TypeLink { + link = "@" + } + + rdev := unix.Mkdev(uint32(entry.Devmajor), uint32(entry.Devminor)) + + entryTime := entry.ModTime + if entryTime == nil { + t := time.Unix(0, 0) + entryTime = &t + } + + mode, err := getStMode(uint32(entry.Mode), entry.Type) + if err != nil { + return err + } + + if _, err := fmt.Fprintf(out, " %d %s%o %d %d %d %d %d.%d ", entry.Size, + link, mode, + nlinks, entry.UID, entry.GID, rdev, + entryTime.Unix(), entryTime.Nanosecond()); err != nil { + return err + } + + var payload string + if entry.Linkname != "" { + payload = entry.Linkname + if entry.Type == internal.TypeLink && payload[0] != '/' { + payload = "/" + payload + } + } else { + if len(entry.Digest) > 10 { + d := strings.Replace(entry.Digest, "sha256:", "", 1) + payload = d[:2] + "/" + d[2:] + } + } + + if _, err := fmt.Fprintf(out, escapedOptional(payload, ESCAPE_LONE_DASH)); err != nil { + return err + } + + /* inline content. */ + if _, err := fmt.Fprint(out, " -"); err != nil { + return err + } + + /* store the digest. */ + if _, err := fmt.Fprint(out, " "); err != nil { + return err + } + digest := verityDigests[payload] + if _, err := fmt.Fprintf(out, escapedOptional(digest, ESCAPE_LONE_DASH)); err != nil { + return err + } + + for k, v := range entry.Xattrs { + name := escaped(k, ESCAPE_EQUAL) + value := escaped(v, ESCAPE_EQUAL) + + if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil { + return err + } + } + if _, err := fmt.Fprint(out, "\n"); err != nil { + return err + } + return nil +} + +// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump` +func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) { + toc, ok := tocI.(*internal.TOC) + if !ok { + return nil, fmt.Errorf("invalid TOC type") + } + pipeR, pipeW := io.Pipe() + go func() { + closed := false + w := bufio.NewWriter(pipeW) + defer func() { + if !closed { + w.Flush() + pipeW.Close() + } + }() + + links := make(map[string]int) + for _, e := range toc.Entries { + if e.Linkname == "" { + continue + } + links[e.Linkname] = links[e.Linkname] + 1 + } + + if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") { + root := &internal.FileMetadata{ + Name: "/", + Type: internal.TypeDir, + Mode: 0o755, + } + + if err := dumpNode(w, links, verityDigests, root); err != nil { + pipeW.CloseWithError(err) + closed = true + return + } + } + + for _, e := range toc.Entries { + if e.Type == internal.TypeChunk { + continue + } + if err := dumpNode(w, links, verityDigests, &e); err != nil { + pipeW.CloseWithError(err) + closed = true + return + } + } + }() + return pipeR, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 088c92782cb..8493a2c19aa 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go 
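// For reference, GenerateDump above emits one line per TOC entry in the
// `composefs-info dump` format; a regular file might look like this
// (values illustrative only):
//   /usr/bin/foo 8192 100755 1 0 0 0 1700000000.0 ab/cdef... - <verity> user.k=v
// i.e. path, size, type|mode (octal), nlink, uid, gid, rdev, mtime,
// payload (content-addressed by digest), inline content ("-"), verity
// digest, then any xattrs.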
@@ -45,6 +45,7 @@ const ( bigDataKey = "zstd-chunked-manifest" chunkedData = "zstd-chunked-data" chunkedLayerDataKey = "zstd-chunked-layer-data" + tocKey = "toc" fileTypeZstdChunked = iota fileTypeEstargz @@ -1470,10 +1471,7 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta continue } if mergedEntries[i].Digest == "" { - if mergedEntries[i].Size != 0 { - return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name) - } - continue + return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name) } digest, err := digest.Parse(mergedEntries[i].Digest) if err != nil { @@ -1542,6 +1540,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if err != nil { return graphdriver.DriverWithDifferOutput{}, err } + + // Generate the manifest + toc, err := unmarshalToc(c.manifest) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + output := graphdriver.DriverWithDifferOutput{ Differ: c, TarSplit: c.tarSplit, @@ -1549,6 +1554,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff bigDataKey: c.manifest, chunkedLayerDataKey: lcdBigData, }, + Artifacts: map[string]interface{}{ + tocKey: toc, + }, TOCDigest: c.contentDigest, } @@ -1563,12 +1571,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff // List of OSTree repositories to use for deduplication ostreeRepos := strings.Split(c.storeOpts.PullOptions["ostree_repos"], ":") - // Generate the manifest - toc, err := unmarshalToc(c.manifest) - if err != nil { - return output, err - } - whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) var missingParts []missingPart diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index b8bfa589775..40d8fd2b89a 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -114,6 +114,16 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File // AttachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *os.File. func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + return attachLoopDevice(sparseName, false) +} + +// AttachLoopDeviceRO attaches the given sparse file opened read-only to +// the next available loopback device. It returns an opened *os.File. +func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) { + return attachLoopDevice(sparseName, true) +} + +func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) { // Try to retrieve the next available loopback device via syscall. // If it fails, we discard error and start looping for a // loopback from index 0. 
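// Illustrative use of the new read-only variant (path and error handling
// are hypothetical, not part of this patch):
//
//	loop, err := loopback.AttachLoopDeviceRO("/var/lib/foo/layer.img")
//	if err != nil {
//		return nil, err
//	}
//	defer loop.Close()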
@@ -122,8 +132,14 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { logrus.Debugf("Error retrieving the next available loopback: %s", err) } + var sparseFile *os.File + // OpenFile adds O_CLOEXEC - sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0o644) + if readonly { + sparseFile, err = os.OpenFile(sparseName, os.O_RDONLY, 0o644) + } else { + sparseFile, err = os.OpenFile(sparseName, os.O_RDWR, 0o644) + } if err != nil { logrus.Errorf("Opening sparse file: %v", err) return nil, ErrAttachLoopbackDevice diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go index 5917fa251d2..12243707ac5 100644 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -28,7 +28,7 @@ func EnsureRemoveAll(dir string) error { // track retries exitOnErr := make(map[string]int) - maxRetry := 100 + maxRetry := 1000 // Attempt a simple remove all first, this avoids the more expensive // RecursiveUnmount call if not needed. @@ -38,7 +38,7 @@ func EnsureRemoveAll(dir string) error { // Attempt to unmount anything beneath this dir first if err := mount.RecursiveUnmount(dir); err != nil { - logrus.Debugf("RecusiveUnmount on %s failed: %v", dir, err) + logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err) } for { @@ -94,6 +94,6 @@ func EnsureRemoveAll(dir string) error { return err } exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) + time.Sleep(10 * time.Millisecond) } } diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 7082c0b7f40..cb4525f27d9 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -27,9 +27,8 @@ runroot = "/run/containers/storage" # restorecon -R -v /NEWSTORAGEPATH graphroot = "/var/lib/containers/storage" -# Optional value for image storage location -# If set, it must be different than graphroot. - +# Optional alternate location of image store if a location separate from the +# container store is required. If set, it must be different than graphroot. 
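# For example, to keep shared image layers on a dedicated read-mostly volume
# (path illustrative only):
#   imagestore = "/mnt/image-store/containers/storage"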
# imagestore = "" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index dc9d09b89c6..6753b296ff4 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -2666,34 +2666,23 @@ func (s *store) DeleteContainer(id string) error { } var wg multierror.Group - wg.Go(func() error { return s.containerStore.Delete(id) }) middleDir := s.graphDriverName + "-containers" wg.Go(func() error { gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) - // attempt a simple rm -rf first - if err := os.RemoveAll(gcpath); err == nil { - return nil - } - // and if it fails get to the more complicated cleanup return system.EnsureRemoveAll(gcpath) }) wg.Go(func() error { rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) - // attempt a simple rm -rf first - if err := os.RemoveAll(rcpath); err == nil { - return nil - } - // and if it fails get to the more complicated cleanup return system.EnsureRemoveAll(rcpath) }) if multierr := wg.Wait(); multierr != nil { return multierr.ErrorOrNil() } - return nil + return s.containerStore.Delete(id) }) } @@ -3418,16 +3407,16 @@ func (s *store) Shutdown(force bool) ([]string, error) { err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer) } if err == nil { - err = s.graphDriver.Cleanup() // We don’t retain the lastWrite value, and treat this update as if someone else did the .Cleanup(), // so that we reload after a .Shutdown() the same way other processes would. // Shutdown() is basically an error path, so reliability is more important than performance. if _, err2 := s.graphLock.RecordWrite(); err2 != nil { - if err == nil { - err = err2 - } else { - err = fmt.Errorf("(graphLock.RecordWrite failed: %v) %w", err2, err) - } + err = fmt.Errorf("graphLock.RecordWrite failed: %w", err2) + } + // Do the Cleanup() only after we are sure that the change was recorded with RecordWrite(), so that + // the next user picks it. + if err == nil { + err = s.graphDriver.Cleanup() } } return mounted, err diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index ab041a07069..5ae667a4935 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -220,9 +220,8 @@ type StoreOptions struct { // GraphRoot is the filesystem path under which we will store the // contents of layers, images, and containers. GraphRoot string `json:"root,omitempty"` - // Image Store is the location of image store which is seperated from the - // container store. Usually this is not recommended unless users wants - // seperate store for image and containers. + // Image Store is the alternate location of image store if a location + // separate from the container store is required. 
ImageStore string `json:"imagestore,omitempty"` // RootlessStoragePath is the storage path for rootless users // default $HOME/.local/share/containers/storage diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 7a008a4d23e..4c28dff4655 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.9.3 + - go install mvdan.cc/garble@v0.10.1 builds: - @@ -92,16 +92,7 @@ builds: archives: - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" - replacements: - aix: AIX - darwin: OSX - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 - freebsd: FreeBSD - netbsd: NetBSD + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" format_overrides: - goos: windows format: zip @@ -125,7 +116,7 @@ changelog: nfpms: - - file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" vendor: Klaus Post homepage: https://github.com/klauspost/compress maintainer: Klaus Post @@ -134,8 +125,3 @@ nfpms: formats: - deb - rpm - replacements: - darwin: Darwin - linux: Linux - freebsd: FreeBSD - amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 4002a16a637..43de4867758 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,18 @@ This package provides various compression algorithms. # changelog +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + * June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 @@ -50,6 +62,9 @@ This package provides various compression algorithms. * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
+	<summary>See changes to v1.15.x</summary>
+
* Jan 21st, 2023 (v1.15.15)
 * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
@@ -176,6 +191,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati
 
 While the release has been extensively tested, it is recommended to test when upgrading.
 
+</details>
+
See changes to v1.14.x @@ -636,6 +653,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. * [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. * [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. +* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. +* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. # license diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 5faea0b2b3e..de912e187c0 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -7,6 +7,7 @@ package flate import ( "encoding/binary" + "errors" "fmt" "io" "math" @@ -833,6 +834,12 @@ func (d *compressor) init(w io.Writer, level int) (err error) { d.initDeflate() d.fill = (*compressor).fillDeflate d.step = (*compressor).deflateLazy + case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize: + d.w.logNewTablePenalty = 7 + d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize} + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast default: return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) } @@ -929,6 +936,28 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { return zw, err } +// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. +const MinCustomWindowSize = 32 + +// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. +const MaxCustomWindowSize = windowSize + +// NewWriterWindow returns a new Writer compressing data with a custom window size. +// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. +func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { + if windowSize < MinCustomWindowSize { + return nil, errors.New("flate: requested window size less than MinWindowSize") + } + if windowSize > MaxCustomWindowSize { + return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize") + } + var dw Writer + if err := dw.d.init(w, -windowSize); err != nil { + return nil, err + } + return &dw, nil +} + // A Writer takes data written to it and writes the compressed // form of that data to an underlying writer (see NewWriter). type Writer struct { diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 24caf5f70b0..c8124b5c49a 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -8,7 +8,6 @@ package flate import ( "encoding/binary" "fmt" - "math/bits" ) type fastEnc interface { @@ -192,25 +191,3 @@ func (e *fastGen) Reset() { } e.hist = e.hist[:0] } - -// matchLen returns the maximum length. -// 'a' must be the shortest of the two. 
-func matchLen(a, b []byte) int { - var checked int - - for len(a) >= 8 { - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - return checked + (bits.TrailingZeros64(diff) >> 3) - } - checked += 8 - a = a[8:] - b = b[8:] - } - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - return i + checked - } - } - return len(a) + checked -} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 414c0bea9fa..2f410d64f5a 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -120,8 +120,9 @@ func (h *huffmanDecoder) init(lengths []int) bool { const sanity = false if h.chunks == nil { - h.chunks = &[huffmanNumChunks]uint16{} + h.chunks = new([huffmanNumChunks]uint16) } + if h.maxRead != 0 { *h = huffmanDecoder{chunks: h.chunks, links: h.links} } @@ -175,6 +176,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { } h.maxRead = min + chunks := h.chunks[:] for i := range chunks { chunks[i] = 0 @@ -202,8 +204,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { if cap(h.links[off]) < numLinks { h.links[off] = make([]uint16, numLinks) } else { - links := h.links[off][:0] - h.links[off] = links[:numLinks] + h.links[off] = h.links[off][:numLinks] } } } else { @@ -277,7 +278,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { return true } -// The actual read interface needed by NewReader. +// Reader is the actual read interface needed by NewReader. // If the passed in io.Reader does not also have ReadByte, // the NewReader will introduce its own buffering. type Reader interface { @@ -285,6 +286,18 @@ type Reader interface { io.ByteReader } +type step uint8 + +const ( + copyData step = iota + 1 + nextBlock + huffmanBytesBuffer + huffmanBytesReader + huffmanBufioReader + huffmanStringsReader + huffmanGenericReader +) + // Decompress state. type decompressor struct { // Input source. @@ -303,7 +316,7 @@ type decompressor struct { // Next step in the decompression, // and decompression state. - step func(*decompressor) + step step stepState int err error toRead []byte @@ -342,7 +355,7 @@ func (f *decompressor) nextBlock() { // compressed, fixed Huffman tables f.hl = &fixedHuffmanDecoder f.hd = nil - f.huffmanBlockDecoder()() + f.huffmanBlockDecoder() if debugDecode { fmt.Println("predefinied huffman block") } @@ -353,7 +366,7 @@ func (f *decompressor) nextBlock() { } f.hl = &f.h1 f.hd = &f.h2 - f.huffmanBlockDecoder()() + f.huffmanBlockDecoder() if debugDecode { fmt.Println("dynamic huffman block") } @@ -379,14 +392,16 @@ func (f *decompressor) Read(b []byte) (int, error) { if f.err != nil { return 0, f.err } - f.step(f) + + f.doStep() + if f.err != nil && len(f.toRead) == 0 { f.toRead = f.dict.readFlush() // Flush what's left in case of error } } } -// Support the io.WriteTo interface for io.Copy and friends. +// WriteTo implements the io.WriteTo interface for io.Copy and friends. 
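// (Strictly speaking, the stdlib interface implemented here is io.WriterTo;
// WriteTo is the method it requires.)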
func (f *decompressor) WriteTo(w io.Writer) (int64, error) { total := int64(0) flushed := false @@ -410,7 +425,7 @@ func (f *decompressor) WriteTo(w io.Writer) (int64, error) { return total, f.err } if f.err == nil { - f.step(f) + f.doStep() } if len(f.toRead) == 0 && f.err != nil && !flushed { f.toRead = f.dict.readFlush() // Flush what's left in case of error @@ -631,7 +646,7 @@ func (f *decompressor) copyData() { if f.dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).copyData + f.step = copyData return } f.finishBlock() @@ -644,7 +659,28 @@ func (f *decompressor) finishBlock() { } f.err = io.EOF } - f.step = (*decompressor).nextBlock + f.step = nextBlock +} + +func (f *decompressor) doStep() { + switch f.step { + case copyData: + f.copyData() + case nextBlock: + f.nextBlock() + case huffmanBytesBuffer: + f.huffmanBytesBuffer() + case huffmanBytesReader: + f.huffmanBytesReader() + case huffmanBufioReader: + f.huffmanBufioReader() + case huffmanStringsReader: + f.huffmanStringsReader() + case huffmanGenericReader: + f.huffmanGenericReader() + default: + panic("BUG: unexpected step state") + } } // noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. @@ -747,7 +783,7 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error { h1: f.h1, h2: f.h2, dict: f.dict, - step: (*decompressor).nextBlock, + step: nextBlock, } f.dict.init(maxMatchOffset, dict) return nil @@ -768,7 +804,7 @@ func NewReader(r io.Reader) io.ReadCloser { f.r = makeReader(r) f.bits = new([maxNumLit + maxNumDist]int) f.codebits = new([numCodes]int) - f.step = (*decompressor).nextBlock + f.step = nextBlock f.dict.init(maxMatchOffset, nil) return &f } @@ -787,7 +823,7 @@ func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { f.r = makeReader(r) f.bits = new([maxNumLit + maxNumDist]int) f.codebits = new([numCodes]int) - f.step = (*decompressor).nextBlock + f.step = nextBlock f.dict.init(maxMatchOffset, dict) return &f } diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index 61342b6b88f..2b2f993f753 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -85,7 +85,7 @@ readLiteral: dict.writeByte(byte(v)) if dict.availWrite() == 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesBuffer + f.step = huffmanBytesBuffer f.stepState = stateInit f.b, f.nb = fb, fnb return @@ -251,7 +251,7 @@ copyHistory: if dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work + f.step = huffmanBytesBuffer // We need to continue this work f.stepState = stateDict f.b, f.nb = fb, fnb return @@ -336,7 +336,7 @@ readLiteral: dict.writeByte(byte(v)) if dict.availWrite() == 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesReader + f.step = huffmanBytesReader f.stepState = stateInit f.b, f.nb = fb, fnb return @@ -502,7 +502,7 @@ copyHistory: if dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesReader // We need to continue this work + f.step = huffmanBytesReader // We need to continue this work f.stepState = stateDict f.b, f.nb = fb, fnb return @@ -587,7 +587,7 @@ readLiteral: dict.writeByte(byte(v)) if dict.availWrite() == 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBufioReader + f.step = 
huffmanBufioReader f.stepState = stateInit f.b, f.nb = fb, fnb return @@ -753,7 +753,7 @@ copyHistory: if dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBufioReader // We need to continue this work + f.step = huffmanBufioReader // We need to continue this work f.stepState = stateDict f.b, f.nb = fb, fnb return @@ -838,7 +838,7 @@ readLiteral: dict.writeByte(byte(v)) if dict.availWrite() == 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanStringsReader + f.step = huffmanStringsReader f.stepState = stateInit f.b, f.nb = fb, fnb return @@ -1004,7 +1004,7 @@ copyHistory: if dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.step = huffmanStringsReader // We need to continue this work f.stepState = stateDict f.b, f.nb = fb, fnb return @@ -1089,7 +1089,7 @@ readLiteral: dict.writeByte(byte(v)) if dict.availWrite() == 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanGenericReader + f.step = huffmanGenericReader f.stepState = stateInit f.b, f.nb = fb, fnb return @@ -1255,7 +1255,7 @@ copyHistory: if dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanGenericReader // We need to continue this work + f.step = huffmanGenericReader // We need to continue this work f.stepState = stateDict f.b, f.nb = fb, fnb return @@ -1265,19 +1265,19 @@ copyHistory: // Not reached } -func (f *decompressor) huffmanBlockDecoder() func() { +func (f *decompressor) huffmanBlockDecoder() { switch f.r.(type) { case *bytes.Buffer: - return f.huffmanBytesBuffer + f.huffmanBytesBuffer() case *bytes.Reader: - return f.huffmanBytesReader + f.huffmanBytesReader() case *bufio.Reader: - return f.huffmanBufioReader + f.huffmanBufioReader() case *strings.Reader: - return f.huffmanStringsReader + f.huffmanStringsReader() case Reader: - return f.huffmanGenericReader + f.huffmanGenericReader() default: - return f.huffmanGenericReader + f.huffmanGenericReader() } } diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 83ef50ba45f..1f61ec1829d 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -308,3 +308,401 @@ emitRemainder: emitLiteral(dst, src[nextEmit:]) } } + +// fastEncL5Window is a level 5 encoder, +// but with a custom window size. +type fastEncL5Window struct { + hist []byte + cur int32 + maxOffset int32 + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + maxMatchOffset := e.maxOffset + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
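// Offsets in both tables are stored biased by e.cur, so once e.cur nears
// bufferReset the bias is rebased to maxMatchOffset: entries older than the
// window (v <= minOff) are dropped to zero and the rest are shifted down,
// keeping the 32-bit offsets from wrapping.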
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... 
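// Each bTable bucket holds two candidates (Cur and Prev), so both are
// probed and the longer match wins; the Prev probe follows just below.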
+ t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. 
+ x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// Reset the encoding table. +func (e *fastEncL5Window) Reset() { + // We keep the same allocs, since we are compressing the same block sizes. + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. + if e.cur <= int32(bufferReset) { + e.cur += e.maxOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} + +func (e *fastEncL5Window) addBlock(src []byte) int32 { + // check if we have space already + maxMatchOffset := e.maxOffset + + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < int(maxMatchOffset*2) { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go new file mode 100644 index 00000000000..4bd3885841f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
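// The assembly version mirrors matchLen in matchlen_generic.go: XOR 8-byte
// words and count the trailing zero bits of the first non-zero difference
// (TZCNT when BMI is available, BSF otherwise); every 8 zero bits is one
// matching byte.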
+ +package flate + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s new file mode 100644 index 00000000000..9a7655c0f76 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go new file mode 100644 index 00000000000..ad5cd814b91 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index 43e463611b1..e82fa3bb7b6 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() error { +func (b *bitWriter) close() { // End mark b.addBits16Clean(1, 1) // flush until next byte. b.flushAlign() - return nil } // reset and continue writing by appending to out. 
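// Why close() can drop its error return (a minimal sketch, not the vendored
// code): the writer only appends to an in-memory slice, so writing the end
// mark and flushing the final partial byte cannot fail.
//
//	func closeSketch(bits uint64, n uint8, out []byte) []byte {
//		bits |= 1 << (n & 63) // end mark: lets the decoder find the last bit
//		n++
//		for n > 0 { // flush low byte first until byte-aligned
//			out = append(out, byte(bits))
//			bits >>= 8
//			if n >= 8 {
//				n -= 8
//			} else {
//				n = 0
//			}
//		}
//		return out
//	}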
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index dac97e58a2d..65d777357aa 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error { c2.flush(s.actualTableLog) c1.flush(s.actualTableLog) - return s.bw.close() + s.bw.close() + return nil } // writeCount will write the normalized histogram count to header. diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index b4d7164e3fd..0ebc9aaac76 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -94,10 +94,9 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() error { +func (b *bitWriter) close() { // End mark b.addBits16Clean(1, 1) // flush until next byte. b.flushAlign() - return nil } diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 4ee4fa18dda..518436cf3d4 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err } func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src) + return s.compress1xDo(s.Out, src), nil } -func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { +func (s *Scratch) compress1xDo(dst, src []byte) []byte { var bw = bitWriter{out: dst} // N is length divisible by 4. @@ -260,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { bw.encTwoSymbols(cTable, tmp[1], tmp[0]) } } - err := bw.close() - return bw.out, err + bw.close() + return bw.out } var sixZeros [6]byte @@ -283,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { } src = src[len(toDo):] - var err error idx := len(s.Out) - s.Out, err = s.compress1xDo(s.Out, toDo) - if err != nil { - return nil, err - } + s.Out = s.compress1xDo(s.Out, toDo) if len(s.Out)-idx > math.MaxUint16 { // We cannot store the size in the jump table return nil, ErrIncompressible @@ -315,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup - var errs [4]error wg.Add(4) for i := 0; i < 4; i++ { toDo := src @@ -326,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { // Separate goroutine for each block. go func(i int) { - s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) wg.Done() }(i) } wg.Wait() for i := 0; i < 4; i++ { - if errs[i] != nil { - return nil, errs[i] - } o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 97299d499cf..25ca983941d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -17,7 +17,6 @@ import ( // for aligning the input. type bitReader struct { in []byte - off uint // next byte to read is at in[off - 1] value uint64 // Maybe use [16]byte, but shifting is awkward. 
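// Note: the former off index is gone; the reader now consumes the stream
// back-to-front by re-slicing b.in itself (see fill/fillFast below), which
// replaces the old two-bounds-check pattern on in[off-4:].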
bitsRead uint8 } @@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error { return errors.New("corrupt stream: too short") } b.in = in - b.off = uint(len(in)) // The highest bit of the last byte indicates where to start v := in[len(in)-1] if v == 0 { @@ -69,21 +67,19 @@ func (b *bitReader) fillFast() { if b.bitsRead < 32 { return } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value = (b.value << 32) | uint64(low) b.bitsRead -= 32 - b.off -= 4 } // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + v := b.in[len(b.in)-8:] + b.in = b.in[:len(b.in)-8] + b.value = binary.LittleEndian.Uint64(v) b.bitsRead = 0 - b.off -= 8 } // fill() will make sure at least 32 bits are available. @@ -91,25 +87,25 @@ func (b *bitReader) fill() { if b.bitsRead < 32 { return } - if b.off >= 4 { - v := b.in[b.off-4:] - v = v[:4] + if len(b.in) >= 4 { + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value = (b.value << 32) | uint64(low) b.bitsRead -= 32 - b.off -= 4 return } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- + + b.bitsRead -= uint8(8 * len(b.in)) + for len(b.in) > 0 { + b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) + b.in = b.in[:len(b.in)-1] } } // finished returns true if all bits have been read from the bit stream. func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 + return len(b.in) == 0 && b.bitsRead >= 64 } // overread returns true if more bits have been requested than is on the stream. @@ -119,7 +115,7 @@ func (b *bitReader) overread() bool { // remain returns the number of bits remaining. func (b *bitReader) remain() uint { - return b.off*8 + 64 - uint(b.bitsRead) + return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 78b3c61be3e..1952f175b0d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() error { +func (b *bitWriter) close() { // End mark b.addBits16Clean(1, 1) // flush until next byte. b.flushAlign() - return nil } // reset and continue writing by appending to out. diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index fd4a36f730c..2cfe925ade5 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { if len(lits) >= 1024 { // Use 4 Streams. 
out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 32 { + } else if len(lits) > 16 { // Use 1 stream single = true out, reUsed, err = huff0.Compress1X(lits, b.litEnc) } else { err = huff0.ErrIncompressible } - + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } switch err { case huff0.ErrIncompressible: if debugEncoder { @@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.literals) >= 1024 && !raw { // Use 4 Streams. out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 32 && !raw { + } else if len(b.literals) > 16 && !raw { // Use 1 stream single = true out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) @@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { err = huff0.ErrIncompressible } + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } switch err { case huff0.ErrIncompressible: lh.setType(literalsBlockRaw) @@ -773,10 +791,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { ml.flush(mlEnc.actualTableLog) of.flush(ofEnc.actualTableLog) ll.flush(llEnc.actualTableLog) - err = wr.close() - if err != nil { - return err - } + wr.close() b.output = wr.out // Maybe even add a bigger margin. diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index ca0951452e6..8d5567fe64c 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -1,10 +1,13 @@ package zstd import ( + "bytes" "encoding/binary" "errors" "fmt" "io" + "math" + "sort" "github.com/klauspost/compress/huff0" ) @@ -14,9 +17,8 @@ type dict struct { litEnc *huff0.Scratch llDec, ofDec, mlDec sequenceDec - //llEnc, ofEnc, mlEnc []*fseEncoder - offsets [3]int - content []byte + offsets [3]int + content []byte } const dictMagic = "\x37\xa4\x30\xec" @@ -159,3 +161,374 @@ func InspectDictionary(b []byte) (interface { d, err := loadDict(b) return d, err } + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) 
+ } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) + } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. 
+ nUsed = seqs / 512 + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 9819d414536..858f8f43a56 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -197,12 +197,13 @@ encodeLoop: // Set m to a match at offset if it looks like that will improve compression. 
improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { return } if debugAsserts { - if offset <= 0 { - panic(offset) + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) } if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) @@ -343,8 +344,8 @@ encodeLoop: if best.rep > 0 { var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s <= nextEmit { - panic("s <= nextEmit") + if debugAsserts && s < nextEmit { + panic("s < nextEmit") } addLiterals(&seq, best.s) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 4de0aed0d0d..72af7ef0fe0 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error { DictID: e.o.dict.ID(), } - dst, err := fh.appendTo(tmp[:0]) - if err != nil { - return err - } + dst := fh.appendTo(tmp[:0]) s.headerWritten = true s.wWg.Wait() var n2 int @@ -483,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { Checksum: false, DictID: 0, } - dst, _ = fh.appendTo(dst) + dst = fh.appendTo(dst) // Write raw block as last one only. var blk blockHeader @@ -518,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { dst = make([]byte, 0, len(src)) } - dst, err := fh.appendTo(dst) - if err != nil { - panic(err) - } + dst = fh.appendTo(dst) // If we can do everything in one block, prefer that. if len(src) <= e.o.blockSize { @@ -581,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // Add padding with content from crypto/rand.Reader if e.o.pad > 0 { add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error dst, err = skippableFrame(dst, add, rand.Reader) if err != nil { panic(err) diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 4ef7f5a3e3d..2f5d5ed4546 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -22,7 +22,7 @@ type frameHeader struct { const maxHeaderSize = 14 -func (f frameHeader) appendTo(dst []byte) ([]byte, error) { +func (f frameHeader) appendTo(dst []byte) []byte { dst = append(dst, frameMagic...) 
var fhd uint8 if f.Checksum { @@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) { default: panic("invalid fcs") } - return dst, nil + return dst } const skippableFrameHeader = 4 + 4 diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 9405fcf1016..d7fe6d82d93 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { return io.ErrUnexpectedEOF } var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) @@ -452,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) // extra bits are stored in reverse order. br.fill() - if s.maxBits <= 32 { - mo += br.getBits(moB) - ml += br.getBits(mlB) - ll += br.getBits(llB) - } else { - mo += br.getBits(moB) + mo += br.getBits(moB) + if s.maxBits > 32 { br.fill() - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) mo = s.adjustOffset(mo, ll, moB) return } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index b6f4ba6fc59..974b99725fd 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -5,11 +5,11 @@ // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -301,9 +301,9 @@ sequenceDecs_decode_amd64_match_len_ofs_ok: MOVQ R12, 152(AX) MOVQ R13, 160(AX) MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -336,11 +336,11 @@ error_overread: // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -603,9 +603,9 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok: MOVQ R12, 152(AX) MOVQ R13, 160(AX) MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -638,11 +638,11 @@ error_overread: // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -892,9 +892,9 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok: MOVQ R11, 152(CX) MOVQ R12, 160(CX) 
MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -927,11 +927,11 @@ error_overread: // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -1152,9 +1152,9 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok: MOVQ R11, 152(CX) MOVQ R12, 160(CX) MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -1797,11 +1797,11 @@ empty_seqs: // func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: CMOV, SSE TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -2295,9 +2295,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) // Update the context MOVQ ctx+16(FP), AX @@ -2362,11 +2362,11 @@ error_not_enough_space: // func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: BMI, BMI2, CMOV, SSE TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -2818,9 +2818,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) // Update the context MOVQ ctx+16(FP), AX @@ -2885,11 +2885,11 @@ error_not_enough_space: // func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: CMOV, SSE TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -3485,9 +3485,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) // Update the context MOVQ ctx+16(FP), AX @@ -3552,11 +3552,11 @@ error_not_enough_space: // func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: BMI, BMI2, CMOV, SSE TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -4110,9 +4110,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) // Update 
the context MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index ac2a80d2911..2fb35b788c1 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { } for i := range seqs { var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 9e1baad73be..ec13594e89b 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -95,10 +95,9 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { var written int64 var readHeader bool { - var header []byte - var n int - header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + var n int n, r.err = w.Write(header) if r.err != nil { return written, r.err diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go index b1a0dad05ea..325813d6923 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go @@ -26,7 +26,7 @@ import ( "errors" "fmt" - "github.com/theupdateframework/go-tuf/encrypted" + "github.com/secure-systems-lab/go-securesystemslib/encrypted" ) const ( diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go index 2764b4b3153..cab6f5b98a7 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go @@ -27,7 +27,7 @@ import ( const CosignSignatureType = "cosign container image signature" // SimpleContainerImage describes the structure of a basic container image signature payload, as defined at: -// https://github.com/containers/image/blob/master/docs/containers-signature.5.md#json-data-format +// https://github.com/containers/image/blob/main/docs/containers-signature.5.md#json-data-format type SimpleContainerImage struct { Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index 0e1f9103dfe..be3f6dc10f8 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -8,6 +8,7 @@ package sif import ( + "encoding" "encoding/binary" "errors" "fmt" @@ -321,51 +322,6 @@ func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) { return f, nil } -func zeroData(fimg *FileImage, descr *rawDescriptor) error { - // first, move to data object offset - if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil { - return err - } - - var zero [4096]byte - n := descr.Size - upbound := int64(4096) - for { - if n < 4096 { - upbound = n - } - - if 
_, err := fimg.rw.Write(zero[:upbound]); err != nil {
- return err
- }
- n -= 4096
- if n <= 0 {
- break
- }
- }
-
- return nil
-}
-
-func resetDescriptor(fimg *FileImage, index int) error {
- // If we remove the primary partition, set the global header Arch field to HdrArchUnknown
- // to indicate that the SIF file doesn't include a primary partition and no dependency
- // on any architecture exists.
- if fimg.rds[index].isPartitionOfType(PartPrimSys) {
- fimg.h.Arch = hdrArchUnknown
- }
-
- offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0]))
-
- // first, move to descriptor offset
- if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil {
- return err
- }
-
- var emptyDesc rawDescriptor
- return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc)
-}
-
 // addOpts accumulates object add options.
 type addOpts struct {
 t time.Time
@@ -447,6 +403,26 @@ func (f *FileImage) isLast(d *rawDescriptor) bool {
 return isLast
 }
+// zeroReader is an io.Reader that returns a stream of zero-bytes.
+type zeroReader struct{}
+
+func (zeroReader) Read(b []byte) (int, error) {
+ for i := range b {
+ b[i] = 0
+ }
+ return len(b), nil
+}
+
+// zero overwrites the data object described by d with a stream of zero bytes.
+func (f *FileImage) zero(d *rawDescriptor) error {
+ if _, err := f.rw.Seek(d.Offset, io.SeekStart); err != nil {
+ return err
+ }
+
+ _, err := io.CopyN(f.rw, zeroReader{}, d.Size)
+ return err
+}
+
 // truncateAt truncates f at the start of the padded data object described by d.
 func (f *FileImage) truncateAt(d *rawDescriptor) error {
 start := d.Offset + d.Size - d.SizeWithPadding
@@ -530,7 +506,7 @@ func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
 }
 if do.zero {
- if err := zeroData(f, d); err != nil {
+ if err := f.zero(d); err != nil {
 return fmt.Errorf("%w", err)
 }
 }
@@ -546,15 +522,17 @@ func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
 f.h.DescriptorsFree++
 f.h.ModifiedAt = do.t.Unix()
- index := 0
- for i, od := range f.rds {
- if od.ID == id {
- index = i
- break
- }
+ // If we remove the primary partition, set the global header Arch field to HdrArchUnknown
+ // to indicate that the SIF file doesn't include a primary partition and no dependency
+ // on any architecture exists.
+ if d.isPartitionOfType(PartPrimSys) {
+ f.h.Arch = hdrArchUnknown
 }
- if err := resetDescriptor(f, index); err != nil {
+ // Reset rawDescriptor to an empty struct
+ *d = rawDescriptor{}
+
+ if err := f.writeDescriptors(); err != nil {
 return fmt.Errorf("%w", err)
 }
@@ -676,3 +654,45 @@ func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
 return nil
 }
+
+// SetMetadata sets the metadata of the data object with id to md, according to opts.
+//
+// By default, the image/object modification times are set to the current time for
+// non-deterministic images, and unset otherwise. To override this, consider using
+// OptSetDeterministic or OptSetWithTime.
+func (f *FileImage) SetMetadata(id uint32, md encoding.BinaryMarshaler, opts ...SetOpt) error { + so := setOpts{} + + if !f.isDeterministic() { + so.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + rd, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if err := rd.setExtra(md); err != nil { + return fmt.Errorf("%w", err) + } + + rd.ModifiedAt = so.t.Unix() + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/LICENSE b/vendor/github.com/theupdateframework/go-tuf/LICENSE deleted file mode 100644 index 38163dd4bd1..00000000000 --- a/vendor/github.com/theupdateframework/go-tuf/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Prime Directive, Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go b/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go deleted file mode 100644 index 4d174d61f93..00000000000 --- a/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go +++ /dev/null @@ -1,226 +0,0 @@ -// Package encrypted provides a simple, secure system for encrypting data -// symmetrically with a passphrase. -// -// It uses scrypt derive a key from the passphrase and the NaCl secret box -// cipher for authenticated encryption. -package encrypted - -import ( - "crypto/rand" - "encoding/json" - "errors" - "fmt" - "io" - - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/scrypt" -) - -const saltSize = 32 - -const ( - boxKeySize = 32 - boxNonceSize = 24 -) - -const ( - // N parameter was chosen to be ~100ms of work using the default implementation - // on the 2.3GHz Core i7 Haswell processor in a late-2013 Apple Retina Macbook - // Pro (it takes ~113ms). 
- scryptN = 32768 - scryptR = 8 - scryptP = 1 -) - -const ( - nameScrypt = "scrypt" - nameSecretBox = "nacl/secretbox" -) - -type data struct { - KDF scryptKDF `json:"kdf"` - Cipher secretBoxCipher `json:"cipher"` - Ciphertext []byte `json:"ciphertext"` -} - -type scryptParams struct { - N int `json:"N"` - R int `json:"r"` - P int `json:"p"` -} - -func newScryptKDF() (scryptKDF, error) { - salt := make([]byte, saltSize) - if err := fillRandom(salt); err != nil { - return scryptKDF{}, err - } - return scryptKDF{ - Name: nameScrypt, - Params: scryptParams{ - N: scryptN, - R: scryptR, - P: scryptP, - }, - Salt: salt, - }, nil -} - -type scryptKDF struct { - Name string `json:"name"` - Params scryptParams `json:"params"` - Salt []byte `json:"salt"` -} - -func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) { - return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize) -} - -// CheckParams checks that the encoded KDF parameters are what we expect them to -// be. If we do not do this, an attacker could cause a DoS by tampering with -// them. -func (s *scryptKDF) CheckParams() error { - if s.Params.N != scryptN || s.Params.R != scryptR || s.Params.P != scryptP { - return errors.New("encrypted: unexpected kdf parameters") - } - return nil -} - -func newSecretBoxCipher() (secretBoxCipher, error) { - nonce := make([]byte, boxNonceSize) - if err := fillRandom(nonce); err != nil { - return secretBoxCipher{}, err - } - return secretBoxCipher{ - Name: nameSecretBox, - Nonce: nonce, - }, nil -} - -type secretBoxCipher struct { - Name string `json:"name"` - Nonce []byte `json:"nonce"` - - encrypted bool -} - -func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte { - var keyBytes [boxKeySize]byte - var nonceBytes [boxNonceSize]byte - - if len(key) != len(keyBytes) { - panic("incorrect key size") - } - if len(s.Nonce) != len(nonceBytes) { - panic("incorrect nonce size") - } - - copy(keyBytes[:], key) - copy(nonceBytes[:], s.Nonce) - - // ensure that we don't re-use nonces - if s.encrypted { - panic("Encrypt must only be called once for each cipher instance") - } - s.encrypted = true - - return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes) -} - -func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) { - var keyBytes [boxKeySize]byte - var nonceBytes [boxNonceSize]byte - - if len(key) != len(keyBytes) { - panic("incorrect key size") - } - if len(s.Nonce) != len(nonceBytes) { - // return an error instead of panicking since the nonce is user input - return nil, errors.New("encrypted: incorrect nonce size") - } - - copy(keyBytes[:], key) - copy(nonceBytes[:], s.Nonce) - - res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes) - if !ok { - return nil, errors.New("encrypted: decryption failed") - } - return res, nil -} - -// Encrypt takes a passphrase and plaintext, and returns a JSON object -// containing ciphertext and the details necessary to decrypt it. -func Encrypt(plaintext, passphrase []byte) ([]byte, error) { - k, err := newScryptKDF() - if err != nil { - return nil, err - } - key, err := k.Key(passphrase) - if err != nil { - return nil, err - } - - c, err := newSecretBoxCipher() - if err != nil { - return nil, err - } - - data := &data{ - KDF: k, - Cipher: c, - } - data.Ciphertext = c.Encrypt(plaintext, key) - - return json.Marshal(data) -} - -// Marshal encrypts the JSON encoding of v using passphrase. 
-func Marshal(v interface{}, passphrase []byte) ([]byte, error) {
- data, err := json.MarshalIndent(v, "", "\t")
- if err != nil {
- return nil, err
- }
- return Encrypt(data, passphrase)
-}
-
-// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and
-// tries to decrypt it using passphrase. If successful, it returns the
-// plaintext.
-func Decrypt(ciphertext, passphrase []byte) ([]byte, error) {
- data := &data{}
- if err := json.Unmarshal(ciphertext, data); err != nil {
- return nil, err
- }
-
- if data.KDF.Name != nameScrypt {
- return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name)
- }
- if data.Cipher.Name != nameSecretBox {
- return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name)
- }
- if err := data.KDF.CheckParams(); err != nil {
- return nil, err
- }
-
- key, err := data.KDF.Key(passphrase)
- if err != nil {
- return nil, err
- }
-
- return data.Cipher.Decrypt(data.Ciphertext, key)
-}
-
-// Unmarshal decrypts the data using passphrase and unmarshals the resulting
-// plaintext into the value pointed to by v.
-func Unmarshal(data []byte, v interface{}, passphrase []byte) error {
- decrypted, err := Decrypt(data, passphrase)
- if err != nil {
- return err
- }
- return json.Unmarshal(decrypted, v)
-}
-
-func fillRandom(b []byte) error {
- _, err := io.ReadFull(rand.Reader, b)
- return err
-}
diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go
index 7d83268ce0b..5f814121095 100644
--- a/vendor/github.com/vbauerster/mpb/v8/bar.go
+++ b/vendor/github.com/vbauerster/mpb/v8/bar.go
@@ -61,7 +61,6 @@ type renderFrame struct {
 shutdown int
 rmOnComplete bool
 noPop bool
- done bool
 err error
 }
@@ -82,10 +81,10 @@ func newBar(ctx context.Context, container *Progress, bs *bState) *Bar {
 return bar
 }
-// ProxyReader wraps io.Reader with metrics required for progress tracking.
-// If `r` is 'unknown total/size' reader it's mandatory to call
-// (*Bar).SetTotal(-1, true) method after (io.Reader).Read returns io.EOF.
-// If bar is already completed or aborted, returns nil.
+// ProxyReader wraps io.Reader with metrics required for progress
+// tracking. If `r` is 'unknown total/size' reader it's mandatory
+// to call `(*Bar).SetTotal(-1, true)` after the wrapper returns
+// `io.EOF`. If bar is already completed or aborted, returns nil.
 // Panics if `r` is nil.
 func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
 if r == nil {
@@ -177,11 +176,10 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
 }
 }
-// EnableTriggerComplete enables triggering complete event. It's
-// effective only for bars which were constructed with `total <= 0` and
-// after total has been set with (*Bar).SetTotal(int64, false). If bar
-// has been incremented to the total, complete event is triggered right
-// away.
+// EnableTriggerComplete enables triggering complete event. It's effective
+// only for bars which were constructed with `total <= 0` and after total
+// has been set with `(*Bar).SetTotal(int64, false)`. If `current >= total`
+// at the moment of call, complete event is triggered right away.
 func (b *Bar) EnableTriggerComplete() {
 select {
 case b.operateState <- func(s *bState) {
@@ -200,11 +198,11 @@ func (b *Bar) EnableTriggerComplete() {
 }
 }
-// SetTotal sets total to an arbitrary value. It's effective only for
-// bar which was constructed with `total <= 0`. Setting total to negative
-// value is equivalent to (*Bar).SetTotal((*Bar).Current(), bool) but faster.
-// If triggerCompletion is true, total value is set to current and
-// complete event is triggered right away.
+// SetTotal sets total to an arbitrary value. It's effective only for bar
+// which was constructed with `total <= 0`. Setting total to negative value
+// is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster. If
+// triggerCompletion is true, total value is set to current and complete
+// event is triggered right away.
 func (b *Bar) SetTotal(total int64, triggerCompletion bool) {
 select {
 case b.operateState <- func(s *bState) {
@@ -344,7 +342,7 @@ func (b *Bar) SetPriority(priority int) {
 // Abort interrupts bar's running goroutine. Abort won't be engaged
 // if bar is already in complete state. If drop is true bar will be
 // removed as well. To make sure that bar has been removed call
-// (*Bar).Wait method.
+// `(*Bar).Wait()` method.
 func (b *Bar) Abort(drop bool) {
 select {
 case b.operateState <- func(s *bState) {
@@ -415,7 +413,6 @@ func (b *Bar) serve(ctx context.Context, bs *bState) {
 }
 func (b *Bar) render(tw int) {
- var done bool
 fn := func(s *bState) {
 var rows []io.Reader
 stat := newStatistics(tw, s)
@@ -437,7 +434,6 @@ func (b *Bar) render(tw int) {
 shutdown: s.shutdown,
 rmOnComplete: s.rmOnComplete,
 noPop: s.noPop,
- done: done,
 }
 if s.completed || s.aborted {
 // post increment makes sure OnComplete decorators are rendered
@@ -448,7 +444,6 @@ func (b *Bar) render(tw int) {
 select {
 case b.operateState <- fn:
 case <-b.done:
- done = true
 fn(b.bs)
 }
 }
diff --git a/vendor/github.com/vbauerster/mpb/v8/container_option.go b/vendor/github.com/vbauerster/mpb/v8/container_option.go
index f2ab01ee029..177620e0632 100644
--- a/vendor/github.com/vbauerster/mpb/v8/container_option.go
+++ b/vendor/github.com/vbauerster/mpb/v8/container_option.go
@@ -93,9 +93,9 @@ func WithAutoRefresh() ContainerOption {
 }
 }
-// PopCompletedMode will pop completed bars to the top.
-// To stop rendering bar after it has been popped, use
-// mpb.BarRemoveOnComplete() option on that bar.
+// PopCompletedMode pops completed bars out of the progress container.
+// In this mode completed bars get moved to the top and stop
+// participating in the rendering cycle.
 func PopCompletedMode() ContainerOption {
 return func(s *pState) {
 s.popCompleted = true
diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go
index f275be3e5a0..8c609b463fd 100644
--- a/vendor/github.com/vbauerster/mpb/v8/progress.go
+++ b/vendor/github.com/vbauerster/mpb/v8/progress.go
@@ -18,8 +18,8 @@ const (
 defaultRefreshRate = 150 * time.Millisecond
 )
-// DoneError represents an error when `*mpb.Progress` is done but its functionality is requested.
-var DoneError = fmt.Errorf("%T instance can't be reused after it's done", (*Progress)(nil))
+// DoneError represents a use-after-`(*Progress).Wait()` error.
+var DoneError = fmt.Errorf("%T instance can't be reused after %[1]T.Wait()", (*Progress)(nil))
 // Progress represents a container that renders one or more progress bars.
 type Progress struct {
@@ -55,13 +55,13 @@ type pState struct {
 }
 // New creates new Progress container instance. It's not possible to
-// reuse instance after (*Progress).Wait method has been called.
+// reuse instance after `(*Progress).Wait` method has been called.
 func New(options ...ContainerOption) *Progress {
 return NewWithContext(context.Background(), options...)
 }
 // NewWithContext creates new Progress container instance with provided
-// context.
It's not possible to reuse instance after (*Progress).Wait +// context. It's not possible to reuse instance after `(*Progress).Wait` // method has been called. func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { if ctx == nil { @@ -133,8 +133,7 @@ func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOpti // MustAdd creates a bar which renders itself by provided BarFiller. // If `total <= 0` triggering complete event by increment methods is -// disabled. Panics if *Progress instance is done, i.e. called after -// (*Progress).Wait(). +// disabled. Panics if called after `(*Progress).Wait()`. func (p *Progress) MustAdd(total int64, filler BarFiller, options ...BarOption) *Bar { bar, err := p.Add(total, filler, options...) if err != nil { @@ -145,8 +144,8 @@ func (p *Progress) MustAdd(total int64, filler BarFiller, options ...BarOption) // Add creates a bar which renders itself by provided BarFiller. // If `total <= 0` triggering complete event by increment methods -// is disabled. If *Progress instance is done, i.e. called after -// (*Progress).Wait(), return error == DoneError. +// is disabled. If called after `(*Progress).Wait()` then +// `(nil, DoneError)` is returned. func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Bar, error) { if filler == nil { filler = NopStyle().Build() @@ -203,7 +202,7 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) { // UpdateBarPriority either immediately or lazy. // With lazy flag order is updated after the next refresh cycle. -// If you don't care about laziness just use *Bar.SetPriority(int). +// If you don't care about laziness just use `(*Bar).SetPriority(int)`. func (p *Progress) UpdateBarPriority(b *Bar, priority int, lazy bool) { if b == nil { return @@ -215,9 +214,9 @@ func (p *Progress) UpdateBarPriority(b *Bar, priority int, lazy bool) { } // Write is implementation of io.Writer. -// Writing to `*mpb.Progress` will print lines above a running bar. +// Writing to `*Progress` will print lines above a running bar. // Writes aren't flushed immediately, but at next refresh cycle. -// If Write is called after `*mpb.Progress` is done, `mpb.DoneError` +// If called after `(*Progress).Wait()` then `(0, DoneError)` // is returned. func (p *Progress) Write(b []byte) (int, error) { type result struct { @@ -238,7 +237,7 @@ func (p *Progress) Write(b []byte) (int, error) { } // Wait waits for all bars to complete and finally shutdowns container. After -// this method has been called, there is no way to reuse (*Progress) instance. +// this method has been called, there is no way to reuse `*Progress` instance. func (p *Progress) Wait() { // wait for user wg, if any if p.uwg != nil { @@ -249,9 +248,9 @@ func (p *Progress) Wait() { p.Shutdown() } -// Shutdown cancels any running bar immediately and then shutdowns (*Progress) +// Shutdown cancels any running bar immediately and then shutdowns `*Progress` // instance. Normally this method shouldn't be called unless you know what you -// are doing. Proper way to shutdown is to call (*Progress).Wait() instead. +// are doing. Proper way to shutdown is to call `(*Progress).Wait()` instead. 
func (p *Progress) Shutdown() { p.cancel() p.pwg.Wait() @@ -357,7 +356,7 @@ func (s *pState) render(cw *cwriter.Writer) (err error) { func (s *pState) flush(cw *cwriter.Writer, height int) error { var wg sync.WaitGroup - defer wg.Wait() // waiting for all s.hm.push to complete + defer wg.Wait() // waiting for all s.push to complete var popCount int var rows []io.Reader @@ -381,40 +380,34 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error { _, _ = io.Copy(io.Discard, row) } } - if frame.shutdown != 0 && !frame.done { + + switch frame.shutdown { + case 1: + b.cancel() if qb, ok := s.queueBars[b]; ok { - b.cancel() delete(s.queueBars, b) qb.priority = b.priority wg.Add(1) - go func(b *Bar) { - s.hm.push(b, true) - wg.Done() - }(qb) - continue + go s.push(&wg, qb, true) + } else if s.popCompleted && !frame.noPop { + b.priority = s.popPriority + s.popPriority++ + wg.Add(1) + go s.push(&wg, b, false) + } else if !frame.rmOnComplete { + wg.Add(1) + go s.push(&wg, b, false) } + case 2: if s.popCompleted && !frame.noPop { - switch frame.shutdown { - case 1: - b.priority = s.popPriority - s.popPriority++ - default: - b.cancel() - popCount += usedRows - continue - } - } else if frame.rmOnComplete { - b.cancel() + popCount += usedRows continue - } else { - b.cancel() } + fallthrough + default: + wg.Add(1) + go s.push(&wg, b, false) } - wg.Add(1) - go func(b *Bar) { - s.hm.push(b, false) - wg.Done() - }(b) } for i := len(rows) - 1; i >= 0; i-- { @@ -427,6 +420,11 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error { return cw.Flush(len(rows) - popCount) } +func (s pState) push(wg *sync.WaitGroup, b *Bar, sync bool) { + s.hm.push(b, sync) + wg.Done() +} + func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState { bs := &bState{ id: s.idCount, diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go index 998d1a51bfd..2b19c93e8ea 100644 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -188,6 +188,8 @@ type Generator struct { trimPrefix string lineComment bool + + logf func(format string, args ...interface{}) // test logging hook; nil when not testing } func (g *Generator) Printf(format string, args ...interface{}) { @@ -221,13 +223,14 @@ func (g *Generator) parsePackage(patterns []string, tags []string) { // in a separate pass? For later. Tests: false, BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + Logf: g.logf, } pkgs, err := packages.Load(cfg, patterns...) if err != nil { log.Fatal(err) } if len(pkgs) != 1 { - log.Fatalf("error: %d packages found", len(pkgs)) + log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) } g.addPackage(pkgs[0]) } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index da4ab89fe63..a7a8f73e3d1 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -35,7 +35,7 @@ The Package struct provides basic information about the package, including - Imports, a map from source import strings to the Packages they name; - Types, the type information for the package's exported symbols; - Syntax, the parsed syntax trees for the package's source code; and - - TypeInfo, the result of a complete type-check of the package syntax trees. + - TypesInfo, the result of a complete type-check of the package syntax trees. 
(See the documentation for type Package for the complete list of fields and more detailed descriptions.) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index b5de9cf9f21..1f1eade0ac8 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "log" "os" "path" @@ -1109,7 +1108,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err if len(state.cfg.Overlay) == 0 { return "", func() {}, nil } - dir, err := ioutil.TempDir("", "gopackages-*") + dir, err := os.MkdirTemp("", "gopackages-*") if err != nil { return "", nil, err } @@ -1128,7 +1127,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err // Create a unique filename for the overlaid files, to avoid // creating nested directories. noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) if err != nil { return "", func() {}, err } @@ -1146,7 +1145,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err } // Write out the overlay file that contains the filepath mappings. filename = filepath.Join(dir, "overlay.json") - if err := ioutil.WriteFile(filename, b, 0665); err != nil { + if err := os.WriteFile(filename, b, 0665); err != nil { return "", func() {}, err } return filename, cleanup, nil diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 124a6fe143b..ece0e7c603e 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,7 +16,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -1127,7 +1126,7 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var err error if src == nil { ioLimit <- true // wait - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) <-ioLimit // signal } if err != nil { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index b1223713b94..2d078ccb19c 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -29,7 +29,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -221,7 +220,7 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func switch hdr { case "$$B\n": var data []byte - data, err = ioutil.ReadAll(buf) + data, err = io.ReadAll(buf) if err != nil { break } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 993135ec90e..71248209ee5 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -81,13 +81,13 @@ func CoreType(T types.Type) types.Type { // restrictions may be arbitrarily complex. 
For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 933106a23dd..cbd12f80131 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -30,7 +30,7 @@ func (xl termlist) String() string { var buf bytes.Buffer for i, x := range xl { if i > 0 { - buf.WriteString(" ∪ ") + buf.WriteString(" | ") } buf.WriteString(x.String()) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go index 7ddee28d987..7350bb702a1 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -10,11 +10,10 @@ import "go/types" // A term describes elementary type sets: // -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t type term struct { tilde bool // valid if typ != nil typ types.Type diff --git a/vendor/modules.txt b/vendor/modules.txt index fbebfece78c..0ca8c9797ab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -75,7 +75,7 @@ github.com/containerd/containerd/platforms # github.com/containerd/log v0.1.0 ## explicit; go 1.20 github.com/containerd/log -# github.com/containerd/stargz-snapshotter/estargz v0.14.3 +# github.com/containerd/stargz-snapshotter/estargz v0.15.1 ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil @@ -142,7 +142,7 @@ github.com/containers/common/pkg/umask github.com/containers/common/pkg/util github.com/containers/common/pkg/version github.com/containers/common/version -# github.com/containers/image/v5 v5.28.0 +# github.com/containers/image/v5 v5.28.1-0.20231101173728-373c52a9466f ## explicit; go 1.19 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -215,7 +215,7 @@ github.com/containers/libtrust # github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b ## explicit; go 1.20 github.com/containers/luksy -# github.com/containers/ocicrypt v1.1.8 +# github.com/containers/ocicrypt v1.1.9 ## explicit; go 1.20 github.com/containers/ocicrypt github.com/containers/ocicrypt/blockcipher @@ -233,7 +233,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider -# github.com/containers/storage v1.50.2 +# github.com/containers/storage v1.50.3-0.20231101112703-6e72f11598fb ## explicit; go 1.19 github.com/containers/storage 
github.com/containers/storage/drivers @@ -252,6 +252,7 @@ github.com/containers/storage/pkg/archive github.com/containers/storage/pkg/chrootarchive github.com/containers/storage/pkg/chunked github.com/containers/storage/pkg/chunked/compressor +github.com/containers/storage/pkg/chunked/dump github.com/containers/storage/pkg/chunked/internal github.com/containers/storage/pkg/config github.com/containers/storage/pkg/devicemapper @@ -446,7 +447,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.16.7 +# github.com/klauspost/compress v1.17.2 ## explicit; go 1.18 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -623,14 +624,14 @@ github.com/seccomp/libseccomp-golang # github.com/secure-systems-lab/go-securesystemslib v0.7.0 ## explicit; go 1.20 github.com/secure-systems-lab/go-securesystemslib/encrypted -# github.com/sigstore/fulcio v1.4.0 +# github.com/sigstore/fulcio v1.4.3 ## explicit; go 1.20 github.com/sigstore/fulcio/pkg/certificate # github.com/sigstore/rekor v1.2.2 ## explicit; go 1.19 github.com/sigstore/rekor/pkg/generated/models -# github.com/sigstore/sigstore v1.7.3 -## explicit; go 1.19 +# github.com/sigstore/sigstore v1.7.5 +## explicit; go 1.20 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/signature github.com/sigstore/sigstore/pkg/signature/options @@ -651,7 +652,7 @@ github.com/stefanberger/go-pkcs11uri ## explicit; go 1.20 github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/sylabs/sif/v2 v2.13.0 +# github.com/sylabs/sif/v2 v2.15.0 ## explicit; go 1.20 github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 @@ -660,9 +661,6 @@ github.com/syndtr/gocapability/capability # github.com/tchap/go-patricia/v2 v2.3.1 ## explicit; go 1.16 github.com/tchap/go-patricia/v2/patricia -# github.com/theupdateframework/go-tuf v0.5.2 -## explicit; go 1.18 -github.com/theupdateframework/go-tuf/encrypted # github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 ## explicit github.com/titanous/rocacheck @@ -677,7 +675,7 @@ github.com/ulikunitz/xz/lzma github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v8 v8.6.1 +# github.com/vbauerster/mpb/v8 v8.6.2 ## explicit; go 1.17 github.com/vbauerster/mpb/v8 github.com/vbauerster/mpb/v8/cwriter @@ -743,13 +741,13 @@ golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/twofish golang.org/x/crypto/xts -# golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 +# golang.org/x/exp v0.0.0-20231006140011-7918f672742d ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.12.0 -## explicit; go 1.17 +# golang.org/x/mod v0.13.0 +## explicit; go 1.18 golang.org/x/mod/semver # golang.org/x/net v0.17.0 ## explicit; go 1.17 @@ -799,7 +797,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 +# golang.org/x/tools v0.14.0 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/gcexportdata @@ -818,7 +816,7 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# 
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.58.3
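
A note on the klauspost/compress changes vendored above: the zstd bitReader now uses the input slice itself as the read cursor, re-slicing `b.in` from the tail instead of tracking a separate `off` field. The following standalone sketch is illustrative only (the `tailReader` name and the `main` harness are invented for this note, not part of the vendored package); it mirrors the patched fill() so the refill pattern can be run in isolation:

package main

import "fmt"

// tailReader is an illustrative stand-in for the pattern the patched
// bitReader uses: the input slice is shrunk from the tail as refills
// are consumed, so no separate offset cursor is needed.
type tailReader struct {
	in       []byte
	value    uint64
	bitsRead uint8
}

// fill mirrors the patched fill(): take four bytes off the tail of in
// when possible, otherwise drain the remaining bytes one at a time.
func (b *tailReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if len(b.in) >= 4 {
		v := b.in[len(b.in)-4:]
		b.in = b.in[:len(b.in)-4]
		low := uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
		b.value = (b.value << 32) | uint64(low)
		b.bitsRead -= 32
		return
	}
	b.bitsRead -= uint8(8 * len(b.in))
	for len(b.in) > 0 {
		b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
		b.in = b.in[:len(b.in)-1]
	}
}

func main() {
	r := tailReader{in: []byte{1, 2, 3, 4, 5}, bitsRead: 64}
	r.fill()
	// The four tail bytes {2,3,4,5} were folded into value little-endian.
	fmt.Printf("len(in)=%d value=%#x bitsRead=%d\n", len(r.in), r.value, r.bitsRead)
}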
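The same bump to klauspost/compress v1.17.2 exports the new dictionary builder seen in the dict.go hunks. A hedged usage sketch follows: the corpus strings and dictionary ID are made up, and the Offsets value is an assumption seeded from zstd's conventional initial repeat offsets (1, 4, 8), not something this patch mandates. Per the vendored code, leaving Level unset falls back to SpeedBestCompression.

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Invented sample corpus; a real one would be many representative inputs.
	contents := [][]byte{
		[]byte("GET /api/v1/users HTTP/1.1\r\nHost: example.com\r\n"),
		[]byte("GET /api/v1/items HTTP/1.1\r\nHost: example.com\r\n"),
		[]byte("GET /api/v1/orders HTTP/1.1\r\nHost: example.com\r\n"),
	}
	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1234,                      // arbitrary dictionary ID
		Contents: contents,                  // blocks to train the tables on
		History:  bytes.Join(contents, nil), // shared history, must be >= 8 bytes
		Offsets:  [3]int{1, 4, 8},           // assumption: zstd's initial repeat offsets
		DebugOut: os.Stderr,                 // nil silences the builder's stats
	})
	if err != nil {
		fmt.Println("BuildDict:", err)
		return
	}
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		fmt.Println("NewWriter:", err)
		return
	}
	defer enc.Close()
	compressed := enc.EncodeAll(contents[0], nil)
	fmt.Printf("dict=%d bytes, sample compressed to %d bytes\n", len(dict), len(compressed))
}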
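Finally, the sif change above replaces the hand-rolled zeroData()/resetDescriptor() pair with a zeroReader driven through io.CopyN plus writeDescriptors(). A minimal self-contained sketch of the zeroing pattern, writing into a bytes.Buffer rather than the image's io.ReadWriteSeeker:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// zeroReader mirrors the helper added in the sif patch: an io.Reader
// that never runs dry and always yields zero bytes.
type zeroReader struct{}

func (zeroReader) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

func main() {
	// In the patch the destination is f.rw positioned at d.Offset via Seek;
	// a bytes.Buffer stands in here so the sketch is self-contained.
	var dst bytes.Buffer
	if _, err := io.CopyN(&dst, zeroReader{}, 16); err != nil {
		fmt.Println("CopyN:", err)
		return
	}
	fmt.Println("wrote", dst.Len(), "zero bytes") // wrote 16 zero bytes
}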