From c81c77109bd3ed4e4dbf4a9c58d6760daa999392 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano <gscrivan@redhat.com> Date: Tue, 24 Sep 2024 14:05:58 +0200 Subject: [PATCH 1/2] vendor: update containers/storage Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com> --- go.mod | 5 +- go.sum | 10 +- pkg/rootless/rootless_linux.go | 2 +- .../github.com/containers/storage/.cirrus.yml | 2 +- vendor/github.com/containers/storage/Makefile | 2 +- vendor/github.com/containers/storage/check.go | 16 +- .../containers/storage/containers.go | 41 +- .../containers/storage/drivers/driver.go | 4 +- .../storage/drivers/overlay/mount.go | 12 +- .../storage/drivers/overlay/overlay.go | 109 ++-- .../storage/drivers/windows/windows.go | 4 +- .../github.com/containers/storage/images.go | 67 +-- .../github.com/containers/storage/layers.go | 120 +--- .../storage/pkg/archive/archive_linux.go | 3 +- .../containers/storage/pkg/archive/changes.go | 8 +- .../storage/pkg/archive/changes_other.go | 2 +- .../storage/pkg/archive/fflags_bsd.go | 4 +- .../storage/pkg/chrootarchive/chroot_linux.go | 2 +- .../storage/pkg/chunked/cache_linux.go | 27 +- .../storage/pkg/chunked/storage_linux.go | 2 +- .../storage/pkg/directory/directory_unix.go | 7 +- .../pkg/directory/directory_windows.go | 7 +- .../containers/storage/pkg/mount/flags.go | 12 +- .../storage/pkg/mount/mounter_freebsd.go | 10 +- .../storage/pkg/mount/unmount_unix.go | 2 +- .../containers/storage/pkg/parsers/parsers.go | 14 +- .../storage/pkg/stringutils/stringutils.go | 6 +- .../storage/pkg/unshare/unshare_linux.go | 2 +- vendor/github.com/containers/storage/store.go | 83 ++- .../containers/storage/types/options.go | 4 +- .../github.com/containers/storage/userns.go | 12 +- vendor/github.com/containers/storage/utils.go | 15 +- .../github.com/klauspost/compress/README.md | 22 +- .../klauspost/compress/flate/deflate.go | 2 +- .../klauspost/compress/flate/inflate.go | 74 ++- .../klauspost/compress/fse/decompress.go | 2 +- .../klauspost/compress/huff0/decompress.go | 4 +- .../klauspost/compress/zstd/blockdec.go | 4 +- .../klauspost/compress/zstd/enc_better.go | 32 +- .../klauspost/compress/zstd/enc_dfast.go | 16 +- .../klauspost/compress/zstd/encoder.go | 19 +- .../klauspost/compress/zstd/framedec.go | 4 +- .../klauspost/compress/zstd/seqdec_amd64.go | 4 +- .../klauspost/compress/zstd/seqdec_amd64.s | 8 +- .../moby/sys/capability/.codespellrc | 3 + .../moby/sys/capability/.golangci.yml | 6 + .../moby/sys/capability/CHANGELOG.md | 72 +++ vendor/github.com/moby/sys/capability/LICENSE | 25 + .../github.com/moby/sys/capability/README.md | 11 + .../moby/sys/capability/capability.go | 134 +++++ .../moby/sys/capability/capability_linux.go | 546 ++++++++++++++++++ .../moby/sys/capability/capability_noop.go | 20 + vendor/github.com/moby/sys/capability/enum.go | 303 ++++++++++ .../moby/sys/capability/enum_gen.go | 138 +++++ .../moby/sys/capability/syscall_linux.go | 153 +++++ vendor/modules.txt | 11 +- 56 files changed, 1771 insertions(+), 458 deletions(-) create mode 100644 vendor/github.com/moby/sys/capability/.codespellrc create mode 100644 vendor/github.com/moby/sys/capability/.golangci.yml create mode 100644 vendor/github.com/moby/sys/capability/CHANGELOG.md create mode 100644 vendor/github.com/moby/sys/capability/LICENSE create mode 100644 vendor/github.com/moby/sys/capability/README.md create mode 100644 vendor/github.com/moby/sys/capability/capability.go create mode 100644 vendor/github.com/moby/sys/capability/capability_linux.go create mode 100644 vendor/github.com/moby/sys/capability/capability_noop.go create 
mode 100644 vendor/github.com/moby/sys/capability/enum.go create mode 100644 vendor/github.com/moby/sys/capability/enum_gen.go create mode 100644 vendor/github.com/moby/sys/capability/syscall_linux.go diff --git a/go.mod b/go.mod index addf917682..9d6eba0048 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/containers/libhvee v0.7.1 github.com/containers/ocicrypt v1.2.0 github.com/containers/psgo v1.9.0 - github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483 + github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0 github.com/containers/winquit v1.1.0 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 github.com/coreos/stream-metadata-go v0.4.4 @@ -48,6 +48,7 @@ require ( github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-sqlite3 v1.14.23 github.com/mdlayher/vsock v1.2.1 + github.com/moby/sys/capability v0.2.0 github.com/moby/sys/user v0.3.0 github.com/moby/term v0.5.0 github.com/nxadm/tail v1.4.11 @@ -153,7 +154,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.10 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kr/fs v0.1.0 // indirect github.com/leodido/go-urn v1.2.4 // indirect diff --git a/go.sum b/go.sum index 0fbc30cce9..a3f89b3b48 100644 --- a/go.sum +++ b/go.sum @@ -99,8 +99,8 @@ github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sir github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U= github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g= github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A= -github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483 h1:hQOAlIad+xjukeGFHQbH/x5I2zuPNCXmjvSrxX5ERF4= -github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483/go.mod h1:fRTU33KP5BXpOIWDxDgU5LpHbrOzWxmVmtm/3PYLlgE= +github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0 h1:0NNBYNpPFzQUKXVq+oQG6NFQcBwtbs2luxl/bVulbPs= +github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0/go.mod h1:Gx8WE9kURdCyEuB9cq8Kq5sRDRbpZi34lnOQ3zAGK2s= github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE= github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8= github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= @@ -310,8 +310,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod 
h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -365,6 +365,8 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/capability v0.2.0 h1:OJtbqfthavtfh1kycvEhMvY7/M2BHscP2fiXgzKI3sk= +github.com/moby/sys/capability v0.2.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go index 3ad3906241..9ecc5df9e4 100644 --- a/pkg/rootless/rootless_linux.go +++ b/pkg/rootless/rootless_linux.go @@ -20,9 +20,9 @@ import ( "github.com/containers/storage/pkg/idtools" pmount "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/unshare" + "github.com/moby/sys/capability" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "github.com/syndtr/gocapability/capability" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 887147040d..1c93587dc3 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -171,7 +171,7 @@ vendor_task: cross_task: alias: cross container: - image: golang:1.21 + image: golang:1.22 build_script: make cross diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 6f20e059d5..a619694fd0 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de # N/B: This value is managed by Renovate, manual changes are # possible, as long as they don't disturb the formatting # (i.e. DO NOT ADD A 'v' prefix!) 
-GOLANGCI_LINT_VERSION := 1.60.3 +GOLANGCI_LINT_VERSION := 1.61.0 default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go index 7176ba361f..396648e7fa 100644 --- a/vendor/github.com/containers/storage/check.go +++ b/vendor/github.com/containers/storage/check.go @@ -8,6 +8,7 @@ import ( "os" "path" "path/filepath" + "slices" "sort" "strings" "sync" @@ -769,12 +770,9 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error { return d } isUnaccounted := func(errs []error) bool { - for _, err := range errs { - if errors.Is(err, ErrLayerUnaccounted) { - return true - } - } - return false + return slices.ContainsFunc(errs, func(err error) bool { + return errors.Is(err, ErrLayerUnaccounted) + }) } sort.Slice(layersToDelete, func(i, j int) bool { // we've not heard of either of them, so remove them in the order the driver suggested @@ -1005,12 +1003,12 @@ func (c *checkDirectory) remove(path string) { func (c *checkDirectory) header(hdr *tar.Header) { name := path.Clean(hdr.Name) dir, base := path.Split(name) - if strings.HasPrefix(base, archive.WhiteoutPrefix) { + if file, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok { if base == archive.WhiteoutOpaqueDir { c.remove(path.Clean(dir)) c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix()) } else { - c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):])) + c.remove(path.Join(dir, file)) } } else { if hdr.Typeflag == tar.TypeLink { @@ -1044,7 +1042,7 @@ func (c *checkDirectory) header(hdr *tar.Header) { // headers updates a checkDirectory using information from the passed-in header slice func (c *checkDirectory) headers(hdrs []*tar.Header) { - hdrs = append([]*tar.Header{}, hdrs...) + hdrs = slices.Clone(hdrs) // sort the headers from the diff to ensure that whiteouts appear // before content when they both appear in the same directory, per // https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index a7dfb405ba..c669ce7b0c 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -3,8 +3,10 @@ package storage import ( "errors" "fmt" + "maps" "os" "path/filepath" + "slices" "sync" "time" @@ -162,17 +164,17 @@ type containerStore struct { func copyContainer(c *Container) *Container { return &Container{ ID: c.ID, - Names: copyStringSlice(c.Names), + Names: slices.Clone(c.Names), ImageID: c.ImageID, LayerID: c.LayerID, Metadata: c.Metadata, - BigDataNames: copyStringSlice(c.BigDataNames), - BigDataSizes: copyStringInt64Map(c.BigDataSizes), - BigDataDigests: copyStringDigestMap(c.BigDataDigests), + BigDataNames: slices.Clone(c.BigDataNames), + BigDataSizes: maps.Clone(c.BigDataSizes), + BigDataDigests: maps.Clone(c.BigDataDigests), Created: c.Created, UIDMap: copyIDMap(c.UIDMap), GIDMap: copyIDMap(c.GIDMap), - Flags: copyStringInterfaceMap(c.Flags), + Flags: maps.Clone(c.Flags), volatileStore: c.volatileStore, } } @@ -696,7 +698,7 @@ func (r *containerStore) create(id string, names []string, image, layer string, volatileStore: options.Volatile, } if options.MountOpts != nil { - container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...) 
+ container.Flags[mountOptsFlag] = slices.Clone(options.MountOpts) } if options.Volatile { container.Flags[volatileFlag] = true @@ -788,13 +790,6 @@ func (r *containerStore) Delete(id string) error { return ErrContainerUnknown } id = container.ID - toDeleteIndex := -1 - for i, candidate := range r.containers { - if candidate.ID == id { - toDeleteIndex = i - break - } - } delete(r.byid, id) // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. @@ -803,14 +798,9 @@ func (r *containerStore) Delete(id string) error { for _, name := range container.Names { delete(r.byname, name) } - if toDeleteIndex != -1 { - // delete the container at toDeleteIndex - if toDeleteIndex == len(r.containers)-1 { - r.containers = r.containers[:len(r.containers)-1] - } else { - r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) - } - } + r.containers = slices.DeleteFunc(r.containers, func(candidate *Container) bool { + return candidate.ID == id + }) if err := r.saveFor(container); err != nil { return err } @@ -948,14 +938,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { save = true } - addName := true - for _, name := range c.BigDataNames { - if name == key { - addName = false - break - } - } - if addName { + if !slices.Contains(c.BigDataNames, key) { c.BigDataNames = append(c.BigDataNames, key) save = true } diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index b62234e57e..91b240c450 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -254,8 +254,8 @@ type Differ interface { type DriverWithDiffer interface { Driver // ApplyDiffWithDiffer applies the changes using the callback function. - // If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory. - ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error) + // The staging directory created by this function is guaranteed to be usable with ApplyDiffFromStagingDirectory. + ApplyDiffWithDiffer(options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error) // ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory. ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index 8829e55e98..82c1a460b8 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -103,20 +103,20 @@ func mountOverlayFromMain() { // paths, but we don't want to mess with other options. 
var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string for _, arg := range strings.Split(options.Label, ",") { - kv := strings.SplitN(arg, "=", 2) - switch kv[0] { + key, val, _ := strings.Cut(arg, "=") + switch key { case "upperdir": upperk = "upperdir=" - upperv = kv[1] + upperv = val case "workdir": workk = "workdir=" - workv = kv[1] + workv = val case "lowerdir": lowerk = "lowerdir=" - lowerv = kv[1] + lowerv = val case "label": labelk = "label=" - labelv = kv[1] + labelv = val default: if others == "" { others = arg diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 63777fe470..ee3703affe 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -14,6 +14,7 @@ import ( "os/exec" "path" "path/filepath" + "slices" "strconv" "strings" "sync" @@ -158,30 +159,7 @@ func init() { } func hasMetacopyOption(opts []string) bool { - for _, s := range opts { - if s == "metacopy=on" { - return true - } - } - return false -} - -func stripOption(opts []string, option string) []string { - for i, s := range opts { - if s == option { - return stripOption(append(opts[:i], opts[i+1:]...), option) - } - } - return opts -} - -func hasVolatileOption(opts []string) bool { - for _, s := range opts { - if s == "volatile" { - return true - } - } - return false + return slices.Contains(opts, "metacopy=on") } func getMountProgramFlagFile(path string) string { @@ -1526,14 +1504,13 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it") } } - optsList = stripOption(optsList, "metacopy=on") + optsList = slices.DeleteFunc(optsList, func(opt string) bool { + return opt == "metacopy=on" + }) } - for _, o := range optsList { - if o == "ro" { - readWrite = false - break - } + if slices.Contains(optsList, "ro") { + readWrite = false } lowers, err := os.ReadFile(path.Join(dir, lowerFile)) @@ -1732,7 +1709,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO optsList = append(optsList, "userxattr") } - if options.Volatile && !hasVolatileOption(optsList) { + if options.Volatile && !slices.Contains(optsList, "volatile") { supported, err := d.getSupportsVolatile() if err != nil { return "", err } @@ -1896,7 +1873,9 @@ func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string { // and since the rundir cannot be shared for different stores, it is safe to assume the // current process has exclusive access to it. // - // LOCKING BUG? the .DiffSize operation does not currently hold an exclusive lock on the primary store. + // TODO: LOCKING BUG: the .DiffSize operation does not currently hold an exclusive lock on the primary store. + // (_Some_ of the callers might be better ported to use a metadata-only size computation instead of DiffSize, + // but DiffSize probably needs to remain for computing sizes of container’s RW layers.) 
if inAdditionalStore { return path.Join(d.runhome, id, "merged") } @@ -2187,7 +2166,7 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) { } // ApplyDiffWithDiffer applies the changes in the new layer using the specified function -func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) { +func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) { var idMappings *idtools.IDMappings var forceMask *os.FileMode @@ -2205,44 +2184,36 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App var applyDir string - if id == "" { - stagingDir := d.getStagingDir(id) - err := os.MkdirAll(stagingDir, 0o700) - if err != nil && !os.IsExist(err) { - return graphdriver.DriverWithDifferOutput{}, err - } - layerDir, err := os.MkdirTemp(stagingDir, "") - if err != nil { - return graphdriver.DriverWithDifferOutput{}, err - } - perms := defaultPerms - if forceMask != nil { - perms = *forceMask - } - applyDir = filepath.Join(layerDir, "dir") - if err := os.Mkdir(applyDir, perms); err != nil { - return graphdriver.DriverWithDifferOutput{}, err - } + stagingDir := d.getStagingDir("") + err := os.MkdirAll(stagingDir, 0o700) + if err != nil && !os.IsExist(err) { + return graphdriver.DriverWithDifferOutput{}, err + } + layerDir, err := os.MkdirTemp(stagingDir, "") + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + perms := defaultPerms + if forceMask != nil { + perms = *forceMask + } + applyDir = filepath.Join(layerDir, "dir") + if err := os.Mkdir(applyDir, perms); err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } - lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile)) - if err != nil { - return graphdriver.DriverWithDifferOutput{}, err - } - defer func() { - if errRet != nil { - delete(d.stagingDirsLocks, layerDir) - lock.Unlock() - } - }() - d.stagingDirsLocks[layerDir] = lock - lock.Lock() - } else { - var err error - applyDir, err = d.getDiffPath(id) - if err != nil { - return graphdriver.DriverWithDifferOutput{}, err - } + lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile)) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err } + defer func() { + if errRet != nil { + delete(d.stagingDirsLocks, layerDir) + lock.Unlock() + } + }() + d.stagingDirsLocks[layerDir] = lock + lock.Lock() logrus.Debugf("Applying differ in %s", applyDir) diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 18f90fdc53..d38e74534e 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -764,8 +764,8 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, buf := bufio.NewWriter(nil) for err == nil { base := path.Base(hdr.Name) - if strings.HasPrefix(base, archive.WhiteoutPrefix) { - name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + if rm, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok { + name := path.Join(path.Dir(hdr.Name), rm) err = w.Remove(filepath.FromSlash(name)) if err != nil { return 0, err diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go 
index d71eab08bb..8593c03c8e 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -2,8 +2,10 @@ package storage import ( "fmt" + "maps" "os" "path/filepath" + "slices" "strings" "sync" "time" @@ -181,18 +183,18 @@ func copyImage(i *Image) *Image { return &Image{ ID: i.ID, Digest: i.Digest, - Digests: copyDigestSlice(i.Digests), - Names: copyStringSlice(i.Names), - NamesHistory: copyStringSlice(i.NamesHistory), + Digests: slices.Clone(i.Digests), + Names: slices.Clone(i.Names), + NamesHistory: slices.Clone(i.NamesHistory), TopLayer: i.TopLayer, - MappedTopLayers: copyStringSlice(i.MappedTopLayers), + MappedTopLayers: slices.Clone(i.MappedTopLayers), Metadata: i.Metadata, - BigDataNames: copyStringSlice(i.BigDataNames), - BigDataSizes: copyStringInt64Map(i.BigDataSizes), - BigDataDigests: copyStringDigestMap(i.BigDataDigests), + BigDataNames: slices.Clone(i.BigDataNames), + BigDataSizes: maps.Clone(i.BigDataSizes), + BigDataDigests: maps.Clone(i.BigDataDigests), Created: i.Created, ReadOnly: i.ReadOnly, - Flags: copyStringInterfaceMap(i.Flags), + Flags: maps.Clone(i.Flags), } } @@ -863,12 +865,6 @@ func (r *imageStore) Delete(id string) error { return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } id = image.ID - toDeleteIndex := -1 - for i, candidate := range r.images { - if candidate.ID == id { - toDeleteIndex = i - } - } delete(r.byid, id) // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. @@ -877,21 +873,18 @@ func (r *imageStore) Delete(id string) error { delete(r.byname, name) } for _, digest := range image.Digests { - prunedList := imageSliceWithoutValue(r.bydigest[digest], image) + prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool { + return i == image + }) if len(prunedList) == 0 { delete(r.bydigest, digest) } else { r.bydigest[digest] = prunedList } } - if toDeleteIndex != -1 { - // delete the image at toDeleteIndex - if toDeleteIndex == len(r.images)-1 { - r.images = r.images[:len(r.images)-1] - } else { - r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) - } - } + r.images = slices.DeleteFunc(r.images, func(candidate *Image) bool { + return candidate.ID == id + }) if err := r.Save(); err != nil { return err } @@ -977,17 +970,6 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) { return copyStringSlice(image.BigDataNames), nil } -func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { - modified := make([]*Image, 0, len(slice)) - for _, v := range slice { - if v == value { - continue - } - modified = append(modified, v) - } - return modified -} - // Requires startWriting. 
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { if !r.lockfile.IsReadWrite() { @@ -1037,21 +1019,16 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest { save = true } - addName := true - for _, name := range image.BigDataNames { - if name == key { - addName = false - break - } - } - if addName { + if !slices.Contains(image.BigDataNames, key) { image.BigDataNames = append(image.BigDataNames, key) save = true } for _, oldDigest := range image.Digests { // remove the image from the list of images in the digest-based index if list, ok := r.bydigest[oldDigest]; ok { - prunedList := imageSliceWithoutValue(list, image) + prunedList := slices.DeleteFunc(list, func(i *Image) bool { + return i == image + }) if len(prunedList) == 0 { delete(r.bydigest, oldDigest) } else { @@ -1066,9 +1043,7 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest // add the image to the list of images in the digest-based index which // corresponds to the new digest for this item, unless it's already there list := r.bydigest[newDigest] - if len(list) == len(imageSliceWithoutValue(list, image)) { - // the list isn't shortened by trying to prune this image from it, - // so it's not in there yet + if !slices.Contains(list, image) { r.bydigest[newDigest] = append(list, image) } } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 8ae969894d..c65be5f449 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -5,10 +5,12 @@ import ( "errors" "fmt" "io" + "maps" "os" "path" "path/filepath" "reflect" + "slices" "sort" "strings" "sync" @@ -312,9 +314,8 @@ type rwLayerStore interface { // applies its changes to a specified layer. ApplyDiff(to string, diff io.Reader) (int64, error) - // ApplyDiffWithDiffer applies the changes through the differ callback function. - // If to is the empty string, then a staging directory is created by the driver. - ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + // applyDiffWithDifferNoLock applies the changes through the differ callback function. + applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) // CleanupStagingDirectory cleanups the staging directory. 
It can be used to cleanup the staging directory on errors CleanupStagingDirectory(stagingDirectory string) error @@ -435,7 +436,7 @@ func layerLocation(l *Layer) layerLocations { func copyLayer(l *Layer) *Layer { return &Layer{ ID: l.ID, - Names: copyStringSlice(l.Names), + Names: slices.Clone(l.Names), Parent: l.Parent, Metadata: l.Metadata, MountLabel: l.MountLabel, @@ -450,8 +451,8 @@ func copyLayer(l *Layer) *Layer { CompressionType: l.CompressionType, ReadOnly: l.ReadOnly, volatileStore: l.volatileStore, - BigDataNames: copyStringSlice(l.BigDataNames), - Flags: copyStringInterfaceMap(l.Flags), + BigDataNames: slices.Clone(l.BigDataNames), + Flags: maps.Clone(l.Flags), UIDMap: copyIDMap(l.UIDMap), GIDMap: copyIDMap(l.GIDMap), UIDs: copyUint32Slice(l.UIDs), @@ -1372,7 +1373,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize templateCompressionType = templateLayer.CompressionType - templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...) + templateUIDs, templateGIDs = slices.Clone(templateLayer.UIDs), slices.Clone(templateLayer.GIDs) templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID)) if err != nil && !errors.Is(err, os.ErrNotExist) { return nil, -1, err @@ -1564,19 +1565,9 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) // - r.layers[].MountPoint (directly and via loadMounts / saveMounts) // - r.bymount (via loadMounts / saveMounts) - // check whether options include ro option - hasReadOnlyOpt := func(opts []string) bool { - for _, item := range opts { - if item == "ro" { - return true - } - } - return false - } - // You are not allowed to mount layers from readonly stores if they // are not mounted read/only. - if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) { + if !r.lockfile.IsReadWrite() && !slices.Contains(options.Options, "ro") { return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.Lock() @@ -1836,14 +1827,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error return fmt.Errorf("closing bigdata file for the layer: %w", err) } - addName := true - for _, name := range layer.BigDataNames { - if name == key { - addName = false - break - } - } - if addName { + if !slices.Contains(layer.BigDataNames, key) { layer.BigDataNames = append(layer.BigDataNames, key) return r.saveFor(layer) } @@ -1938,32 +1922,13 @@ func (r *layerStore) deleteInternal(id string) error { delete(r.bymount, layer.MountPoint) } r.deleteInDigestMap(id) - toDeleteIndex := -1 - for i, candidate := range r.layers { - if candidate.ID == id { - toDeleteIndex = i - break - } - } - if toDeleteIndex != -1 { - // delete the layer at toDeleteIndex - if toDeleteIndex == len(r.layers)-1 { - r.layers = r.layers[:len(r.layers)-1] - } else { - r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) 
- } - } - if mountLabel != "" { - var found bool - for _, candidate := range r.layers { - if candidate.MountLabel == mountLabel { - found = true - break - } - } - if !found { - selinux.ReleaseLabel(mountLabel) - } + r.layers = slices.DeleteFunc(r.layers, func(candidate *Layer) bool { + return candidate.ID == id + }) + if mountLabel != "" && !slices.ContainsFunc(r.layers, func(candidate *Layer) bool { + return candidate.MountLabel == mountLabel + }) { + selinux.ReleaseLabel(mountLabel) } return nil } @@ -1971,21 +1936,15 @@ func (r *layerStore) deleteInternal(id string) error { // Requires startWriting. func (r *layerStore) deleteInDigestMap(id string) { for digest, layers := range r.bycompressedsum { - for i, layerID := range layers { - if layerID == id { - layers = append(layers[:i], layers[i+1:]...) - r.bycompressedsum[digest] = layers - break - } + if i := slices.Index(layers, id); i != -1 { + layers = slices.Delete(layers, i, i+1) + r.bycompressedsum[digest] = layers } } for digest, layers := range r.byuncompressedsum { - for i, layerID := range layers { - if layerID == id { - layers = append(layers[:i], layers[i+1:]...) - r.byuncompressedsum[digest] = layers - break - } + if i := slices.Index(layers, id); i != -1 { + layers = slices.Delete(layers, i, i+1) + r.byuncompressedsum[digest] = layers } } } @@ -2545,9 +2504,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver if layer.Flags == nil { layer.Flags = make(map[string]interface{}) } - for k, v := range options.Flags { - layer.Flags[k] = v - } + maps.Copy(layer.Flags, options.Flags) } if err = r.saveFor(layer); err != nil { return err @@ -2585,37 +2542,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver return err } -// Requires startWriting. -func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { +// It must be called without any c/storage locks held to allow differ to make c/storage calls. 
+func (r *layerStore) applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { return nil, ErrNotSupported } - if to == "" { - output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ) - return &output, err - } - - layer, ok := r.lookup(to) - if !ok { - return nil, ErrLayerUnknown - } - if options == nil { - options = &drivers.ApplyDiffWithDifferOpts{ - ApplyDiffOpts: drivers.ApplyDiffOpts{ - Mappings: r.layerMappings(layer), - MountLabel: layer.MountLabel, - }, - } - } - output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ) - if err != nil { - return nil, err - } - layer.UIDs = output.UIDs - layer.GIDs = output.GIDs - err = r.saveFor(layer) + output, err := ddriver.ApplyDiffWithDiffer(options, differ) return &output, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index eae60a3055..b9d718b60f 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -124,8 +124,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str } // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] + if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok { originalPath := filepath.Join(dir, originalBase) if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index 4487845497..3075c27bbf 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -5,6 +5,7 @@ import ( "bytes" "fmt" "io" + "maps" "os" "path/filepath" "reflect" @@ -97,8 +98,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { f := filepath.Base(path) // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] + if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok { return filepath.Join(filepath.Dir(path), originalFile), nil } @@ -319,9 +319,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { // otherwise any previous delete/change is considered recursive oldChildren := make(map[string]*FileInfo) if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } + maps.Copy(oldChildren, oldInfo.children) } for name, newChild := range info.children { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index ca272e68fc..a23bdf84b5 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -31,7 +31,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool }() // block until both routines have returned - for i := 0; i < 2; i++ { + for range 2 { if err := <-errs; err != nil { return nil, nil, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go 
b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go index 92b8d05ed8..5b8dc84e29 100644 --- a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -80,9 +80,9 @@ func parseFileFlags(fflags string) (uint32, uint32, error) { var set, clear uint32 = 0, 0 for _, fflag := range strings.Split(fflags, ",") { isClear := false - if strings.HasPrefix(fflag, "no") { + if clean, ok := strings.CutPrefix(fflag, "no"); ok { isClear = true - fflag = strings.TrimPrefix(fflag, "no") + fflag = clean } if value, ok := flagNameToValue[fflag]; ok { if isClear { diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go index 5b8acdaba5..3ca99a2c21 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -8,7 +8,7 @@ import ( "path/filepath" "github.com/containers/storage/pkg/mount" - "github.com/syndtr/gocapability/capability" + "github.com/moby/sys/capability" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 1d823c8d49..a7dc18be42 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -65,11 +65,10 @@ type layer struct { } type layersCache struct { - layers []*layer - refs int - store storage.Store - mutex sync.RWMutex - created time.Time + layers []*layer + refs int + store storage.Store + mutex sync.RWMutex } var ( @@ -83,6 +82,7 @@ func (c *layer) release() { if err := unix.Munmap(c.mmapBuffer); err != nil { logrus.Warnf("Error Munmap: layer %q: %v", c.id, err) } + c.mmapBuffer = nil } } @@ -107,14 +107,13 @@ func (c *layersCache) release() { func getLayersCacheRef(store storage.Store) *layersCache { cacheMutex.Lock() defer cacheMutex.Unlock() - if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 { + if cache != nil && cache.store == store { cache.refs++ return cache } - cache := &layersCache{ - store: store, - refs: 1, - created: time.Now(), + cache = &layersCache{ + store: store, + refs: 1, } return cache } @@ -291,7 +290,7 @@ func (c *layersCache) load() error { if r.ReadOnly { // If the layer is coming from a read-only store, do not attempt // to write to it. - // Therefore,we won’t find any matches in read-only-store layers, + // Therefore, we won’t find any matches in read-only-store layers, // unless the read-only store layer comes prepopulated with cacheKey data. 
continue } @@ -781,14 +780,14 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, return "", "", -1, nil } + c.mutex.RLock() + defer c.mutex.RUnlock() + binaryDigest, err := makeBinaryDigest(digest) if err != nil { return "", "", 0, err } - c.mutex.RLock() - defer c.mutex.RUnlock() - for _, layer := range c.layers { if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) { continue diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 403d7d5aa3..60cada2cc5 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -1331,7 +1331,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff wg.Wait() }() - for i := 0; i < copyGoRoutines; i++ { + for range copyGoRoutines { wg.Add(1) jobs := copyFileJobs diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index dd6c02a774..bed040e0cb 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -4,8 +4,8 @@ package directory import ( + "errors" "io/fs" - "os" "path/filepath" "syscall" ) @@ -27,7 +27,7 @@ func Usage(dir string) (usage *DiskUsage, err error) { if err != nil { // if dir does not exist, Usage() returns the error. // if dir/x disappeared while walking, Usage() ignores dir/x. - if os.IsNotExist(err) && d != dir { + if errors.Is(err, fs.ErrNotExist) && d != dir { return nil } return err @@ -35,6 +35,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { fileInfo, err := entry.Info() if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go index 482bc51a26..3c92b9567f 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -4,8 +4,8 @@ package directory import ( + "errors" "io/fs" - "os" "path/filepath" ) @@ -25,7 +25,7 @@ func Usage(dir string) (usage *DiskUsage, err error) { if err != nil { // if dir does not exist, Size() returns the error. // if dir/x disappeared while walking, Size() ignores dir/x. 
- if os.IsNotExist(err) && path != dir { + if errors.Is(err, fs.ErrNotExist) && path != dir { return nil } return err @@ -40,6 +40,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { fileInfo, err := d.Info() if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } usage.Size += fileInfo.Size() diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go index 5de3a671dd..40a229932b 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -97,14 +97,14 @@ func MergeTmpfsOptions(options []string) ([]string, error) { } continue } - opt := strings.SplitN(option, "=", 2) - if len(opt) != 2 || !validFlags[opt[0]] { + opt, _, ok := strings.Cut(option, "=") + if !ok || !validFlags[opt] { return nil, fmt.Errorf("invalid tmpfs option %q", opt) } - if !dataCollisions[opt[0]] { + if !dataCollisions[opt] { // We prepend the option and add to collision map newOptions = append([]string{option}, newOptions...) - dataCollisions[opt[0]] = true + dataCollisions[opt] = true } } @@ -140,8 +140,8 @@ func ParseOptions(options string) (int, string) { func ParseTmpfsOptions(options string) (int, string, error) { flags, data := ParseOptions(options) for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { + opt, _, _ := strings.Cut(o, "=") + if !validFlags[opt] { return 0, "", fmt.Errorf("invalid tmpfs option %q", opt) } } diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index c70b0bf991..afd3210418 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -40,13 +40,9 @@ func mount(device, target, mType string, flag uintptr, data string) error { isNullFS = true continue } - opt := strings.SplitN(x, "=", 2) - options = append(options, opt[0]) - if len(opt) == 2 { - options = append(options, opt[1]) - } else { - options = append(options, "") - } + name, val, _ := strings.Cut(x, "=") + options = append(options, name) + options = append(options, val) } } diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go index a2a1d40723..a29e920904 100644 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go @@ -11,7 +11,7 @@ import ( func unmount(target string, flags int) error { var err error - for i := 0; i < 50; i++ { + for range 50 { err = unix.Unmount(target, flags) switch err { case unix.EBUSY: diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go index 3fb0c36b88..7b20b06287 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go +++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go @@ -11,11 +11,11 @@ import ( // ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) func ParseKeyValueOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { + k, v, ok := strings.Cut(opt, "=") + if !ok { return "", "", fmt.Errorf("unable to parse key/value option: %s", opt) } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil + return 
strings.TrimSpace(k), strings.TrimSpace(v), nil } // ParseUintList parses and validates the specified string as the value @@ -42,19 +42,19 @@ func ParseUintList(val string) (map[int]bool, error) { errInvalidFormat := fmt.Errorf("invalid format: %s", val) for _, r := range split { - if !strings.Contains(r, "-") { + minS, maxS, ok := strings.Cut(r, "-") + if !ok { v, err := strconv.Atoi(r) if err != nil { return nil, errInvalidFormat } availableInts[v] = true } else { - split := strings.SplitN(r, "-", 2) - min, err := strconv.Atoi(split[0]) + min, err := strconv.Atoi(minS) if err != nil { return nil, errInvalidFormat } - max, err := strconv.Atoi(split[1]) + max, err := strconv.Atoi(maxS) if err != nil { return nil, errInvalidFormat } diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go index 66a59c85d5..f63c3e4444 100644 --- a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -3,7 +3,7 @@ package stringutils import ( "bytes" - "math/rand" + "math/rand/v2" "strings" ) @@ -13,7 +13,7 @@ func GenerateRandomAlphaOnlyString(n int) string { letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]byte, n) for i := range b { - b[i] = letters[rand.Intn(len(letters))] + b[i] = letters[rand.IntN(len(letters))] } return string(b) } @@ -25,7 +25,7 @@ func GenerateRandomASCIIString(n int) string { "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " res := make([]byte, n) for i := 0; i < n; i++ { - res[i] = chars[rand.Intn(len(chars))] + res[i] = chars[rand.IntN(len(chars))] } return string(res) } diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index 32e8d7dca3..98b810e9d3 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -21,9 +21,9 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/reexec" + "github.com/moby/sys/capability" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "github.com/syndtr/gocapability/capability" ) // Cmd wraps an exec.Cmd created by the reexec package in unshare(), and diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index bd4da7a468..692bf35314 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -6,9 +6,11 @@ import ( "errors" "fmt" "io" + "maps" "os" "path/filepath" "reflect" + "slices" "strings" "sync" "syscall" @@ -339,11 +341,17 @@ type Store interface { // } ApplyDiff(to string, diff io.Reader) (int64, error) - // ApplyDiffer applies a diff to a layer. + // ApplyDiffWithDiffer applies a diff to a layer. // It is the caller responsibility to clean the staging directory if it is not - // successfully applied with ApplyDiffFromStagingDirectory. + // successfully applied with ApplyStagedLayer. + // Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + // PrepareStagedLayer applies a diff to a layer. 
+ // It is the caller responsibility to clean the staging directory if it is not + // successfully applied with ApplyStagedLayer. + PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + // ApplyStagedLayer combines the functions of creating a layer and using the staging // directory to populate it. // It marks the layer for automatic removal if applying the diff fails for any reason. @@ -939,9 +947,7 @@ func (s *store) GraphOptions() []string { func (s *store) PullOptions() map[string]string { cp := make(map[string]string, len(s.pullOptions)) - for k, v := range s.pullOptions { - cp[k] = v - } + maps.Copy(cp, s.pullOptions) return cp } @@ -1464,7 +1470,7 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare if lOptions != nil { options = *lOptions options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData) - options.Flags = copyStringInterfaceMap(lOptions.Flags) + options.Flags = maps.Clone(lOptions.Flags) } if options.HostUIDMapping { options.UIDMap = nil @@ -1605,7 +1611,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i CreationDate: i.Created, Digest: i.Digest, Digests: copyDigestSlice(i.Digests), - NamesHistory: copyStringSlice(i.NamesHistory), + NamesHistory: slices.Clone(i.NamesHistory), } for _, key := range i.BigDataNames { data, err := store.BigData(id, key) @@ -1622,7 +1628,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i Digest: dataDigest, }) } - namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...)) + namesToAddAfterCreating = dedupeStrings(slices.Concat(i.Names, names)) break } } @@ -1636,18 +1642,16 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i if iOptions.Digest != "" { options.Digest = iOptions.Digest } - options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...) + options.Digests = append(options.Digests, iOptions.Digests...) if iOptions.Metadata != "" { options.Metadata = iOptions.Metadata } options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...) - options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...) + options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...) 
if options.Flags == nil { options.Flags = make(map[string]interface{}) } - for k, v := range iOptions.Flags { - options.Flags[k] = v - } + maps.Copy(options.Flags, iOptions.Flags) } if options.CreationDate.IsZero() { @@ -1782,7 +1786,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap) options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap) options.LabelOpts = copyStringSlice(cOptions.LabelOpts) - options.Flags = copyStringInterfaceMap(cOptions.Flags) + options.Flags = maps.Clone(cOptions.Flags) options.MountOpts = copyStringSlice(cOptions.MountOpts) options.StorageOpt = copyStringStringMap(cOptions.StorageOpt) options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData) @@ -3105,13 +3109,19 @@ func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) e return err } +func (s *store) PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { + rlstore, err := s.getLayerStore() + if err != nil { + return nil, err + } + return rlstore.applyDiffWithDifferNoLock(options, differ) +} + func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { - return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) { - if to != "" && !rlstore.Exists(to) { - return nil, ErrLayerUnknown - } - return rlstore.ApplyDiffWithDiffer(to, options, differ) - }) + if to != "" { + return nil, fmt.Errorf("ApplyDiffWithDiffer does not support non-empty 'layer' parameter") + } + return s.PrepareStagedLayer(options, differ) } func (s *store) DifferTarget(id string) (string, error) { @@ -3683,22 +3693,6 @@ func copyStringSlice(slice []string) []string { return ret } -func copyStringInt64Map(m map[string]int64) map[string]int64 { - ret := make(map[string]int64, len(m)) - for k, v := range m { - ret[k] = v - } - return ret -} - -func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest { - ret := make(map[string]digest.Digest, len(m)) - for k, v := range m { - ret[k] = v - } - return ret -} - func copyStringStringMap(m map[string]string) map[string]string { ret := make(map[string]string, len(m)) for k, v := range m { @@ -3736,7 +3730,7 @@ func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOptio ret := make([]ImageBigDataOption, len(slice)) for i := range slice { ret[i].Key = slice[i].Key - ret[i].Data = append([]byte{}, slice[i].Data...) + ret[i].Data = slices.Clone(slice[i].Data) ret[i].Digest = slice[i].Digest } return ret @@ -3746,7 +3740,7 @@ func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []Container ret := make([]ContainerBigDataOption, len(slice)) for i := range slice { ret[i].Key = slice[i].Key - ret[i].Data = append([]byte{}, slice[i].Data...) 
+ ret[i].Data = slices.Clone(slice[i].Data) } return ret } @@ -3800,10 +3794,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro return nil, err } key = strings.ToLower(key) - for _, m := range mountOpts { - if m == key { - return strings.Split(val, ","), nil - } + if slices.Contains(mountOpts, key) { + return strings.Split(val, ","), nil } } return nil, nil @@ -3811,11 +3803,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro // Free removes the store from the list of stores func (s *store) Free() { - for i := 0; i < len(stores); i++ { - if stores[i] == s { - stores = append(stores[:i], stores[i+1:]...) - return - } + if i := slices.Index(stores, s); i != -1 { + stores = slices.Delete(stores, i, i+1) } } diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index f1a900b8d8..efc08c4763 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -344,8 +344,8 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) { dirEntries, err := os.ReadDir(opts.GraphRoot) if err == nil { for _, entry := range dirEntries { - if strings.HasSuffix(entry.Name(), "-images") { - opts.GraphDriverName = strings.TrimSuffix(entry.Name(), "-images") + if name, ok := strings.CutSuffix(entry.Name(), "-images"); ok { + opts.GraphDriverName = name break } } diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index 57120731be..1b494ef12c 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -89,7 +89,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { passwdFile = filepath.Join(containerMount, "etc/passwd") } if groupFile == "" { - groupFile = filepath.Join(groupFile, "etc/group") + groupFile = filepath.Join(containerMount, "etc/group") } size := 0 @@ -99,14 +99,14 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { for _, u := range users { // Skip the "nobody" user otherwise we end up with 65536 // ids with most images - if u.Name == "nobody" { + if u.Name == "nobody" || u.Name == "nogroup" { continue } if u.Uid > size && u.Uid != nobodyUser { - size = u.Uid + size = u.Uid + 1 } if u.Gid > size && u.Gid != nobodyUser { - size = u.Gid + size = u.Gid + 1 } } } @@ -114,11 +114,11 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { groups, err := libcontainerUser.ParseGroupFile(groupFile) if err == nil { for _, g := range groups { - if g.Name == "nobody" { + if g.Name == "nobody" || g.Name == "nogroup" { continue } if g.Gid > size && g.Gid != nobodyUser { - size = g.Gid + size = g.Gid + 1 } } } diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 5bade6ffe3..c61d798378 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "slices" "github.com/containers/storage/types" ) @@ -41,22 +42,12 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO // remove given names from old names result = make([]string, 0, len(oldNames)) for _, name := range oldNames { - // only keep names in final result which do not intersect with input names - // basically `result = oldNames - opParameters` - nameShouldBeRemoved := 
false - for _, opName := range opParameters { - if name == opName { - nameShouldBeRemoved = true - } - } - if !nameShouldBeRemoved { + if !slices.Contains(opParameters, name) { result = append(result, name) } } case addNames: - result = make([]string, 0, len(opParameters)+len(oldNames)) - result = append(result, opParameters...) - result = append(result, oldNames...) + result = slices.Concat(opParameters, oldNames) default: return result, errInvalidUpdateNameOperation } diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 05c7359e48..684a30853a 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,20 @@ This package provides various compression algorithms. # changelog +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -81,7 +95,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -136,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" 
compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -339,7 +353,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -518,7 +532,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 66d1657d2c..af53fb860c 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) { } switch d.compressionLevel.chain { case 0: - // level was NoCompression or ConstantCompresssion. + // level was NoCompression or ConstantCompression. d.windowEnd = 0 default: s := d.state diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 2f410d64f5..0d7b437f1c 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -298,6 +298,14 @@ const ( huffmanGenericReader ) +// flushMode tells decompressor when to return data +type flushMode uint8 + +const ( + syncFlush flushMode = iota // return data after sync flush block + partialFlush // return data after each block +) + // Decompress state. type decompressor struct { // Input source. 
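The hunks below add a functional-options constructor to the flate reader (`NewReaderOpts`, `WithPartialBlock`, `WithDict`), with `NewReader` and `NewReaderDict` becoming thin wrappers. A minimal usage sketch, assuming the API lands exactly as shown in this diff; error handling is elided for brevity:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Produce a deflate stream that is flushed but not yet closed.
	var buf bytes.Buffer
	w, _ := flate.NewWriter(&buf, flate.BestSpeed)
	w.Write([]byte("partially flushed data"))
	w.Flush() // flush pending data; the stream stays open

	// WithPartialBlock tells the reader to hand back data after each
	// decoded block instead of waiting for a sync-flush marker, so
	// partially flushed streams can be consumed incrementally.
	r := flate.NewReaderOpts(&buf, flate.WithPartialBlock())
	defer r.Close()

	out := make([]byte, 64)
	n, _ := r.Read(out)
	fmt.Printf("%s\n", out[:n])
}
```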
@@ -332,6 +340,8 @@ type decompressor struct { nb uint final bool + + flushMode flushMode } func (f *decompressor) nextBlock() { @@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() { } if n == 0 { - f.toRead = f.dict.readFlush() + if f.flushMode == syncFlush { + f.toRead = f.dict.readFlush() + } + f.finishBlock() return } @@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() { if f.dict.availRead() > 0 { f.toRead = f.dict.readFlush() } + f.err = io.EOF + } else if f.flushMode == partialFlush && f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() } + f.step = nextBlock } @@ -789,15 +806,25 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error { return nil } -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReader(r io.Reader) io.ReadCloser { +type ReaderOpt func(*decompressor) + +// WithPartialBlock tells decompressor to return after each block, +// so it can read data written with partial flush +func WithPartialBlock() ReaderOpt { + return func(f *decompressor) { + f.flushMode = partialFlush + } +} + +// WithDict initializes the reader with a preset dictionary +func WithDict(dict []byte) ReaderOpt { + return func(f *decompressor) { + f.dict.init(maxMatchOffset, dict) + } +} + +// NewReaderOpts returns new reader with provided options +func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser { fixedHuffmanDecoderInit() var f decompressor @@ -806,9 +833,26 @@ func NewReader(r io.Reader) io.ReadCloser { f.codebits = new([numCodes]int) f.step = nextBlock f.dict.init(maxMatchOffset, nil) + + for _, opt := range opts { + opt(&f) + } + return &f } +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + return NewReaderOpts(r) +} + // NewReaderDict is like NewReader but initializes the reader // with a preset dictionary. The returned Reader behaves as if // the uncompressed data stream started with the given dictionary, @@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser { // // The ReadCloser returned by NewReader also implements Resetter. func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, dict) - return &f + return NewReaderOpts(r, WithDict(dict)) } diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7ea..0c7dd4ffef 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. 
-// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b25c..0f56b02d74 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 03744fbc76..9c28840c3b 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index a4f5bf91fc..84a79fde76 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -179,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -210,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -241,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -270,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -708,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. 
var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -738,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -772,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -801,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f74..d36be7bd8c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
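Besides the mechanical `lenght` → `length` renames in these hunks, the encoder.go hunks further below split `EncodeAll` into a thin wrapper that manages the encoder pool plus an internal `encodeAll`, so the streaming path (`nextBlock` with `final == true`) can reuse its own `s.encoder` for the last short block instead of borrowing one from the pool. The public surface is unchanged; a hedged sketch of the caller-visible API, which this refactor leaves intact:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// EncodeAll still draws an encoder from the internal pool and
	// returns it when done; only the body moved into encodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	compressed := enc.EncodeAll([]byte("some payload"), nil)
	fmt.Println("compressed bytes:", len(compressed))
}
```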
@@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0fe..a79c4a527c 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -202,7 +202,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -469,6 +469,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +500,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7e5..e47af66e7c 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd8287..c59f17e07a 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 
5b06174b89..f5591fa1e8 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition diff --git a/vendor/github.com/moby/sys/capability/.codespellrc b/vendor/github.com/moby/sys/capability/.codespellrc new file mode 100644 index 0000000000..e874be5634 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/.codespellrc @@ -0,0 +1,3 @@ +[codespell] +skip = ./.git +ignore-words-list = nd diff --git a/vendor/github.com/moby/sys/capability/.golangci.yml b/vendor/github.com/moby/sys/capability/.golangci.yml new file mode 100644 index 0000000000..d775aadd6f --- /dev/null +++ b/vendor/github.com/moby/sys/capability/.golangci.yml @@ -0,0 +1,6 @@ +linters: + enable: + - unconvert + - unparam + - gofumpt + - errorlint diff --git a/vendor/github.com/moby/sys/capability/CHANGELOG.md b/vendor/github.com/moby/sys/capability/CHANGELOG.md new file mode 100644 index 0000000000..c508d03416 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/CHANGELOG.md @@ -0,0 +1,72 @@ +# Changelog +This file documents all notable changes made to this project since the initial fork +from https://github.com/syndtr/gocapability/commit/42c35b4376354fd5. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 0.2.0 - 2024-09-16 + +This is the first release after the move to a new home in +github.com/moby/sys/capability. + +### Fixed + * Fixed URLs in documentation to reflect the new home. + +## [0.1.1] - 2024-08-01 + +This is a maintenance release, fixing a few minor issues. + +### Fixed + * Fixed future kernel compatibility, for real this time. [#11] + * Fixed [LastCap] to be a function. [#12] + +## [0.1.0] - 2024-07-31 + +This is an initial release since the fork. + +### Breaking changes + + * The `CAP_LAST_CAP` variable is removed; users need to modify the code to + use [LastCap] to get the value. [#6] + * The code now requires Go >= 1.21. + +### Added + * `go.mod` and `go.sum` files. [#2] + * New [LastCap] function. [#6] + * Basic CI using GHA infra. [#8], [#9] + * README and CHANGELOG. [#10] + +### Fixed + * Fixed ambient capabilities error handling in [Apply]. [#3] + * Fixed future kernel compatibility. [#1] + * Fixed various linter warnings. 
[#4], [#7] + +### Changed + * Go build tags changed from old-style (`+build`) to new Go 1.17+ style (`go:build`). [#2] + +### Removed + * Removed support for capabilities v1 and v2. [#1] + * Removed init function so programs that use this package start faster. [#6] + * Removed `CAP_LAST_CAP` (use [LastCap] instead). [#6] + + +[Apply]: https://pkg.go.dev/github.com/moby/sys/capability#Capabilities.Apply +[LastCap]: https://pkg.go.dev/github.com/moby/sys/capability#LastCap + + +[0.1.1]: https://github.com/kolyshkin/capability/compare/v0.1.0...v0.1.1 +[0.1.0]: https://github.com/kolyshkin/capability/compare/42c35b4376354fd5...v0.1.0 + + +[#1]: https://github.com/kolyshkin/capability/pull/1 +[#2]: https://github.com/kolyshkin/capability/pull/2 +[#3]: https://github.com/kolyshkin/capability/pull/3 +[#4]: https://github.com/kolyshkin/capability/pull/4 +[#6]: https://github.com/kolyshkin/capability/pull/6 +[#7]: https://github.com/kolyshkin/capability/pull/7 +[#8]: https://github.com/kolyshkin/capability/pull/8 +[#9]: https://github.com/kolyshkin/capability/pull/9 +[#10]: https://github.com/kolyshkin/capability/pull/10 +[#11]: https://github.com/kolyshkin/capability/pull/11 +[#12]: https://github.com/kolyshkin/capability/pull/12 diff --git a/vendor/github.com/moby/sys/capability/LICENSE b/vendor/github.com/moby/sys/capability/LICENSE new file mode 100644 index 0000000000..08adcd6ecf --- /dev/null +++ b/vendor/github.com/moby/sys/capability/LICENSE @@ -0,0 +1,25 @@ +Copyright 2023 The Capability Authors. +Copyright 2013 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/moby/sys/capability/README.md b/vendor/github.com/moby/sys/capability/README.md new file mode 100644 index 0000000000..47489f9082 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/README.md @@ -0,0 +1,11 @@ +This is a fork of (apparently no longer maintained) +https://github.com/syndtr/gocapability package. It provides basic primitives to +work with [Linux capabilities][capabilities(7)]. 
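The package surface is small; here is a hedged end-to-end sketch using only APIs present in this vendor drop (`NewPid2`, `Load`, `Get`, and the `EFFECTIVE` set and `CAP_SYS_ADMIN` constant defined in enum.go below):

```go
package main

import (
	"fmt"

	"github.com/moby/sys/capability"
)

func main() {
	// NewPid2(0) targets the current process and, unlike the deprecated
	// NewPid, does not load the capability sets implicitly.
	caps, err := capability.NewPid2(0)
	if err != nil {
		panic(err)
	}
	if err := caps.Load(); err != nil {
		panic(err)
	}
	fmt.Println("effective CAP_SYS_ADMIN:",
		caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN))
}
```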
+ +[![Go Reference](https://pkg.go.dev/badge/github.com/moby/sys/capability/capability.svg)](https://pkg.go.dev/github.com/moby/sys/capability) + +## Alternatives + + * https://pkg.go.dev/kernel.org/pub/linux/libs/security/libcap/cap + +[capabilities(7)]: https://man7.org/linux/man-pages/man7/capabilities.7.html diff --git a/vendor/github.com/moby/sys/capability/capability.go b/vendor/github.com/moby/sys/capability/capability.go new file mode 100644 index 0000000000..2c46b8e06e --- /dev/null +++ b/vendor/github.com/moby/sys/capability/capability.go @@ -0,0 +1,134 @@ +// Copyright 2023 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package capability provides utilities for manipulating POSIX capabilities. +package capability + +type Capabilities interface { + // Get check whether a capability present in the given + // capabilities set. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Get(which CapType, what Cap) bool + + // Empty check whether all capability bits of the given capabilities + // set are zero. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Empty(which CapType) bool + + // Full check whether all capability bits of the given capabilities + // set are one. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Full(which CapType) bool + + // Set sets capabilities of the given capabilities sets. The + // 'which' value should be one or combination (OR'ed) of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Set(which CapType, caps ...Cap) + + // Unset unsets capabilities of the given capabilities sets. The + // 'which' value should be one or combination (OR'ed) of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Unset(which CapType, caps ...Cap) + + // Fill sets all bits of the given capabilities kind to one. The + // 'kind' value should be one or combination (OR'ed) of CAPS, + // BOUNDS or AMBS. + Fill(kind CapType) + + // Clear sets all bits of the given capabilities kind to zero. The + // 'kind' value should be one or combination (OR'ed) of CAPS, + // BOUNDS or AMBS. + Clear(kind CapType) + + // String return current capabilities state of the given capabilities + // set as string. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE BOUNDING or AMBIENT + StringCap(which CapType) string + + // String return current capabilities state as string. + String() string + + // Load load actual capabilities value. This will overwrite all + // outstanding changes. + Load() error + + // Apply apply the capabilities settings, so all changes will take + // effect. + Apply(kind CapType) error +} + +// NewPid initializes a new Capabilities object for given pid when +// it is nonzero, or for the current process if pid is 0. +// +// Deprecated: Replace with NewPid2. 
For example, replace: +// +// c, err := NewPid(0) +// if err != nil { +// return err +// } +// +// with: +// +// c, err := NewPid2(0) +// if err != nil { +// return err +// } +// err = c.Load() +// if err != nil { +// return err +// } +func NewPid(pid int) (Capabilities, error) { + c, err := newPid(pid) + if err != nil { + return c, err + } + err = c.Load() + return c, err +} + +// NewPid2 initializes a new Capabilities object for given pid when +// it is nonzero, or for the current process if pid is 0. This +// does not load the process's current capabilities; to do that you +// must call Load explicitly. +func NewPid2(pid int) (Capabilities, error) { + return newPid(pid) +} + +// NewFile initializes a new Capabilities object for given file path. +// +// Deprecated: Replace with NewFile2. For example, replace: +// +// c, err := NewFile(path) +// if err != nil { +// return err +// } +// +// with: +// +// c, err := NewFile2(path) +// if err != nil { +// return err +// } +// err = c.Load() +// if err != nil { +// return err +// } +func NewFile(path string) (Capabilities, error) { + c, err := newFile(path) + if err != nil { + return c, err + } + err = c.Load() + return c, err +} + +// NewFile2 creates a new initialized Capabilities object for given +// file path. This does not load the process's current capabilities; +// to do that you must call Load explicitly. +func NewFile2(path string) (Capabilities, error) { + return newFile(path) +} diff --git a/vendor/github.com/moby/sys/capability/capability_linux.go b/vendor/github.com/moby/sys/capability/capability_linux.go new file mode 100644 index 0000000000..d30b6f8e5f --- /dev/null +++ b/vendor/github.com/moby/sys/capability/capability_linux.go @@ -0,0 +1,546 @@ +// Copyright 2023 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package capability + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + "syscall" +) + +const ( + linuxCapVer1 = 0x19980330 // No longer supported. + linuxCapVer2 = 0x20071026 // No longer supported. + linuxCapVer3 = 0x20080522 +) + +// LastCap returns highest valid capability of the running kernel. 
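+//
+// A hedged usage sketch (illustrative; not part of the upstream file):
+//
+//	last, err := LastCap()
+//	if err != nil {
+//		return err
+//	}
+//	_ = last // e.g. CAP_CHECKPOINT_RESTORE (40) on kernels >= 5.9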
+func LastCap() (Cap, error) { + return lastCap() +} + +var lastCap = sync.OnceValues(func() (Cap, error) { + f, err := os.Open("/proc/sys/kernel/cap_last_cap") + if err != nil { + return 0, err + } + + buf := make([]byte, 11) + l, err := f.Read(buf) + f.Close() + if err != nil { + return 0, err + } + buf = buf[:l] + + last, err := strconv.Atoi(strings.TrimSpace(string(buf))) + if err != nil { + return 0, err + } + return Cap(last), nil +}) + +func capUpperMask() uint32 { + last, err := lastCap() + if err != nil || last < 32 { + return 0 + } + return (uint32(1) << (uint(last) - 31)) - 1 +} + +func mkStringCap(c Capabilities, which CapType) (ret string) { + last, err := lastCap() + if err != nil { + return "" + } + for i, first := Cap(0), true; i <= last; i++ { + if !c.Get(which, i) { + continue + } + if first { + first = false + } else { + ret += ", " + } + ret += i.String() + } + return +} + +func mkString(c Capabilities, max CapType) (ret string) { + ret = "{" + for i := CapType(1); i <= max; i <<= 1 { + ret += " " + i.String() + "=\"" + if c.Empty(i) { + ret += "empty" + } else if c.Full(i) { + ret += "full" + } else { + ret += c.StringCap(i) + } + ret += "\"" + } + ret += " }" + return +} + +var capVersion = sync.OnceValues(func() (uint32, error) { + var hdr capHeader + err := capget(&hdr, nil) + return hdr.version, err +}) + +func newPid(pid int) (c Capabilities, retErr error) { + ver, err := capVersion() + if err != nil { + retErr = fmt.Errorf("unable to get capability version from the kernel: %w", err) + return + } + switch ver { + case linuxCapVer1, linuxCapVer2: + retErr = errors.New("old/unsupported capability version (kernel older than 2.6.26?)") + default: + // Either linuxCapVer3, or an unknown/future version (such as v4). + // In the latter case, we fall back to v3 as the latest version known + // to this package, as kernel should be backward-compatible to v3. 
+ p := new(capsV3) + p.hdr.version = linuxCapVer3 + p.hdr.pid = int32(pid) + c = p + } + return +} + +type capsV3 struct { + hdr capHeader + data [2]capData + bounds [2]uint32 + ambient [2]uint32 +} + +func (c *capsV3) Get(which CapType, what Cap) bool { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + switch which { + case EFFECTIVE: + return (1< 31 { + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data[i].effective |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data[i].permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data[i].inheritable |= 1 << uint(what) + } + if which&BOUNDING != 0 { + c.bounds[i] |= 1 << uint(what) + } + if which&AMBIENT != 0 { + c.ambient[i] |= 1 << uint(what) + } + } +} + +func (c *capsV3) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data[i].effective &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data[i].permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data[i].inheritable &= ^(1 << uint(what)) + } + if which&BOUNDING != 0 { + c.bounds[i] &= ^(1 << uint(what)) + } + if which&AMBIENT != 0 { + c.ambient[i] &= ^(1 << uint(what)) + } + } +} + +func (c *capsV3) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data[0].effective = 0xffffffff + c.data[0].permitted = 0xffffffff + c.data[0].inheritable = 0 + c.data[1].effective = 0xffffffff + c.data[1].permitted = 0xffffffff + c.data[1].inheritable = 0 + } + + if kind&BOUNDS == BOUNDS { + c.bounds[0] = 0xffffffff + c.bounds[1] = 0xffffffff + } + if kind&AMBS == AMBS { + c.ambient[0] = 0xffffffff + c.ambient[1] = 0xffffffff + } +} + +func (c *capsV3) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data[0].effective = 0 + c.data[0].permitted = 0 + c.data[0].inheritable = 0 + c.data[1].effective = 0 + c.data[1].permitted = 0 + c.data[1].inheritable = 0 + } + + if kind&BOUNDS == BOUNDS { + c.bounds[0] = 0 + c.bounds[1] = 0 + } + if kind&AMBS == AMBS { + c.ambient[0] = 0 + c.ambient[1] = 0 + } +} + +func (c *capsV3) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsV3) String() (ret string) { + return mkString(c, BOUNDING) +} + +func (c *capsV3) Load() (err error) { + err = capget(&c.hdr, &c.data[0]) + if err != nil { + return + } + + path := "/proc/self/status" + if c.hdr.pid != 0 { + path = fmt.Sprintf("/proc/%d/status", c.hdr.pid) + } + + f, err := os.Open(path) + if err != nil { + return + } + b := bufio.NewReader(f) + for { + line, e := b.ReadString('\n') + if e != nil { + if e != io.EOF { + err = e + } + break + } + if strings.HasPrefix(line, "CapB") { + _, err = fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0]) + if err != nil { + break + } + continue + } + if strings.HasPrefix(line, "CapA") { + _, err = fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0]) + if err != nil { + break + } + continue + } + } + f.Close() + + return +} + +func (c *capsV3) Apply(kind CapType) (err error) { + last, err := LastCap() + if err != nil { + return err + } + if kind&BOUNDS == BOUNDS { + var data [2]capData + err = capget(&c.hdr, &data[0]) + if err != nil { + return + } + if (1< 31 { + if c.data.version == 1 { + return false + } + i = uint(what) >> 5 + what %= 32 + } + + switch which { + case EFFECTIVE: + return (1< 31 { + if c.data.version == 1 { + continue + } + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + 
c.data.effective[i] |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data.data[i].permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data.data[i].inheritable |= 1 << uint(what) + } + } +} + +func (c *capsFile) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + if c.data.version == 1 { + continue + } + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data.effective[i] &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data.data[i].permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data.data[i].inheritable &= ^(1 << uint(what)) + } + } +} + +func (c *capsFile) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective[0] = 0xffffffff + c.data.data[0].permitted = 0xffffffff + c.data.data[0].inheritable = 0 + if c.data.version == 2 { + c.data.effective[1] = 0xffffffff + c.data.data[1].permitted = 0xffffffff + c.data.data[1].inheritable = 0 + } + } +} + +func (c *capsFile) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective[0] = 0 + c.data.data[0].permitted = 0 + c.data.data[0].inheritable = 0 + if c.data.version == 2 { + c.data.effective[1] = 0 + c.data.data[1].permitted = 0 + c.data.data[1].inheritable = 0 + } + } +} + +func (c *capsFile) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsFile) String() (ret string) { + return mkString(c, INHERITABLE) +} + +func (c *capsFile) Load() (err error) { + return getVfsCap(c.path, &c.data) +} + +func (c *capsFile) Apply(kind CapType) (err error) { + if kind&CAPS == CAPS { + return setVfsCap(c.path, &c.data) + } + return +} diff --git a/vendor/github.com/moby/sys/capability/capability_noop.go b/vendor/github.com/moby/sys/capability/capability_noop.go new file mode 100644 index 0000000000..2e836fbcea --- /dev/null +++ b/vendor/github.com/moby/sys/capability/capability_noop.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux + +package capability + +import "errors" + +func newPid(pid int) (Capabilities, error) { + return nil, errors.New("not supported") +} + +func newFile(path string) (Capabilities, error) { + return nil, errors.New("not supported") +} diff --git a/vendor/github.com/moby/sys/capability/enum.go b/vendor/github.com/moby/sys/capability/enum.go new file mode 100644 index 0000000000..bbbc84dbaf --- /dev/null +++ b/vendor/github.com/moby/sys/capability/enum.go @@ -0,0 +1,303 @@ +// Copyright 2024 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package capability + +type CapType uint + +func (c CapType) String() string { + switch c { + case EFFECTIVE: + return "effective" + case PERMITTED: + return "permitted" + case INHERITABLE: + return "inheritable" + case BOUNDING: + return "bounding" + case CAPS: + return "caps" + case AMBIENT: + return "ambient" + } + return "unknown" +} + +const ( + EFFECTIVE CapType = 1 << iota + PERMITTED + INHERITABLE + BOUNDING + AMBIENT + + CAPS = EFFECTIVE | PERMITTED | INHERITABLE + BOUNDS = BOUNDING + AMBS = AMBIENT +) + +//go:generate go run enumgen/gen.go +type Cap int + +// POSIX-draft defined capabilities and Linux extensions. 
+// +// Defined in https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h +const ( + // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this + // overrides the restriction of changing file ownership and group + // ownership. + CAP_CHOWN = Cap(0) + + // Override all DAC access, including ACL execute access if + // [_POSIX_ACL] is defined. Excluding DAC access covered by + // CAP_LINUX_IMMUTABLE. + CAP_DAC_OVERRIDE = Cap(1) + + // Overrides all DAC restrictions regarding read and search on files + // and directories, including ACL restrictions if [_POSIX_ACL] is + // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE. + CAP_DAC_READ_SEARCH = Cap(2) + + // Overrides all restrictions about allowed operations on files, where + // file owner ID must be equal to the user ID, except where CAP_FSETID + // is applicable. It doesn't override MAC and DAC restrictions. + CAP_FOWNER = Cap(3) + + // Overrides the following restrictions that the effective user ID + // shall match the file owner ID when setting the S_ISUID and S_ISGID + // bits on that file; that the effective group ID (or one of the + // supplementary group IDs) shall match the file owner ID when setting + // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are + // cleared on successful return from chown(2) (not implemented). + CAP_FSETID = Cap(4) + + // Overrides the restriction that the real or effective user ID of a + // process sending a signal must match the real or effective user ID + // of the process receiving the signal. + CAP_KILL = Cap(5) + + // Allows setgid(2) manipulation + // Allows setgroups(2) + // Allows forged gids on socket credentials passing. + CAP_SETGID = Cap(6) + + // Allows set*uid(2) manipulation (including fsuid). + // Allows forged pids on socket credentials passing. 
+ CAP_SETUID = Cap(7) + + // Linux-specific capabilities + + // Without VFS support for capabilities: + // Transfer any capability in your permitted set to any pid, + // remove any capability in your permitted set from any pid + // With VFS support for capabilities (neither of above, but) + // Add any capability from current's capability bounding set + // to the current process' inheritable set + // Allow taking bits out of capability bounding set + // Allow modification of the securebits for a process + CAP_SETPCAP = Cap(8) + + // Allow modification of S_IMMUTABLE and S_APPEND file attributes + CAP_LINUX_IMMUTABLE = Cap(9) + + // Allows binding to TCP/UDP sockets below 1024 + // Allows binding to ATM VCIs below 32 + CAP_NET_BIND_SERVICE = Cap(10) + + // Allow broadcasting, listen to multicast + CAP_NET_BROADCAST = Cap(11) + + // Allow interface configuration + // Allow administration of IP firewall, masquerading and accounting + // Allow setting debug option on sockets + // Allow modification of routing tables + // Allow setting arbitrary process / process group ownership on + // sockets + // Allow binding to any address for transparent proxying (also via NET_RAW) + // Allow setting TOS (type of service) + // Allow setting promiscuous mode + // Allow clearing driver statistics + // Allow multicasting + // Allow read/write of device-specific registers + // Allow activation of ATM control sockets + CAP_NET_ADMIN = Cap(12) + + // Allow use of RAW sockets + // Allow use of PACKET sockets + // Allow binding to any address for transparent proxying (also via NET_ADMIN) + CAP_NET_RAW = Cap(13) + + // Allow locking of shared memory segments + // Allow mlock and mlockall (which doesn't really have anything to do + // with IPC) + CAP_IPC_LOCK = Cap(14) + + // Override IPC ownership checks + CAP_IPC_OWNER = Cap(15) + + // Insert and remove kernel modules - modify kernel without limit + CAP_SYS_MODULE = Cap(16) + + // Allow ioperm/iopl access + // Allow sending USB messages to any device via /proc/bus/usb + CAP_SYS_RAWIO = Cap(17) + + // Allow use of chroot() + CAP_SYS_CHROOT = Cap(18) + + // Allow ptrace() of any process + CAP_SYS_PTRACE = Cap(19) + + // Allow configuration of process accounting + CAP_SYS_PACCT = Cap(20) + + // Allow configuration of the secure attention key + // Allow administration of the random device + // Allow examination and configuration of disk quotas + // Allow setting the domainname + // Allow setting the hostname + // Allow calling bdflush() + // Allow mount() and umount(), setting up new smb connection + // Allow some autofs root ioctls + // Allow nfsservctl + // Allow VM86_REQUEST_IRQ + // Allow to read/write pci config on alpha + // Allow irix_prctl on mips (setstacksize) + // Allow flushing all cache on m68k (sys_cacheflush) + // Allow removing semaphores + // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores + // and shared memory + // Allow locking/unlocking of shared memory segment + // Allow turning swap on/off + // Allow forged pids on socket credentials passing + // Allow setting readahead and flushing buffers on block devices + // Allow setting geometry in floppy driver + // Allow turning DMA on/off in xd driver + // Allow administration of md devices (mostly the above, but some + // extra ioctls) + // Allow tuning the ide driver + // Allow access to the nvram device + // Allow administration of apm_bios, serial and bttv (TV) device + // Allow manufacturer commands in isdn CAPI support driver + // Allow reading non-standardized portions of pci 
configuration space + // Allow DDI debug ioctl on sbpcd driver + // Allow setting up serial ports + // Allow sending raw qic-117 commands + // Allow enabling/disabling tagged queuing on SCSI controllers and sending + // arbitrary SCSI commands + // Allow setting encryption key on loopback filesystem + // Allow setting zone reclaim policy + // Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility + CAP_SYS_ADMIN = Cap(21) + + // Allow use of reboot() + CAP_SYS_BOOT = Cap(22) + + // Allow raising priority and setting priority on other (different + // UID) processes + // Allow use of FIFO and round-robin (realtime) scheduling on own + // processes and setting the scheduling algorithm used by another + // process. + // Allow setting cpu affinity on other processes + CAP_SYS_NICE = Cap(23) + + // Override resource limits. Set resource limits. + // Override quota limits. + // Override reserved space on ext2 filesystem + // Modify data journaling mode on ext3 filesystem (uses journaling + // resources) + // NOTE: ext2 honors fsuid when checking for resource overrides, so + // you can override using fsuid too + // Override size restrictions on IPC message queues + // Allow more than 64hz interrupts from the real-time clock + // Override max number of consoles on console allocation + // Override max number of keymaps + // Control memory reclaim behavior + CAP_SYS_RESOURCE = Cap(24) + + // Allow manipulation of system clock + // Allow irix_stime on mips + // Allow setting the real-time clock + CAP_SYS_TIME = Cap(25) + + // Allow configuration of tty devices + // Allow vhangup() of tty + CAP_SYS_TTY_CONFIG = Cap(26) + + // Allow the privileged aspects of mknod() + CAP_MKNOD = Cap(27) + + // Allow taking of leases on files + CAP_LEASE = Cap(28) + + CAP_AUDIT_WRITE = Cap(29) + CAP_AUDIT_CONTROL = Cap(30) + CAP_SETFCAP = Cap(31) + + // Override MAC access. + // The base kernel enforces no MAC policy. + // An LSM may enforce a MAC policy, and if it does and it chooses + // to implement capability based overrides of that policy, this is + // the capability it should use to do so. + CAP_MAC_OVERRIDE = Cap(32) + + // Allow MAC configuration or state changes. + // The base kernel requires no MAC configuration. + // An LSM may enforce a MAC policy, and if it does and it chooses + // to implement capability based checks on modifications to that + // policy or the data required to maintain it, this is the + // capability it should use to do so. 
+ CAP_MAC_ADMIN = Cap(33) + + // Allow configuring the kernel's syslog (printk behaviour) + CAP_SYSLOG = Cap(34) + + // Allow triggering something that will wake the system + CAP_WAKE_ALARM = Cap(35) + + // Allow preventing system suspends + CAP_BLOCK_SUSPEND = Cap(36) + + // Allow reading the audit log via multicast netlink socket + CAP_AUDIT_READ = Cap(37) + + // Allow system performance and observability privileged operations + // using perf_events, i915_perf and other kernel subsystems + CAP_PERFMON = Cap(38) + + // CAP_BPF allows the following BPF operations: + // - Creating all types of BPF maps + // - Advanced verifier features + // - Indirect variable access + // - Bounded loops + // - BPF to BPF function calls + // - Scalar precision tracking + // - Larger complexity limits + // - Dead code elimination + // - And potentially other features + // - Loading BPF Type Format (BTF) data + // - Retrieve xlated and JITed code of BPF programs + // - Use bpf_spin_lock() helper + // + // CAP_PERFMON relaxes the verifier checks further: + // - BPF progs can use of pointer-to-integer conversions + // - speculation attack hardening measures are bypassed + // - bpf_probe_read to read arbitrary kernel memory is allowed + // - bpf_trace_printk to print kernel memory is allowed + // + // CAP_SYS_ADMIN is required to use bpf_probe_write_user. + // + // CAP_SYS_ADMIN is required to iterate system wide loaded + // programs, maps, links, BTFs and convert their IDs to file descriptors. + // + // CAP_PERFMON and CAP_BPF are required to load tracing programs. + // CAP_NET_ADMIN and CAP_BPF are required to load networking programs. + CAP_BPF = Cap(39) + + // Allow checkpoint/restore related operations. + // Introduced in kernel 5.9 + CAP_CHECKPOINT_RESTORE = Cap(40) +) diff --git a/vendor/github.com/moby/sys/capability/enum_gen.go b/vendor/github.com/moby/sys/capability/enum_gen.go new file mode 100644 index 0000000000..2ff9bf4d88 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/enum_gen.go @@ -0,0 +1,138 @@ +// generated file; DO NOT EDIT - use go generate in directory with source + +package capability + +func (c Cap) String() string { + switch c { + case CAP_CHOWN: + return "chown" + case CAP_DAC_OVERRIDE: + return "dac_override" + case CAP_DAC_READ_SEARCH: + return "dac_read_search" + case CAP_FOWNER: + return "fowner" + case CAP_FSETID: + return "fsetid" + case CAP_KILL: + return "kill" + case CAP_SETGID: + return "setgid" + case CAP_SETUID: + return "setuid" + case CAP_SETPCAP: + return "setpcap" + case CAP_LINUX_IMMUTABLE: + return "linux_immutable" + case CAP_NET_BIND_SERVICE: + return "net_bind_service" + case CAP_NET_BROADCAST: + return "net_broadcast" + case CAP_NET_ADMIN: + return "net_admin" + case CAP_NET_RAW: + return "net_raw" + case CAP_IPC_LOCK: + return "ipc_lock" + case CAP_IPC_OWNER: + return "ipc_owner" + case CAP_SYS_MODULE: + return "sys_module" + case CAP_SYS_RAWIO: + return "sys_rawio" + case CAP_SYS_CHROOT: + return "sys_chroot" + case CAP_SYS_PTRACE: + return "sys_ptrace" + case CAP_SYS_PACCT: + return "sys_pacct" + case CAP_SYS_ADMIN: + return "sys_admin" + case CAP_SYS_BOOT: + return "sys_boot" + case CAP_SYS_NICE: + return "sys_nice" + case CAP_SYS_RESOURCE: + return "sys_resource" + case CAP_SYS_TIME: + return "sys_time" + case CAP_SYS_TTY_CONFIG: + return "sys_tty_config" + case CAP_MKNOD: + return "mknod" + case CAP_LEASE: + return "lease" + case CAP_AUDIT_WRITE: + return "audit_write" + case CAP_AUDIT_CONTROL: + return "audit_control" + case CAP_SETFCAP: + return 
"setfcap" + case CAP_MAC_OVERRIDE: + return "mac_override" + case CAP_MAC_ADMIN: + return "mac_admin" + case CAP_SYSLOG: + return "syslog" + case CAP_WAKE_ALARM: + return "wake_alarm" + case CAP_BLOCK_SUSPEND: + return "block_suspend" + case CAP_AUDIT_READ: + return "audit_read" + case CAP_PERFMON: + return "perfmon" + case CAP_BPF: + return "bpf" + case CAP_CHECKPOINT_RESTORE: + return "checkpoint_restore" + } + return "unknown" +} + +// List returns list of all supported capabilities +func List() []Cap { + return []Cap{ + CAP_CHOWN, + CAP_DAC_OVERRIDE, + CAP_DAC_READ_SEARCH, + CAP_FOWNER, + CAP_FSETID, + CAP_KILL, + CAP_SETGID, + CAP_SETUID, + CAP_SETPCAP, + CAP_LINUX_IMMUTABLE, + CAP_NET_BIND_SERVICE, + CAP_NET_BROADCAST, + CAP_NET_ADMIN, + CAP_NET_RAW, + CAP_IPC_LOCK, + CAP_IPC_OWNER, + CAP_SYS_MODULE, + CAP_SYS_RAWIO, + CAP_SYS_CHROOT, + CAP_SYS_PTRACE, + CAP_SYS_PACCT, + CAP_SYS_ADMIN, + CAP_SYS_BOOT, + CAP_SYS_NICE, + CAP_SYS_RESOURCE, + CAP_SYS_TIME, + CAP_SYS_TTY_CONFIG, + CAP_MKNOD, + CAP_LEASE, + CAP_AUDIT_WRITE, + CAP_AUDIT_CONTROL, + CAP_SETFCAP, + CAP_MAC_OVERRIDE, + CAP_MAC_ADMIN, + CAP_SYSLOG, + CAP_WAKE_ALARM, + CAP_BLOCK_SUSPEND, + CAP_AUDIT_READ, + CAP_PERFMON, + CAP_BPF, + CAP_CHECKPOINT_RESTORE, + } +} diff --git a/vendor/github.com/moby/sys/capability/syscall_linux.go b/vendor/github.com/moby/sys/capability/syscall_linux.go new file mode 100644 index 0000000000..d6b6932a94 --- /dev/null +++ b/vendor/github.com/moby/sys/capability/syscall_linux.go @@ -0,0 +1,153 @@ +// Copyright 2024 The Capability Authors. +// Copyright 2013 Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package capability + +import ( + "syscall" + "unsafe" +) + +type capHeader struct { + version uint32 + pid int32 +} + +type capData struct { + effective uint32 + permitted uint32 + inheritable uint32 +} + +func capget(hdr *capHeader, data *capData) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = e1 + } + return +} + +func capset(hdr *capHeader, data *capData) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = e1 + } + return +} + +// not yet in syscall +const ( + pr_CAP_AMBIENT = 47 + pr_CAP_AMBIENT_IS_SET = uintptr(1) + pr_CAP_AMBIENT_RAISE = uintptr(2) + pr_CAP_AMBIENT_LOWER = uintptr(3) + pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4) +) + +func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { + _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) + if e1 != 0 { + err = e1 + } + return +} + +const ( + vfsXattrName = "security.capability" + + vfsCapVerMask = 0xff000000 + vfsCapVer1 = 0x01000000 + vfsCapVer2 = 0x02000000 + + vfsCapFlagMask = ^vfsCapVerMask + vfsCapFlageffective = 0x000001 + + vfscapDataSizeV1 = 4 * (1 + 2*1) + vfscapDataSizeV2 = 4 * (1 + 2*2) +) + +type vfscapData struct { + magic uint32 + data [2]struct { + permitted uint32 + inheritable uint32 + } + effective [2]uint32 + version int8 +} + +var _vfsXattrName *byte + +func init() { + _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName) +} + +func getVfsCap(path string, dest *vfscapData) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0) + if e1 != 0 { + if e1 == syscall.ENODATA { + dest.version = 2 + return + } + err = e1 + } + switch dest.magic & vfsCapVerMask { + case vfsCapVer1: + dest.version = 1 + if r0 != vfscapDataSizeV1 { + return syscall.EINVAL + } + dest.data[1].permitted = 0 + dest.data[1].inheritable = 0 + case vfsCapVer2: + dest.version = 2 + if r0 != vfscapDataSizeV2 { + return syscall.EINVAL + } + default: + return syscall.EINVAL + } + if dest.magic&vfsCapFlageffective != 0 { + dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable + dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable + } else { + dest.effective[0] = 0 + dest.effective[1] = 0 + } + return +} + +func setVfsCap(path string, data *vfscapData) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var size uintptr + if data.version == 1 { + data.magic = vfsCapVer1 + size = vfscapDataSizeV1 + } else if data.version == 2 { + data.magic = vfsCapVer2 + if data.effective[0] != 0 || data.effective[1] != 0 { + data.magic |= vfsCapFlageffective + } + size = vfscapDataSizeV2 + } else { + return syscall.EINVAL + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/modules.txt b/vendor/modules.txt index bf5999fee4..1cadeac949 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -355,8 +355,8 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483 -## explicit; go 1.21 +# github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0 +## explicit; go 1.22.0 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs @@ -718,8 +718,8 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.17.9 -## explicit; go 1.20 +# github.com/klauspost/compress v1.17.10 +## explicit; go 1.21 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -806,6 +806,9 @@ github.com/moby/docker-image-spec/specs-go/v1 # github.com/moby/patternmatcher v0.6.0 ## explicit; go 1.19 github.com/moby/patternmatcher +# github.com/moby/sys/capability v0.2.0 +## explicit; go 1.21 +github.com/moby/sys/capability # github.com/moby/sys/mountinfo v0.7.2 ## explicit; go 1.17 github.com/moby/sys/mountinfo From 7f29233a3fd868fa637b65fd4a94481be77d6015 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Tue, 24 Sep 2024 15:22:18 +0200 Subject: [PATCH 2/2] vendor: update containers/image Signed-off-by: Giuseppe Scrivano --- go.mod | 26 +-- go.sum | 104 +++++----- vendor/dario.cat/mergo/.gitignore | 3 + vendor/dario.cat/mergo/README.md | 102 +++++----- vendor/dario.cat/mergo/map.go | 2 +- vendor/dario.cat/mergo/merge.go | 2 +- .../containers/image/v5/copy/compression.go | 120 +++++++----- .../containers/image/v5/copy/copy.go | 34 ++-- .../containers/image/v5/copy/progress_bars.go | 13 +- .../containers/image/v5/copy/sign.go | 2 +- .../containers/image/v5/copy/single.go | 36 ++-- .../containers/image/v5/docker/body_reader.go | 4 +- 
.../image/v5/docker/daemon/client.go | 2 + .../image/v5/docker/docker_image.go | 6 + .../image/v5/docker/docker_image_dest.go | 22 ++- .../image/v5/docker/docker_image_src.go | 73 ++++--- .../image/v5/internal/blobinfocache/types.go | 21 +- .../v5/internal/imagedestination/wrapper.go | 3 + .../internal/manifest/docker_schema2_list.go | 5 +- .../image/v5/internal/manifest/manifest.go | 5 - .../image/v5/internal/manifest/oci_index.go | 5 +- .../internal/pkg/platform/platform_matcher.go | 4 +- .../image/v5/internal/private/private.go | 5 + .../image/v5/manifest/docker_schema1.go | 6 +- .../containers/image/v5/manifest/oci.go | 2 +- .../image/v5/oci/layout/oci_delete.go | 121 +++--------- .../image/v5/openshift/openshift-copies.go | 6 +- .../internal/prioritize/prioritize.go | 72 ++++--- .../v5/pkg/blobinfocache/memory/memory.go | 38 +++- .../v5/pkg/blobinfocache/sqlite/sqlite.go | 129 ++++++++---- .../image/v5/signature/fulcio_cert.go | 4 +- .../image/v5/signature/internal/rekor_set.go | 36 ++-- .../v5/signature/internal/sigstore_payload.go | 71 +++++-- .../v5/signature/policy_config_sigstore.go | 133 +++++++++++-- .../v5/signature/policy_eval_signedby.go | 38 ++-- .../v5/signature/policy_eval_sigstore.go | 184 ++++++++++++------ .../v5/signature/policy_reference_match.go | 2 +- .../image/v5/signature/policy_types.go | 29 ++- .../containers/image/v5/signature/simple.go | 10 +- .../image/v5/storage/storage_dest.go | 8 +- .../image/v5/storage/storage_reference.go | 2 +- .../image/v5/storage/storage_src.go | 6 +- .../image/v5/tarball/tarball_src.go | 6 +- .../image/v5/tarball/tarball_transport.go | 4 +- .../github.com/coreos/go-oidc/v3/oidc/jwks.go | 16 +- .../coreos/go-oidc/v3/oidc/verify.go | 4 +- .../go-jose/go-jose/v4/CHANGELOG.md | 24 +++ .../github.com/go-jose/go-jose/v4/crypter.go | 10 +- vendor/github.com/go-jose/go-jose/v4/jwk.go | 21 +- .../github.com/go-jose/go-jose/v4/opaque.go | 3 + .../github.com/go-jose/go-jose/v4/signing.go | 10 +- .../letsencrypt/boulder/core/objects.go | 93 +++------ .../letsencrypt/boulder/core/util.go | 4 +- .../letsencrypt/boulder/goodkey/good_key.go | 129 +++++++----- .../fulcio/pkg/certificate/extensions.go | 38 ++-- .../sigstore/pkg/cryptoutils/publickey.go | 70 +++---- .../sigstore/sigstore/pkg/oauthflow/device.go | 3 +- .../sigstore/sigstore/pkg/oauthflow/flow.go | 30 ++- .../sylabs/sif/v2/pkg/sif/create.go | 40 +++- vendor/golang.org/x/oauth2/token.go | 7 + vendor/golang.org/x/time/LICENSE | 4 +- vendor/modules.txt | 36 ++-- 62 files changed, 1239 insertions(+), 809 deletions(-) diff --git a/go.mod b/go.mod index 9d6eba0048..2be9faf61e 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/containers/podman/v5 // Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates -go 1.22.0 +go 1.22.6 require ( github.com/BurntSushi/toml v1.4.0 @@ -16,7 +16,7 @@ require ( github.com/containers/common v0.60.1-0.20240920125326-ff6611ae40ad github.com/containers/conmon v2.0.20+incompatible github.com/containers/gvisor-tap-vsock v0.7.5 - github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6 + github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46 github.com/containers/libhvee v0.7.1 github.com/containers/ocicrypt v1.2.0 github.com/containers/psgo v1.9.0 @@ -86,7 +86,7 @@ require ( ) require ( - dario.cat/mergo v1.0.0 // indirect + dario.cat/mergo v1.0.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Microsoft/hcsshim v0.12.6 // indirect 
github.com/VividCortex/ewma v1.2.0 // indirect @@ -107,7 +107,7 @@ require ( github.com/containernetworking/cni v1.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c // indirect - github.com/coreos/go-oidc/v3 v3.10.0 // indirect + github.com/coreos/go-oidc/v3 v3.11.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -122,7 +122,7 @@ require ( github.com/gin-contrib/sse v0.1.0 // indirect github.com/gin-gonic/gin v1.9.1 // indirect github.com/go-jose/go-jose/v3 v3.0.3 // indirect - github.com/go-jose/go-jose/v4 v4.0.2 // indirect + github.com/go-jose/go-jose/v4 v4.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect @@ -145,7 +145,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-containerregistry v0.20.1 // indirect + github.com/google/go-containerregistry v0.20.2 // indirect github.com/google/go-intervals v0.0.2 // indirect github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -158,7 +158,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kr/fs v0.1.0 // indirect github.com/leodido/go-urn v1.2.4 // indirect - github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect + github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect @@ -192,12 +192,12 @@ require ( github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/sigstore/fulcio v1.4.5 // indirect + github.com/sigstore/fulcio v1.6.4 // indirect github.com/sigstore/rekor v1.3.6 // indirect - github.com/sigstore/sigstore v1.8.4 // indirect + github.com/sigstore/sigstore v1.8.9 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect - github.com/sylabs/sif/v2 v2.18.0 // indirect + github.com/sylabs/sif/v2 v2.19.1 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect @@ -218,10 +218,10 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/arch v0.7.0 // indirect golang.org/x/mod v0.20.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.24.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect google.golang.org/grpc v1.65.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index a3f89b3b48..84f979bdc6 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= @@ -87,8 +87,8 @@ github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6J github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/gvisor-tap-vsock v0.7.5 h1:bTy4u3DOmmUPwurL6me2rsgfypAFDhyeJleUcQmBR/E= github.com/containers/gvisor-tap-vsock v0.7.5/go.mod h1:GW9jOqAEEGdaS20XwTYdm6KCYDHIulOE/yEEOabkoE4= -github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6 h1:nXEEUAo8l2HLlMBy+LsHju2AikpA30jvlTSHbnjJXVw= -github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6/go.mod h1:r//zsX8SjmVH0F87d+gakcgR4W5HTFGSgSLB4sufW6A= +github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46 h1:eIwxm8+oAoTk+PDuOTbZRFG1DBF5tAlFO+niIamyzaM= +github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46/go.mod h1:GgaW+YZJaJmcGtyPZNtsggfM4BBYIMfu/fFK62ZKU0o= github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4= github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -103,8 +103,8 @@ github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0 h1:0NNBYNpPF github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0/go.mod h1:Gx8WE9kURdCyEuB9cq8Kq5sRDRbpZi34lnOQ3zAGK2s= github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE= github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8= -github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= -github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= +github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= +github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc= @@ -134,8 +134,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod 
h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE= -github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= +github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= @@ -173,8 +173,8 @@ github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= -github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= -github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -211,12 +211,12 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ4E5T9gDA0AIH74= github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/go-rod/rod v0.116.0 h1:ypRryjTys3EnqHskJ/TdgodFMvXV0EHvmy4bSkKZgHM= -github.com/go-rod/rod v0.116.0/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw= +github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= +github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -252,8 +252,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0= -github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= +github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= +github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -275,8 +275,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -326,8 +326,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= -github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I= -github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 h1:DZMFueDbfz6PNc1GwDRA8+6lBx1TB9UnxDQliCqR73Y= github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2/go.mod h1:SWzULI85WerrFt3u+nIm5F9l7EvxZTKQvd0InF3nmgM= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= @@ -384,6 +384,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.11 
h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= @@ -425,25 +427,25 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= -github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= +github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY= +github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rootless-containers/rootlesskit/v2 v2.3.1 h1:wdYtdKxWFvVLby9ThMP6O6/v2q/GmOXbkRi+4m9nPW0= github.com/rootless-containers/rootlesskit/v2 v2.3.1/go.mod h1:tdtfS9ak4bGmwJRmcjsAzcHN5rJ3c5dB7yhSV10KTbk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= +github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY= +github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= @@ -458,12 +460,12 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu 
v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= -github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY= +github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs= github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w= -github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg= +github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= +github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= @@ -488,8 +490,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI= -github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo= +github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0= +github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= @@ -530,12 +532,12 @@ github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s= -github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= -github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless 
v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -554,8 +556,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIX go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= @@ -564,8 +566,8 @@ go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBq go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= @@ -610,8 +612,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -669,8 +671,8 @@ golang.org/x/text 
v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -693,11 +695,11 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore index 529c3412ba..45ad0f1ae3 100644 --- a/vendor/dario.cat/mergo/.gitignore +++ b/vendor/dario.cat/mergo/.gitignore @@ -13,6 +13,9 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out +# Golang/Intellij +.idea + # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 .glide/ diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md index 7d0cf9f32a..0b3c488893 100644 --- a/vendor/dario.cat/mergo/README.md +++ b/vendor/dario.cat/mergo/README.md @@ -44,13 +44,21 @@ Also a lovely 
[comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](https://github.com/imdario/mergo#mergo-in-the-wild). + +No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases. ### Important notes #### 1.0.0 -In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released. + +If the vanity URL is causing issues in your project due to a dependency pulling Mergo - it isn't a direct dependency in your project - it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL: + +``` +replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16 +``` #### 0.3.9 @@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: -Buy Me a Coffee at ko-fi.com Donate using Liberapay Become my sponsor ### Mergo in the wild -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- 
[jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) +Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including: + +* [containerd/containerd](https://github.com/containerd/containerd) +* [datadog/datadog-agent](https://github.com/datadog/datadog-agent) +* [docker/cli/](https://github.com/docker/cli/) +* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +* [go-micro/go-micro](https://github.com/go-micro/go-micro) +* [grafana/loki](https://github.com/grafana/loki) +* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +* [masterminds/sprig](https://github.com/Masterminds/sprig) +* [moby/moby](https://github.com/moby/moby) +* [slackhq/nebula](https://github.com/slackhq/nebula) +* [volcano-sh/volcano](https://github.com/volcano-sh/volcano) ## Install @@ -141,6 +118,39 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { } ``` +If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`: + +```go +package main + +import ( + "fmt" + + "dario.cat/mergo" +) + +type Foo struct { + A *string + B int64 +} + +func main() { + first := "first" + second := "second" + src := Foo{ + A: &first, + B: 2, + } + + dest := Foo{ + A: &second, + B: 1, + } + + mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference) +} +``` + Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. 
```go diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go index b50d5c2a4e..759b4f74fd 100644 --- a/vendor/dario.cat/mergo/map.go +++ b/vendor/dario.cat/mergo/map.go @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { + if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue { dstMap[fieldName] = src.Field(i).Interface() } } diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go index 0ef9b2138c..fd47c95b2b 100644 --- a/vendor/dario.cat/mergo/merge.go +++ b/vendor/dario.cat/mergo/merge.go @@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } - } else { + } else if src.Elem().Kind() != reflect.Struct { if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { dst.Set(src) } diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index 081c49312f..fb5e1b174e 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -52,6 +52,16 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI } stream.reader = reader + if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName { + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return bpDetectCompressionStepData{}, err + } + if tocDigest != nil { + format = compression.ZstdChunked + } + + } res := bpDetectCompressionStepData{ isCompressed: decompressor != nil, format: format, @@ -71,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI // bpCompressionStepData contains data that the copy pipeline needs about the compression step. type bpCompressionStepData struct { - operation bpcOperation // What we are actually doing - uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) - uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. - uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. - srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob. - uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob. - closers []io.Closer // Objects to close after the upload is done, if any. + operation bpcOperation // What we are actually doing + uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) + uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. + uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. 
WARNING: This is only set after the srcStream.reader is fully consumed. + srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob. + uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob. + uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob. + closers []io.Closer // Objects to close after the upload is done, if any. } type bpcOperation int @@ -129,11 +140,12 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp // We can’t do anything with an encrypted blob unless decrypted. logrus.Debugf("Using original blob without modification for encrypted blob") return &bpCompressionStepData{ - operation: bpcOpPreserveOpaque, - uploadedOperation: types.PreserveOriginal, - uploadedAlgorithm: nil, - srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression, - uploadedCompressorName: internalblobinfocache.UnknownCompression, + operation: bpcOpPreserveOpaque, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, }, nil } return nil, nil @@ -157,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp Digest: "", Size: -1, } + specificVariantName := uploadedAlgorithm.Name() + if specificVariantName == uploadedAlgorithm.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } return &bpCompressionStepData{ - operation: bpcOpCompressUncompressed, - uploadedOperation: types.Compress, - uploadedAlgorithm: uploadedAlgorithm, - uploadedAnnotations: annotations, - srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, - uploadedCompressorName: uploadedAlgorithm.Name(), - closers: []io.Closer{reader}, + operation: bpcOpCompressUncompressed, + uploadedOperation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{reader}, }, nil } return nil, nil @@ -197,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp Digest: "", Size: -1, } + specificVariantName := ic.compressionFormat.Name() + if specificVariantName == ic.compressionFormat.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } succeeded = true return &bpCompressionStepData{ - operation: bpcOpRecompressCompressed, - uploadedOperation: types.PreserveOriginal, - uploadedAlgorithm: ic.compressionFormat, - uploadedAnnotations: annotations, - srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, - uploadedCompressorName: ic.compressionFormat.Name(), - closers: []io.Closer{decompressed, recompressed}, + operation: bpcOpRecompressCompressed, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: ic.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(), + 
uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{decompressed, recompressed}, }, nil } return nil, nil @@ -226,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp Size: -1, } return &bpCompressionStepData{ - operation: bpcOpDecompressCompressed, - uploadedOperation: types.Decompress, - uploadedAlgorithm: nil, - srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, - uploadedCompressorName: internalblobinfocache.Uncompressed, - closers: []io.Closer{s}, + operation: bpcOpDecompressCompressed, + uploadedOperation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + closers: []io.Closer{s}, }, nil } return nil, nil @@ -276,7 +299,8 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom // We only record the base variant of the format on upload; we didn’t do anything with // the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger // reuse of any kind between the blob digest and the TOC digest. - uploadedCompressorName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, } } @@ -336,24 +360,16 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) } } - if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorName == "" { - return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded: %q)", - d.srcCompressorBaseVariantName, d.uploadedCompressorName) + if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" { + return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)", + d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName) } - if d.uploadedCompressorName != internalblobinfocache.UnknownCompression { - if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Don’t record zstd:chunked algorithms. - // There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions, - // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless. - // - // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate - // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName - // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about - // inconsistent data to be logged. 
- c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{ - BaseVariantCompressor: d.uploadedCompressorName, - }) - } + if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.uploadedCompressorBaseVariantName, + SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName, + SpecificVariantAnnotations: d.uploadedAnnotations, + }) } if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression { @@ -361,7 +377,9 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest, // so record neither the variant name, nor the TOC digest. c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{ - BaseVariantCompressor: d.srcCompressorBaseVariantName, + BaseVariantCompressor: d.srcCompressorBaseVariantName, + SpecificVariantCompressor: internalblobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, }) } return nil diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 996a4e2d7a..867ba73c7c 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -193,35 +193,33 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, reportWriter = options.ReportWriter } + // safeClose amends retErr with an error from c.Close(), if any. + safeClose := func(name string, c io.Closer) { + err := c.Close() + if err == nil { + return + } + // Do not use %w for err as we don't want it to be unwrapped by callers. + if retErr != nil { + retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr) + } else { + retErr = fmt.Errorf(" (%s: %s)", name, err.Error()) + } + } + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) if err != nil { return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err) } dest := imagedestination.FromPublic(publicDest) - defer func() { - if err := dest.Close(); err != nil { - if retErr != nil { - retErr = fmt.Errorf(" (dest: %v): %w", err, retErr) - } else { - retErr = fmt.Errorf(" (dest: %v)", err) - } - } - }() + defer safeClose("dest", dest) publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) if err != nil { return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err) } rawSource := imagesource.FromPublic(publicRawSource) - defer func() { - if err := rawSource.Close(); err != nil { - if retErr != nil { - retErr = fmt.Errorf(" (src: %v): %w", err, retErr) - } else { - retErr = fmt.Errorf(" (src: %v)", err) - } - } - }() + defer safeClose("src", rawSource) // If reportWriter is not a TTY (e.g., when piping to a file), do not // print the progress bars to avoid long and hard to parse output. 
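The `safeClose` helper introduced in the copy.go hunk above deliberately formats the Close error with `%s` rather than `%w`, so that a caller unwrapping the returned error can only ever reach the primary copy failure, never the secondary Close failure. A minimal standalone sketch of that design choice (the `errCopy`/`errClose` names are illustrative, not part of this patch):

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errCopy  = errors.New("copy failed")  // the primary error being returned
	errClose = errors.New("close failed") // a secondary error from a deferred Close
)

func main() {
	// Mirror safeClose: the primary error is wrapped with %w, while the
	// Close error is folded in as plain text via %s, so it stays visible
	// in the message but cannot be unwrapped.
	retErr := fmt.Errorf(" (dest: %s): %w", errClose.Error(), errCopy)

	fmt.Println(errors.Is(retErr, errCopy))  // true: the primary error still matches
	fmt.Println(errors.Is(retErr, errClose)) // false: the Close error is text only
}
```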
diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index 08128ce8d8..59f41d2169 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -24,13 +24,18 @@ func (c *copier) newProgressPool() *mpb.Progress { // customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar func customPartialBlobDecorFunc(s decor.Statistics) string { + current := decor.SizeB1024(s.Current) + total := decor.SizeB1024(s.Total) + refill := decor.SizeB1024(s.Refill) if s.Total == 0 { - pairFmt := "%.1f / %.1f (skipped: %.1f)" - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) + return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill) + } + // If we didn't do a partial fetch then let's not output a distracting ("skipped: 0.0b = 0.00%") + if s.Refill == 0 { + return fmt.Sprintf("%.1f / %.1f", current, total) } - pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" percentage := 100.0 * float64(s.Refill) / float64(s.Total) - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) + return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage) } // progressBar wraps a *mpb.Bar, allowing us to add extra state and methods. diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index 0ec54ded24..7ddfe917bb 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -106,7 +106,7 @@ func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity if len(c.signers) == 1 { return nil, fmt.Errorf("creating signature: %w", err) } else { - return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err) + return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err) } } res = append(res, newSig) diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 714dc81368..324785a8bf 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "maps" "reflect" "slices" "strings" @@ -162,7 +163,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar if format == nil { format = defaultCompressionFormat } - if format.Name() == compression.ZstdChunked.Name() { + if format.Name() == compressiontypes.ZstdChunkedAlgorithmName { if ic.requireCompressionFormatMatch { return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead") } @@ -322,10 +323,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst if err != nil { return fmt.Errorf("parsing image configuration: %w", err) } - wantedPlatforms, err := platform.WantedPlatforms(sys) - if err != nil { - return fmt.Errorf("getting current platform information %#v: %w", sys, err) - } + wantedPlatforms := platform.WantedPlatforms(sys) options := newOrderedSet() match := false @@ -888,21 +886,33 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse // Handling of compression, encryption, and the related MIME types and the 
like are all the responsibility // of the generic code in this package. res := types.BlobInfo{ - Digest: reusedBlob.Digest, - Size: reusedBlob.Size, - URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. - Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls) - MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation. + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + // FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t + // (but those annotations being left with incorrect values should not break pulls). + Annotations: maps.Clone(inputInfo.Annotations), + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation. CompressionOperation: reusedBlob.CompressionOperation, CompressionAlgorithm: reusedBlob.CompressionAlgorithm, CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway. } // The transport is only expected to fill CompressionOperation and CompressionAlgorithm - // if the blob was substituted; otherwise, fill it in based + // if the blob was substituted; otherwise, it is optional, and if not set, fill it in based // on what we know from the srcInfos we were given. if reusedBlob.Digest == inputInfo.Digest { - res.CompressionOperation = inputInfo.CompressionOperation - res.CompressionAlgorithm = inputInfo.CompressionAlgorithm + if res.CompressionOperation == types.PreserveOriginal { + res.CompressionOperation = inputInfo.CompressionOperation + } + if res.CompressionAlgorithm == nil { + res.CompressionAlgorithm = inputInfo.CompressionAlgorithm + } + } + if len(reusedBlob.CompressionAnnotations) != 0 { + if res.Annotations == nil { + res.Annotations = map[string]string{} + } + maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations) } return res } diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go index 7d66ef6bc0..29d3b0420e 100644 --- a/vendor/github.com/containers/image/v5/docker/body_reader.go +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -6,7 +6,7 @@ import ( "fmt" "io" "math" - "math/rand" + "math/rand/v2" "net/http" "net/url" "strconv" @@ -158,7 +158,7 @@ func (br *bodyReader) Read(p []byte) (int, error) { logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise } br.body = nil - time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede + time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede headers := map[string][]string{ "Range": {fmt.Sprintf("bytes=%d-", br.offset)}, diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 354af2140f..64ccf6ae55 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -80,6 +80,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) { return &http.Client{ Transport: 
&http.Transport{ + Proxy: http.ProxyFromEnvironment, TLSClientConfig: tlsc, }, CheckRedirect: dockerclient.CheckRedirect, @@ -89,6 +90,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) { func httpConfig() *http.Client { return &http.Client{ Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, TLSClientConfig: nil, }, CheckRedirect: dockerclient.CheckRedirect, diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 9741afc3f0..74f559dce7 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -91,6 +91,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. } for _, tag := range tagsHolder.Tags { if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values + // Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary + // to the spec, may include JSON null values in the list; and Go silently parses them as "". + if tag == "" { + logrus.Debugf("Ignoring invalid empty tag") + continue + } // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory, // contrary to the tag format specified in // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 , diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 7f7a74bd37..ed3d4a2c0b 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -332,6 +332,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } + originalCandidateKnownToBeMissing := false if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { // First, check whether the blob happens to already exist at the destination. haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) @@ -341,9 +342,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, if haveBlob { return true, reusedInfo, nil } + originalCandidateKnownToBeMissing = true } else { logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v", optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) + // We can get here with a blob detected to be zstd when the user wants a zstd:chunked. + // In that case we keep originalCandidateKnownToBeMissing = false, so that if we find + // a BIC entry for this blob, we do use that entry and return a zstd:chunked entry + // with the BIC’s annotations. + // This is not quite correct: it only works if the BIC also contains an acceptable _location_. + // Ideally, we could look up just the compression algorithm/annotations for info.digest, + // and use it even if no location candidate exists and the original candidate is present. } // Then try reusing blobs from other locations. @@ -387,7 +396,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // for it in the current repo.
candidateRepo = reference.TrimNamed(d.ref.ref) } - if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest { + if originalCandidateKnownToBeMissing && + candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest { logrus.Debug("... Already tried the primary destination") continue } @@ -427,10 +437,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) return true, private.ReusedBlob{ - Digest: candidate.Digest, - Size: size, - CompressionOperation: candidate.CompressionOperation, - CompressionAlgorithm: candidate.CompressionAlgorithm}, nil + Digest: candidate.Digest, + Size: size, + CompressionOperation: candidate.CompressionOperation, + CompressionAlgorithm: candidate.CompressionAlgorithm, + CompressionAnnotations: candidate.CompressionAnnotations, + }, nil } return false, private.ReusedBlob{}, nil diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index c8f6ba3055..6e44ce0960 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -116,10 +116,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef // Don’t just build a string, try to preserve the typed error. primary := &attempts[len(attempts)-1] extras := []string{} - for i := 0; i < len(attempts)-1; i++ { + for _, attempt := range attempts[:len(attempts)-1] { // This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use. // The paired [] at least have some chance of being unambiguous. - extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err)) + extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err)) } return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err) } @@ -464,26 +464,20 @@ func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanc var res []signature.Signature switch { case s.c.supportsSignatures: - sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigs...) case s.c.signatureBase != nil: - sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigs...) default: return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") } - sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigstoreSigs...) return res, nil } @@ -505,35 +499,35 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest * return manifest.Digest(s.cachedManifest) } -// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, -// which is not nil. 
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, +// which is not nil, storing the signatures to *dest. +// On error, the contents of *dest are undefined. +func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } // NOTE: Keep this in sync with docs/signature-protocols.md! - signatures := []signature.Signature{} for i := 0; ; i++ { if i >= maxLookasideSignatures { - return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) + return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) } sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) if err != nil { - return nil, err + return err } signature, missing, err := s.getOneSignature(ctx, sigURL) if err != nil { - return nil, err + return err } if missing { break } - signatures = append(signatures, signature) + *dest = append(*dest, signature) } - return signatures, nil + return nil } // getOneSignature downloads one signature from sigURL, and returns (signature, false, nil) @@ -596,48 +590,51 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL } } -// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension. -func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, +// storing the signatures to *dest. +// On error, the contents of *dest are undefined. +func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest) if err != nil { - return nil, err + return err } - var sigs []signature.Signature for _, sig := range parsedBody.Signatures { if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content)) + *dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content)) } } - return sigs, nil + return nil } -func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention, +// storing the signatures to *dest. +// On error, the contents of *dest are undefined. 
+func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { if !s.c.useSigstoreAttachments { logrus.Debugf("Not looking for sigstore attachments: disabled by configuration") - return nil, nil + return nil } manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest) if err != nil { - return nil, err + return err } if ociManifest == nil { - return nil, nil + return nil } logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers)) - res := []signature.Signature{} for layerIndex, layer := range ociManifest.Layers { // Note that this copies all kinds of attachments: attestations, and whatever else is there, // not just signatures. We leave the signature consumers to decide based on the MIME type. @@ -648,11 +645,11 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize, none.NoCache) if err != nil { - return nil, err + return err } - res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) + *dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) } - return res, nil + return nil } // deleteImage deletes the named image from the registry, if supported. @@ -830,7 +827,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) handleBufferedNetworkReader(&br) }() - for i := uint(0); i < nBuffers; i++ { + for range nBuffers { b := bufferedNetworkReaderBuffer{ data: make([]byte, bufferSize), } diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go index 276c8073e3..acf82ee639 100644 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -37,8 +37,11 @@ type BlobInfoCache2 interface { // RecordDigestCompressorData records data for the blob with the specified digest. // WARNING: Only call this with LOCALLY VERIFIED data: - // - don’t record a compressor for a digest just because some remote author claims so - // (e.g. because a manifest says so); + // - don’t record a compressor for a digest just because some remote author claims so + // (e.g. because a manifest says so); + // - don’t record the non-base variant or annotations if we are not _sure_ that the base variant + // and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them + // in a manifest) // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) @@ -52,6 +55,9 @@ type BlobInfoCache2 interface { // (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.) type DigestCompressorData struct { BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression. 
+ // The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression: + SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant) + SpecificVariantAnnotations map[string]string // Annotations required to benefit from the specific variant. } // CandidateLocations2Options are used in CandidateLocations2. @@ -66,9 +72,10 @@ type CandidateLocations2Options struct { // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed - CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed - UnknownLocation bool // is true when `Location` for this blob is not set - Location types.BICLocationReference // not set if UnknownLocation is set to `true` + Digest digest.Digest + CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + CompressionAnnotations map[string]string // Annotations necessary to use CompressionAlgorithm, if any + UnknownLocation bool // is true when `Location` for this blob is not set + Location types.BICLocationReference // not set if UnknownLocation is set to `true` } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index cdd3c5e5d0..f5a38541ae 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -76,6 +76,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob Size: blob.Size, CompressionOperation: blob.CompressionOperation, CompressionAlgorithm: blob.CompressionAlgorithm, + // CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated + // annotations, and we didn’t use the blob.Annotations field previously, so we’ll + // continue not using it. }, nil } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index f847fa9cc8..07922ceceb 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -152,10 +152,7 @@ func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemCont // ChooseInstance parses blob as a schema2 manifest list, and returns the digest // of the image which is appropriate for the current environment.
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } + wantedPlatforms := platform.WantedPlatforms(ctx) for _, wantedPlatform := range wantedPlatforms { for _, d := range list.Manifests { imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform) diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go index ee0ddc772a..3fb52104a6 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -205,11 +205,6 @@ type ReuseConditions struct { // (which can be nil to represent uncompressed or unknown) matches reuseConditions. func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool { if c.RequiredCompression != nil { - if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. - // The caller must re-compress to build those annotations. - return false - } if candidateCompression == nil || (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { return false diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index fe78efaebe..6a0f88d3a6 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -236,10 +236,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi if preferGzip == types.OptionalBoolTrue { didPreferGzip = true } - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } + wantedPlatforms := platform.WantedPlatforms(ctx) var bestMatch *instanceCandidate bestMatch = nil for manifestIndex, d := range index.Manifests { diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index afdce1d3d9..3a16dad637 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -153,7 +153,7 @@ var compatibility = map[string][]string{ // WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user, // the most compatible platform is first. // If some option (arch, os, variant) is not present, a value from current platform is detected. -func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { +func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform { // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all. // The fields are not specified by the OCI specification, as of version 1.1, usefully enough // to be interoperable, anyway. 
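[Illustration, not vendored code: the WantedPlatforms change above drops an error return that callers could only wrap and propagate, so every call site loses an error branch, as in the ChooseInstance hunks above. A minimal sketch of the resulting pattern; chooseDigest is a hypothetical helper, and the imports assume code living inside containers/image, since internal/pkg/platform is module-internal.]

import (
	"fmt"

	"github.com/containers/image/v5/internal/pkg/platform"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// chooseDigest is a hypothetical helper showing the simplified call-site
// pattern: WantedPlatforms no longer returns an error, so the platform loop
// can start immediately; MatchesPlatform is the package's existing matcher.
func chooseDigest(ctx *types.SystemContext, manifests []imgspecv1.Descriptor) (digest.Digest, error) {
	for _, wanted := range platform.WantedPlatforms(ctx) { // most compatible platform first
		for _, d := range manifests {
			if d.Platform != nil && platform.MatchesPlatform(*d.Platform, wanted) {
				return d.Digest, nil
			}
		}
	}
	return "", fmt.Errorf("no image found matching the wanted platforms")
}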
@@ -211,7 +211,7 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { Variant: v, }) } - return res, nil + return res } // MatchesPlatform returns true if a platform descriptor from a multi-arch image matches diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 63fb9326de..d81ea6703e 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -134,9 +134,14 @@ type ReusedBlob struct { Size int64 // Must be provided // The following compression fields should be set when the reuse substitutes // a differently-compressed blob. + // They may be set also to change from a base variant to a specific variant of an algorithm. CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A + // Annotations that should be added, for CompressionAlgorithm. Note that they might need to be + // added even if the digest doesn’t change (if we found the annotations in a cache). + CompressionAnnotations map[string]string + MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index 222aa896ee..b74a1e240d 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -318,20 +318,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { // Add the history and rootfs information. rootfs, err := json.Marshal(rootFS) if err != nil { - return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err) } rawRootfs := json.RawMessage(rootfs) raw["rootfs"] = &rawRootfs history, err := json.Marshal(convertedHistory) if err != nil { - return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err) + return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err) } rawHistory := json.RawMessage(history) raw["history"] = &rawHistory // Encode the result. 
config, err = json.Marshal(raw) if err != nil { - return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err) + return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err) } return config, nil } diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index f714574ee9..0faa866b7f 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -60,7 +60,7 @@ func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) { if err := json.Unmarshal(manifestBlob, &oci1); err != nil { return nil, err } - if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex, + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest, manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go index bcf257df67..08366a7e24 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go @@ -27,17 +27,8 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex return err } - var blobsUsedByImage map[digest.Digest]int - - switch descriptor.MediaType { - case imgspecv1.MediaTypeImageManifest: - blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir) - case imgspecv1.MediaTypeImageIndex: - blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir) - default: - return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) - } - if err != nil { + blobsUsedByImage := make(map[digest.Digest]int) + if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil { return err } @@ -54,82 +45,48 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex return ref.deleteReferenceFromIndex(descriptorIndex) } -func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { - manifest, err := ref.getManifest(descriptor, sharedBlobsDir) - if err != nil { - return nil, err - } - blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest) - blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference - - return blobsUsedInManifest, nil -} - -func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { +// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself. 
+func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error { blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) if err != nil { - return nil, err - } - index, err := parseIndex(blobPath) - if err != nil { - return nil, err + return err } - blobsUsedInImageRefIndex := make(map[digest.Digest]int) - err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir) - if err != nil { - return nil, err + dest[descriptor.Digest]++ + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + manifest, err := parseJSON[imgspecv1.Manifest](blobPath) + if err != nil { + return err + } + dest[manifest.Config.Digest]++ + for _, layer := range manifest.Layers { + dest[layer.Digest]++ + } + case imgspecv1.MediaTypeImageIndex: + index, err := parseIndex(blobPath) + if err != nil { + return err + } + if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil { + return err + } + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) } - blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference - - return blobsUsedInImageRefIndex, nil + return nil } -// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map -func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error { +// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself. +func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error { for _, descriptor := range index.Manifests { - destination[descriptor.Digest]++ - switch descriptor.MediaType { - case imgspecv1.MediaTypeImageManifest: - manifest, err := ref.getManifest(&descriptor, sharedBlobsDir) - if err != nil { - return err - } - for digest, count := range ref.getBlobsUsedInManifest(manifest) { - destination[digest] += count - } - case imgspecv1.MediaTypeImageIndex: - blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) - if err != nil { - return err - } - index, err := parseIndex(blobPath) - if err != nil { - return err - } - err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir) - if err != nil { - return err - } - default: - return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil { + return err } } - return nil } -func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int { - blobsUsedInManifest := make(map[digest.Digest]int, 0) - - blobsUsedInManifest[manifest.Config.Digest]++ - for _, layer := range manifest.Layers { - blobsUsedInManifest[layer.Digest]++ - } - - return blobsUsedInManifest -} - // This takes in a map of the digest and their usage count in the manifest to be deleted // It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) { @@ -138,7 +95,7 @@ func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[diges return nil, err } blobsUsedInRootIndex := make(map[digest.Digest]int) - err = 
ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir) + err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir) if err != nil { return nil, err } @@ -224,17 +181,3 @@ func saveJSON(path string, content any) error { return json.NewEncoder(file).Encode(content) } - -func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) { - manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) - if err != nil { - return nil, err - } - - manifest, err := parseJSON[imgspecv1.Manifest](manifestPath) - if err != nil { - return nil, err - } - - return manifest, nil -} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index fff586bee6..cef3dcccfb 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -365,7 +365,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err if len(clusterInfo.CertificateAuthority) != 0 { err := validateFileIsReadable(clusterInfo.CertificateAuthority) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err)) } } @@ -403,13 +403,13 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { if len(authInfo.ClientCertificate) != 0 { err := validateFileIsReadable(authInfo.ClientCertificate) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err)) } } if len(authInfo.ClientKey) != 0 { err := validateFileIsReadable(authInfo.ClientKey) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err)) } } } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 03548209f9..d73aafbdb1 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -28,9 +28,10 @@ const replacementUnknownLocationAttempts = 2 // CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest, // which can be later combined with information about a location. 
type CandidateTemplate struct { - digest digest.Digest - compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed - compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + digest digest.Digest + compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + compressionAnnotations map[string]string // Annotations necessary to use compressionAlgorithm, if any } // CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable @@ -40,7 +41,7 @@ type CandidateTemplate struct { // if not nil, the call is assumed to be CandidateLocations2. func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate { if v2Options == nil { - return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm values are not used. + return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used. digest: digest, } } @@ -60,14 +61,40 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation return nil } return &CandidateTemplate{ - digest: digest, - compressionOperation: types.Decompress, - compressionAlgorithm: nil, + digest: digest, + compressionOperation: types.Decompress, + compressionAlgorithm: nil, + compressionAnnotations: nil, } case blobinfocache.UnknownCompression: logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String()) return nil // Not allowed with CandidateLocations2 default: + // See if we can use the specific variant, first. + if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor) + if err != nil { + logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v", + data.SpecificVariantCompressor, digest.String(), err) + } else { + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v", + data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + } else { + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: data.SpecificVariantAnnotations, + } + } + } + } + + // Try the base variant.
algo, err := compression.AlgorithmByName(data.BaseVariantCompressor) if err != nil { logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v", @@ -83,9 +110,10 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation return nil } return &CandidateTemplate{ - digest: digest, - compressionOperation: types.Compress, - compressionAlgorithm: &algo, + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: nil, } } } @@ -100,11 +128,12 @@ type CandidateWithTime struct { func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime { return CandidateWithTime{ candidate: blobinfocache.BICReplacementCandidate2{ - Digest: template.digest, - CompressionOperation: template.compressionOperation, - CompressionAlgorithm: template.compressionAlgorithm, - UnknownLocation: false, - Location: location, + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: false, + Location: location, }, lastSeen: lastSeen, } @@ -114,11 +143,12 @@ func (template CandidateTemplate) CandidateWithLocation(location types.BICLocati func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime { return CandidateWithTime{ candidate: blobinfocache.BICReplacementCandidate2{ - Digest: template.digest, - CompressionOperation: template.compressionOperation, - CompressionAlgorithm: template.compressionAlgorithm, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, }, lastSeen: time.Time{}, } @@ -170,8 +200,6 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the // number of entries to limit for known and unknown location separately, only to make testing simpler. -// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original -// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix. func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { // split unknown candidates and known candidates // and limit them separately. 
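[Illustration, not vendored code: the default branch added to CandidateTemplateWithCompression above implements a two-step preference, specific variant first, base variant as the fallback. A condensed sketch; pickCompressor is hypothetical, acceptable stands in for the manifest.CandidateCompressionMatchesReuseConditions check, and logging is omitted.]

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/pkg/compression"
)

// pickCompressor distills the selection order: prefer the specific variant
// (its annotations travel with it); otherwise fall back to the base variant.
func pickCompressor(data blobinfocache.DigestCompressorData, acceptable func(*compression.Algorithm) bool) (*compression.Algorithm, map[string]string, bool) {
	if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
		if a, err := compression.AlgorithmByName(data.SpecificVariantCompressor); err == nil && acceptable(&a) {
			return &a, data.SpecificVariantAnnotations, true
		}
	}
	a, err := compression.AlgorithmByName(data.BaseVariantCompressor)
	if err != nil || !acceptable(&a) {
		return nil, nil, false // no usable candidate
	}
	return &a, nil, true
}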
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 067c6b7e11..9d4125d664 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -28,7 +28,7 @@ type cache struct { uncompressedDigestsByTOC map[digest.Digest]digest.Digest digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference - compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest + compressors map[digest.Digest]blobinfocache.DigestCompressorData // stores compression data for each digest; BaseVariantCompressor != UnknownCompression } // New returns a BlobInfoCache implementation which is in-memory only. @@ -49,7 +49,7 @@ func new2() *cache { uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{}, digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{}, knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, - compressors: map[digest.Digest]string{}, + compressors: map[digest.Digest]blobinfocache.DigestCompressorData{}, } } @@ -148,20 +148,36 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type // WARNING: Only call this with LOCALLY VERIFIED data: // - don’t record a compressor for a digest just because some remote author claims so // (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) // // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { mem.mutex.Lock() defer mem.mutex.Unlock() - if previous, ok := mem.compressors[anyDigest]; ok && previous != data.BaseVariantCompressor { - logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor) + if previous, ok := mem.compressors[anyDigest]; ok { + if previous.BaseVariantCompressor != data.BaseVariantCompressor { + logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor) + } else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != data.SpecificVariantCompressor { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor) + } + // We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic. + + // Preserve specific variant information if the incoming data does not have it. 
+ if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != blobinfocache.UnknownCompression { + data.SpecificVariantCompressor = previous.SpecificVariantCompressor + data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations + } } if data.BaseVariantCompressor == blobinfocache.UnknownCompression { delete(mem.compressors, anyDigest) return } - mem.compressors[anyDigest] = data.BaseVariantCompressor + mem.compressors[anyDigest] = data } // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory @@ -171,13 +187,15 @@ func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi // with unknown compression. func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if v, ok := mem.compressors[digest]; ok { - compressorName = v + compressionData = v } - template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressorName, - }) + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) if template == nil { return candidates } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go index 8d2bf72898..1a79310239 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go @@ -3,6 +3,7 @@ package sqlite import ( "database/sql" + "encoding/json" "errors" "fmt" "sync" @@ -303,6 +304,16 @@ func ensureDBHasCurrentSchema(db *sql.DB) error { `uncompressedDigest TEXT NOT NULL )`, }, + { + "DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors. + `CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` + + // index implied by PRIMARY KEY + `digest TEXT PRIMARY KEY NOT NULL,` + + // The compressor is not `UnknownCompression`. + `specificVariantCompressor TEXT NOT NULL, + specificVariantAnnotations BLOB NOT NULL + )`, + }, } _, err := dbTransaction(db, func(tx *sql.Tx) (void, error) { @@ -461,6 +472,9 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type // WARNING: Only call this with LOCALLY VERIFIED data: // - don’t record a compressor for a digest just because some remote author claims so // (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) // // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. 
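[Illustration, not vendored code: with the schema above, a zstd:chunked layer ends up with its base variant in DigestCompressors and the chunked-specific compressor plus JSON-encoded annotations in DigestSpecificVariantCompressors; the recording implementation follows in the next hunk. A hedged sketch of the recording call; the function name and the annotation key/value are placeholders.]

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	digest "github.com/opencontainers/go-digest"
)

// recordChunkedExample is a hypothetical illustration; cache can be the
// sqlite cache above, or any other blobinfocache.BlobInfoCache2.
func recordChunkedExample(cache blobinfocache.BlobInfoCache2, layerDigest digest.Digest) {
	cache.RecordDigestCompressorData(layerDigest, blobinfocache.DigestCompressorData{
		BaseVariantCompressor:     "zstd",         // stored in the DigestCompressors table
		SpecificVariantCompressor: "zstd:chunked", // stored in DigestSpecificVariantCompressors
		SpecificVariantAnnotations: map[string]string{ // JSON-encoded into the BLOB column
			"example.annotation/toc-digest": "sha256:placeholder", // placeholder key/value
		},
	})
}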
@@ -468,21 +482,46 @@ func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String()) if err != nil { - return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest) + return void{}, fmt.Errorf("looking for compressor of %q", anyDigest) } + warned := false if gotPrevious && previous != data.BaseVariantCompressor { logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor) + warned = true } if data.BaseVariantCompressor == blobinfocache.UnknownCompression { if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil { return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err) } + if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil { + return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err) + } } else { if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)", anyDigest.String(), data.BaseVariantCompressor); err != nil { return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err) } } + + if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + if !warned { // Don’t warn twice about the same digest + prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest) + } + if found && data.SpecificVariantCompressor != prevSVC { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor) + } + } + annotations, err := json.Marshal(data.SpecificVariantAnnotations) + if err != nil { + return void{}, err + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)", + anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil { + return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err) + } + } return void{}, nil }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } @@ -493,19 +532,32 @@ func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi // with unknown compression. 
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) { - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if v2Options != nil { - compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String()) - if err != nil { - return nil, fmt.Errorf("scanning compressorName: %w", err) - } - if found { - compressorName = compressor + var baseVariantCompressor string + var specificVariantCompressor sql.NullString + var annotationBytes []byte + switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+ + "FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()). + Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); { + case errors.Is(err, sql.ErrNoRows): // Do nothing + case err != nil: + return nil, fmt.Errorf("scanning compressor data: %w", err) + default: + compressionData.BaseVariantCompressor = baseVariantCompressor + if specificVariantCompressor.Valid && annotationBytes != nil { + compressionData.SpecificVariantCompressor = specificVariantCompressor.String + if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil { + return nil, err + } + } } } - template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressorName, - }) + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) if template == nil { return candidates, nil } @@ -561,40 +613,41 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types if err != nil { return nil, err } - - // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. - // (In the extreme, we could turn _everything_ this function does into a single query. - // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) - // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. - rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) - if err != nil { - return nil, fmt.Errorf("querying for other digests: %w", err) - } - defer rows.Close() - for rows.Next() { - var otherDigestString string - if err := rows.Scan(&otherDigestString); err != nil { - return nil, fmt.Errorf("scanning other digest: %w", err) - } - otherDigest, err := digest.Parse(otherDigestString) + if uncompressedDigest != "" { + // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. + // (In the extreme, we could turn _everything_ this function does into a single query. + // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) + // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. 
+ rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) if err != nil { - return nil, err + return nil, fmt.Errorf("querying for other digests: %w", err) } - if otherDigest != primaryDigest && otherDigest != uncompressedDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options) + defer rows.Close() + for rows.Next() { + var otherDigestString string + if err := rows.Scan(&otherDigestString); err != nil { + return nil, fmt.Errorf("scanning other digest: %w", err) + } + otherDigest, err := digest.Parse(otherDigestString) if err != nil { return nil, err } + if otherDigest != primaryDigest && otherDigest != uncompressedDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options) + if err != nil { + return nil, err + } + } + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through other digests: %w", err) } - } - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("iterating through other digests: %w", err) - } - if uncompressedDigest != primaryDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options) - if err != nil { - return nil, err + if uncompressedDigest != primaryDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options) + if err != nil { + return nil, err + } } } } diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go index 4e99864226..31dfdd3422 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -195,10 +195,10 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, return untrustedCertificate.PublicKey, nil } -func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, untrustedPayloadBytes []byte) (crypto.PublicKey, error) { - rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes, + rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes, untrustedBase64Signature, untrustedPayloadBytes) if err != nil { return nil, err diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index e79c91cf99..bddaca690b 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -40,17 +40,20 @@ type UntrustedRekorPayload struct { // A compile-time check that UntrustedRekorSET implements json.Unmarshaler var _ json.Unmarshaler = (*UntrustedRekorSET)(nil) -// UnmarshalJSON implements the json.Unmarshaler interface -func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { - err := s.strictUnmarshalJSON(data) - if err != nil { - if formatErr, ok := err.(JSONFormatError); ok { - err = NewInvalidSignatureError(formatErr.Error()) - } +// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError. 
+// All other errors are returned as is. +func JSONFormatToInvalidSignatureError(err error) error { + if formatErr, ok := err.(JSONFormatError); ok { + err = NewInvalidSignatureError(formatErr.Error()) } return err } +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { + return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) +} + // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error { @@ -77,13 +80,7 @@ var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil) // UnmarshalJSON implements the json.Unmarshaler interface func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error { - err := p.strictUnmarshalJSON(data) - if err != nil { - if formatErr, ok := err.(JSONFormatError); ok { - err = NewInvalidSignatureError(formatErr.Error()) - } - } - return err + return JSONFormatToInvalidSignatureError(p.strictUnmarshalJSON(data)) } // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. @@ -113,7 +110,7 @@ func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) { // VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. // Returns bundle upload time on success. -func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { +func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { // FIXME: Should the publicKey parameter hard-code ecdsa? 
// == Parse SET bytes @@ -130,7 +127,14 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err)) } untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes) - if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) { + publicKeyMatched := false + for _, pk := range publicKeys { + if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) { + publicKeyMatched = true + break + } + } + if !publicKeyMatched { return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed") } diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go index a2609c954b..90a81dc1c4 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + "strings" "time" "github.com/containers/image/v5/version" @@ -79,13 +80,7 @@ var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil) // UnmarshalJSON implements the json.Unmarshaler interface func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error { - err := s.strictUnmarshalJSON(data) - if err != nil { - if formatErr, ok := err.(JSONFormatError); ok { - err = NewInvalidSignatureError(formatErr.Error()) - } - } - return err + return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) } // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. @@ -126,7 +121,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { if gotTimestamp { intTimestamp := int64(timestamp) if float64(intTimestamp) != timestamp { - return NewInvalidSignatureError("Field optional.timestamp is not is not an integer") + return NewInvalidSignatureError("Field optional.timestamp is not an integer") } s.untrustedTimestamp = &intTimestamp } @@ -171,24 +166,62 @@ type SigstorePayloadAcceptanceRules struct { ValidateSignedDockerManifestDigest func(digest.Digest) error } -// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components +// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created +// by any of the public keys in publicKeys. +// +// This is an internal implementation detail of VerifySigstorePayload and should have no other callers. +// It is INSUFFICIENT alone to consider the signature acceptable. +func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error { + if len(publicKeys) == 0 { + return errors.New("Need at least one public key to verify the sigstore payload, but got 0") + } + + verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys)) + for _, key := range publicKeys { + // Failing to load a verifier indicates that something is really, really + // invalid about the public key; prefer to fail even if the signature might be + // valid with other keys, so that users fix their fallback keys before they need them. + // For that reason, we even initialize all verifiers before trying to validate the signature + // with any key.
+ verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHarcodedHashAlgorithm) + if err != nil { + return err + } + verifiers = append(verifiers, verifier) + } + + var failures []string + for _, verifier := range verifiers { + // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. + err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)) + if err == nil { + return nil + } + + failures = append(failures, err.Error()) + } + + if len(failures) == 0 { + // Coverage: We have checked there is at least one public key, any success causes an early return, + // and any failure adds an entry to failures => there must be at least one error. + return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded") + } + return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", ")) +} + +// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal components // match expected values, both as specified by rules, and returns it. // We return an *UntrustedSigstorePayload, although nothing actually uses it, // just to double-check against stupid typos. -func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) { - verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm) - if err != nil { - return nil, fmt.Errorf("creating verifier: %w", err) - } - +func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) { unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature) if err != nil { return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err)) } - // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(), - // which seems to be not used by anything. So we don’t bother. 
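verifySigstorePayloadBlobSignature accepts if any candidate key verifies the blob and otherwise reports every per-key failure. A self-contained sketch of that any-of-N pattern using only the standard library's crypto/ecdsa (the helper name is hypothetical, not the vendored API):

    package main

    import (
    	"crypto/ecdsa"
    	"crypto/elliptic"
    	"crypto/rand"
    	"crypto/sha256"
    	"errors"
    	"fmt"
    	"strings"
    )

    // verifyWithAnyKey mirrors the any-of-N pattern above: accept if any candidate
    // key verifies the signature, otherwise report every per-key failure.
    func verifyWithAnyKey(keys []*ecdsa.PublicKey, digest, sig []byte) error {
    	if len(keys) == 0 {
    		return errors.New("need at least one public key, got 0")
    	}
    	var failures []string
    	for i, pk := range keys {
    		if ecdsa.VerifyASN1(pk, digest, sig) {
    			return nil
    		}
    		failures = append(failures, fmt.Sprintf("key %d did not match", i+1))
    	}
    	return errors.New("signature verification failed: " + strings.Join(failures, ", "))
    }

    func main() {
    	signer, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    	other, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    	digest := sha256.Sum256([]byte("payload"))
    	sig, _ := ecdsa.SignASN1(rand.Reader, signer, digest[:])
    	// The second key in the list matches, so verification succeeds.
    	fmt.Println(verifyWithAnyKey([]*ecdsa.PublicKey{&other.PublicKey, &signer.PublicKey}, digest[:], sig))
    }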
- if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil { - return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err)) + + if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil { + return nil, err } var unmatchedPayload UntrustedSigstorePayload diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go index beb5d0673e..965901e187 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go @@ -2,7 +2,6 @@ package signature import ( "encoding/json" - "errors" "fmt" "github.com/containers/image/v5/signature/internal" @@ -15,29 +14,57 @@ type PRSigstoreSignedOption func(*prSigstoreSigned) error func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.KeyPath != "" { - return errors.New(`"keyPath" already specified`) + return InvalidPolicyFormatError(`"keyPath" already specified`) } pr.KeyPath = keyPath return nil } } +// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.KeyPaths != nil { + return InvalidPolicyFormatError(`"keyPaths" already specified`) + } + if len(keyPaths) == 0 { + return InvalidPolicyFormatError(`"keyPaths" contains no entries`) + } + pr.KeyPaths = keyPaths + return nil + } +} + // PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned. func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.KeyData != nil { - return errors.New(`"keyData" already specified`) + return InvalidPolicyFormatError(`"keyData" already specified`) } pr.KeyData = keyData return nil } } +// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.KeyDatas != nil { + return InvalidPolicyFormatError(`"keyDatas" already specified`) + } + if len(keyDatas) == 0 { + return InvalidPolicyFormatError(`"keyDatas" contains no entries`) + } + pr.KeyDatas = keyDatas + return nil + } +} + // PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned. 
func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.Fulcio != nil { - return errors.New(`"fulcio" already specified`) + return InvalidPolicyFormatError(`"fulcio" already specified`) } pr.Fulcio = fulcio return nil @@ -48,29 +75,57 @@ func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedO func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.RekorPublicKeyPath != "" { - return errors.New(`"rekorPublicKeyPath" already specified`) + return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`) } pr.RekorPublicKeyPath = rekorPublicKeyPath return nil } } +// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublicKeyPaths []string) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.RekorPublicKeyPaths != nil { + return InvalidPolicyFormatError(`"rekorPublicKeyPaths" already specified`) + } + if len(rekorPublicKeyPaths) == 0 { + return InvalidPolicyFormatError(`"rekorPublicKeyPaths" contains no entries`) + } + pr.RekorPublicKeyPaths = rekorPublicKeyPaths + return nil + } +} + // PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned. func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.RekorPublicKeyData != nil { - return errors.New(`"rekorPublicKeyData" already specified`) + return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`) } pr.RekorPublicKeyData = rekorPublicKeyData return nil } } +// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublicKeyDatas" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublicKeyDatas [][]byte) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.RekorPublicKeyDatas != nil { + return InvalidPolicyFormatError(`"rekorPublicKeyDatas" already specified`) + } + if len(rekorPublicKeyDatas) == 0 { + return InvalidPolicyFormatError(`"rekorPublicKeyDatas" contains no entries`) + } + pr.RekorPublicKeyDatas = rekorPublicKeyDatas + return nil + } +} + // PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
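These constructors all follow the functional-options pattern: each option rejects duplicate use and empty lists at construction time, so newPRSigstoreSigned only has to count sources afterwards. A reduced standalone sketch with hypothetical names, not the vendored types:

    package main

    import (
    	"errors"
    	"fmt"
    )

    type config struct {
    	keyPaths []string
    }

    type option func(*config) error

    // withKeyPaths rejects duplicate use and empty input, like the
    // PRSigstoreSignedWith* options above.
    func withKeyPaths(paths []string) option {
    	return func(c *config) error {
    		if c.keyPaths != nil {
    			return errors.New(`"keyPaths" already specified`)
    		}
    		if len(paths) == 0 {
    			return errors.New(`"keyPaths" contains no entries`)
    		}
    		c.keyPaths = paths
    		return nil
    	}
    }

    func newConfig(opts ...option) (*config, error) {
    	c := &config{}
    	for _, o := range opts {
    		if err := o(c); err != nil {
    			return nil, err
    		}
    	}
    	return c, nil
    }

    func main() {
    	if _, err := newConfig(withKeyPaths([]string{"a.pub"}), withKeyPaths([]string{"b.pub"})); err != nil {
    		fmt.Println(err) // "keyPaths" already specified
    	}
    }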
func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.SignedIdentity != nil { - return errors.New(`"signedIdentity" already specified`) + return InvalidPolicyFormatError(`"signedIdentity" already specified`) } pr.SignedIdentity = signedIdentity return nil @@ -92,21 +147,40 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, if res.KeyPath != "" { keySources++ } + if res.KeyPaths != nil { + keySources++ + } if res.KeyData != nil { keySources++ } + if res.KeyDatas != nil { + keySources++ + } if res.Fulcio != nil { keySources++ } if keySources != 1 { - return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified") + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas and fulcio must be specified") } - if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil { - return nil, InvalidPolicyFormatError("rekorPublickeyType and rekorPublickeyData cannot be used simultaneously") + rekorSources := 0 + if res.RekorPublicKeyPath != "" { + rekorSources++ + } + if res.RekorPublicKeyPaths != nil { + rekorSources++ + } + if res.RekorPublicKeyData != nil { + rekorSources++ + } + if res.RekorPublicKeyDatas != nil { + rekorSources++ } - if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil { - return nil, InvalidPolicyFormatError("At least one of RekorPublickeyPath and RekorPublickeyData must be specified if fulcio is used") + if rekorSources > 1 { + return nil, InvalidPolicyFormatError("at most one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas can be used simultaneously") + } + if res.Fulcio != nil && rekorSources == 0 { + return nil, InvalidPolicyFormatError("at least one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas must be specified if fulcio is used") } if res.SignedIdentity == nil { @@ -144,7 +218,8 @@ var _ json.Unmarshaler = (*prSigstoreSigned)(nil) func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { *pr = prSigstoreSigned{} var tmp prSigstoreSigned - var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool + var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio bool + var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool var fulcio prSigstoreSignedFulcio var signedIdentity json.RawMessage if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { @@ -154,18 +229,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { case "keyPath": gotKeyPath = true return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths case "keyData": gotKeyData = true return &tmp.KeyData + case "keyDatas": + gotKeyDatas = true + return &tmp.KeyDatas case "fulcio": gotFulcio = true return &fulcio case "rekorPublicKeyPath": gotRekorPublicKeyPath = true return &tmp.RekorPublicKeyPath + case "rekorPublicKeyPaths": + gotRekorPublicKeyPaths = true + return &tmp.RekorPublicKeyPaths case "rekorPublicKeyData": gotRekorPublicKeyData = true return &tmp.RekorPublicKeyData + case "rekorPublicKeyDatas": + gotRekorPublicKeyDatas = true + return &tmp.RekorPublicKeyDatas case "signedIdentity": return &signedIdentity default: @@ -192,18 +279,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { if gotKeyPath { opts = append(opts,
PRSigstoreSignedWithKeyPath(tmp.KeyPath)) } + if gotKeyPaths { + opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths)) + } if gotKeyData { opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData)) } + if gotKeyDatas { + opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas)) + } if gotFulcio { opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio)) } if gotRekorPublicKeyPath { opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath)) } + if gotRekorPublicKeyPaths { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths)) + } if gotRekorPublicKeyData { opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData)) } + if gotRekorPublicKeyDatas { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas)) + } opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity)) res, err := newPRSigstoreSigned(opts...) @@ -221,7 +320,7 @@ type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption { return func(f *prSigstoreSignedFulcio) error { if f.CAPath != "" { - return errors.New(`"caPath" already specified`) + return InvalidPolicyFormatError(`"caPath" already specified`) } f.CAPath = caPath return nil @@ -232,7 +331,7 @@ func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOptio func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption { return func(f *prSigstoreSignedFulcio) error { if f.CAData != nil { - return errors.New(`"caData" already specified`) + return InvalidPolicyFormatError(`"caData" already specified`) } f.CAData = caData return nil @@ -243,7 +342,7 @@ func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOptio func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption { return func(f *prSigstoreSignedFulcio) error { if f.OIDCIssuer != "" { - return errors.New(`"oidcIssuer" already specified`) + return InvalidPolicyFormatError(`"oidcIssuer" already specified`) } f.OIDCIssuer = oidcIssuer return nil @@ -254,7 +353,7 @@ func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFul func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption { return func(f *prSigstoreSignedFulcio) error { if f.SubjectEmail != "" { - return errors.New(`"subjectEmail" already specified`) + return InvalidPolicyFormatError(`"subjectEmail" already specified`) } f.SubjectEmail = subjectEmail return nil diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index 896ca5a60d..e5c9329185 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -6,7 +6,6 @@ import ( "context" "errors" "fmt" - "os" "slices" "github.com/containers/image/v5/internal/multierr" @@ -27,33 +26,18 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva } // FIXME: move this to per-context initialization - var data [][]byte - keySources := 0 - if pr.KeyPath != "" { - keySources++ - d, err := os.ReadFile(pr.KeyPath) - if err != nil { - return sarRejected, nil, err - } - data = [][]byte{d} - } - if pr.KeyPaths != nil { - keySources++ - data = [][]byte{} - for _, path := range pr.KeyPaths { - d, err := os.ReadFile(path) - 
if err != nil { - return sarRejected, nil, err - } - data = append(data, d) - } - } - if pr.KeyData != nil { - keySources++ - data = [][]byte{pr.KeyData} + const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified` + data, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: notOneSourceErrorText, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + }) + if err != nil { + return sarRejected, nil, err } - if keySources != 1 { - return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`) + if data == nil { + return sarRejected, nil, errors.New(notOneSourceErrorText) } // FIXME: move this to per-context initialization diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go index 4851650778..9c553771cb 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "os" + "strings" "github.com/containers/image/v5/internal/multierr" "github.com/containers/image/v5/internal/private" @@ -20,37 +21,69 @@ import ( "github.com/sigstore/sigstore/pkg/cryptoutils" ) -// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set, +// configBytesSources contains configuration fields which may result in one or more []byte values +type configBytesSources struct { + inconsistencyErrorMessage string // Error to return if more than one source is set + path string // …Path: a path to a file containing the data, or "" + paths []string // …Paths: paths to files containing the data, or nil + data []byte // …Data: a single instance of the raw data, or nil + datas [][]byte // …Datas: the raw data, or nil // codespell:ignore datas +} + +// loadBytesFromConfigSources ensures at most one of the sources in src is set, // and returns the referenced data, or nil if none is set. -func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) { - switch { - case data != nil && path != "": - return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix) - case path != "": - d, err := os.ReadFile(path) +func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) { + sources := 0 + var data [][]byte // = nil + if src.path != "" { + sources++ + d, err := os.ReadFile(src.path) if err != nil { return nil, err } - return d, nil - case data != nil: - return data, nil - default: // Nothing - return nil, nil + data = [][]byte{d} + } + if src.paths != nil { + sources++ + data = [][]byte{} + for _, path := range src.paths { + d, err := os.ReadFile(path) + if err != nil { + return nil, err + } + data = append(data, d) + } } + if src.data != nil { + sources++ + data = [][]byte{src.data} + } + if src.datas != nil { // codespell:ignore datas + sources++ + data = src.datas // codespell:ignore datas + } + if sources > 1 { + return nil, errors.New(src.inconsistencyErrorMessage) + } + return data, nil } // prepareTrustRoot creates a fulcioTrustRoot from the input data. // (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
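loadBytesFromConfigSources normalizes up to four mutually exclusive sources into a [][]byte and fails only when more than one is set; callers then decide whether a nil result is acceptable. A reduced, re-typed sketch (in-memory fields only, hypothetical names) showing the counting behavior:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // sources is a reduced stand-in for configBytesSources, keeping only the
    // in-memory fields, enough to show the at-most-one-source rule.
    type sources struct {
    	data  []byte
    	datas [][]byte
    }

    func loadBytes(src sources) ([][]byte, error) {
    	n := 0
    	var out [][]byte
    	if src.data != nil {
    		n++
    		out = [][]byte{src.data}
    	}
    	if src.datas != nil {
    		n++
    		out = src.datas
    	}
    	if n > 1 {
    		return nil, errors.New(`both "data" and "datas" specified`)
    	}
    	return out, nil // nil when no source is set; callers decide if that is an error
    }

    func main() {
    	got, _ := loadBytes(sources{datas: [][]byte{[]byte("k1"), []byte("k2")}})
    	fmt.Println(len(got)) // 2
    	_, err := loadBytes(sources{data: []byte("k"), datas: [][]byte{[]byte("k")}})
    	fmt.Println(err)
    }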
func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { - caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath) + caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`, + path: f.CAPath, + data: f.CAData, + }) if err != nil { return nil, err } - if caCertBytes == nil { - return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`) + if len(caCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" and "caData"`) } certs := x509.NewCertPool() - if ok := certs.AppendCertsFromPEM(caCertBytes); !ok { + if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok { return nil, errors.New("error loading Fulcio CA certificates") } fulcio := fulcioTrustRoot{ @@ -66,24 +99,35 @@ func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { // sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy type sigstoreSignedTrustRoot struct { - publicKey crypto.PublicKey - fulcio *fulcioTrustRoot - rekorPublicKey *ecdsa.PublicKey + publicKeys []crypto.PublicKey + fulcio *fulcioTrustRoot + rekorPublicKeys []*ecdsa.PublicKey } func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) { res := sigstoreSignedTrustRoot{} - publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath) + publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + datas: pr.KeyDatas, // codespell:ignore datas + }) if err != nil { return nil, err } - if publicKeyPEM != nil { - pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM) - if err != nil { - return nil, fmt.Errorf("parsing public key: %w", err) + if publicKeyPEMs != nil { + for index, keyData := range publicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData) + if err != nil { + return nil, fmt.Errorf("parsing public key %d: %w", index+1, err) + } + res.publicKeys = append(res.publicKeys, pk) + } + if len(res.publicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`) } - res.publicKey = pk } if pr.Fulcio != nil { @@ -94,21 +138,32 @@ func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) res.fulcio = f } - rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath) + rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: more than one of "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData", "rekorPublicKeyDatas" specified`, + path: pr.RekorPublicKeyPath, + paths: pr.RekorPublicKeyPaths, + data: pr.RekorPublicKeyData, + datas: pr.RekorPublicKeyDatas, // codespell:ignore datas + }) if err != nil { return nil, err } - if rekorPublicKeyPEM != nil { - pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM) - if err != nil { - return nil, fmt.Errorf("parsing Rekor public key: %w", err) - } - pkECDSA, ok := pk.(*ecdsa.PublicKey) - if !ok { - return nil, fmt.Errorf("Rekor public key is not using ECDSA") + if rekorPublicKeyPEMs != nil { + for index, pem := range rekorPublicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem)
+ if err != nil { + return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err) + } + pkECDSA, ok := pk.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1) + } + res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA) + } + if len(res.rekorPublicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`) } - res.rekorPublicKey = pkECDSA } return &res, nil @@ -134,37 +189,51 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva } untrustedPayload := sig.UntrustedPayload() - var publicKey crypto.PublicKey + var publicKeys []crypto.PublicKey switch { - case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations. + case trustRoot.publicKeys != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations. return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified") - case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations. + case trustRoot.publicKeys == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations. return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified") - case trustRoot.publicKey != nil: - if trustRoot.rekorPublicKey != nil { + case trustRoot.publicKeys != nil: + if trustRoot.rekorPublicKeys != nil { untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work. return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) } - // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies. - // FIXME: We could just generate DER instead of the full PEM text - recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey) - if err != nil { - // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail. - // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.) - return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err) + var rekorFailures []string + for _, candidatePublicKey := range trustRoot.publicKeys { + // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies. + // FIXME: We could just generate DER instead of the full PEM text + recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey) + if err != nil { + // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail. + // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.) + return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err) + } + // We don’t care about the Rekor timestamp, just about log presence. + _, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload) + if err == nil { + publicKeys = append(publicKeys, candidatePublicKey) + break // The SET can only accept one public key entry, so if we found one, the rest either doesn’t match or is a duplicate + } + rekorFailures = append(rekorFailures, err.Error()) } - // We don’t care about the Rekor timestamp, just about log presence. 
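The loop above re-marshals each candidate key to PEM via cryptoutils.MarshalPublicKeyToPEM before retrying the Rekor SET. A stdlib-only sketch of what that marshaling amounts to, assuming the usual PKIX DER encoding wrapped in a "PUBLIC KEY" PEM block (helper name is hypothetical):

    package main

    import (
    	"crypto/ecdsa"
    	"crypto/elliptic"
    	"crypto/rand"
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    )

    // marshalPublicKeyToPEM is a stdlib-only stand-in for
    // cryptoutils.MarshalPublicKeyToPEM: DER-encode the key in PKIX form,
    // then wrap it in a "PUBLIC KEY" PEM block.
    func marshalPublicKeyToPEM(pub any) ([]byte, error) {
    	der, err := x509.MarshalPKIXPublicKey(pub)
    	if err != nil {
    		return nil, err
    	}
    	return pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der}), nil
    }

    func main() {
    	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    	pemBytes, err := marshalPublicKeyToPEM(&key.PublicKey)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(pemBytes))
    }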
- if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil { - return sarRejected, err + if len(publicKeys) == 0 { + if len(rekorFailures) == 0 { + // Coverage: We have ensured that len(trustRoot.publicKeys) != 0; when nothing succeeds, there must be at least one failure. + return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures.`) + } + return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the Rekor SET: %s", strings.Join(rekorFailures, ", "))) } + } else { + publicKeys = trustRoot.publicKeys } - publicKey = trustRoot.publicKey case trustRoot.fulcio != nil: - if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations. + if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations. return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key") } untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] @@ -179,19 +248,20 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) } - pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio, + pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio, []byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload) if err != nil { return sarRejected, err } - publicKey = pk + publicKeys = []crypto.PublicKey{pk} } - if publicKey == nil { - // Coverage: This should never happen, we have already excluded the possibility in the switch above. + if len(publicKeys) == 0 { + // Coverage: This should never happen: we ensured that trustRoot.publicKeys is non-empty if set, + // and we have already excluded the possibility in the switch above.
return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload") } - signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ + signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ ValidateSignedDockerReference: func(ref string) error { if !pr.SignedIdentity.matchesDockerReference(image, ref) { return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go index 48dbfbbde5..390957b02b 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go +++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go @@ -136,7 +136,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1) newParsedRef, err := reference.ParseNamed(newNamedRef) if err != nil { - return nil, fmt.Errorf(`error rewriting reference from %q to %q: %v`, refString, newNamedRef, err) + return nil, fmt.Errorf(`error rewriting reference from %q to %q: %w`, refString, newNamedRef, err) } return newParsedRef, nil } diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go index 96e91a0a9c..32aa1c0ad4 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_types.go +++ b/vendor/github.com/containers/image/v5/signature/policy_types.go @@ -74,7 +74,7 @@ type prSignedBy struct { // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified. KeyPath string `json:"keyPath,omitempty"` - // KeyPaths if a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified. + // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified. KeyPaths []string `json:"keyPaths,omitempty"` // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified. KeyData []byte `json:"keyData,omitempty"` @@ -111,24 +111,35 @@ type prSignedBaseLayer struct { type prSigstoreSigned struct { prCommon - // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified. + // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. KeyPath string `json:"keyPath,omitempty"` - // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified. + // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + KeyPaths []string `json:"keyPaths,omitempty"` + // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. KeyData []byte `json:"keyData,omitempty"` - // FIXME: Multiple public keys? + // KeyDatas is a set of trusted keys, base64-encoded. 
Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + KeyDatas [][]byte `json:"keyDatas,omitempty"` - // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified. + // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. // If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well. Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"` // RekorPublicKeyPath is a pathname to local file containing a public key of a Rekor server which must record acceptable signatures. - // If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional - // (and Rekor inclusion is not required if a Rekor public key is not specified). + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"` + // RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures. + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). + RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"` // RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures. - // If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional - // (and Rekor inclusion is not required if a Rekor public key is not specified). + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"` + // RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures. + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). + RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"` // SignedIdentity specifies what image identity the signature must be claiming about the image. // Defaults to "matchRepoDigestOrExact" if not specified.
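With the plural fields in place, a policy.json sigstoreSigned requirement can fan out across several trusted keys. An illustrative fragment built from the json tags declared above, printed via Go (the key paths are placeholders, not an official sample):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// Field names come from the json struct tags above; paths are placeholders.
    	requirement := map[string]any{
    		"type":                "sigstoreSigned",
    		"keyPaths":            []string{"/etc/pki/primary.pub", "/etc/pki/fallback.pub"},
    		"rekorPublicKeyPaths": []string{"/etc/pki/rekor-a.pub", "/etc/pki/rekor-b.pub"},
    		"signedIdentity":      map[string]any{"type": "matchRepoDigestOrExact"},
    	}
    	out, err := json.MarshalIndent(requirement, "", "  ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out))
    }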
diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go index 30df997d86..94a8465930 100644 --- a/vendor/github.com/containers/image/v5/signature/simple.go +++ b/vendor/github.com/containers/image/v5/signature/simple.go @@ -105,13 +105,7 @@ var _ json.Unmarshaler = (*untrustedSignature)(nil) // UnmarshalJSON implements the json.Unmarshaler interface func (s *untrustedSignature) UnmarshalJSON(data []byte) error { - err := s.strictUnmarshalJSON(data) - if err != nil { - if formatErr, ok := err.(internal.JSONFormatError); ok { - err = internal.NewInvalidSignatureError(formatErr.Error()) - } - } - return err + return internal.JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) } // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type. @@ -149,7 +143,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { if gotTimestamp { intTimestamp := int64(timestamp) if float64(intTimestamp) != timestamp { - return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer") + return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer") } s.untrustedTimestamp = &intTimestamp } diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 842a3ab068..a7a2865fc9 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -325,7 +325,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces return private.UploadedBlob{}, err } - out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ) + out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ) if err != nil { return private.UploadedBlob{}, err } @@ -337,7 +337,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces }() if out.TOCDigest == "" && out.UncompressedDigest == "" { - return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set") + return private.UploadedBlob{}, errors.New("internal error: PrepareStagedLayer succeeded with neither TOCDigest nor UncompressedDigest set") } blobDigest := srcInfo.Digest @@ -356,11 +356,11 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is // responsible for ensuring blobDigest has been validated. if out.CompressedDigest != blobDigest { - return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q", + return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q", out.CompressedDigest, blobDigest) } // So, record also information about blobDigest, that might benefit reuse. - // We trust ApplyDiffWithDiffer to validate or create both values correctly. + // We trust PrepareStagedLayer to validate or create both values correctly. 
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) } else { diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index 2a1099f679..acc4cb30e8 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -37,7 +37,7 @@ func newReference(transport storageTransport, named reference.Named, id string) } if id != "" { if err := validateImageID(id); err != nil { - return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference) + return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err.Error(), ErrInvalidReference) } } // We take a copy of the transport, which contains a pointer to the diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index 4f501fc22a..55788f8877 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "os" + "slices" "sync" "github.com/containers/image/v5/docker/reference" @@ -300,7 +301,7 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed } - physicalBlobInfos := []types.BlobInfo{} + physicalBlobInfos := []types.BlobInfo{} // Built reversed layerID := s.image.TopLayer for layerID != "" { layer, err := s.imageRef.transport.store.Layer(layerID) @@ -340,9 +341,10 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige Size: size, MediaType: uncompressedLayerType, } - physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) + physicalBlobInfos = append(physicalBlobInfos, blobInfo) layerID = layer.Parent } + slices.Reverse(physicalBlobInfos) res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) if err != nil { diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 18d4cc2d29..7d4a83bc91 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -103,7 +103,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). if _, err := io.Copy(io.Discard, reader); err != nil { - return nil, fmt.Errorf("error reading %q: %v", filename, err) + return nil, fmt.Errorf("error reading %q: %w", filename, err) } if uncompressed != nil { uncompressed.Close() @@ -152,7 +152,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System // Encode and digest the image configuration blob. configBytes, err := json.Marshal(&config) if err != nil { - return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + return nil, fmt.Errorf("error generating configuration blob for %q: %w", strings.Join(r.filenames, separator), err) } configID := digest.Canonical.FromBytes(configBytes) blobs[configID] = tarballBlob{ @@ -177,7 +177,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System // Encode the manifest. 
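The LayerInfosForCopy hunk above replaces a prepend on every iteration (which copies the whole slice each time, quadratic overall) with plain appends plus a single slices.Reverse pass. The pattern in isolation:

    package main

    import (
    	"fmt"
    	"slices"
    )

    func main() {
    	// Walking a parent chain yields layers top-first; build the slice in that
    	// order and reverse once, instead of prepending on every iteration.
    	chain := []string{"top", "middle", "base"}
    	ordered := make([]string, 0, len(chain))
    	for _, layer := range chain {
    		ordered = append(ordered, layer) // O(1) amortized per element
    	}
    	slices.Reverse(ordered) // one O(n) pass, base-first
    	fmt.Println(ordered)    // [base middle top]
    }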
manifestBytes, err := json.Marshal(&manifest) if err != nil { - return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + return nil, fmt.Errorf("error generating manifest for %q: %w", strings.Join(r.filenames, separator), err) } // Return the image. diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go index 63d835530b..b33208a51b 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -38,13 +38,13 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc if filename == "-" { stdin, err = io.ReadAll(os.Stdin) if err != nil { - return nil, fmt.Errorf("error buffering stdin: %v", err) + return nil, fmt.Errorf("error buffering stdin: %w", err) } continue } f, err := os.Open(filename) if err != nil { - return nil, fmt.Errorf("error opening %q: %v", filename, err) + return nil, fmt.Errorf("error opening %q: %w", filename, err) } f.Close() } diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go index 9a70c14328..6a846ece95 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go @@ -64,16 +64,28 @@ func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) if now == nil { now = time.Now } - return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now} + return &RemoteKeySet{ + jwksURL: jwksURL, + now: now, + // For historical reasons, this package uses contexts for configuration, not just + // cancellation. In hindsight, this was a bad idea. + // + // Attempts to reason about how cancels should work with background requests have + // largely led to confusion. Use the context here as a config bag-of-values and + // ignore the cancel function. + ctx: context.WithoutCancel(ctx), + } } // RemoteKeySet is a KeySet implementation that validates JSON web tokens against // a jwks_uri endpoint. type RemoteKeySet struct { jwksURL string - ctx context.Context now func() time.Time + // Used for configuration. Cancellation is ignored. + ctx context.Context + // guard all other fields mu sync.RWMutex diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go index 0ac58d2995..52b27b746a 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go @@ -120,8 +120,8 @@ type Config struct { } // VerifierContext returns an IDTokenVerifier that uses the provider's key set to -// verify JWTs. As opposed to Verifier, the context is used for all requests to -// the upstream JWKs endpoint. +// verify JWTs. As opposed to Verifier, the context is used to configure requests +// to the upstream JWKs endpoint. The provided context's cancellation is ignored. func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier { return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config) } diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md index 28bdd2fc08..6f717dbd86 100644 --- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md @@ -1,3 +1,27 @@ +# v4.0.4 + +## Fixed + + - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a + breaking change.
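newRemoteKeySet now strips cancellation from the configuration context with context.WithoutCancel (Go 1.21+), keeping its values while ignoring its cancel function. A minimal demonstration of that behavior:

    package main

    import (
    	"context"
    	"fmt"
    )

    type ctxKey struct{}

    func main() {
    	parent, cancel := context.WithCancel(context.WithValue(context.Background(), ctxKey{}, "config"))
    	detached := context.WithoutCancel(parent)
    	cancel() // cancels parent only
    	fmt.Println(parent.Err())             // context.Canceled
    	fmt.Println(detached.Err())           // <nil>: the detached context ignores the cancel
    	fmt.Println(detached.Value(ctxKey{})) // "config": values are preserved
    }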
See #136 / #137. + +# v4.0.3 + +## Changed + + - Allow unmarshalling JSONWebKeySets with unsupported key types (#130) + - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129) + - Dependency updates + +# v4.0.2 + +## Changed + + - Improved documentation of Verify() to note that JSONWebKeySet is a supported + argument type (#104) + - Defined exported error values for missing x5c header and unsupported elliptic + curves error cases (#117) + # v4.0.1 ## Fixed diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go index aba08424c3..d81b03b447 100644 --- a/vendor/github.com/go-jose/go-jose/v4/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - key := tryJWKS(decryptionKey, obj.Header) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return nil, err + } decrypter, err := newDecrypter(key) if err != nil { return nil, err @@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - key := tryJWKS(decryptionKey, obj.Header) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return -1, Header{}, nil, err + } decrypter, err := newDecrypter(key) if err != nil { return -1, Header{}, nil, err diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go index a565aaab27..8a52842106 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) { return key.K.bytes(), nil } -func tryJWKS(key interface{}, headers ...Header) interface{} { +var ( + // ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a + // key ID which matches one in the provided token's headers. + ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set") +) + +func tryJWKS(key interface{}, headers ...Header) (interface{}, error) { var jwks JSONWebKeySet switch jwksType := key.(type) { @@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { case JSONWebKeySet: jwks = jwksType default: - return key + // If the specified key is not a JWKS, return as is. + return key, nil } + // Determine the KID to search for from the headers. var kid string for _, header := range headers { if header.KeyID != "" { @@ -799,14 +807,17 @@ } } + // If no KID is specified in the headers, reject. if kid == "" { - return key + return nil, ErrJWKSKidNotFound } + // Find the JWK with the matching KID. If no JWK with the specified KID is + // found, reject. keys := jwks.Key(kid) if len(keys) == 0 { - return key + return nil, ErrJWKSKidNotFound } - return keys[0].Key + return keys[0].Key, nil } diff --git a/vendor/github.com/go-jose/go-jose/v4/opaque.go b/vendor/github.com/go-jose/go-jose/v4/opaque.go index 68db085ef6..429427232e 100644 --- a/vendor/github.com/go-jose/go-jose/v4/opaque.go +++ b/vendor/github.com/go-jose/go-jose/v4/opaque.go @@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig } // OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
+// +// Note: this cannot currently be implemented outside this package because of its +// unexported method. type OpaqueKeyEncrypter interface { // KeyID returns the kid KeyID() string diff --git a/vendor/github.com/go-jose/go-jose/v4/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go index 46c9a4d96f..3dec0112b6 100644 --- a/vendor/github.com/go-jose/go-jose/v4/signing.go +++ b/vendor/github.com/go-jose/go-jose/v4/signing.go @@ -390,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return err + } verifier, err := newVerifier(key) if err != nil { return err @@ -455,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return -1, Signature{}, err + } verifier, err := newVerifier(key) if err != nil { return -1, Signature{}, err diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go index 64df4a8d8c..c01f551abd 100644 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ b/vendor/github.com/letsencrypt/boulder/core/objects.go @@ -157,58 +157,44 @@ type ValidationRecord struct { UsedRSAKEX bool `json:"-"` } -func looksLikeKeyAuthorization(str string) error { - parts := strings.Split(str, ".") - if len(parts) != 2 { - return fmt.Errorf("Invalid key authorization: does not look like a key authorization") - } else if !LooksLikeAToken(parts[0]) { - return fmt.Errorf("Invalid key authorization: malformed token") - } else if !LooksLikeAToken(parts[1]) { - // Thumbprints have the same syntax as tokens in boulder - // Both are base64-encoded and 32 octets - return fmt.Errorf("Invalid key authorization: malformed key thumbprint") - } - return nil -} - // Challenge is an aggregate of all data needed for any challenges. // // Rather than define individual types for different types of // challenge, we just throw all the elements into one bucket, // together with the common metadata elements. type Challenge struct { - // The type of challenge + // Type is the type of challenge encoded in this object. Type AcmeChallenge `json:"type"` - // The status of this challenge - Status AcmeStatus `json:"status,omitempty"` + // URL is the URL to which a response can be posted. Required for all types. + URL string `json:"url,omitempty"` - // Contains the error that occurred during challenge validation, if any - Error *probs.ProblemDetails `json:"error,omitempty"` + // Status is the status of this challenge. Required for all types. + Status AcmeStatus `json:"status,omitempty"` - // A URI to which a response can be POSTed - URI string `json:"uri,omitempty"` + // Validated is the time at which the server validated the challenge. Required + // if status is valid. 
+ Validated *time.Time `json:"validated,omitempty"` - // For the V2 API the "URI" field is deprecated in favour of URL. - URL string `json:"url,omitempty"` + // Error contains the error that occurred during challenge validation, if any. + // If set, the Status must be "invalid". + Error *probs.ProblemDetails `json:"error,omitempty"` - // Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges + // Token is a random value that uniquely identifies the challenge. It is used + // by all current challenges (http-01, tls-alpn-01, and dns-01). Token string `json:"token,omitempty"` - // The expected KeyAuthorization for validation of the challenge. Populated by - // the RA prior to passing the challenge to the VA. For legacy reasons this - // field is called "ProvidedKeyAuthorization" because it was initially set by - // the content of the challenge update POST from the client. It is no longer - // set that way and should be renamed to "KeyAuthorization". - // TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`. + // ProvidedKeyAuthorization used to carry the expected key authorization from + // the RA to the VA. However, since this field is never presented to the user + // via the ACME API, it should not be on this type. + // + // Deprecated: use vapb.PerformValidationRequest.ExpectedKeyAuthorization instead. + // TODO(#7514): Remove this. ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"` // Contains information about URLs used or redirected to and IPs resolved and // used ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"` - // The time at which the server validated the challenge. Required by - // RFC8555 if status is valid. - Validated *time.Time `json:"validated,omitempty"` } // ExpectedKeyAuthorization computes the expected KeyAuthorization value for @@ -273,43 +259,18 @@ func (ch Challenge) RecordsSane() bool { return true } -// CheckConsistencyForClientOffer checks the fields of a challenge object before it is -// given to the client. -func (ch Challenge) CheckConsistencyForClientOffer() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // Before completion, the key authorization field should be empty - if ch.ProvidedKeyAuthorization != "" { - return fmt.Errorf("A response to this challenge was already submitted.") - } - return nil -} - -// CheckConsistencyForValidation checks the fields of a challenge object before it is -// given to the VA. -func (ch Challenge) CheckConsistencyForValidation() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // If the challenge is completed, then there should be a key authorization - return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization) -} - -// checkConsistency checks the sanity of a challenge object before issued to the client. -func (ch Challenge) checkConsistency() error { +// CheckPending ensures that a challenge object is pending and has a token. +// This is used before offering the challenge to the client, and before actually +// validating a challenge. 
+func (ch Challenge) CheckPending() error { if ch.Status != StatusPending { - return fmt.Errorf("The challenge is not pending.") + return fmt.Errorf("challenge is not pending") } - // There always needs to be a token - if !LooksLikeAToken(ch.Token) { - return fmt.Errorf("The token is missing.") + if !looksLikeAToken(ch.Token) { + return fmt.Errorf("token is missing or malformed") } + return nil } diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index 31f6d2fcfa..641521f169 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -76,9 +76,9 @@ func NewToken() string { var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`) -// LooksLikeAToken checks whether a string represents a 32-octet value in +// looksLikeAToken checks whether a string represents a 32-octet value in // the URL-safe base64 alphabet. -func LooksLikeAToken(token string) bool { +func looksLikeAToken(token string) bool { return tokenFormat.MatchString(token) } diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go index 087a018123..04a075d35b 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go @@ -39,6 +39,9 @@ var ( ) type Config struct { + // AllowedKeys enables or disables specific key algorithms and sizes. If + // nil, defaults to just those keys allowed by the Let's Encrypt CPS. + AllowedKeys *AllowedKeys // WeakKeyFile is the path to a JSON file containing truncated modulus hashes // of known weak RSA keys. If this config value is empty, then RSA modulus // hash checking will be disabled. @@ -54,6 +57,40 @@ type Config struct { FermatRounds int } +// AllowedKeys is a set of six specific key algorithm and size combinations, with +// booleans indicating whether keys of that type are considered good. +type AllowedKeys struct { + // Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple + // of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes + // Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which + // have a known method to easily compute their private key, such as Debian Weak + // Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at + // common key sizes, so we restrict all issuance to those common key sizes. + RSA2048 bool + RSA3072 bool + RSA4096 bool + // Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid + // points on the NIST P-256, P-384, or P-521 elliptic curves. + ECDSAP256 bool + ECDSAP384 bool + ECDSAP521 bool +} + +// LetsEncryptCPS encodes the five key algorithms and sizes allowed by the Let's +// Encrypt CPS CV-SSL Subscriber Certificate Profile: RSA 2048, RSA 3072, RSA +// 4096, ECDSA P-256 and ECDSA P-384. +// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate +// If this is ever changed, the CP/CPS MUST be changed first. +func LetsEncryptCPS() AllowedKeys { + return AllowedKeys{ + RSA2048: true, + RSA3072: true, + RSA4096: true, + ECDSAP256: true, + ECDSAP384: true, + } +} + // ErrBadKey represents an error with a key.
It is distinct from the various // ways in which an ACME request can have an erroneous key (BadPublicKeyError, // BadCSRError) because this library is used to check both JWS signing keys and @@ -74,28 +111,29 @@ type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error) // KeyPolicy determines which types of key may be used with various boulder // operations. type KeyPolicy struct { - AllowRSA bool // Whether RSA keys should be allowed. - AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed. - AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed. - weakRSAList *WeakRSAKeys - blockedList *blockedKeys - fermatRounds int - blockedCheck BlockedKeyCheckFunc + allowedKeys AllowedKeys + weakRSAList *WeakRSAKeys + blockedList *blockedKeys + fermatRounds int + blockedCheck BlockedKeyCheckFunc } -// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384. -// weakKeyFile contains the path to a JSON file containing truncated modulus -// hashes of known weak RSA keys. If this argument is empty RSA modulus hash -// checking will be disabled. blockedKeyFile contains the path to a YAML file -// containing Base64 encoded SHA256 hashes of pkix subject public keys that -// should be blocked. If this argument is empty then no blocked key checking is -// performed. -func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { +// NewPolicy returns a key policy based on the given configuration, with sane +// defaults. If the config's AllowedKeys is nil, the LetsEncryptCPS AllowedKeys +// is used. If the config's WeakKeyFile or BlockedKeyFile paths are empty, those +// checks are disabled. If the config's FermatRounds is 0, Fermat Factorization +// is disabled. +func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { + if config == nil { + config = &Config{} + } kp := KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, - blockedCheck: bkc, + blockedCheck: bkc, + } + if config.AllowedKeys == nil { + kp.allowedKeys = LetsEncryptCPS() + } else { + kp.allowedKeys = *config.AllowedKeys } if config.WeakKeyFile != "" { keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile) @@ -264,44 +302,30 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) { // Simply use a whitelist for now. params := c.Params() switch { - case policy.AllowECDSANISTP256 && params == elliptic.P256().Params(): + case policy.allowedKeys.ECDSAP256 && params == elliptic.P256().Params(): + return nil + case policy.allowedKeys.ECDSAP384 && params == elliptic.P384().Params(): return nil - case policy.AllowECDSANISTP384 && params == elliptic.P384().Params(): + case policy.allowedKeys.ECDSAP521 && params == elliptic.P521().Params(): return nil default: return badKey("ECDSA curve %v not allowed", params.Name) } } -// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple -// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes -// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which -// have a known method to easily compute their private key, such as Debian Weak -// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at -// common key sizes, so we restrict all issuance to those common key sizes. 
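NewPolicy replaces NewKeyPolicy's hardcoded RSA/P-256/P-384 allowances with the Config.AllowedKeys field, falling back to LetsEncryptCPS() when it is nil. A hypothetical caller-side sketch (not from this patch) of overriding that default:

    package main

    import (
        "context"
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"

        "github.com/letsencrypt/boulder/goodkey"
    )

    func main() {
        // Allow only ECDSA P-256; leaving AllowedKeys nil would instead
        // fall back to LetsEncryptCPS().
        cfg := &goodkey.Config{
            AllowedKeys: &goodkey.AllowedKeys{ECDSAP256: true},
        }
        policy, err := goodkey.NewPolicy(cfg, nil)
        if err != nil {
            panic(err)
        }
        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }
        fmt.Println(policy.GoodKey(context.Background(), key.Public())) // <nil>
    }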
-var acceptableRSAKeySizes = map[int]bool{ - 2048: true, - 3072: true, - 4096: true, -} - // GoodKeyRSA determines if a RSA pubkey meets our requirements -func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { - if !policy.AllowRSA { - return badKey("RSA keys are not allowed") +func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error { + modulus := key.N + + err := policy.goodRSABitLen(key) + if err != nil { + return err } + if policy.weakRSAList != nil && policy.weakRSAList.Known(key) { return badKey("key is on a known weak RSA key list") } - modulus := key.N - - // See comment on acceptableRSAKeySizes above. - modulusBitLen := modulus.BitLen() - if !acceptableRSAKeySizes[modulusBitLen] { - return badKey("key size not supported: %d", modulusBitLen) - } - // Rather than support arbitrary exponents, which significantly increases // the size of the key space we allow, we restrict E to the defacto standard // RSA exponent 65537. There is no specific standards document that specifies @@ -341,6 +365,21 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { return nil } +func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error { + // See comment on AllowedKeys above. + modulusBitLen := key.N.BitLen() + switch { + case modulusBitLen == 2048 && policy.allowedKeys.RSA2048: + return nil + case modulusBitLen == 3072 && policy.allowedKeys.RSA3072: + return nil + case modulusBitLen == 4096 && policy.allowedKeys.RSA4096: + return nil + default: + return badKey("key size not supported: %d", modulusBitLen) + } +} + // Returns true iff integer i is divisible by any of the primes in smallPrimes. // // Short circuits; execution time is dependent on i. Do not use this on secret @@ -400,7 +439,7 @@ func checkPrimeFactorsTooClose(n *big.Int, rounds int) error { b2 := new(big.Int) b2.Mul(a, a).Sub(b2, n) - for i := 0; i < rounds; i++ { + for range rounds { // To see if b2 is a perfect square, we take its square root, square that, // and check to see if we got the same result back. bb.Sqrt(b2).Mul(bb, bb) diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go index 38f80d5ae6..584aac971f 100644 --- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go +++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go @@ -69,69 +69,69 @@ type Extensions struct { // Deprecated // Triggering event of the Github Workflow. Matches the `event_name` claim of ID // tokens from Github Actions - GithubWorkflowTrigger string // OID 1.3.6.1.4.1.57264.1.2 + GithubWorkflowTrigger string `json:"GithubWorkflowTrigger,omitempty" yaml:"github-workflow-trigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2 // Deprecated // SHA of git commit being built in Github Actions. Matches the `sha` claim of ID // tokens from Github Actions - GithubWorkflowSHA string // OID 1.3.6.1.4.1.57264.1.3 + GithubWorkflowSHA string `json:"GithubWorkflowSHA,omitempty" yaml:"github-workflow-sha,omitempty"` // OID 1.3.6.1.4.1.57264.1.3 // Deprecated // Name of Github Actions Workflow. Matches the `workflow` claim of the ID // tokens from Github Actions - GithubWorkflowName string // OID 1.3.6.1.4.1.57264.1.4 + GithubWorkflowName string `json:"GithubWorkflowName,omitempty" yaml:"github-workflow-name,omitempty"` // OID 1.3.6.1.4.1.57264.1.4 // Deprecated // Repository of the Github Actions Workflow. 
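The `for range rounds` loop above (Go 1.22 range-over-int) drives Fermat's factorization check: for n = p*q with p and q close together, a = ceil(sqrt(n)) makes b2 = a*a - n a perfect square within a few increments, exposing p = a-b and q = a+b. A self-contained sketch of the same arithmetic, using hypothetical small primes rather than a real RSA modulus:

    package main

    import (
        "fmt"
        "math/big"
    )

    func fermatFactor(n *big.Int, rounds int) (*big.Int, *big.Int, bool) {
        one := big.NewInt(1)
        a := new(big.Int).Sqrt(n)
        if new(big.Int).Mul(a, a).Cmp(n) < 0 {
            a.Add(a, one) // a = ceil(sqrt(n))
        }
        b2 := new(big.Int).Mul(a, a)
        b2.Sub(b2, n) // b2 = a*a - n
        b := new(big.Int)
        for range rounds {
            b.Sqrt(b2)
            if new(big.Int).Mul(b, b).Cmp(b2) == 0 {
                // b2 is a perfect square, so n = (a-b)*(a+b).
                return new(big.Int).Sub(a, b), new(big.Int).Add(a, b), true
            }
            a.Add(a, one)
            b2.Mul(a, a)
            b2.Sub(b2, n)
        }
        return nil, nil, false
    }

    func main() {
        p := big.NewInt(1000003)
        q := big.NewInt(1000033)
        n := new(big.Int).Mul(p, q)
        fmt.Println(fermatFactor(n, 100)) // 1000003 1000033 true
    }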
Matches the `repository` claim of the ID // tokens from Github Actions - GithubWorkflowRepository string // OID 1.3.6.1.4.1.57264.1.5 + GithubWorkflowRepository string `json:"GithubWorkflowRepository,omitempty" yaml:"github-workflow-repository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5 // Deprecated // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens // from Github Actions - GithubWorkflowRef string // 1.3.6.1.4.1.57264.1.6 + GithubWorkflowRef string `json:"GithubWorkflowRef,omitempty" yaml:"github-workflow-ref,omitempty"` // 1.3.6.1.4.1.57264.1.6 // Reference to specific build instructions that are responsible for signing. - BuildSignerURI string // 1.3.6.1.4.1.57264.1.9 + BuildSignerURI string `json:"BuildSignerURI,omitempty" yaml:"build-signer-uri,omitempty"` // 1.3.6.1.4.1.57264.1.9 // Immutable reference to the specific version of the build instructions that is responsible for signing. - BuildSignerDigest string // 1.3.6.1.4.1.57264.1.10 + BuildSignerDigest string `json:"BuildSignerDigest,omitempty" yaml:"build-signer-digest,omitempty"` // 1.3.6.1.4.1.57264.1.10 // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. - RunnerEnvironment string // 1.3.6.1.4.1.57264.1.11 + RunnerEnvironment string `json:"RunnerEnvironment,omitempty" yaml:"runner-environment,omitempty"` // 1.3.6.1.4.1.57264.1.11 // Source repository URL that the build was based on. - SourceRepositoryURI string // 1.3.6.1.4.1.57264.1.12 + SourceRepositoryURI string `json:"SourceRepositoryURI,omitempty" yaml:"source-repository-uri,omitempty"` // 1.3.6.1.4.1.57264.1.12 // Immutable reference to a specific version of the source code that the build was based upon. - SourceRepositoryDigest string // 1.3.6.1.4.1.57264.1.13 + SourceRepositoryDigest string `json:"SourceRepositoryDigest,omitempty" yaml:"source-repository-digest,omitempty"` // 1.3.6.1.4.1.57264.1.13 // Source Repository Ref that the build run was based upon. - SourceRepositoryRef string // 1.3.6.1.4.1.57264.1.14 + SourceRepositoryRef string `json:"SourceRepositoryRef,omitempty" yaml:"source-repository-ref,omitempty"` // 1.3.6.1.4.1.57264.1.14 // Immutable identifier for the source repository the workflow was based upon. - SourceRepositoryIdentifier string // 1.3.6.1.4.1.57264.1.15 + SourceRepositoryIdentifier string `json:"SourceRepositoryIdentifier,omitempty" yaml:"source-repository-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.15 // Source repository owner URL of the owner of the source repository that the build was based on. - SourceRepositoryOwnerURI string // 1.3.6.1.4.1.57264.1.16 + SourceRepositoryOwnerURI string `json:"SourceRepositoryOwnerURI,omitempty" yaml:"source-repository-owner-uri,omitempty"` // 1.3.6.1.4.1.57264.1.16 // Immutable identifier for the owner of the source repository that the workflow was based upon. - SourceRepositoryOwnerIdentifier string // 1.3.6.1.4.1.57264.1.17 + SourceRepositoryOwnerIdentifier string `json:"SourceRepositoryOwnerIdentifier,omitempty" yaml:"source-repository-owner-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.17 // Build Config URL to the top-level/initiating build instructions. - BuildConfigURI string // 1.3.6.1.4.1.57264.1.18 + BuildConfigURI string `json:"BuildConfigURI,omitempty" yaml:"build-config-uri,omitempty"` // 1.3.6.1.4.1.57264.1.18 // Immutable reference to the specific version of the top-level/initiating build instructions. 
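The tags added throughout this hunk attach serialization metadata only; the extensions themselves are unchanged. A minimal illustration (two field names borrowed from the struct above, not the fulcio API itself) of what `omitempty` means for consumers rendering these extensions as JSON:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type extensions struct {
        BuildTrigger     string `json:"BuildTrigger,omitempty"`
        RunInvocationURI string `json:"RunInvocationURI,omitempty"`
    }

    func main() {
        out, err := json.Marshal(extensions{BuildTrigger: "push"})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"BuildTrigger":"push"}; unset fields are omitted
    }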
- BuildConfigDigest string // 1.3.6.1.4.1.57264.1.19 + BuildConfigDigest string `json:"BuildConfigDigest,omitempty" yaml:"build-config-digest,omitempty"` // 1.3.6.1.4.1.57264.1.19 // Event or action that initiated the build. - BuildTrigger string // 1.3.6.1.4.1.57264.1.20 + BuildTrigger string `json:"BuildTrigger,omitempty" yaml:"build-trigger,omitempty"` // 1.3.6.1.4.1.57264.1.20 // Run Invocation URL to uniquely identify the build execution. - RunInvocationURI string // 1.3.6.1.4.1.57264.1.21 + RunInvocationURI string `json:"RunInvocationURI,omitempty" yaml:"run-invocation-uri,omitempty"` // 1.3.6.1.4.1.57264.1.21 // Source repository visibility at the time of signing the certificate. - SourceRepositoryVisibilityAtSigning string // 1.3.6.1.4.1.57264.1.22 + SourceRepositoryVisibilityAtSigning string `json:"SourceRepositoryVisibilityAtSigning,omitempty" yaml:"source-repository-visibility-at-signing,omitempty"` // 1.3.6.1.4.1.57264.1.22 } func (e Extensions) Render() ([]pkix.Extension, error) { diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go index a8b2805e64..1e2fa031be 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go @@ -20,7 +20,6 @@ import ( "crypto" "crypto/ecdsa" "crypto/ed25519" - "crypto/elliptic" "crypto/rsa" "crypto/sha1" // nolint:gosec "crypto/x509" @@ -104,15 +103,15 @@ func EqualKeys(first, second crypto.PublicKey) error { switch pub := first.(type) { case *rsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "rsa")) + return errors.New(genErrMsg(first, second, "rsa")) } case *ecdsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ecdsa")) + return errors.New(genErrMsg(first, second, "ecdsa")) } case ed25519.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ed25519")) + return errors.New(genErrMsg(first, second, "ed25519")) } default: return errors.New("unsupported key type") @@ -137,47 +136,50 @@ func genErrMsg(first, second crypto.PublicKey, keyType string) string { // ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key. 
func ValidatePubKey(pub crypto.PublicKey) error { + // goodkey policy enforces: + // * RSA + // * Size of key: 2048 <= size <= 4096, size % 8 = 0 + // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) + // * Small primes check for modulus + // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) + // * Key is easily factored with Fermat's factorization method + // * EC + // * Public key Q is not the identity element (Ø) + // * Public key Q's x and y are within [0, p-1] + // * Public key Q is on the curve + // * Public key Q's order matches the subgroups (nQ = Ø) + allowedKeys := &goodkey.AllowedKeys{ + RSA2048: true, + RSA3072: true, + RSA4096: true, + ECDSAP256: true, + ECDSAP384: true, + ECDSAP521: true, + } + cfg := &goodkey.Config{ + FermatRounds: 100, + AllowedKeys: allowedKeys, + } + p, err := goodkey.NewPolicy(cfg, nil) + if err != nil { + // Should not occur, only chances to return errors are if fermat rounds + // are <0 or when loading blocked/weak keys from disk (not used here) + return errors.New("unable to initialize key policy") + } + switch pk := pub.(type) { case *rsa.PublicKey: - // goodkey policy enforces: - // * Size of key: 2048 <= size <= 4096, size % 8 = 0 - // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) - // * Small primes check for modulus - // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) - // * Key is easily factored with Fermat's factorization method - p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil) - if err != nil { - // Should not occur, only chances to return errors are if fermat rounds - // are <0 or when loading blocked/weak keys from disk (not used here) - return errors.New("unable to initialize key policy") - } // ctx is unused return p.GoodKey(context.Background(), pub) case *ecdsa.PublicKey: - // Unable to use goodkey policy because P-521 curve is not supported - return validateEcdsaKey(pk) + // ctx is unused + return p.GoodKey(context.Background(), pub) case ed25519.PublicKey: return validateEd25519Key(pk) } return errors.New("unsupported public key type") } -// Enforce that the ECDSA key curve is one of: -// * NIST P-256 (secp256r1, prime256v1) -// * NIST P-384 -// * NIST P-521. -// Other EC curves, like secp256k1, are not supported by Go. -func validateEcdsaKey(pub *ecdsa.PublicKey) error { - switch pub.Curve { - case elliptic.P224(): - return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521") - case elliptic.P256(), elliptic.P384(), elliptic.P521(): - return nil - default: - return fmt.Errorf("unexpected ec curve") - } -} - // No validations currently, ED25519 supports only one key size. func validateEd25519Key(_ ed25519.PublicKey) error { return nil diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go index de56f8e216..3dad8c34f4 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go @@ -135,8 +135,9 @@ func (d *DeviceFlowTokenGetter) deviceFlow(p *oidc.Provider, clientID, redirectU // Some providers use a secret here, we don't need for sigstore oauth one so leave it off. 
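Because AllowedKeys can now express P-521, ValidatePubKey routes all three NIST curves through the shared goodkey policy and drops the bespoke validateEcdsaKey path. A usage sketch, assuming this vendored sigstore version:

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"

        "github.com/sigstore/sigstore/pkg/cryptoutils"
    )

    func main() {
        // P-521 previously took the separate validateEcdsaKey path; it is now
        // accepted by the same goodkey policy as P-256 and P-384.
        key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
        if err != nil {
            panic(err)
        }
        fmt.Println(cryptoutils.ValidatePubKey(key.Public())) // <nil>
    }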
data := url.Values{ "grant_type": []string{"urn:ietf:params:oauth:grant-type:device_code"}, + "client_id": []string{clientID}, "device_code": []string{parsed.DeviceCode}, - "scope": []string{"openid", "email"}, + "scope": []string{"openid email"}, "code_verifier": []string{pkce.Value}, } diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go index 28abcac508..c1b6ef6b7a 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go @@ -114,10 +114,24 @@ func OIDConnect(issuer, id, secret, redirectURL string, tg TokenGetter) (*OIDCID return tg.GetIDToken(provider, config) } +type stringAsBool bool + +func (sb *stringAsBool) UnmarshalJSON(b []byte) error { + switch string(b) { + case "true", `"true"`, "True", `"True"`: + *sb = true + case "false", `"false"`, "False", `"False"`: + *sb = false + default: + return errors.New("invalid value for boolean") + } + return nil +} + type claims struct { - Email string `json:"email"` - Verified bool `json:"email_verified"` - Subject string `json:"sub"` + Email string `json:"email"` + Verified stringAsBool `json:"email_verified"` + Subject string `json:"sub"` } // SubjectFromToken extracts the subject claim from an OIDC Identity Token @@ -129,6 +143,16 @@ func SubjectFromToken(tok *oidc.IDToken) (string, error) { return subjectFromClaims(claims) } +// SubjectFromUnverifiedToken extracts the subject claim from the raw bytes of +// an OIDC identity token. +func SubjectFromUnverifiedToken(tok []byte) (string, error) { + claims := claims{} + if err := json.Unmarshal(tok, &claims); err != nil { + return "", err + } + return subjectFromClaims(claims) +} + func subjectFromClaims(c claims) (string, error) { if c.Email != "" { if !c.Verified { diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index 0eb1e1d16e..91dd430c1c 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -12,33 +12,47 @@ import ( "errors" "fmt" "io" + "math" "os" "time" "github.com/google/uuid" ) +var errAlignmentOverflow = errors.New("integer overflow when calculating alignment") + // nextAligned finds the next offset that satisfies alignment. -func nextAligned(offset int64, alignment int) int64 { +func nextAligned(offset int64, alignment int) (int64, error) { align64 := uint64(alignment) offset64 := uint64(offset) - if align64 != 0 && offset64%align64 != 0 { - offset64 = (offset64 & ^(align64 - 1)) + align64 + if align64 <= 0 || offset64%align64 == 0 { + return offset, nil + } + + offset64 += (align64 - offset64%align64) + + if offset64 > math.MaxInt64 { + return 0, errAlignmentOverflow } - return int64(offset64) + //nolint:gosec // Overflow handled above. + return int64(offset64), nil } // writeDataObjectAt writes the data object described by di to ws, using time t, recording details // in d. The object is written at the first position that satisfies the alignment requirements // described by di following offsetUnaligned. 
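Two fixes land in the device-flow token request above: the client_id required for public clients is now sent, and the scopes are collapsed into one space-delimited value. The latter matters because url.Values encodes each slice element as a separate parameter, while RFC 6749 section 3.3 defines scope as a single space-delimited parameter. A quick demonstration:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // Old form: two slice entries become two separate scope parameters.
        before := url.Values{"scope": []string{"openid", "email"}}
        fmt.Println(before.Encode()) // scope=openid&scope=email

        // New form: one space-delimited scope parameter, as the spec expects.
        after := url.Values{"scope": []string{"openid email"}}
        fmt.Println(after.Encode()) // scope=openid+email
    }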
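The stringAsBool shim above exists because some OIDC providers emit the email_verified claim as the JSON string "true" rather than a bare boolean, which fails to unmarshal into a plain bool. A self-contained reproduction (types copied from the hunk):

    package main

    import (
        "encoding/json"
        "errors"
        "fmt"
    )

    type stringAsBool bool

    func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
        switch string(b) {
        case "true", `"true"`, "True", `"True"`:
            *sb = true
        case "false", `"false"`, "False", `"False"`:
            *sb = false
        default:
            return errors.New("invalid value for boolean")
        }
        return nil
    }

    type claims struct {
        Email    string       `json:"email"`
        Verified stringAsBool `json:"email_verified"`
    }

    func main() {
        for _, raw := range []string{
            `{"email":"a@example.com","email_verified":true}`,
            `{"email":"b@example.com","email_verified":"true"}`, // string-typed claim
        } {
            var c claims
            if err := json.Unmarshal([]byte(raw), &c); err != nil {
                panic(err)
            }
            fmt.Println(c.Email, bool(c.Verified)) // both parse as true
        }
    }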
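nextAligned now reports overflow instead of silently wrapping on the conversion back to int64, and it computes the next multiple with a modulo rather than the old mask expression, which was only correct for power-of-two alignments. A standalone sketch of just the rounding arithmetic:

    package main

    import "fmt"

    // roundUp mirrors the arithmetic in the new nextAligned: advance offset to
    // the next multiple of alignment, for any positive alignment.
    func roundUp(offset, alignment uint64) uint64 {
        if alignment == 0 || offset%alignment == 0 {
            return offset
        }
        return offset + (alignment - offset%alignment)
    }

    func main() {
        fmt.Println(roundUp(10, 8)) // 16
        fmt.Println(roundUp(16, 8)) // 16 (already aligned)
        // A non-power-of-two alignment, where the old mask expression
        // (offset & ^(align-1)) + align would have produced 16 instead of 12.
        fmt.Println(roundUp(10, 6)) // 12
    }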
func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll - offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart) + offset, err := nextAligned(offsetUnaligned, di.opts.alignment) if err != nil { return err } + if _, err := ws.Seek(offset, io.SeekStart); err != nil { + return err + } + n, err := io.Copy(ws, di.r) if err != nil { return err @@ -72,6 +86,7 @@ func (f *FileImage) calculatedDataSize() int64 { var ( errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image") errPrimaryPartition = errors.New("image already contains a primary partition") + errObjectIDOverflow = errors.New("object ID would overflow") ) // writeDataObject writes the data object described by di to f, using time t, recording details in @@ -81,6 +96,11 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro return errInsufficientCapacity } + // We derive the ID from i, so make sure the ID will not overflow. + if int64(i) >= math.MaxUint32 { + return errObjectIDOverflow + } + // If this is a primary partition, verify there isn't another primary partition, and update the // architecture in the global header. if p, ok := di.opts.md.(partition); ok && p.Parttype == PartPrimSys { @@ -92,7 +112,7 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro } d := &f.rds[i] - d.ID = uint32(i) + 1 + d.ID = uint32(i) + 1 //nolint:gosec // Overflow handled above. f.h.DataSize = f.calculatedDataSize() @@ -213,8 +233,16 @@ func OptCreateWithCloseOnUnload(b bool) CreateOpt { } } +var errDescriptorCapacityNotSupported = errors.New("descriptor capacity not supported") + // createContainer creates a new SIF container file in rw, according to opts. func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) { + // The supported number of descriptors is limited by the unsigned 32-bit ID field in each + // rawDescriptor. + if co.descriptorCapacity >= math.MaxUint32 { + return nil, errDescriptorCapacityNotSupported + } + rds := make([]rawDescriptor, co.descriptorCapacity) rdsSize := int64(binary.Size(rds)) diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb332174..109997d77c 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. 
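The new ExpiresIn field carries the wire-format relative lifetime; as its comment notes, anchoring it to an absolute Expiry is left to the application. A hypothetical sketch of that bookkeeping:

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/oauth2"
    )

    func main() {
        // Wire-format token as a server might return it: no absolute expiry,
        // just a relative lifetime in seconds.
        tok := &oauth2.Token{AccessToken: "example-opaque-token", ExpiresIn: 3600}

        // The application anchors the relative lifetime to its own "now".
        if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
            tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
        }
        fmt.Println(tok.Valid(), tok.Expiry.Round(time.Second))
    }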
copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/modules.txt b/vendor/modules.txt index 1cadeac949..27b91c1629 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# dario.cat/mergo v1.0.0 +# dario.cat/mergo v1.0.1 ## explicit; go 1.13 dario.cat/mergo # github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 @@ -244,8 +244,8 @@ github.com/containers/conmon/runner/config # github.com/containers/gvisor-tap-vsock v0.7.5 ## explicit; go 1.21 github.com/containers/gvisor-tap-vsock/pkg/types -# github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6 -## explicit; go 1.21.0 +# github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46 +## explicit; go 1.22.6 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -406,7 +406,7 @@ github.com/containers/storage/types ## explicit; go 1.19 github.com/containers/winquit/pkg/winquit github.com/containers/winquit/pkg/winquit/win32 -# github.com/coreos/go-oidc/v3 v3.10.0 +# github.com/coreos/go-oidc/v3 v3.11.0 ## explicit; go 1.21 github.com/coreos/go-oidc/v3/oidc # github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f @@ -551,7 +551,7 @@ github.com/gin-gonic/gin/render github.com/go-jose/go-jose/v3 github.com/go-jose/go-jose/v3/cipher github.com/go-jose/go-jose/v3/json -# github.com/go-jose/go-jose/v4 v4.0.2 +# github.com/go-jose/go-jose/v4 v4.0.4 ## explicit; go 1.21 github.com/go-jose/go-jose/v4 github.com/go-jose/go-jose/v4/cipher @@ -655,7 +655,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.20.1 +# github.com/google/go-containerregistry v0.20.2 ## explicit; go 1.18 github.com/google/go-containerregistry/pkg/name github.com/google/go-containerregistry/pkg/v1 @@ -740,8 +740,8 @@ github.com/kr/fs # github.com/leodido/go-urn v1.2.4 ## explicit; go 1.16 github.com/leodido/go-urn -# github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 -## explicit; go 1.21 +# github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec +## explicit; go 1.22.0 github.com/letsencrypt/boulder/core github.com/letsencrypt/boulder/goodkey github.com/letsencrypt/boulder/identifier @@ -992,8 +992,8 @@ github.com/shirou/gopsutil/v4/process # github.com/shoenig/go-m1cpu v0.1.6 ## explicit; go 1.20 github.com/shoenig/go-m1cpu -# github.com/sigstore/fulcio v1.4.5 -## explicit; go 1.21 +# github.com/sigstore/fulcio v1.6.4 +## explicit; go 1.22.6 github.com/sigstore/fulcio/pkg/api github.com/sigstore/fulcio/pkg/certificate # github.com/sigstore/rekor v1.3.6 @@ -1006,8 +1006,8 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey github.com/sigstore/rekor/pkg/generated/client/tlog github.com/sigstore/rekor/pkg/generated/models github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.8.4 -## explicit; go 1.21 +# github.com/sigstore/sigstore v1.8.9 +## explicit; go 1.22.5 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/oauth 
github.com/sigstore/sigstore/pkg/oauthflow @@ -1034,8 +1034,8 @@ github.com/stefanberger/go-pkcs11uri ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/sylabs/sif/v2 v2.18.0 -## explicit; go 1.21.0 +# github.com/sylabs/sif/v2 v2.19.1 +## explicit; go 1.22.5 github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit @@ -1212,7 +1212,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.22.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -1253,15 +1253,15 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.5.0 +# golang.org/x/time v0.6.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.24.0 ## explicit; go 1.19 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c +## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.65.0 ## explicit; go 1.21