diff --git a/copy/compression.go b/copy/compression.go index 318450a3a..fb5e1b174 100644 --- a/copy/compression.go +++ b/copy/compression.go @@ -11,6 +11,7 @@ import ( "github.com/containers/image/v5/pkg/compression" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -34,10 +35,10 @@ var ( // bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. type bpDetectCompressionStepData struct { - isCompressed bool - format compressiontypes.Algorithm // Valid if isCompressed - decompressor compressiontypes.DecompressorFunc // Valid if isCompressed - srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob. + isCompressed bool + format compressiontypes.Algorithm // Valid if isCompressed + decompressor compressiontypes.DecompressorFunc // Valid if isCompressed + srcCompressorBaseVariantName string // Compressor name to possibly record in the blob info cache for the source blob. } // blobPipelineDetectCompressionStep updates *stream to detect its current compression format. @@ -51,15 +52,25 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI } stream.reader = reader + if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName { + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return bpDetectCompressionStepData{}, err + } + if tocDigest != nil { + format = compression.ZstdChunked + } + + } res := bpDetectCompressionStepData{ isCompressed: decompressor != nil, format: format, decompressor: decompressor, } if res.isCompressed { - res.srcCompressorName = format.Name() + res.srcCompressorBaseVariantName = format.BaseVariantName() } else { - res.srcCompressorName = internalblobinfocache.Uncompressed + res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed } if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() { @@ -70,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI // bpCompressionStepData contains data that the copy pipeline needs about the compression step. type bpCompressionStepData struct { - operation bpcOperation // What we are actually doing - uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) - uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. - uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. - srcCompressorName string // Compressor name to record in the blob info cache for the source blob. - uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob. - closers []io.Closer // Objects to close after the upload is done, if any. 
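A note on the new detection step above: a zstd:chunked layer is a valid zstd stream, so the decompressor alone cannot tell the two variants apart; the only signal at this point is the TOC-digest annotation on the blob. A minimal sketch of that rule, using the same helpers this patch imports (the function name is hypothetical, not part of the patch):

```go
// Sketch only: refineZstdFormat is a hypothetical helper, not part of this patch.
package sketch

import (
	"github.com/containers/image/v5/pkg/compression"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
)

// refineZstdFormat upgrades a detected plain-zstd format to zstd:chunked if the
// blob’s annotations carry a TOC digest; any other format is returned unchanged.
func refineZstdFormat(detected compressiontypes.Algorithm, annotations map[string]string) (compressiontypes.Algorithm, error) {
	if detected.Name() != compressiontypes.ZstdAlgorithmName {
		return detected, nil // Only plain zstd is ambiguous.
	}
	tocDigest, err := chunkedToc.GetTOCDigest(annotations)
	if err != nil {
		return detected, err
	}
	if tocDigest != nil {
		return compression.ZstdChunked, nil // A TOC annotation marks the chunked variant.
	}
	return detected, nil
}
```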
+ operation bpcOperation // What we are actually doing + uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) + uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. + uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. + srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob. + uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob. + uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob. + closers []io.Closer // Objects to close after the upload is done, if any. } type bpcOperation int @@ -128,11 +140,12 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp // We can’t do anything with an encrypted blob unless decrypted. logrus.Debugf("Using original blob without modification for encrypted blob") return &bpCompressionStepData{ - operation: bpcOpPreserveOpaque, - uploadedOperation: types.PreserveOriginal, - uploadedAlgorithm: nil, - srcCompressorName: internalblobinfocache.UnknownCompression, - uploadedCompressorName: internalblobinfocache.UnknownCompression, + operation: bpcOpPreserveOpaque, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, }, nil } return nil, nil @@ -156,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp Digest: "", Size: -1, } + specificVariantName := uploadedAlgorithm.Name() + if specificVariantName == uploadedAlgorithm.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } return &bpCompressionStepData{ - operation: bpcOpCompressUncompressed, - uploadedOperation: types.Compress, - uploadedAlgorithm: uploadedAlgorithm, - uploadedAnnotations: annotations, - srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: uploadedAlgorithm.Name(), - closers: []io.Closer{reader}, + operation: bpcOpCompressUncompressed, + uploadedOperation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{reader}, }, nil } return nil, nil @@ -196,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp Digest: "", Size: -1, } + specificVariantName := ic.compressionFormat.Name() + if specificVariantName == ic.compressionFormat.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } succeeded = true return &bpCompressionStepData{ - operation: bpcOpRecompressCompressed, - uploadedOperation: types.PreserveOriginal, - uploadedAlgorithm: ic.compressionFormat, - uploadedAnnotations: annotations, - srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: ic.compressionFormat.Name(), - 
closers: []io.Closer{decompressed, recompressed}, + operation: bpcOpRecompressCompressed, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: ic.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{decompressed, recompressed}, }, nil } return nil, nil @@ -225,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp Size: -1, } return &bpCompressionStepData{ - operation: bpcOpDecompressCompressed, - uploadedOperation: types.Decompress, - uploadedAlgorithm: nil, - srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: internalblobinfocache.Uncompressed, - closers: []io.Closer{s}, + operation: bpcOpDecompressCompressed, + uploadedOperation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + closers: []io.Closer{s}, }, nil } return nil, nil @@ -268,11 +292,15 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom algorithm = nil } return &bpCompressionStepData{ - operation: bpcOp, - uploadedOperation: uploadedOp, - uploadedAlgorithm: algorithm, - srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: detected.srcCompressorName, + operation: bpcOp, + uploadedOperation: uploadedOp, + uploadedAlgorithm: algorithm, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + // We only record the base variant of the format on upload; we didn’t do anything with + // the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger + // reuse of any kind between the blob digest and the TOC digest. 
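The comment above is the crux of the caching change: only a blob this process compressed itself gets the specific variant and its annotations recorded, while a blob passed through unmodified is recorded as its base variant only, because the TOC was never validated against the blob digest. Roughly, the two record shapes look like this (a sketch; internal/blobinfocache is only importable from inside containers/image, so this is written as if it lived next to the copy code):

```go
package sketch // illustrative only, not part of this patch

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
)

// compressorRecords returns the two shapes of DigestCompressorData the pipeline stores:
// “fresh” for a layer we just compressed as zstd:chunked ourselves (annotations trusted),
// “preserved” for a zstd/zstd:chunked layer copied through without validating its TOC.
func compressorRecords(uploadedAnnotations map[string]string) (fresh, preserved blobinfocache.DigestCompressorData) {
	fresh = blobinfocache.DigestCompressorData{
		BaseVariantCompressor:      compressiontypes.ZstdAlgorithmName,
		SpecificVariantCompressor:  compressiontypes.ZstdChunkedAlgorithmName,
		SpecificVariantAnnotations: uploadedAnnotations, // e.g. the TOC digest annotation
	}
	preserved = blobinfocache.DigestCompressorData{
		BaseVariantCompressor:      compressiontypes.ZstdAlgorithmName,
		SpecificVariantCompressor:  blobinfocache.UnknownCompression,
		SpecificVariantAnnotations: nil,
	}
	return fresh, preserved
}
```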
+ uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, } } @@ -308,6 +336,15 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf // No useful information case bpcOpCompressUncompressed: c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + if d.uploadedAnnotations != nil { + tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations) + if err != nil { + return fmt.Errorf("parsing just-created compression annotations: %w", err) + } + if tocDigest != nil { + c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest) + } + } case bpcOpDecompressCompressed: c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) case bpcOpRecompressCompressed, bpcOpPreserveCompressed: @@ -323,29 +360,27 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) } } - if d.srcCompressorName == "" || d.uploadedCompressorName == "" { - return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)", - d.srcCompressorName, d.uploadedCompressorName) + if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" { + return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)", + d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName) } - if d.uploadedCompressorName != internalblobinfocache.UnknownCompression { - if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Don’t record zstd:chunked algorithms. - // There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions, - // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless. - // - // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate - // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName - // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about - // inconsistent data to be logged. - c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName) - } + if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.uploadedCompressorBaseVariantName, + SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName, + SpecificVariantAnnotations: d.uploadedAnnotations, + }) } if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && - d.srcCompressorName != internalblobinfocache.UnknownCompression { - if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Don’t record zstd:chunked algorithms, see above. 
- c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) - } + d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + // If the source is already using some TOC-dependent variant, we either copied the + // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest, + // so record neither the variant name, nor the TOC digest. + c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.srcCompressorBaseVariantName, + SpecificVariantCompressor: internalblobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }) } return nil } diff --git a/copy/single.go b/copy/single.go index 17cc8c833..5db7737b8 100644 --- a/copy/single.go +++ b/copy/single.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "maps" "reflect" "slices" "strings" @@ -149,6 +150,28 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar ic.compressionFormat = c.options.DestinationCtx.CompressionFormat ic.compressionLevel = c.options.DestinationCtx.CompressionLevel } + // HACK: Don’t combine zstd:chunked and encryption. + // zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption + // to support decrypting arbitrary subsets of the stream. That’s plausible but not supported using the encryption API we have. + // Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without + // encryption. (That can be determined from the unencrypted config anyway, but, still...) + // + // Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of + // hard-coding zstd:chunked / zstd. + if ic.c.options.OciEncryptLayers != nil { + format := ic.compressionFormat + if format == nil { + format = defaultCompressionFormat + } + if format.Name() == compressiontypes.ZstdChunkedAlgorithmName { + if ic.requireCompressionFormatMatch { + return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead") + } + logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead") + ic.compressionFormat = &compression.Zstd + } + } + // Decide whether we can substitute blobs with semantic equivalents: // - Don’t do that if we can’t modify the manifest at all // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. @@ -866,21 +889,33 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse // Handling of compression, encryption, and the related MIME types and the like are all the responsibility // of the generic code in this package. res := types.BlobInfo{ - Digest: reusedBlob.Digest, - Size: reusedBlob.Size, - URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. - Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls) - MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation. + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. 
+ // FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t + // (but those annotations being left with incorrect values should not break pulls). + Annotations: maps.Clone(inputInfo.Annotations), + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation. CompressionOperation: reusedBlob.CompressionOperation, CompressionAlgorithm: reusedBlob.CompressionAlgorithm, CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway. } // The transport is only expected to fill CompressionOperation and CompressionAlgorithm - // if the blob was substituted; otherwise, fill it in based + // if the blob was substituted; otherwise, it is optional, and if not set, fill it in based // on what we know from the srcInfos we were given. if reusedBlob.Digest == inputInfo.Digest { - res.CompressionOperation = inputInfo.CompressionOperation - res.CompressionAlgorithm = inputInfo.CompressionAlgorithm + if res.CompressionOperation == types.PreserveOriginal { + res.CompressionOperation = inputInfo.CompressionOperation + } + if res.CompressionAlgorithm == nil { + res.CompressionAlgorithm = inputInfo.CompressionAlgorithm + } + } + if len(reusedBlob.CompressionAnnotations) != 0 { + if res.Annotations == nil { + res.Annotations = map[string]string{} + } + maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations) } return res } diff --git a/copy/single_test.go b/copy/single_test.go index 144b5ed2a..890a63bce 100644 --- a/copy/single_test.go +++ b/copy/single_test.go @@ -55,22 +55,42 @@ func TestUpdatedBlobInfoFromReuse(t *testing.T) { }, { // Reuse with substitution reused: private.ReusedBlob{ - Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - Size: 513543640, - CompressionOperation: types.Decompress, - CompressionAlgorithm: nil, + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Size: 513543640, + CompressionOperation: types.Decompress, + CompressionAlgorithm: nil, + CompressionAnnotations: map[string]string{"decompressed": "value"}, }, expected: types.BlobInfo{ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Size: 513543640, URLs: nil, - Annotations: map[string]string{"test-annotation-2": "two"}, + Annotations: map[string]string{"test-annotation-2": "two", "decompressed": "value"}, MediaType: imgspecv1.MediaTypeImageLayerGzip, CompressionOperation: types.Decompress, CompressionAlgorithm: nil, // CryptoOperation is set to the zero value }, }, + { // Reuse turning zstd into zstd:chunked + reused: private.ReusedBlob{ + Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + Size: 51354364, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.ZstdChunked, + CompressionAnnotations: map[string]string{"zstd-toc": "value"}, + }, + expected: types.BlobInfo{ + Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + Size: 51354364, + URLs: nil, + Annotations: map[string]string{"test-annotation-2": "two", "zstd-toc": "value"}, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.ZstdChunked, + // CryptoOperation is set to the zero value + }, + }, } { res := updatedBlobInfoFromReuse(srcInfo, c.reused) assert.Equal(t, c.expected, res, fmt.Sprintf("%#v", c.reused)) diff --git a/docker/docker_image_dest.go b/docker/docker_image_dest.go index 
7f7a74bd3..ed3d4a2c0 100644
--- a/docker/docker_image_dest.go
+++ b/docker/docker_image_dest.go
@@ -332,6 +332,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
 	}
 
+	originalCandidateKnownToBeMissing := false
 	if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
 		// First, check whether the blob happens to already exist at the destination.
 		haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
@@ -341,9 +342,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		if haveBlob {
 			return true, reusedInfo, nil
 		}
+		originalCandidateKnownToBeMissing = true
 	} else {
 		logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
 			optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
+		// We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
+		// In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
+		// a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
+		// with the BIC’s annotations.
+		// This is not quite correct, it only works if the BIC also contains an acceptable _location_.
+		// Ideally, we could look up just the compression algorithm/annotations for info.digest,
+		// and use it even if no location candidate exists and the original candidate is present.
 	}
 
 	// Then try reusing blobs from other locations.
@@ -387,7 +396,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 			// for it in the current repo.
 			candidateRepo = reference.TrimNamed(d.ref.ref)
 		}
-		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+		if originalCandidateKnownToBeMissing &&
+			candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
 			logrus.Debug("...
Already tried the primary destination") continue } @@ -427,10 +437,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) return true, private.ReusedBlob{ - Digest: candidate.Digest, - Size: size, - CompressionOperation: candidate.CompressionOperation, - CompressionAlgorithm: candidate.CompressionAlgorithm}, nil + Digest: candidate.Digest, + Size: size, + CompressionOperation: candidate.CompressionOperation, + CompressionAlgorithm: candidate.CompressionAlgorithm, + CompressionAnnotations: candidate.CompressionAnnotations, + }, nil } return false, private.ReusedBlob{}, nil diff --git a/internal/blobinfocache/blobinfocache.go b/internal/blobinfocache/blobinfocache.go index 893aa959d..f31ee3124 100644 --- a/internal/blobinfocache/blobinfocache.go +++ b/internal/blobinfocache/blobinfocache.go @@ -27,7 +27,14 @@ func (bic *v1OnlyBlobInfoCache) Open() { func (bic *v1OnlyBlobInfoCache) Close() { } -func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + return "" +} + +func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { +} + +func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) { } func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 { diff --git a/internal/blobinfocache/types.go b/internal/blobinfocache/types.go index c9e4aaa48..acf82ee63 100644 --- a/internal/blobinfocache/types.go +++ b/internal/blobinfocache/types.go @@ -26,19 +26,40 @@ type BlobInfoCache2 interface { // Close destroys state created by Open(). Close() - // RecordDigestCompressorName records a compressor for the blob with the specified digest, - // or Uncompressed or UnknownCompression. - // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a - // digest just because some remote author claims so (e.g. because a manifest says so); + // UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. + // Returns "" if the uncompressed digest is unknown. + UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest + // RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. + // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. + // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. + // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) + RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) + + // RecordDigestCompressorData records data for the blob with the specified digest. + // WARNING: Only call this with LOCALLY VERIFIED data: + // - don’t record a compressor for a digest just because some remote author claims so + // (e.g. 
because a manifest says so);
+	//   - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+	//     and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+	//     in a manifest)
 	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
 	// information in a manifest.
-	RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+	RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
 	// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
 	// that could possibly be reused within the specified (transport scope) (if they still
 	// exist, which is not guaranteed).
 	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
 }
 
+// DigestCompressorData is information known about how a blob is compressed.
+// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
+type DigestCompressorData struct {
+	BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression.
+	// The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
+	SpecificVariantCompressor  string            // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
+	SpecificVariantAnnotations map[string]string // Annotations required to benefit from the specific variant.
+}
+
 // CandidateLocations2Options are used in CandidateLocations2.
 type CandidateLocations2Options struct {
 	// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
@@ -51,9 +72,10 @@ type CandidateLocations2Options struct {
 // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed - CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed - UnknownLocation bool // is true when `Location` for this blob is not set - Location types.BICLocationReference // not set if UnknownLocation is set to `true` + Digest digest.Digest + CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + CompressionAnnotations map[string]string // If necessary, annotations necessary to use CompressionAlgorithm + UnknownLocation bool // is true when `Location` for this blob is not set + Location types.BICLocationReference // not set if UnknownLocation is set to `true` } diff --git a/internal/imagedestination/wrapper.go b/internal/imagedestination/wrapper.go index cdd3c5e5d..f5a38541a 100644 --- a/internal/imagedestination/wrapper.go +++ b/internal/imagedestination/wrapper.go @@ -76,6 +76,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob Size: blob.Size, CompressionOperation: blob.CompressionOperation, CompressionAlgorithm: blob.CompressionAlgorithm, + // CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated + // annotations, and we didn’t use the blob.Annotations field previously, so we’ll + // continue not using it. }, nil } diff --git a/internal/manifest/manifest.go b/internal/manifest/manifest.go index ee0ddc772..3fb52104a 100644 --- a/internal/manifest/manifest.go +++ b/internal/manifest/manifest.go @@ -205,11 +205,6 @@ type ReuseConditions struct { // (which can be nil to represent uncompressed or unknown) matches reuseConditions. func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool { if c.RequiredCompression != nil { - if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. - // The caller must re-compress to build those annotations. 
- return false - } if candidateCompression == nil || (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { return false diff --git a/internal/manifest/manifest_test.go b/internal/manifest/manifest_test.go index ae2cc32ca..ebb3ef559 100644 --- a/internal/manifest/manifest_test.go +++ b/internal/manifest/manifest_test.go @@ -192,6 +192,9 @@ func TestCandidateCompressionMatchesReuseConditions(t *testing.T) { }{ // RequiredCompression restrictions {&compression.Zstd, nil, &compression.Zstd, true}, + {&compression.Zstd, nil, &compression.ZstdChunked, true}, + {&compression.ZstdChunked, nil, &compression.Zstd, false}, + {&compression.ZstdChunked, nil, &compression.ZstdChunked, true}, {&compression.Gzip, nil, &compression.Zstd, false}, {&compression.Zstd, nil, nil, false}, {nil, nil, &compression.Zstd, true}, diff --git a/internal/private/private.go b/internal/private/private.go index 63fb9326d..d81ea6703 100644 --- a/internal/private/private.go +++ b/internal/private/private.go @@ -134,9 +134,14 @@ type ReusedBlob struct { Size int64 // Must be provided // The following compression fields should be set when the reuse substitutes // a differently-compressed blob. + // They may be set also to change from a base variant to a specific variant of an algorithm. CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A + // Annotations that should be added, for CompressionAlgorithm. Note that they might need to be + // added even if the digest doesn’t change (if we found the annotations in a cache). + CompressionAnnotations map[string]string + MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. } diff --git a/pkg/blobinfocache/boltdb/boltdb.go b/pkg/blobinfocache/boltdb/boltdb.go index 07230f873..2d4137ffd 100644 --- a/pkg/blobinfocache/boltdb/boltdb.go +++ b/pkg/blobinfocache/boltdb/boltdb.go @@ -2,6 +2,8 @@ package boltdb import ( + "bytes" + "encoding/json" "errors" "fmt" "io/fs" @@ -25,9 +27,15 @@ var ( // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. uncompressedDigestBucket = []byte("uncompressedDigest") + // uncompressedDigestByTOCBucket stores a mapping from a TOC digest to an uncompressed digest. + uncompressedDigestByTOCBucket = []byte("uncompressedDigestByTOC") // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression). // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present. digestCompressorBucket = []byte("digestCompressor") + // digestSpecificVariantCompressorBucket stores a mapping from any digest to a (compressor, NUL byte, annotations as JSON), valid + // only if digestCompressorBucket contains a value. The compressor is not `UnknownCompression`. + digestSpecificVariantCompressorBucket = []byte("digestSpecificVariantCompressor") + // It may not exist in caches created by older versions, even if digestCompressorBucket is present. 
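For reference, the value stored in the new digestSpecificVariantCompressorBucket is the specific compressor name, a NUL byte, and the annotations serialized as JSON. A self-contained round trip of that encoding (helper names are illustrative, not part of the patch):

```go
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
)

// encodeSpecificVariant builds the bucket value: compressor name, NUL separator, JSON annotations.
func encodeSpecificVariant(compressor string, annotations map[string]string) ([]byte, error) {
	annotationsJSON, err := json.Marshal(annotations)
	if err != nil {
		return nil, err
	}
	value := append([]byte(compressor), 0)
	return append(value, annotationsJSON...), nil
}

// decodeSpecificVariant splits the value back into the compressor name and annotations.
func decodeSpecificVariant(value []byte) (string, map[string]string, error) {
	compressorBytes, annotationsJSON, ok := bytes.Cut(value, []byte{0})
	if !ok {
		return "", nil, errors.New("missing NUL separator")
	}
	var annotations map[string]string
	if err := json.Unmarshal(annotationsJSON, &annotations); err != nil {
		return "", nil, err
	}
	return string(compressorBytes), annotations, nil
}

func main() {
	value, _ := encodeSpecificVariant("zstd:chunked", map[string]string{"a": "b"})
	compressor, annotations, _ := decodeSpecificVariant(value)
	fmt.Println(compressor, annotations)
}
```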
// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest // (as a set of key=digest, value="" pairs) digestByUncompressedBucket = []byte("digestByUncompressed") @@ -243,27 +251,122 @@ func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } -// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified -// compressor, or is blobinfocache.Uncompressed. +// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. +// Returns "" if the uncompressed digest is unknown. +func (bdc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + var res digest.Digest + if err := bdc.view(func(tx *bolt.Tx) error { + if b := tx.Bucket(uncompressedDigestByTOCBucket); b != nil { + if uncompressedBytes := b.Get([]byte(tocDigest.String())); uncompressedBytes != nil { + d, err := digest.Parse(string(uncompressedBytes)) + if err == nil { + res = d + return nil + } + // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + } + res = "" + return nil + }); err != nil { // Including os.IsNotExist(err) + return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return res +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +func (bdc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { _ = bdc.update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(digestCompressorBucket) + b, err := tx.CreateBucketIfNotExists(uncompressedDigestByTOCBucket) if err != nil { return err } + key := []byte(tocDigest.String()) + if previousBytes := b.Get(key); previousBytes != nil { + previous, err := digest.Parse(string(previousBytes)) + if err != nil { + return err + } + if previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed) + } + } + if err := b.Put(key, []byte(uncompressed.String())); err != nil { + return err + } + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordDigestCompressorData records data for the blob with the specified digest. +// WARNING: Only call this with LOCALLY VERIFIED data: +// - don’t record a compressor for a digest just because some remote author claims so +// (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) +// +// otherwise the cache could be poisoned and cause us to make incorrect edits to type +// information in a manifest. 
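The two TOC methods implemented above give the cache a second identity axis: a locally verified TOC digest can later be mapped back to the layer’s uncompressed (DiffID) digest without refetching the blob. A rough usage sketch (function names are hypothetical; cache is any BlobInfoCache2 implementation):

```go
package sketch // illustrative only, not part of this patch

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/opencontainers/go-digest"
)

// rememberTOC is called once the caller has locally verified that the blob with
// this TOC digest really decompresses to uncompressedDigest.
func rememberTOC(cache blobinfocache.BlobInfoCache2, tocDigest, uncompressedDigest digest.Digest) {
	cache.RecordTOCUncompressedPair(tocDigest, uncompressedDigest)
}

// diffIDForTOC recovers the uncompressed digest for a layer known only by its TOC,
// or reports false if the pair was never recorded.
func diffIDForTOC(cache blobinfocache.BlobInfoCache2, tocDigest digest.Digest) (digest.Digest, bool) {
	if uncompressed := cache.UncompressedDigestForTOC(tocDigest); uncompressed != "" {
		return uncompressed, true
	}
	return "", false
}
```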
+func (bdc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { + _ = bdc.update(func(tx *bolt.Tx) error { key := []byte(anyDigest.String()) + + b, err := tx.CreateBucketIfNotExists(digestCompressorBucket) + if err != nil { + return err + } + warned := false if previousBytes := b.Get(key); previousBytes != nil { - if string(previousBytes) != compressorName { - logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName) + if string(previousBytes) != data.BaseVariantCompressor { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), data.BaseVariantCompressor) + warned = true + } + } + if data.BaseVariantCompressor == blobinfocache.UnknownCompression { + if err := b.Delete(key); err != nil { + return err + } + if b := tx.Bucket(digestSpecificVariantCompressorBucket); b != nil { + if err := b.Delete(key); err != nil { + return err + } } } - if compressorName == blobinfocache.UnknownCompression { - return b.Delete(key) + if err := b.Put(key, []byte(data.BaseVariantCompressor)); err != nil { + return err } - return b.Put(key, []byte(compressorName)) + + if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + b, err := tx.CreateBucketIfNotExists(digestSpecificVariantCompressorBucket) + if err != nil { + return err + } + if !warned { // Don’t warn twice about the same digest + if previousBytes := b.Get(key); previousBytes != nil { + if prevSVCBytes, _, ok := bytes.Cut(previousBytes, []byte{0}); ok { + prevSVC := string(prevSVCBytes) + if data.SpecificVariantCompressor != prevSVC { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor) + } + } + } + } + annotations, err := json.Marshal(data.SpecificVariantAnnotations) + if err != nil { + return err + } + data := bytes.Clone([]byte(data.SpecificVariantCompressor)) + data = append(data, 0) + data = append(data, annotations...) + if err := b.Put(key, data); err != nil { + return err + } + } + return nil }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } @@ -300,23 +403,37 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket // (which might be nil) with corresponding compression -// info from compressionBucket (which might be nil), and returns the result of appending them +// info from compressionBucket and specificVariantCompresssionBucket (which might be nil), and returns the result of appending them // to candidates. // v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates // with unknown compression. 
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, - v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { +func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket, specificVariantCompresssionBucket *bolt.Bucket, + digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { digestKey := []byte(digest.String()) - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if compressionBucket != nil { // the bucket won't exist if the cache was created by a v1 implementation and // hasn't yet been updated by a v2 implementation if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 { - compressorName = string(compressorNameValue) + compressionData.BaseVariantCompressor = string(compressorNameValue) + } + if specificVariantCompresssionBucket != nil { + if svcData := specificVariantCompresssionBucket.Get(digestKey); svcData != nil { + if compressorBytes, annotationBytes, ok := bytes.Cut(svcData, []byte{0}); ok { + compressionData.SpecificVariantCompressor = string(compressorBytes) + if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil { + return candidates // FIXME? Log error (but throttle the log volume on repeated accesses)? + } + } + } } } - ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName) - if !ok { + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { return candidates } @@ -330,28 +447,11 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW if err := t.UnmarshalBinary(v); err != nil { return err } - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - Location: types.BICLocationReference{Opaque: string(k)}, - }, - LastSeen: t, - }) + candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: string(k)}, t)) return nil }) // FIXME? Log error (but throttle the log volume on repeated accesses)? 
} else if v2Options != nil { - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, - }, - LastSeen: time.Time{}, - }) + candidates = append(candidates, template.CandidateWithUnknownLocation()) } return candidates } @@ -377,11 +477,12 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types if scopeBucket != nil { scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) } - // compressionBucket won't have been created if previous writers never recorded info about compression, + // compressionBucket and svCompressionBucket won't have been created if previous writers never recorded info about compression, // and we don't want to fail just because of that compressionBucket := tx.Bucket(digestCompressorBucket) + specificVariantCompressionBucket := tx.Bucket(digestSpecificVariantCompressorBucket) - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, v2Options) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, specificVariantCompressionBucket, primaryDigest, v2Options) if canSubstitute { if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { b := tx.Bucket(digestByUncompressedBucket) @@ -394,7 +495,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types return err } if d != primaryDigest && d != uncompressedDigestValue { - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, v2Options) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, specificVariantCompressionBucket, d, v2Options) } return nil }); err != nil { @@ -403,7 +504,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types } } if uncompressedDigestValue != primaryDigest { - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, v2Options) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, specificVariantCompressionBucket, uncompressedDigestValue, v2Options) } } } diff --git a/pkg/blobinfocache/internal/prioritize/prioritize.go b/pkg/blobinfocache/internal/prioritize/prioritize.go index 9cd9c8f7d..40ed09bed 100644 --- a/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -25,57 +25,133 @@ const replacementAttempts = 5 // This is a heuristic/guess, and could well use a different value. const replacementUnknownLocationAttempts = 2 -// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob -// with compressionName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options. +// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest, +// which can be later combined with information about a location. 
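The CandidateTemplate introduced below splits candidate construction into a per-digest step (compression validation) and a per-location step, replacing the old CandidateCompression helper. A rough sketch of the intended call pattern in a cache backend, assuming a hypothetical knownLocations map (the real backends iterate their own storage):

```go
package sketch // illustrative only, not part of this patch; written as if inside pkg/blobinfocache

import (
	"time"

	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

// appendCandidates adds one CandidateWithTime per known location for a digest,
// or a single unknown-location candidate when v2 lookups allow it.
func appendCandidates(candidates []prioritize.CandidateWithTime, v2Options *blobinfocache.CandidateLocations2Options,
	d digest.Digest, data blobinfocache.DigestCompressorData,
	knownLocations map[types.BICLocationReference]time.Time) []prioritize.CandidateWithTime {
	template := prioritize.CandidateTemplateWithCompression(v2Options, d, data)
	if template == nil {
		return candidates // Unknown or unacceptable compression: not a usable candidate.
	}
	for location, lastSeen := range knownLocations {
		candidates = append(candidates, template.CandidateWithLocation(location, lastSeen))
	}
	if len(knownLocations) == 0 && v2Options != nil {
		candidates = append(candidates, template.CandidateWithUnknownLocation())
	}
	return candidates
}
```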
+type CandidateTemplate struct { + digest digest.Digest + compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + compressionAnnotations map[string]string // If necessary, annotations necessary to use compressionAlgorithm +} + +// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable +// for a CandidateLocations* call with v2Options. // // v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known); // if not nil, the call is assumed to be CandidateLocations2. -// -// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2 -func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) { +func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate { if v2Options == nil { - return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used. + return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used. + digest: digest, + } } - var op types.LayerCompression - var algo *compression.Algorithm - switch compressorName { + requiredCompression := "nil" + if v2Options.RequiredCompression != nil { + requiredCompression = v2Options.RequiredCompression.Name() + } + switch data.BaseVariantCompressor { case blobinfocache.Uncompressed: - op = types.Decompress - algo = nil + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, nil) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v", + digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Decompress, + compressionAlgorithm: nil, + compressionAnnotations: nil, + } case blobinfocache.UnknownCompression: logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String()) - return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2 + return nil // Not allowed with CandidateLocations2 default: - op = types.Compress - algo_, err := compression.AlgorithmByName(compressorName) + // See if we can use the specific variant, first. 
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor) + if err != nil { + logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v", + data.SpecificVariantCompressor, digest.String(), err) + } else { + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v", + data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + } else { + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: data.SpecificVariantAnnotations, + } + } + } + } + + // Try the base variant. + algo, err := compression.AlgorithmByName(data.BaseVariantCompressor) if err != nil { logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v", - digest.String(), compressorName, err) - return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required + digest.String(), data.BaseVariantCompressor, err) + return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required } - algo = &algo_ - } - if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ - PossibleManifestFormats: v2Options.PossibleManifestFormats, - RequiredCompression: v2Options.RequiredCompression, - }, algo) { - requiredCompresssion := "nil" - if v2Options.RequiredCompression != nil { - requiredCompresssion = v2Options.RequiredCompression.Name() + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v", + digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: nil, } - logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v", - digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats) - return false, types.PreserveOriginal, nil } - - return true, op, algo } // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. 
type CandidateWithTime struct { - Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) + candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + lastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) +} + +// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen) +func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: false, + Location: location, + }, + lastSeen: lastSeen, + } +} + +// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location. +func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + lastSeen: time.Time{}, + } } // candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize, @@ -91,35 +167,35 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) // First, deal with the primaryDigest/uncompressedDigest cases: - if xi.Candidate.Digest != xj.Candidate.Digest { + if xi.candidate.Digest != xj.candidate.Digest { // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter - if xi.Candidate.Digest == css.primaryDigest { + if xi.candidate.Digest == css.primaryDigest { return -1 } - if xj.Candidate.Digest == css.primaryDigest { + if xj.candidate.Digest == css.primaryDigest { return 1 } if css.uncompressedDigest != "" { - if xi.Candidate.Digest == css.uncompressedDigest { + if xi.candidate.Digest == css.uncompressedDigest { return 1 } - if xj.Candidate.Digest == css.uncompressedDigest { + if xj.candidate.Digest == css.uncompressedDigest { return -1 } } } else { // xi.Candidate.Digest == xj.Candidate.Digest // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time - if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { - return -xi.LastSeen.Compare(xj.LastSeen) + if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) { + return -xi.lastSeen.Compare(xj.lastSeen) } } // Neither of the digests are primaryDigest/uncompressedDigest: - if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time + if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order 
primarily by time return -cmp } // Fall back to digest, if timestamps end up _exactly_ the same (how?!) - return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest) + return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest) } // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the @@ -138,7 +214,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, uncompressedDigest: uncompressedDigest, }).compare) for _, candidate := range cs { - if candidate.Candidate.UnknownLocation { + if candidate.candidate.UnknownLocation { unknownLocationCandidates = append(unknownLocationCandidates, candidate) } else { knownLocationCandidates = append(knownLocationCandidates, candidate) @@ -150,11 +226,11 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates)) res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed) for i := 0; i < knownLocationCandidatesUsed; i++ { - res[i] = knownLocationCandidates[i].Candidate + res[i] = knownLocationCandidates[i].candidate } // If candidates with unknown location are found, lets add them to final list for i := 0; i < unknownLocationCandidatesUsed; i++ { - res = append(res, unknownLocationCandidates[i].Candidate) + res = append(res, unknownLocationCandidates[i].candidate) } return res } diff --git a/pkg/blobinfocache/internal/prioritize/prioritize_test.go b/pkg/blobinfocache/internal/prioritize/prioritize_test.go index ab47fe062..a344d7e27 100644 --- a/pkg/blobinfocache/internal/prioritize/prioritize_test.go +++ b/pkg/blobinfocache/internal/prioritize/prioritize_test.go @@ -8,9 +8,11 @@ import ( "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -53,6 +55,230 @@ var ( } ) +func TestCandidateTemplateWithCompression(t *testing.T) { + chunkedAnnotations := map[string]string{"a": "b"} + uncompressedData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.Uncompressed, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } + gzipData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.GzipAlgorithmName, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } + zstdData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } + zstdChunkedData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: compressiontypes.ZstdChunkedAlgorithmName, + SpecificVariantAnnotations: chunkedAnnotations, + } + + for _, c := range []struct { + name string + requiredCompression *compressiontypes.Algorithm + data blobinfocache.DigestCompressorData + v2Matches bool + // if v2Matches: + v2Op types.LayerCompression + v2Algo string + v2Annotations map[string]string + }{ + { + name: "unknown", + requiredCompression: nil, + data: blobinfocache.DigestCompressorData{ + BaseVariantCompressor: 
blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }, + v2Matches: false, + }, + { + name: "uncompressed", + requiredCompression: nil, + data: uncompressedData, + v2Matches: true, + v2Op: types.Decompress, + v2Algo: "", + v2Annotations: nil, + }, + { + name: "uncompressed, want gzip", + requiredCompression: &compression.Gzip, + data: uncompressedData, + v2Matches: false, + }, + { + name: "gzip", + requiredCompression: nil, + data: gzipData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.GzipAlgorithmName, + v2Annotations: nil, + }, + { + name: "gzip, want zstd", + requiredCompression: &compression.Zstd, + data: gzipData, + v2Matches: false, + }, + { + name: "unknown base", + requiredCompression: nil, + data: blobinfocache.DigestCompressorData{ + BaseVariantCompressor: "this value is unknown", + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }, + v2Matches: false, + }, + { + name: "zstd", + requiredCompression: nil, + data: zstdData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdAlgorithmName, + v2Annotations: nil, + }, + { + name: "zstd, want gzip", + requiredCompression: &compression.Gzip, + data: zstdData, + v2Matches: false, + }, + { + name: "zstd, want zstd", + requiredCompression: &compression.Zstd, + data: zstdData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdAlgorithmName, + v2Annotations: nil, + }, + { + name: "zstd, want zstd:chunked", + requiredCompression: &compression.ZstdChunked, + data: zstdData, + v2Matches: false, + }, + { + name: "zstd:chunked", + requiredCompression: nil, + data: zstdChunkedData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdChunkedAlgorithmName, + v2Annotations: chunkedAnnotations, + }, + { + name: "zstd:chunked, want gzip", + requiredCompression: &compression.Gzip, + data: zstdChunkedData, + v2Matches: false, + }, + { + name: "zstd:chunked, want zstd", // Note that we return the full chunked data in this case. + requiredCompression: &compression.Zstd, + data: zstdChunkedData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdChunkedAlgorithmName, + v2Annotations: chunkedAnnotations, + }, + { + name: "zstd:chunked, want zstd:chunked", + requiredCompression: &compression.ZstdChunked, + data: zstdChunkedData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdChunkedAlgorithmName, + v2Annotations: chunkedAnnotations, + }, + { + name: "zstd:unknown", + requiredCompression: nil, + data: blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: "this value is unknown", + SpecificVariantAnnotations: chunkedAnnotations, + }, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdAlgorithmName, + v2Annotations: nil, + }, + } { + res := CandidateTemplateWithCompression(nil, digestCompressedPrimary, c.data) + assert.Equal(t, &CandidateTemplate{ + digest: digestCompressedPrimary, + compressionOperation: types.PreserveOriginal, + compressionAlgorithm: nil, + compressionAnnotations: nil, + }, res, c.name) + + // These tests only use RequiredCompression in CandidateLocations2Options for clarity; + // CandidateCompressionMatchesReuseConditions should have its own tests of handling the full set of options. 
+ res = CandidateTemplateWithCompression(&blobinfocache.CandidateLocations2Options{ + RequiredCompression: c.requiredCompression, + }, digestCompressedPrimary, c.data) + if !c.v2Matches { + assert.Nil(t, res, c.name) + } else { + require.NotNil(t, res, c.name) + assert.Equal(t, digestCompressedPrimary, res.digest, c.name) + assert.Equal(t, c.v2Op, res.compressionOperation, c.name) + if c.v2Algo == "" { + assert.Nil(t, res.compressionAlgorithm, c.name) + } else { + require.NotNil(t, res.compressionAlgorithm, c.name) + assert.Equal(t, c.v2Algo, res.compressionAlgorithm.Name()) + } + assert.Equal(t, c.v2Annotations, res.compressionAnnotations, c.name) + } + } +} + +func TestCandidateWithLocation(t *testing.T) { + template := CandidateTemplateWithCompression(&blobinfocache.CandidateLocations2Options{}, digestCompressedPrimary, blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: compressiontypes.ZstdChunkedAlgorithmName, + SpecificVariantAnnotations: map[string]string{"a": "b"}, + }) + require.NotNil(t, template) + loc := types.BICLocationReference{Opaque: "opaque"} + time := time.Now() + res := template.CandidateWithLocation(loc, time) + assert.Equal(t, digestCompressedPrimary, res.candidate.Digest) + assert.Equal(t, types.Compress, res.candidate.CompressionOperation) + assert.Equal(t, compressiontypes.ZstdChunkedAlgorithmName, res.candidate.CompressionAlgorithm.Name()) + assert.Equal(t, map[string]string{"a": "b"}, res.candidate.CompressionAnnotations) + assert.Equal(t, false, res.candidate.UnknownLocation) + assert.Equal(t, loc, res.candidate.Location) + assert.Equal(t, time, res.lastSeen) +} + +func TestCandidateWithUnknownLocation(t *testing.T) { + template := CandidateTemplateWithCompression(&blobinfocache.CandidateLocations2Options{}, digestCompressedPrimary, blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: compressiontypes.ZstdChunkedAlgorithmName, + SpecificVariantAnnotations: map[string]string{"a": "b"}, + }) + require.NotNil(t, template) + res := template.CandidateWithUnknownLocation() + assert.Equal(t, digestCompressedPrimary, res.candidate.Digest) + assert.Equal(t, types.Compress, res.candidate.CompressionOperation) + assert.Equal(t, compressiontypes.ZstdChunkedAlgorithmName, res.candidate.CompressionAlgorithm.Name()) + assert.Equal(t, map[string]string{"a": "b"}, res.candidate.CompressionAnnotations) + assert.Equal(t, true, res.candidate.UnknownLocation) +} + func TestCandidateSortStateLess(t *testing.T) { type p struct { d digest.Digest diff --git a/pkg/blobinfocache/internal/test/test.go b/pkg/blobinfocache/internal/test/test.go index 66d6d0c4a..cb4a4e6b0 100644 --- a/pkg/blobinfocache/internal/test/test.go +++ b/pkg/blobinfocache/internal/test/test.go @@ -25,7 +25,7 @@ const ( compressorNameU = blobinfocache.Uncompressed compressorNameA = compressiontypes.GzipAlgorithmName compressorNameB = compressiontypes.ZstdAlgorithmName - compressorNameCU = compressiontypes.ZstdChunkedAlgorithmName + compressorNameCU = compressiontypes.XzAlgorithmName digestUnknownLocation = digest.Digest("sha256:7777777777777777777777777777777777777777777777777777777777777777") digestFilteringUncompressed = digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") @@ -43,6 +43,8 @@ func GenericCache(t *testing.T, newTestCache func(t *testing.T) blobinfocache.Bl }{ {"UncompressedDigest", testGenericUncompressedDigest}, 
{"RecordDigestUncompressedPair", testGenericRecordDigestUncompressedPair}, + {"UncompressedDigestForTOC", testGenericUncompressedDigestForTOC}, + {"RecordTOCUncompressedPair", testGenericRecordTOCUncompressedPair}, {"RecordKnownLocations", testGenericRecordKnownLocations}, {"CandidateLocations", testGenericCandidateLocations}, {"CandidateLocations2", testGenericCandidateLocations2}, @@ -99,6 +101,28 @@ func testGenericRecordDigestUncompressedPair(t *testing.T, cache blobinfocache.B } } +func testGenericUncompressedDigestForTOC(t *testing.T, cache blobinfocache.BlobInfoCache2) { + // Nothing is known. + assert.Equal(t, digest.Digest(""), cache.UncompressedDigestForTOC(digestUnknown)) + + cache.RecordTOCUncompressedPair(digestCompressedA, digestUncompressed) + cache.RecordTOCUncompressedPair(digestCompressedB, digestUncompressed) + // Known TOC→uncompressed mapping + assert.Equal(t, digestUncompressed, cache.UncompressedDigestForTOC(digestCompressedA)) + assert.Equal(t, digestUncompressed, cache.UncompressedDigestForTOC(digestCompressedB)) +} + +func testGenericRecordTOCUncompressedPair(t *testing.T, cache blobinfocache.BlobInfoCache2) { + for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things. + // Known TOC→uncompressed mapping + cache.RecordTOCUncompressedPair(digestCompressedA, digestUncompressed) + assert.Equal(t, digestUncompressed, cache.UncompressedDigestForTOC(digestCompressedA)) + // Two mappings to the same uncompressed digest + cache.RecordTOCUncompressedPair(digestCompressedB, digestUncompressed) + assert.Equal(t, digestUncompressed, cache.UncompressedDigestForTOC(digestCompressedB)) + } +} + func testGenericRecordKnownLocations(t *testing.T, cache blobinfocache.BlobInfoCache2) { transport := mocks.NameImageTransport("==BlobInfocache transport mock") for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things. 
@@ -126,6 +150,7 @@ func testGenericRecordKnownLocations(t *testing.T, cache blobinfocache.BlobInfoC type candidate struct { d digest.Digest cn string + ca map[string]string lr string } @@ -170,11 +195,12 @@ func assertCandidatesMatch2(t *testing.T, scopeName string, expected []candidate algo = &algo_ } e[i] = blobinfocache.BICReplacementCandidate2{ - Digest: ev.d, - CompressionOperation: op, - CompressionAlgorithm: algo, - UnknownLocation: false, - Location: types.BICLocationReference{Opaque: scopeName + ev.lr}, + Digest: ev.d, + CompressionOperation: op, + CompressionAlgorithm: algo, + CompressionAnnotations: ev.ca, + UnknownLocation: false, + Location: types.BICLocationReference{Opaque: scopeName + ev.lr}, } } assertCandidatesMatch2Native(t, e, actual) @@ -259,14 +285,16 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa {"B", digestCompressedB, compressorNameB}, {"CU", digestCompressedUnrelated, compressorNameCU}, } + chunkedAnnotations := map[string]string{"a": "b"} digestNameSetFiltering := []struct { // Used primarily to test filtering in CandidateLocations2Options - n string - d digest.Digest - m string + n string + d digest.Digest + base, specific string + annotations map[string]string }{ - {"gzip", digestGzip, compressiontypes.GzipAlgorithmName}, - {"zstd", digestZstd, compressiontypes.ZstdAlgorithmName}, - {"zstdChunked", digestZstdChunked, compressiontypes.ZstdChunkedAlgorithmName}, + {"gzip", digestGzip, compressiontypes.GzipAlgorithmName, blobinfocache.UnknownCompression, nil}, + {"zstd", digestZstd, compressiontypes.ZstdAlgorithmName, blobinfocache.UnknownCompression, nil}, + {"zstdChunked", digestZstdChunked, compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName, chunkedAnnotations}, } for _, e := range digestNameSetFiltering { // digestFilteringUncompressed exists only to allow the three entries to be considered as candidates cache.RecordDigestUncompressedPair(e.d, digestFilteringUncompressed) @@ -290,7 +318,11 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa // ---------------------------- // If a record exists with compression without Location then // then return a record without location and with `UnknownLocation: true` - cache.RecordDigestCompressorName(digestUnknownLocation, compressiontypes.Bzip2AlgorithmName) + cache.RecordDigestCompressorData(digestUnknownLocation, blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.Bzip2AlgorithmName, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }) res = cache.CandidateLocations2(transport, scope, digestUnknownLocation, blobinfocache.CandidateLocations2Options{ CanSubstitute: true, }) @@ -331,7 +363,11 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa // that shouldn’t happen in real-world usage. 
if scopeIndex != 0 { for _, e := range digestNameSetPrioritization { - cache.RecordDigestCompressorName(e.d, blobinfocache.UnknownCompression) + cache.RecordDigestCompressorData(e.d, blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }) } } @@ -394,12 +430,16 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa }) assertCandidatesMatch2(t, scopeName, []candidate{}, res) - // Tests of lookups / prioritization when compression is unknown + // Tests of lookups / prioritization when compression is known // ------------------------------------------------------------- // Set the "known" compression values for _, e := range digestNameSetPrioritization { - cache.RecordDigestCompressorName(e.d, e.m) + cache.RecordDigestCompressorData(e.d, blobinfocache.DigestCompressorData{ + BaseVariantCompressor: e.m, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }) } // No substitutions allowed: @@ -481,7 +521,11 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa cache.RecordKnownLocation(transport, scope, e.d, types.BICLocationReference{Opaque: scopeName + e.n}) } for _, e := range digestNameSetFiltering { - cache.RecordDigestCompressorName(e.d, e.m) + cache.RecordDigestCompressorData(e.d, blobinfocache.DigestCompressorData{ + BaseVariantCompressor: e.base, + SpecificVariantCompressor: e.specific, + SpecificVariantAnnotations: e.annotations, + }) } // No filtering @@ -489,9 +533,9 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa CanSubstitute: true, }) assertCandidatesMatch2(t, scopeName, []candidate{ // Sorted in the reverse of digestNameSetFiltering order - {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"}, - {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"}, - {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"}, + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, ca: nil, lr: "zstd"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) // Manifest format filters @@ -500,16 +544,16 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa PossibleManifestFormats: []string{manifest.DockerV2Schema2MediaType}, }) assertCandidatesMatch2(t, scopeName, []candidate{ - {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{ CanSubstitute: true, PossibleManifestFormats: []string{imgspecv1.MediaTypeImageManifest}, }) assertCandidatesMatch2(t, scopeName, []candidate{ // Sorted in the reverse of digestNameSetFiltering order - {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"}, - {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"}, - {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"}, + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, ca: nil, lr: "zstd"}, + {d: 
digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) // Compression algorithm filters @@ -518,21 +562,37 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa RequiredCompression: &compression.Gzip, }) assertCandidatesMatch2(t, scopeName, []candidate{ - {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{ CanSubstitute: true, RequiredCompression: &compression.ZstdChunked, }) - // Right now, zstd:chunked requests never match a candidate, see CandidateCompressionMatchesReuseConditions(). - assertCandidatesMatch2(t, scopeName, []candidate{}, res) + assertCandidatesMatch2(t, scopeName, []candidate{ + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + }, res) res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{ CanSubstitute: true, RequiredCompression: &compression.Zstd, }) - assertCandidatesMatch2(t, scopeName, []candidate{ // When the user asks for zstd, zstd:chunked candidates are also acceptable. - {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"}, - {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"}, + assertCandidatesMatch2(t, scopeName, []candidate{ // When the user asks for zstd, zstd:chunked candidates are also acceptable, and include the chunked information. + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, ca: nil, lr: "zstd"}, + }, res) + + // After RecordDigestCompressorData with zstd:chunked details, a later call with zstd-only does not drop the chunked details. + cache.RecordDigestCompressorData(digestZstdChunked, blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }) + res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{ + CanSubstitute: true, + }) + assertCandidatesMatch2(t, scopeName, []candidate{ // Sorted in the reverse of digestNameSetFiltering order + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, ca: nil, lr: "zstd"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) } } diff --git a/pkg/blobinfocache/memory/memory.go b/pkg/blobinfocache/memory/memory.go index 32aaa4d6f..9d4125d66 100644 --- a/pkg/blobinfocache/memory/memory.go +++ b/pkg/blobinfocache/memory/memory.go @@ -24,10 +24,11 @@ type locationKey struct { type cache struct { mutex sync.Mutex // The following fields can only be accessed with mutex held. 
- uncompressedDigests map[digest.Digest]digest.Digest - digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest - knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference - compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest + uncompressedDigests map[digest.Digest]digest.Digest + uncompressedDigestsByTOC map[digest.Digest]digest.Digest + digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest + knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference + compressors map[digest.Digest]blobinfocache.DigestCompressorData // stores compression data for each digest; BaseVariantCompressor != UnknownCompression } // New returns a BlobInfoCache implementation which is in-memory only. @@ -44,10 +45,11 @@ func New() types.BlobInfoCache { func new2() *cache { return &cache{ - uncompressedDigests: map[digest.Digest]digest.Digest{}, - digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{}, - knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, - compressors: map[digest.Digest]string{}, + uncompressedDigests: map[digest.Digest]digest.Digest{}, + uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{}, + digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{}, + knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, + compressors: map[digest.Digest]blobinfocache.DigestCompressorData{}, } } @@ -104,6 +106,30 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre anyDigestSet.Add(anyDigest) } +// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. +// Returns "" if the uncompressed digest is unknown. +func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok { + return d + } + return "" +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed) + } + mem.uncompressedDigestsByTOC[tocDigest] = uncompressed +} + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. 
func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { @@ -118,19 +144,40 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type locationScope[location] = time.Now() // Possibly overwriting an older entry. } -// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified -// algorithm, or uncompressed, or that we no longer know. -func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) { +// RecordDigestCompressorData records data for the blob with the specified digest. +// WARNING: Only call this with LOCALLY VERIFIED data: +// - don’t record a compressor for a digest just because some remote author claims so +// (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) +// +// otherwise the cache could be poisoned and cause us to make incorrect edits to type +// information in a manifest. +func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { mem.mutex.Lock() defer mem.mutex.Unlock() - if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName { - logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName) + if previous, ok := mem.compressors[anyDigest]; ok { + if previous.BaseVariantCompressor != data.BaseVariantCompressor { + logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor) + } else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != data.SpecificVariantCompressor { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor) + } + // We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic. + + // Preserve specific variant information if the incoming data does not have it. + if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != blobinfocache.UnknownCompression { + data.SpecificVariantCompressor = previous.SpecificVariantCompressor + data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations + } } - if compressorName == blobinfocache.UnknownCompression { - delete(mem.compressors, blobDigest) + if data.BaseVariantCompressor == blobinfocache.UnknownCompression { + delete(mem.compressors, anyDigest) return } - mem.compressors[blobDigest] = compressorName + mem.compressors[anyDigest] = data } // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory @@ -140,38 +187,25 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso // with unknown compression. 
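(Editorial aside, not part of the patch.) The RecordDigestCompressorData implementation above merges an incoming record with what the cache already knows: a later record that only identifies the base variant must not erase previously recorded specific-variant details (e.g. zstd:chunked annotations). A minimal, self-contained Go sketch of that preservation rule; the compressorData type and the merge helper are hypothetical stand-ins for blobinfocache.DigestCompressorData and the in-place map update:

```go
package main

import "fmt"

const unknownCompression = "unknown" // stand-in for blobinfocache.UnknownCompression

// compressorData mirrors the shape of blobinfocache.DigestCompressorData for illustration only.
type compressorData struct {
	BaseVariant     string            // e.g. "zstd", "gzip", or "uncompressed"
	SpecificVariant string            // e.g. "zstd:chunked", or unknownCompression
	Annotations     map[string]string // specific-variant annotations, if any
}

// merge applies the rule described above: keep previously known specific-variant
// details when the incoming record only knows the base variant.
func merge(previous, incoming compressorData) compressorData {
	if incoming.BaseVariant != unknownCompression &&
		incoming.SpecificVariant == unknownCompression &&
		previous.SpecificVariant != unknownCompression {
		incoming.SpecificVariant = previous.SpecificVariant
		incoming.Annotations = previous.Annotations
	}
	return incoming
}

func main() {
	previous := compressorData{BaseVariant: "zstd", SpecificVariant: "zstd:chunked", Annotations: map[string]string{"a": "b"}}
	later := compressorData{BaseVariant: "zstd", SpecificVariant: unknownCompression}
	fmt.Printf("%+v\n", merge(previous, later)) // the zstd:chunked details survive
}
```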
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if v, ok := mem.compressors[digest]; ok { - compressorName = v + compressionData = v } - ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName) - if !ok { + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { return candidates } locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present if len(locations) > 0 { for l, t := range locations { - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - Location: l, - }, - LastSeen: t, - }) + candidates = append(candidates, template.CandidateWithLocation(l, t)) } } else if v2Options != nil { - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, - }, - LastSeen: time.Time{}, - }) + candidates = append(candidates, template.CandidateWithUnknownLocation()) } return candidates } diff --git a/pkg/blobinfocache/none/none.go b/pkg/blobinfocache/none/none.go index 4b7122f92..9a2219e79 100644 --- a/pkg/blobinfocache/none/none.go +++ b/pkg/blobinfocache/none/none.go @@ -34,6 +34,19 @@ func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { } +// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. +// Returns "" if the uncompressed digest is unknown. +func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + return "" +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { +} + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. 
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { diff --git a/pkg/blobinfocache/sqlite/sqlite.go b/pkg/blobinfocache/sqlite/sqlite.go index a5be85a65..1a7931023 100644 --- a/pkg/blobinfocache/sqlite/sqlite.go +++ b/pkg/blobinfocache/sqlite/sqlite.go @@ -3,6 +3,7 @@ package sqlite import ( "database/sql" + "encoding/json" "errors" "fmt" "sync" @@ -295,6 +296,24 @@ func ensureDBHasCurrentSchema(db *sql.DB) error { `PRIMARY KEY (transport, scope, digest, location) )`, }, + { + "DigestTOCUncompressedPairs", + `CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` + + // index implied by PRIMARY KEY + `tocDigest TEXT PRIMARY KEY NOT NULL,` + + `uncompressedDigest TEXT NOT NULL + )`, + }, + { + "DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors. + `CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` + + // index implied by PRIMARY KEY + `digest TEXT PRIMARY KEY NOT NULL,` + + // The compressor is not `UnknownCompression`. + `specificVariantCompressor TEXT NOT NULL, + specificVariantAnnotations BLOB NOT NULL + )`, + }, } _, err := dbTransaction(db, func(tx *sql.Tx) (void, error) { @@ -385,6 +404,57 @@ func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } +// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. +// Returns "" if the uncompressed digest is unknown. +func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) { + uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String()) + if err != nil { + return "", err + } + if found { + d, err := digest.Parse(uncompressedString) + if err != nil { + return "", err + } + return d, nil + + } + return "", nil + }) + if err != nil { + return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return res +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) 
+func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { + _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { + previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q", tocDigest) + } + if gotPrevious { + previous, err := digest.Parse(previousString) + if err != nil { + return void{}, err + } + if previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed) + } + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)", + tocDigest.String(), uncompressed.String()); err != nil { + return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err) + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) { @@ -398,29 +468,58 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } -// RecordDigestCompressorName records a compressor for the blob with the specified digest, -// or Uncompressed or UnknownCompression. -// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a -// digest just because some remote author claims so (e.g. because a manifest says so); +// RecordDigestCompressorData records data for the blob with the specified digest. +// WARNING: Only call this with LOCALLY VERIFIED data: +// - don’t record a compressor for a digest just because some remote author claims so +// (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) +// // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. 
-func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String()) if err != nil { - return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest) + return void{}, fmt.Errorf("looking for compressor of %q", anyDigest) } - if gotPrevious && previous != compressorName { - logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName) + warned := false + if gotPrevious && previous != data.BaseVariantCompressor { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor) + warned = true } - if compressorName == blobinfocache.UnknownCompression { + if data.BaseVariantCompressor == blobinfocache.UnknownCompression { if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil { return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err) } + if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil { + return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err) + } } else { if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)", - anyDigest.String(), compressorName); err != nil { - return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err) + anyDigest.String(), data.BaseVariantCompressor); err != nil { + return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err) + } + } + + if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + if !warned { // Don’t warn twice about the same digest + prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest) + } + if found && data.SpecificVariantCompressor != prevSVC { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor) + } + } + annotations, err := json.Marshal(data.SpecificVariantAnnotations) + if err != nil { + return void{}, err + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)", + anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil { + return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err) } } return void{}, nil @@ -433,18 +532,33 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor // with unknown compression. 
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) { - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if v2Options != nil { - compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String()) - if err != nil { - return nil, fmt.Errorf("scanning compressorName: %w", err) - } - if found { - compressorName = compressor + var baseVariantCompressor string + var specificVariantCompressor sql.NullString + var annotationBytes []byte + switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+ + "FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()). + Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); { + case errors.Is(err, sql.ErrNoRows): // Do nothing + case err != nil: + return nil, fmt.Errorf("scanning compressor data: %w", err) + default: + compressionData.BaseVariantCompressor = baseVariantCompressor + if specificVariantCompressor.Valid && annotationBytes != nil { + compressionData.SpecificVariantCompressor = specificVariantCompressor.String + if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil { + return nil, err + } + } } } - ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName) - if !ok { + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { return candidates, nil } @@ -463,15 +577,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW if err := rows.Scan(&location, &time); err != nil { return nil, fmt.Errorf("scanning candidate: %w", err) } - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - Location: types.BICLocationReference{Opaque: location}, - }, - LastSeen: time, - }) + candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, time)) rowAdded = true } if err := rows.Err(); err != nil { @@ -479,16 +585,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW } if !rowAdded && v2Options != nil { - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, - }, - LastSeen: time.Time{}, - }) + candidates = append(candidates, template.CandidateWithUnknownLocation()) } return candidates, nil } @@ -516,40 +613,41 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types if err != nil { return nil, err } - - // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. 
- // (In the extreme, we could turn _everything_ this function does into a single query. - // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) - // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. - rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) - if err != nil { - return nil, fmt.Errorf("querying for other digests: %w", err) - } - defer rows.Close() - for rows.Next() { - var otherDigestString string - if err := rows.Scan(&otherDigestString); err != nil { - return nil, fmt.Errorf("scanning other digest: %w", err) - } - otherDigest, err := digest.Parse(otherDigestString) + if uncompressedDigest != "" { + // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. + // (In the extreme, we could turn _everything_ this function does into a single query. + // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) + // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. + rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) if err != nil { - return nil, err + return nil, fmt.Errorf("querying for other digests: %w", err) } - if otherDigest != primaryDigest && otherDigest != uncompressedDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options) + defer rows.Close() + for rows.Next() { + var otherDigestString string + if err := rows.Scan(&otherDigestString); err != nil { + return nil, fmt.Errorf("scanning other digest: %w", err) + } + otherDigest, err := digest.Parse(otherDigestString) if err != nil { return nil, err } + if otherDigest != primaryDigest && otherDigest != uncompressedDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options) + if err != nil { + return nil, err + } + } + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through other digests: %w", err) } - } - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("iterating through other digests: %w", err) - } - if uncompressedDigest != primaryDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options) - if err != nil { - return nil, err + if uncompressedDigest != primaryDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options) + if err != nil { + return nil, err + } } } } diff --git a/storage/storage_dest.go b/storage/storage_dest.go index 92dfebd33..842a3ab06 100644 --- a/storage/storage_dest.go +++ b/storage/storage_dest.go @@ -84,18 +84,36 @@ type storageImageDestinationLockProtected struct { currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image - // In general, a layer is identified either by (compressed) digest, or by TOC digest. + // Externally, a layer is identified either by (compressed) digest, or by TOC digest + // (and we assume the TOC digest also uniquely identifies the contents, i.e. 
there aren’t two + // different formats/ways to parse a single TOC); internally, we use uncompressed digest (“DiffID”) or a TOC digest. + // We may or may not know the relationships between these three values. + // // When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values // we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not - // recompute those values for incomding layers — the point of the reuse is that we don’t need to consume the incoming layer.) - - // Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called. - // The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted. - blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest + // recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.) + // + // Layer identification: For a layer, at least one of (indexToDiffID, indexToTOCDigest, blobDiffIDs) must be available + // before commitLayer is called. + // The layer is identified by the first of the three fields which exists, in that order (and the value must be trusted). + // + // WARNING: All values in indexToDiffID, indexToTOCDigest, and blobDiffIDs are _individually_ trusted, but blobDiffIDs is more subtle. + // The values in indexTo* are all consistent, because the code writing them processed them all at once, and consistently. + // But it is possible for a layer’s indexToDiffID and indexToTOCDigest to be based on a TOC, without setting blobDiffIDs + // for the compressed digest of that index, and for blobDiffIDs[compressedDigest] to be set _separately_ while processing some + // other layer entry. In particular it is possible for indexToDiffID[index] and blobDiffIDs[compressedDigestAtIndex] to refer + // to mismatching contents. + // Users of these fields should use trustedLayerIdentityDataLocked, which centralizes the validity logic, + // instead of interpreting these fields, especially blobDiffIDs, directly. + // + // Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller + // to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases. + indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID + indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above. // Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames) - // should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer. + // should be available; or indexToDiffID/indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer. // They are looked up in the order they are mentioned above.
diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer @@ -145,9 +163,12 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* }, indexToStorageID: make(map[int]string), lockProtected: storageImageDestinationLockProtected{ - indexToAddedLayerInfo: make(map[int]addedLayerInfo), - blobDiffIDs: make(map[digest.Digest]digest.Digest), - indexToTOCDigest: make(map[int]digest.Digest), + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + + indexToDiffID: make(map[int]digest.Digest), + indexToTOCDigest: make(map[int]digest.Digest), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput), indexToAdditionalLayer: make(map[int]storage.AdditionalLayer), filenames: make(map[digest.Digest]string), @@ -323,20 +344,30 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces s.lock.Lock() if out.UncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest + if out.TOCDigest != "" { + options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest) + } + // Don’t set indexToTOCDigest on this path: + // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID. + // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. + // That TOC is quite unlikely to match any other TOC value. + // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is // responsible for ensuring blobDigest has been validated. if out.CompressedDigest != blobDigest { return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q", out.CompressedDigest, blobDigest) } - s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + // So, record also information about blobDigest, that might benefit reuse. // We trust ApplyDiffWithDiffer to validate or create both values correctly. + s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) } else { - // Don’t identify layers by TOC if UncompressedDigest is available. - // - Using UncompressedDigest allows image reuse with non-partially-pulled layers - // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. - // That TOC is quite unlikely to match with any other TOC value. + // Use diffID for layer identity if it is known. 
+ if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest + } s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest } s.lockProtected.diffOutputs[options.LayerIndex] = out @@ -465,49 +496,40 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) } - if len(layers) > 0 { - if size != -1 { - s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest - return true, private.ReusedBlob{ - Digest: blobDigest, - Size: size, - }, nil - } - if !options.CanSubstitute { - return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest) - } - s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest - return true, private.ReusedBlob{ - Digest: uncompressedDigest, - Size: layers[0].UncompressedSize, - }, nil + if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found { + s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest + return true, reused, nil } } } if options.TOCDigest != "" && options.LayerIndex != nil { + // Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that. + // Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse. + uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest) + if uncompressedDigest != "" { + layers, err = s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) + } + if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found { + s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest + reused.MatchedByTOCDigest = true + return true, reused, nil + } + } // Check if we have a chunked layer in storage with the same TOC digest.
layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest) - if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err) } - if len(layers) > 0 { - if size != -1 { - s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest - return true, private.ReusedBlob{ - Digest: blobDigest, - Size: size, - MatchedByTOCDigest: true, - }, nil - } else if options.CanSubstitute && layers[0].UncompressedDigest != "" { - s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest - return true, private.ReusedBlob{ - Digest: layers[0].UncompressedDigest, - Size: layers[0].UncompressedSize, - MatchedByTOCDigest: true, - }, nil + if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found { + if uncompressedDigest != "" { + s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest } + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + reused.MatchedByTOCDigest = true + return true, reused, nil } } @@ -515,49 +537,137 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige return false, private.ReusedBlob{}, nil } +// reusedBlobFromLayerLookup returns (true, ReusedBlob) if layers contain a usable match; or (false, ...) if not. +// The caller is still responsible for setting the layer identification fields, to allow the layer to be found again. +func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest, blobSize int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob) { + if len(layers) > 0 { + if blobSize != -1 { + return true, private.ReusedBlob{ + Digest: blobDigest, + Size: blobSize, + } + } else if options.CanSubstitute && layers[0].UncompressedDigest != "" { + return true, private.ReusedBlob{ + Digest: layers[0].UncompressedDigest, + Size: layers[0].UncompressedSize, + CompressionOperation: types.Decompress, + CompressionAlgorithm: nil, + } + } + } + return false, private.ReusedBlob{} +} + +// trustedLayerIdentityData is a _consistent_ set of information known about a single layer. +type trustedLayerIdentityData struct { + layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID + + diffID digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC + tocDigest digest.Digest // A digest of the TOC, or "" if unknown; must be set if layerIdentifiedByTOC + blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted. +} + +// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest). +// blobDigest is the (possibly-compressed) layer digest referenced in the manifest. +// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available. +// +// The caller must hold s.lock. +func (s *storageImageDestination) trustedLayerIdentityDataLocked(layerIndex int, blobDigest digest.Digest) (trustedLayerIdentityData, bool) { + // The decision about layerIdentifiedByTOC must be _stable_ once the data for layerIndex is set, + // even if s.lockProtected.blobDiffIDs changes later and we can subsequently find an entry that wasn’t originally available.
+ // + // If we previously didn't have a blobDigest match and decided to use the TOC, but _later_ we happen to find + // a blobDigest match, we might in principle want to reconsider, set layerIdentifiedByTOC to false, and use the file: + // but the layer in question, and possibly child layers, might already have been committed to storage. + // A late-arriving addition to s.lockProtected.blobDiffIDs would mean that we would want to set + // new layer IDs for potentially the whole parent chain = throw away the just-created layers and create them all again. + // + // Such a within-image layer reuse is expected to be pretty rare; instead, ignore the unexpected file match + // and proceed to the originally-planned TOC match. + + res := trustedLayerIdentityData{} + diffID, layerIdentifiedByDiffID := s.lockProtected.indexToDiffID[layerIndex] + if layerIdentifiedByDiffID { + res.layerIdentifiedByTOC = false + res.diffID = diffID + } + if tocDigest, ok := s.lockProtected.indexToTOCDigest[layerIndex]; ok { + res.tocDigest = tocDigest + if !layerIdentifiedByDiffID { + res.layerIdentifiedByTOC = true + } + } + if otherDiffID, ok := s.lockProtected.blobDiffIDs[blobDigest]; ok { + if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC { + // This is the only data we have, so it is clearly self-consistent. + res.layerIdentifiedByTOC = false + res.diffID = otherDiffID + res.blobDigest = blobDigest + layerIdentifiedByDiffID = true + } else { + // We have set up the layer identity without referring to blobDigest: + // an attacker might have used a manifest with non-matching tocDigest and blobDigest. + // But, if we know a trusted diffID value from other sources, and it matches the one for blobDigest, + // we know blobDigest is fine as well. + if res.diffID != "" && otherDiffID == res.diffID { + res.blobDigest = blobDigest + } + } + } + if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC { + return trustedLayerIdentityData{}, false // We found nothing at all + } + return res, true +} + // computeID computes a recommended image ID based on information we have so far. If // the manifest is not of a type that we recognize, we return an empty value, indicating // that since we don't have a recommendation, a random ID should be used if one needs // to be allocated. -func (s *storageImageDestination) computeID(m manifest.Manifest) string { +func (s *storageImageDestination) computeID(m manifest.Manifest) (string, error) { // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + layerInfos := m.LayerInfos() + // Build the diffID list. We need the decompressed sums that we've been calculating to // fill in the DiffIDs. It's expected (but not enforced by us) that the number of // diffIDs corresponds to the number of non-EmptyLayer entries in the history. var diffIDs []digest.Digest - switch m := m.(type) { + switch m.(type) { case *manifest.Schema1: - // Build a list of the diffIDs we've generated for the non-throwaway FS layers, - // in reverse of the order in which they were originally listed. - for i, compat := range m.ExtractedV1Compatibility { - if compat.ThrowAway { + // Build a list of the diffIDs we've generated for the non-throwaway FS layers + for i, li := range layerInfos { + if li.EmptyLayer { continue } - blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.lockProtected.blobDiffIDs[blobSum] - if !ok { - // this can, in principle, legitimately happen when a layer is reused by TOC. 
- logrus.Infof("error looking up diffID for layer %q", blobSum.String()) - return "" + trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest) + if !ok { // We have already committed all layers if we get to this point, so the data must have been available. + return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest) } - diffIDs = append([]digest.Digest{diffID}, diffIDs...) + if trusted.diffID == "" { + if trusted.layerIdentifiedByTOC { + logrus.Infof("v2s1 image uses a layer identified by TOC with unknown diffID; choosing a random image ID") + return "", nil + } + return "", fmt.Errorf("internal inconsistency: layer (%d, %q) is not identified by TOC and has no diffID", i, li.Digest) + } + diffIDs = append(diffIDs, trusted.diffID) } case *manifest.Schema2, *manifest.OCI1: // We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate // the diffID list. default: - return "" + return "", nil } // We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption. // // Traditionally that means the same ~config digest, as computed by m.ImageID; - // but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest, + // but if we identify a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest, // nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see // most of the data. // - // So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC, + // So, if a layer is identified by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC, // must enter into the image ID computation. // But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades, // and to introduce the changed behavior only when partial pulls are used. @@ -566,28 +676,31 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string { // (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above. ordinaryImageID, err := m.ImageID(diffIDs) if err != nil { - return "" + return "", err } tocIDInput := "" hasLayerPulledByTOC := false - for i := range m.LayerInfos() { - layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case. - tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC - if ok { + for i, li := range layerInfos { + trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest) + if !ok { // We have already committed all layers if we get to this point, so the data must have been available. + return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest) + } + layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case. + if trusted.layerIdentifiedByTOC { hasLayerPulledByTOC = true - layerValue = tocDigest.String() + layerValue = trusted.tocDigest.String() } tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator. } if !hasLayerPulledByTOC { - return ordinaryImageID + return ordinaryImageID, nil } // ordinaryImageID is a digest of a config, which is a JSON value. 
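The ID derivation that follows can be exercised standalone. A minimal, self-contained sketch (the digests here are derived from dummy strings, not from real layers):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Hypothetical two-layer image: layer 0 was pulled normally and contributes an
	// empty entry; layer 1 was identified by a TOC digest.
	exampleTOC := digest.FromString("example TOC contents")
	layerValues := []string{"", exampleTOC.String()}

	tocIDInput := ""
	for _, v := range layerValues {
		tocIDInput += v + "|" // "|" cannot appear in a digest, so entries remain unambiguous
	}
	// The leading "@" keeps the input from being valid JSON, so it cannot collide with
	// the digest of any config.
	tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
	fmt.Println(tocImageID) // 64 hex characters, stable for the same ordered TOC digests
}
```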
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON. tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded() logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID) - return tocImageID + return tocImageID, nil } // getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig @@ -671,14 +784,14 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig s.lock.Lock() defer s.lock.Unlock() - if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found { - return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous. + trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest) + if !ok { + return "", false } - - if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found { - return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value. + if trusted.layerIdentifiedByTOC { + return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous. } - return "", false + return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value. } // commitLayer commits the specified layer with the given index to the storage. @@ -778,6 +891,16 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D diffOutput, ok := s.lockProtected.diffOutputs[index] s.lock.Unlock() if ok { + // If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput. + // That way it will be persisted in storage even if the cache is deleted; also + // we can use the value below to avoid the untrustedUncompressedDigest logic (and notably + // the costly commit delay until a manifest is available). + s.lock.Lock() + if d, ok := s.lockProtected.indexToDiffID[index]; ok { + diffOutput.UncompressedDigest = d + } + s.lock.Unlock() + var untrustedUncompressedDigest digest.Digest if diffOutput.UncompressedDigest == "" { d, err := s.untrustedLayerDiffID(index) @@ -832,47 +955,43 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // Check if we previously cached a file with that blob's contents. If we didn't, // then we need to read the desired contents from a layer. - var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions + var filename string + var gotFilename bool s.lock.Lock() - tocDigest := s.lockProtected.indexToTOCDigest[index] // "" if not set - optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set - filename, gotFilename := s.lockProtected.filenames[layerDigest] + trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest) + if ok && trusted.blobDigest != "" { + filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest] + } s.lock.Unlock() - if gotFilename && tocDigest == "" { - // If tocDigest != "", if we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based, - // and we don't know the relationship of the layerDigest and TOC digest. - // We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer - // reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the - // originally-planned TOC match. 
- - // Because tocDigest == "", optionaldiffID must have been set; and even if it weren’t, PutLayer will recompute the digest from the stream. - trustedUncompressedDigest = optionalDiffID - trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching. + if !ok { // We have already determined newLayerID, so the data must have been available. + return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest) + } + var trustedOriginalDigest digest.Digest // For storage.LayerOptions + if gotFilename { + // The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest. + trustedOriginalDigest = trusted.blobDigest } else { // Try to find the layer with contents matching the data we use. var layer *storage.Layer // = nil - if tocDigest != "" { - layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest) - if err2 == nil && len(layers) > 0 { + if trusted.diffID != "" { + if layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(trusted.diffID); err2 == nil && len(layers) > 0 { layer = &layers[0] - } else { - return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2) } - } else { - // Because tocDigest == "", optionaldiffID must have been set - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID) - if err2 == nil && len(layers) > 0 { + } + if layer == nil && trusted.tocDigest != "" { + if layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(trusted.tocDigest); err2 == nil && len(layers) > 0 { layer = &layers[0] - } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest) - if err2 == nil && len(layers) > 0 { - layer = &layers[0] - } } - if layer == nil { - return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2) + } + if layer == nil && trusted.blobDigest != "" { + if layers, err2 := s.imageRef.transport.store.LayersByCompressedDigest(trusted.blobDigest); err2 == nil && len(layers) > 0 { + layer = &layers[0] } } + if layer == nil { + return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID) + } + // Read the layer's contents. noCompression := archive.Uncompressed diffOptions := &storage.DiffOptions{ @@ -880,7 +999,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D } diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions) if err2 != nil { - return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2) + return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2) } // Copy the layer diff to a file. Diff() takes a lock that it holds // until the ReadCloser that it returns is closed, and PutLayer() wants @@ -902,20 +1021,19 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D return nil, fmt.Errorf("storing blob to file %q: %w", filename, err) } - if optionalDiffID == "" && layer.UncompressedDigest != "" { - optionalDiffID = layer.UncompressedDigest + if trusted.diffID == "" && layer.UncompressedDigest != "" { + trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now. } - // The stream we have is uncompressed, this matches contents of the stream. 
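The fall-through above can also be read as a small helper. This is a sketch only — findReusableLayer is an invented name, and it assumes the usual imports (fmt, github.com/containers/storage, github.com/opencontainers/go-digest) — but it shows the intended order: try the data we can verify most directly first.

```go
// findReusableLayer restates the lookup order used above: prefer a layer whose
// uncompressed digest matches the trusted diffID, then one matching the TOC digest,
// and only then one matching the (possibly-compressed) blob digest.
func findReusableLayer(store storage.Store, trusted trustedLayerIdentityData) (*storage.Layer, error) {
	lookups := []struct {
		key digest.Digest
		fn  func(digest.Digest) ([]storage.Layer, error)
	}{
		{trusted.diffID, store.LayersByUncompressedDigest},
		{trusted.tocDigest, store.LayersByTOCDigest},
		{trusted.blobDigest, store.LayersByCompressedDigest},
	}
	for _, l := range lookups {
		if l.key == "" {
			continue // this identification is unknown for the layer
		}
		if layers, err := l.fn(l.key); err == nil && len(layers) > 0 {
			return &layers[0], nil
		}
	}
	return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
}
```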
- // If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream. - trustedUncompressedDigest = optionalDiffID - // FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted). + // The stream we have is uncompressed, and it matches trusted.diffID (if known). + // + // FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse. // But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created // layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream). // // We can legitimately set storage.LayerOptions.OriginalDigest to "", - // but that would just result in PutLayer computing the digest of the input stream == optionalDiffID. + // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID. // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation. - trustedOriginalDigest = optionalDiffID + trustedOriginalDigest = trusted.diffID // Allow using the already-collected layer contents without extracting the layer again. // @@ -923,11 +1041,11 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // We don’t have the original compressed data here to trivially set filenames[layerDigest]. // In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API. // Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity. - if trustedUncompressedDigest != "" { + if trusted.diffID != "" { s.lock.Lock() - s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest - s.lockProtected.filenames[trustedUncompressedDigest] = filename - s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize + s.lockProtected.blobDiffIDs[trusted.diffID] = trusted.diffID + s.lockProtected.filenames[trusted.diffID] = filename + s.lockProtected.fileSizes[trusted.diffID] = fileSize s.lock.Unlock() } } @@ -940,11 +1058,12 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // Build the new layer using the diff, regardless of where it came from. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{ - OriginalDigest: trustedOriginalDigest, - UncompressedDigest: trustedUncompressedDigest, + OriginalDigest: trustedOriginalDigest, + // This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream. + UncompressedDigest: trusted.diffID, }, file) if err != nil && !errors.Is(err, storage.ErrDuplicateID) { - return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err) + return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err) } return layer, nil } @@ -1155,7 +1274,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Create the image record, pointing to the most-recently added layer. 
 	intendedID := s.imageRef.id
 	if intendedID == "" {
-		intendedID = s.computeID(man)
+		intendedID, err = s.computeID(man)
+		if err != nil {
+			return err
+		}
 	}
 	oldNames := []string{}
 	img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
diff --git a/storage/storage_test.go b/storage/storage_test.go
index 662aa3fbf..d94890eb4 100644
--- a/storage/storage_test.go
+++ b/storage/storage_test.go
@@ -315,9 +315,21 @@ func createUncommittedImageDest(t *testing.T, ref types.ImageReference, cache ty
 		desc := layer.storeBlob(t, dest, cache, manifest.DockerV2Schema2LayerMediaType)
 		layerDescriptors = append(layerDescriptors, desc)
 	}
-	configDescriptor := manifest.Schema2Descriptor{} // might be good enough
+
+	var configDescriptor manifest.Schema2Descriptor
 	if config != nil {
 		configDescriptor = config.storeBlob(t, dest, cache, manifest.DockerV2Schema2ConfigMediaType)
+	} else {
+		// Use a random digest so that different calls to createUncommittedImageDest with config == nil don’t try to
+		// use the same image ID.
+		digestBytes := make([]byte, digest.Canonical.Size())
+		_, err := rand.Read(digestBytes)
+		require.NoError(t, err)
+		configDescriptor = manifest.Schema2Descriptor{
+			MediaType: manifest.DockerV2Schema2ConfigMediaType,
+			Size:      1,
+			Digest:    digest.NewDigestFromBytes(digest.Canonical, digestBytes),
+		}
 	}
 	manifest := manifest.Schema2FromComponents(configDescriptor, layerDescriptors)
diff --git a/version/version.go b/version/version.go
index 4d889f809..60cfbb79b 100644
--- a/version/version.go
+++ b/version/version.go
@@ -8,10 +8,10 @@ const (
 	// VersionMinor is for functionality in a backwards-compatible manner
 	VersionMinor = 32
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 0
+	VersionPatch = 2
 
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = ""
+	VersionDev = "-dev"
 )
 
 // Version is the specification version that the package types support.
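The storage_test.go change above could equally be factored into a small helper. Sketch only — the helper name is invented, and it assumes a rand package whose Read fills the slice (crypto/rand works), plus the go-digest and testify imports already used in the test file:

```go
// randomConfigDescriptor returns a placeholder config descriptor with a unique digest,
// so that uncommitted test images created without a real config blob do not all end up
// computing the same image ID.
func randomConfigDescriptor(t *testing.T) manifest.Schema2Descriptor {
	digestBytes := make([]byte, digest.Canonical.Size())
	_, err := rand.Read(digestBytes)
	require.NoError(t, err)
	return manifest.Schema2Descriptor{
		MediaType: manifest.DockerV2Schema2ConfigMediaType,
		Size:      1,
		Digest:    digest.NewDigestFromBytes(digest.Canonical, digestBytes),
	}
}
```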