diff --git a/cache/blobs.go b/cache/blobs.go
index 88ea08b8a00b7..6ef2f27d82a35 100644
--- a/cache/blobs.go
+++ b/cache/blobs.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 	"strconv"
+	"time"
 
 	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/diff/walking"
@@ -19,6 +20,7 @@ import (
 	imagespecidentity "github.com/opencontainers/image-spec/identity"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -32,7 +34,7 @@ var ErrNoBlobs = errors.Errorf("no blobs for snapshot")
 // a blob is missing and createIfNeeded is true, then the blob will be created, otherwise ErrNoBlobs will
 // be returned. Caller must hold a lease when calling this function.
 // If forceCompression is specified but the blob of compressionType doesn't exist, this function creates it.
-func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, comp compression.Config, s session.Group) error {
+func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, comp compression.Config, s session.Group, sourceDateEpoch *time.Time) error {
 	if _, ok := leases.FromContext(ctx); !ok {
 		return errors.Errorf("missing lease requirement for computeBlobChain")
 	}
@@ -58,36 +60,58 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo
 	// refs rather than every single layer present among their ancestors.
 	filter := sr.layerSet()
 
-	return computeBlobChain(ctx, sr, createIfNeeded, comp, s, filter)
+	return computeBlobChain(ctx, sr, createIfNeeded, comp, s, filter, sourceDateEpoch)
 }
 
-func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, comp compression.Config, s session.Group, filter map[string]struct{}) error {
+func blobExistsWithSourceDateEpoch(sr *immutableRef, sourceDateEpoch *time.Time) bool {
+	if sr.getBlob() == "" {
+		return false
+	}
+	if sourceDateEpoch == nil {
+		// nil means "any epoch is ok"
+		return true
+	}
+	srEpoch := sr.GetSourceDateEpoch()
+	return srEpoch != nil && srEpoch.Equal(*sourceDateEpoch)
+}
+
+func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, comp compression.Config, s session.Group, filter map[string]struct{}, sourceDateEpoch *time.Time) error {
+	if blob := sr.getBlob(); blob != "" {
+		imageRefs := sr.getImageRefs()
+		logrus.Debugf("blob=%q, imageRefs=%v", blob, imageRefs)
+		if len(imageRefs) > 0 {
+			// Do not check the epoch for the blobs of the base images.
+			// https://github.com/moby/buildkit/pull/3560#pullrequestreview-1353829200
+			sourceDateEpoch = nil
+		}
+	}
+
 	eg, ctx := errgroup.WithContext(ctx)
 	switch sr.kind() {
 	case Merge:
 		for _, parent := range sr.mergeParents {
 			parent := parent
 			eg.Go(func() error {
-				return computeBlobChain(ctx, parent, createIfNeeded, comp, s, filter)
+				return computeBlobChain(ctx, parent, createIfNeeded, comp, s, filter, sourceDateEpoch)
 			})
 		}
 	case Diff:
 		if _, ok := filter[sr.ID()]; !ok && sr.diffParents.upper != nil {
 			// This diff is just re-using the upper blob, compute that
 			eg.Go(func() error {
-				return computeBlobChain(ctx, sr.diffParents.upper, createIfNeeded, comp, s, filter)
+				return computeBlobChain(ctx, sr.diffParents.upper, createIfNeeded, comp, s, filter, sourceDateEpoch)
 			})
 		}
 	case Layer:
 		eg.Go(func() error {
-			return computeBlobChain(ctx, sr.layerParent, createIfNeeded, comp, s, filter)
+			return computeBlobChain(ctx, sr.layerParent, createIfNeeded, comp, s, filter, sourceDateEpoch)
 		})
 	}
 
 	if _, ok := filter[sr.ID()]; ok {
 		eg.Go(func() error {
 			_, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (interface{}, error) {
-				if sr.getBlob() != "" {
+				if blobExistsWithSourceDateEpoch(sr, sourceDateEpoch) {
 					return nil, nil
 				}
 				if !createIfNeeded {
@@ -169,7 +193,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				}
 			}
 			if enableOverlay {
-				computed, ok, err := sr.tryComputeOverlayBlob(ctx, lower, upper, mediaType, sr.ID(), compressorFunc)
+				computed, ok, err := sr.tryComputeOverlayBlob(ctx, lower, upper, mediaType, sr.ID(), compressorFunc, sourceDateEpoch)
 				if !ok || err != nil {
 					if !fallback {
 						if !ok {
@@ -196,6 +220,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 						diff.WithMediaType(mediaType),
 						diff.WithReference(sr.ID()),
 						diff.WithCompressor(compressorFunc),
+						diff.WithSourceDateEpoch(sourceDateEpoch),
 					)
 					if err != nil {
 						bklog.G(ctx).WithError(err).Warnf("failed to compute blob by buildkit differ")
@@ -207,6 +232,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 					diff.WithMediaType(mediaType),
 					diff.WithReference(sr.ID()),
 					diff.WithCompressor(compressorFunc),
+					diff.WithSourceDateEpoch(sourceDateEpoch),
 				)
 				if err != nil {
 					return nil, err
@@ -238,7 +264,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				return nil, errors.Errorf("unknown layer compression type")
 			}
 
-			if err := sr.setBlob(ctx, desc); err != nil {
+			if err := sr.setBlob(ctx, desc, sourceDateEpoch); err != nil {
 				return nil, err
 			}
 			return nil, nil
@@ -264,7 +290,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 
 // setBlob associates a blob with the cache record.
 // A lease must be held for the blob when calling this function
-func (sr *immutableRef) setBlob(ctx context.Context, desc ocispecs.Descriptor) (rerr error) {
+func (sr *immutableRef) setBlob(ctx context.Context, desc ocispecs.Descriptor, sourceDateEpoch *time.Time) (rerr error) {
 	if _, ok := leases.FromContext(ctx); !ok {
 		return errors.Errorf("missing lease requirement for setBlob")
 	}
@@ -285,7 +311,7 @@ func (sr *immutableRef) setBlob(ctx context.Context, desc ocispecs.Descriptor) (
 	sr.mu.Lock()
 	defer sr.mu.Unlock()
 
-	if sr.getBlob() != "" {
+	if blobExistsWithSourceDateEpoch(sr, sourceDateEpoch) {
 		return nil
 	}
 
@@ -305,6 +331,9 @@ func (sr *immutableRef) setBlob(ctx context.Context, desc ocispecs.Descriptor) (
 	sr.queueMediaType(desc.MediaType)
 	sr.queueBlobSize(desc.Size)
 	sr.appendURLs(desc.URLs)
+	if sourceDateEpoch != nil {
+		sr.queueSourceDateEpoch(*sourceDateEpoch)
+	}
 	if err := sr.commitMetadata(); err != nil {
 		return err
 	}
diff --git a/cache/blobs_linux.go b/cache/blobs_linux.go
index ce41275e6b74c..82425b9c6f55d 100644
--- a/cache/blobs_linux.go
+++ b/cache/blobs_linux.go
@@ -7,6 +7,7 @@ import (
 	"bufio"
 	"context"
 	"io"
+	"time"
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
@@ -25,7 +26,7 @@ var emptyDesc = ocispecs.Descriptor{}
 // diff between lower and upper snapshot. If the passed mounts cannot
 // be computed (e.g. because the mounts aren't overlayfs), it returns
 // an error.
-func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) {
+func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor, sourceDateEpoch *time.Time) (_ ocispecs.Descriptor, ok bool, err error) {
 	// Get upperdir location if mounts are overlayfs that can be processed by this differ.
 	upperdir, err := overlay.GetUpperdir(lower, upper)
 	if err != nil {
@@ -60,7 +61,7 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
 		}
 		// Close ensure compressorFunc does some finalization works.
 		defer compressed.Close()
-		if err := overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower); err != nil {
+		if err := overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower, sourceDateEpoch); err != nil {
 			return emptyDesc, false, errors.Wrap(err, "failed to write compressed diff")
 		}
 		if err := compressed.Close(); err != nil {
@@ -71,7 +72,7 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
 		}
 		labels[containerdUncompressed] = dgstr.Digest().String()
 	} else {
-		if err = overlay.WriteUpperdir(ctx, bufW, upperdir, lower); err != nil {
+		if err = overlay.WriteUpperdir(ctx, bufW, upperdir, lower, sourceDateEpoch); err != nil {
 			return emptyDesc, false, errors.Wrap(err, "failed to write diff")
 		}
 	}
diff --git a/cache/blobs_nolinux.go b/cache/blobs_nolinux.go
index 1567768c1939b..a4eb852653807 100644
--- a/cache/blobs_nolinux.go
+++ b/cache/blobs_nolinux.go
@@ -5,13 +5,14 @@ package cache
 
 import (
 	"context"
+	"time"
 
-	"github.com/moby/buildkit/util/compression"
 	"github.com/containerd/containerd/mount"
+	"github.com/moby/buildkit/util/compression"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
 
-func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) {
+func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor, sourceDateEpoch *time.Time) (_ ocispecs.Descriptor, ok bool, err error) {
 	return ocispecs.Descriptor{}, true, errors.Errorf("overlayfs-based diff computing is unsupported")
 }
diff --git a/cache/manager_test.go b/cache/manager_test.go
index 2b6ed0aaf979c..8f86bf9432216 100644
--- a/cache/manager_test.go
+++ b/cache/manager_test.go
@@ -412,7 +412,7 @@ func TestMergeBlobchainID(t *testing.T) {
 	mergeRef, err := cm.Merge(ctx, mergeInputs, nil)
 	require.NoError(t, err)
 
-	_, err = mergeRef.GetRemotes(ctx, true, config.RefConfig{Compression: compression.New(compression.Default)}, false, nil)
+	_, err = mergeRef.GetRemotes(ctx, true, config.RefConfig{Compression: compression.New(compression.Default)}, false, nil, nil)
 	require.NoError(t, err)
 
 	// verify the merge blobchain ID isn't just set to one of the inputs (regression test)
@@ -616,7 +616,7 @@ func TestExtractOnMutable(t *testing.T) {
 	leaseCtx, done, err := leaseutil.WithLease(ctx, co.lm, leases.WithExpiration(0))
 	require.NoError(t, err)
 
-	err = snap.(*immutableRef).setBlob(leaseCtx, desc)
+	err = snap.(*immutableRef).setBlob(leaseCtx, desc, nil)
 	done(context.TODO())
 	require.NoError(t, err)
 	err = snap.(*immutableRef).computeChainMetadata(leaseCtx, map[string]struct{}{snap.ID(): {}})
@@ -733,10 +733,10 @@ func TestSetBlob(t *testing.T) {
 		Annotations: map[string]string{
 			"containerd.io/uncompressed": digest.FromBytes([]byte("foobar2")).String(),
 		},
-	})
+	}, nil)
 	require.Error(t, err)
 
-	err = snap.(*immutableRef).setBlob(ctx, desc)
+	err = snap.(*immutableRef).setBlob(ctx, desc, nil)
 	require.NoError(t, err)
 	err = snap.(*immutableRef).computeChainMetadata(ctx, map[string]struct{}{snap.ID(): {}})
 	require.NoError(t, err)
@@ -762,7 +762,7 @@ func TestSetBlob(t *testing.T) {
 	err = content.WriteBlob(ctx, co.cs, "ref2", bytes.NewBuffer(b2), desc2)
 	require.NoError(t, err)
 
-	err = snap2.(*immutableRef).setBlob(ctx, desc2)
+	err = snap2.(*immutableRef).setBlob(ctx, desc2, nil)
 	require.NoError(t, err)
 	err = snap2.(*immutableRef).computeChainMetadata(ctx, map[string]struct{}{snap.ID(): {}, snap2.ID(): {}})
 	require.NoError(t, err)
@@ -1171,7 +1171,7 @@ func TestLoopLeaseContent(t *testing.T) {
 	}()
 	var chain []ocispecs.Descriptor
 	for _, compressionType := range compressionLoop {
-		remotes, err := ref.GetRemotes(ctx, true, config.RefConfig{Compression: compression.New(compressionType).SetForce(true)}, false, nil)
+		remotes, err := ref.GetRemotes(ctx, true, config.RefConfig{Compression: compression.New(compressionType).SetForce(true)}, false, nil, nil)
 		require.NoError(t, err)
 		require.Equal(t, 1, len(remotes))
 		require.Equal(t, 1, len(remotes[0].Descriptors))
@@ -1368,7 +1368,7 @@ func testSharingCompressionVariant(ctx context.Context, t *testing.T, co *cmOut,
 	defer aRef.Release(ctx)
 	var bDesc ocispecs.Descriptor
 	for _, compressionType := range append([]compression.Type{testCase.a}, testCase.aVariants...) {
-		remotes, err := aRef.GetRemotes(ctx, true, config.RefConfig{Compression: compression.New(compressionType).SetForce(true)}, false, nil)
+		remotes, err := aRef.GetRemotes(ctx, true, config.RefConfig{Compression: compression.New(compressionType).SetForce(true)}, false, nil, nil)
 		require.NoError(t, err)
 		require.Equal(t, 1, len(remotes))
 		require.Equal(t, 1, len(remotes[0].Descriptors))
@@ -1381,7 +1381,7 @@ func testSharingCompressionVariant(ctx context.Context, t *testing.T, co *cmOut,
 	require.NoError(t, err)
 	defer bRef.Release(ctx)
 	for _, compressionType := range append([]compression.Type{testCase.b}, testCase.bVariants...) {
-		remotes, err := bRef.GetRemotes(ctx, true, config.RefConfig{Compression: compression.New(compressionType).SetForce(true)}, false, nil)
+		remotes, err := bRef.GetRemotes(ctx, true, config.RefConfig{Compression: compression.New(compressionType).SetForce(true)}, false, nil, nil)
 		require.NoError(t, err)
 		require.Equal(t, 1, len(remotes))
 		require.Equal(t, 1, len(remotes[0].Descriptors))
@@ -1730,7 +1730,7 @@ func TestGetRemotes(t *testing.T) {
 		compressionType := compressionType
 		refCfg := config.RefConfig{Compression: compression.New(compressionType).SetForce(true)}
 		eg.Go(func() error {
-			remotes, err := ir.GetRemotes(egctx, true, refCfg, false, nil)
+			remotes, err := ir.GetRemotes(egctx, true, refCfg, false, nil, nil)
 			require.NoError(t, err)
 			require.Equal(t, 1, len(remotes))
 			remote := remotes[0]
@@ -1819,13 +1819,13 @@ func TestGetRemotes(t *testing.T) {
 		compressionType := compressionType
 		refCfg := config.RefConfig{Compression: compression.New(compressionType)}
 		eg.Go(func() error {
-			remotes, err := ir.GetRemotes(egctx, false, refCfg, true, nil)
+			remotes, err := ir.GetRemotes(egctx, false, refCfg, true, nil, nil)
 			require.NoError(t, err)
 			require.True(t, len(remotes) > 0, "for %s : %d", compressionType, len(remotes))
 			gotMain, gotVariants := remotes[0], remotes[1:]
 
 			// Check the main blob is compatible with all == false
-			mainOnly, err := ir.GetRemotes(egctx, false, refCfg, false, nil)
+			mainOnly, err := ir.GetRemotes(egctx, false, refCfg, false, nil, nil)
 			require.NoError(t, err)
 			require.Equal(t, 1, len(mainOnly))
 			mainRemote := mainOnly[0]
@@ -1944,7 +1944,7 @@ func TestNondistributableBlobs(t *testing.T) {
 	ref, err := cm.GetByBlob(ctx, desc, nil, descHandlers)
 	require.NoError(t, err)
 
-	remotes, err := ref.GetRemotes(ctx, true, config.RefConfig{PreferNonDistributable: true}, false, nil)
+	remotes, err := ref.GetRemotes(ctx, true, config.RefConfig{PreferNonDistributable: true}, false, nil, nil)
 	require.NoError(t, err)
 
 	desc2 := remotes[0].Descriptors[0]
@@ -1952,7 +1952,7 @@ func TestNondistributableBlobs(t *testing.T) {
 	require.Equal(t, desc.MediaType, desc2.MediaType)
 	require.Equal(t, desc.URLs, desc2.URLs)
 
-	remotes, err = ref.GetRemotes(ctx, true, config.RefConfig{PreferNonDistributable: false}, false, nil)
+	remotes, err = ref.GetRemotes(ctx, true, config.RefConfig{PreferNonDistributable: false}, false, nil, nil)
 	require.NoError(t, err)
 
 	desc2 = remotes[0].Descriptors[0]
diff --git a/cache/metadata.go b/cache/metadata.go
index b223024dcae39..693303a157e8a 100644
--- a/cache/metadata.go
+++ b/cache/metadata.go
@@ -18,6 +18,7 @@ const keyEqualMutable = "cache.equalMutable"
 const keyCachePolicy = "cache.cachePolicy"
 const keyDescription = "cache.description"
 const keyCreatedAt = "cache.createdAt"
+const keySourceDateEpoch = "cache.sourceDateEpoch"
 const keyLastUsedAt = "cache.lastUsedAt"
 const keyUsageCount = "cache.usageCount"
 const keyLayerType = "cache.layerType"
@@ -191,6 +192,14 @@ func (md *cacheMetadata) GetCreatedAt() time.Time {
 	return md.getTime(keyCreatedAt)
 }
 
+func (md *cacheMetadata) queueSourceDateEpoch(tm time.Time) error {
+	return md.queueTime(keySourceDateEpoch, tm, "")
+}
+
+func (md *cacheMetadata) GetSourceDateEpoch() *time.Time {
+	return md.getTimeOrNil(keySourceDateEpoch)
+}
+
 func (md *cacheMetadata) HasCachePolicyDefault() bool {
 	return md.getCachePolicy() == cachePolicyDefault
 }
@@ -506,16 +515,25 @@ func (md *cacheMetadata) queueTime(key string, value time.Time, index string) er
 	return md.queueValue(key, value.UnixNano(), index)
 }
 
-func (md *cacheMetadata) getTime(key string) time.Time {
+func (md *cacheMetadata) getTimeOrNil(key string) *time.Time {
 	v := md.si.Get(key)
 	if v == nil {
-		return time.Time{}
+		return nil
 	}
 	var tm int64
 	if err := v.Unmarshal(&tm); err != nil {
+		return nil
+	}
+	u := time.Unix(tm/1e9, tm%1e9)
+	return &u
+}
+
+func (md *cacheMetadata) getTime(key string) time.Time {
+	v := md.getTimeOrNil(key)
+	if v == nil {
 		return time.Time{}
 	}
-	return time.Unix(tm/1e9, tm%1e9)
+	return *v
 }
 
 func (md *cacheMetadata) getBool(key string) bool {
diff --git a/cache/refs.go b/cache/refs.go
index 338dbd664c966..f8964dec72e42 100644
--- a/cache/refs.go
+++ b/cache/refs.go
@@ -58,7 +58,7 @@ type ImmutableRef interface {
 	Finalize(context.Context) error
 
 	Extract(ctx context.Context, s session.Group) error // +progress
-	GetRemotes(ctx context.Context, createIfNeeded bool, cfg config.RefConfig, all bool, s session.Group) ([]*solver.Remote, error)
+	GetRemotes(ctx context.Context, createIfNeeded bool, cfg config.RefConfig, all bool, s session.Group, sourceDateEpoch *time.Time) ([]*solver.Remote, error)
 	LayerChain() RefList
 	FileList(ctx context.Context, s session.Group) ([]string, error)
 }
diff --git a/cache/remote.go b/cache/remote.go
index b80bd79cfb0ef..8497b7e5ee0fd 100644
--- a/cache/remote.go
+++ b/cache/remote.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net/url"
 	"strings"
+	"time"
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
@@ -33,7 +34,7 @@ type Unlazier interface {
 // layers. If all is true, all available chains that has the specified compression type of topmost blob are
 // appended to the result.
 // Note: Use WorkerRef.GetRemotes instead as moby integration requires custom GetRemotes implementation.
-func (sr *immutableRef) GetRemotes(ctx context.Context, createIfNeeded bool, refCfg config.RefConfig, all bool, s session.Group) ([]*solver.Remote, error) {
+func (sr *immutableRef) GetRemotes(ctx context.Context, createIfNeeded bool, refCfg config.RefConfig, all bool, s session.Group, sourceDateEpoch *time.Time) ([]*solver.Remote, error) {
 	ctx, done, err := leaseutil.WithLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary)
 	if err != nil {
 		return nil, err
@@ -42,7 +43,7 @@ func (sr *immutableRef) GetRemotes(ctx context.Context, createIfNeeded bool, ref
 
 	// fast path if compression variants aren't required
 	// NOTE: compressionopt is applied only to *newly created layers* if Force != true.
-	remote, err := sr.getRemote(ctx, createIfNeeded, refCfg, s)
+	remote, err := sr.getRemote(ctx, createIfNeeded, refCfg, s, sourceDateEpoch)
 	if err != nil {
 		return nil, err
 	}
@@ -138,8 +139,8 @@ func getAvailableBlobs(ctx context.Context, cs content.Store, chain *solver.Remo
 	return res, nil
 }
 
-func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refCfg config.RefConfig, s session.Group) (*solver.Remote, error) {
-	err := sr.computeBlobChain(ctx, createIfNeeded, refCfg.Compression, s)
+func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refCfg config.RefConfig, s session.Group, sourceDateEpoch *time.Time) (*solver.Remote, error) {
+	err := sr.computeBlobChain(ctx, createIfNeeded, refCfg.Compression, s, sourceDateEpoch)
 	if err != nil {
 		return nil, err
 	}
diff --git a/cache/remotecache/v1/cachestorage.go b/cache/remotecache/v1/cachestorage.go
index 004fac0521c8b..9e53ba3c80d43 100644
--- a/cache/remotecache/v1/cachestorage.go
+++ b/cache/remotecache/v1/cachestorage.go
@@ -268,7 +268,7 @@ func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult)
 	return worker.NewWorkerRefResult(ref, cs.w), nil
 }
 
-func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopts *compression.Config, _ session.Group) ([]*solver.Remote, error) {
+func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopts *compression.Config, _ session.Group, _ *time.Time) ([]*solver.Remote, error) {
 	if r := cs.byResultID(res.ID); r != nil && r.result != nil {
 		if compressionopts == nil {
 			return []*solver.Remote{r.result}, nil
diff --git a/control/control.go b/control/control.go
index 899fb2da0264a..ef356753689f6 100644
--- a/control/control.go
+++ b/control/control.go
@@ -331,7 +331,14 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 	}
 
 	// if SOURCE_DATE_EPOCH is set, enable it for the exporter
+	var sourceDateEpoch *time.Time
 	if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok {
+		i64, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return nil, errors.Wrapf(err, "invalid epoch value %q", v)
+		}
+		sourceDateEpochX := time.Unix(i64, 0).UTC()
+		sourceDateEpoch = &sourceDateEpochX
 		if _, ok := req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)]; !ok {
 			if req.ExporterAttrs == nil {
 				req.ExporterAttrs = make(map[string]string)
@@ -439,7 +446,7 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 		CacheExporters: cacheExporters,
 		Type:           req.Exporter,
 		Attrs:          req.ExporterAttrs,
-	}, req.Entitlements, procs, req.Internal, req.SourcePolicy)
+	}, req.Entitlements, procs, req.Internal, req.SourcePolicy, sourceDateEpoch)
 	if err != nil {
 		return nil, err
 	}
diff --git a/docs/build-repro.md b/docs/build-repro.md
index 0b634c7fe3202..019bf0ced22a5 100644
--- a/docs/build-repro.md
+++ b/docs/build-repro.md
@@ -63,9 +63,9 @@ See also the [documentation](/frontend/dockerfile/docs/reference.md#buildkit-bui
 ## Caveats
 
 ### Timestamps of the files inside the image
-Currently, the `SOURCE_DATE_EPOCH` value is not used for the timestamps of the files inside the image.
+In BuildKit v0.11, the `SOURCE_DATE_EPOCH` value is not used for the timestamps of the files inside the image.
 
-Workaround:
+Workaround for BuildKit v0.11:
 ```dockerfile
 # Limit the timestamp upper bound to SOURCE_DATE_EPOCH.
 # Workaround for https://github.com/moby/buildkit/issues/3180
@@ -76,10 +76,15 @@ RUN find $( ls / | grep -E -v "^(dev|mnt|proc|sys)$" ) -newermt "@${SOURCE_DATE_
 ```
 
 The `touch` command above is [not effective](https://github.com/moby/buildkit/issues/3309) for mount point directories.
 A workaround is to create mount point directories below `/dev` (tmpfs) so that the mount points will not be included in the image layer.
 
+> **Note**
+>
+> This issue is already fixed in the master branch.
+> containerd >= 1.7 is needed for the containerd worker mode.
+
 ### Timestamps of whiteouts
-Currently, the `SOURCE_DATE_EPOCH` value is not used for the timestamps of "whiteouts" that are created on removing files.
+In BuildKit v0.11, the `SOURCE_DATE_EPOCH` value is not used for the timestamps of "whiteouts" that are created on removing files.
 
-Workaround:
+Workaround for BuildKit v0.11:
 ```dockerfile
 # Squash the entire stage for resetting the whiteout timestamps.
 # Workaround for https://github.com/moby/buildkit/issues/3168
@@ -88,3 +93,8 @@ COPY --from=0 / /
 ```
 
 The timestamps of the regular files in the original stage are maintained in the squashed stage, so you do not need to touch the files after this `COPY` instruction.
+
+> **Note**
+>
+> This issue is already fixed in the master branch.
+> containerd >= 1.7 is needed for the containerd worker mode.
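
For reference, the epoch plumbing that the `control.go` hunk above introduces reduces to a few lines of Go: the `SOURCE_DATE_EPOCH` build arg arrives as a decimal string of seconds and is normalized to a UTC `*time.Time`. The following is a minimal standalone sketch (not part of the patch); the helper name `parseSourceDateEpoch` is illustrative.

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseSourceDateEpoch mirrors the conversion done in control.Solve above:
// a decimal seconds-since-epoch string becomes a UTC *time.Time.
func parseSourceDateEpoch(v string) (*time.Time, error) {
	i64, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid epoch value %q: %w", v, err)
	}
	tm := time.Unix(i64, 0).UTC()
	return &tm, nil
}

func main() {
	// 1673354096 is the value used by the integration test further below.
	tm, err := parseSourceDateEpoch("1673354096")
	if err != nil {
		panic(err)
	}
	fmt.Println(tm) // 2023-01-10 12:34:56 +0000 UTC
}
```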
diff --git a/exporter/containerimage/export.go b/exporter/containerimage/export.go
index 18c678a678be6..b025725516671 100644
--- a/exporter/containerimage/export.go
+++ b/exporter/containerimage/export.go
@@ -266,7 +266,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 
 	if !e.storeAllowIncomplete {
 		if src.Ref != nil {
-			remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+			remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID), opts.Epoch)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -279,7 +279,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 		}
 		if len(src.Refs) > 0 {
 			for _, r := range src.Refs {
-				remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+				remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID), opts.Epoch)
 				if err != nil {
 					return nil, nil, err
 				}
@@ -322,7 +322,7 @@ func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Sou
 	annotations := map[digest.Digest]map[string]string{}
 	mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
 	if src.Ref != nil {
-		remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+		remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID), e.opts.Epoch)
 		if err != nil {
 			return err
 		}
@@ -334,7 +334,7 @@ func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Sou
 	}
 	if len(src.Refs) > 0 {
 		for _, r := range src.Refs {
-			remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+			remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID), e.opts.Epoch)
 			if err != nil {
 				return err
 			}
@@ -375,7 +375,7 @@ func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Imag
 		}
 	}
 
-	remotes, err := topLayerRef.GetRemotes(ctx, true, e.opts.RefCfg, false, s)
+	remotes, err := topLayerRef.GetRemotes(ctx, true, e.opts.RefCfg, false, s, e.opts.Epoch)
 	if err != nil {
 		return err
 	}
diff --git a/exporter/containerimage/writer.go b/exporter/containerimage/writer.go
index e289486839a92..9080182330a5b 100644
--- a/exporter/containerimage/writer.go
+++ b/exporter/containerimage/writer.go
@@ -121,7 +121,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 		ref = inp.Ref
 	}
 
-	remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), ref)
+	remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), opts.Epoch, ref)
 	if err != nil {
 		return nil, err
 	}
@@ -163,7 +163,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 		refs = append(refs, r)
 	}
 
-	remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), refs...)
+	remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), opts.Epoch, refs...)
 	if err != nil {
 		return nil, err
 	}
@@ -286,7 +286,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 	return &idxDesc, nil
 }
 
-func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefConfig, s session.Group, refs ...cache.ImmutableRef) ([]solver.Remote, error) {
+func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefConfig, s session.Group, sourceDateEpoch *time.Time, refs ...cache.ImmutableRef) ([]solver.Remote, error) {
 	attr := []attribute.KeyValue{
 		attribute.String("exportLayers.compressionType", refCfg.Compression.Type.String()),
 		attribute.Bool("exportLayers.forceCompression", refCfg.Compression.Force),
@@ -307,7 +307,7 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC
 			return
 		}
 		eg.Go(func() error {
-			remotes, err := ref.GetRemotes(ctx, true, refCfg, false, s)
+			remotes, err := ref.GetRemotes(ctx, true, refCfg, false, s, sourceDateEpoch)
 			if err != nil {
 				return err
 			}
diff --git a/exporter/oci/export.go b/exporter/oci/export.go
index c1c7dc5626f2d..c34590f7a22a5 100644
--- a/exporter/oci/export.go
+++ b/exporter/oci/export.go
@@ -207,7 +207,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 
 	mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
 	if src.Ref != nil {
-		remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+		remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID), e.opts.Epoch)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -225,7 +225,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 	}
 	if len(src.Refs) > 0 {
 		for _, r := range src.Refs {
-			remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+			remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID), e.opts.Epoch)
 			if err != nil {
 				return nil, nil, err
 			}
diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go
index 70ef310695200..f789c08677879 100644
--- a/frontend/dockerfile/dockerfile_test.go
+++ b/frontend/dockerfile/dockerfile_test.go
@@ -6638,12 +6638,13 @@ COPY Dockerfile \
 
 func testReproSourceDateEpoch(t *testing.T, sb integration.Sandbox) {
 	integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch)
-	if sb.Snapshotter() == "native" {
-		t.Skip("the digest is not reproducible with the \"native\" snapshotter because hardlinks are processed in a different way: https://github.com/moby/buildkit/pull/3456#discussion_r1062650263")
+	if cdAddress := sb.ContainerdAddress(); cdAddress != "" {
+		// https://github.com/containerd/containerd/commit/9c9f564a35df7990870a53a125afbf88ac412753
+		integration.CheckContainerdVersion(t, cdAddress, ">= 1.7.0-beta.1")
 	}
 	f := getFrontend(t, sb)
 
-	tm := time.Date(2023, time.January, 10, 12, 34, 56, 0, time.UTC)
+	tm := time.Date(2023, time.January, 10, 12, 34, 56, 0, time.UTC) // 1673354096
 	t.Logf("SOURCE_DATE_EPOCH=%d", tm.Unix())
 
 	dockerfile := []byte(`# The base image cannot be busybox, due to https://github.com/moby/buildkit/issues/3455
@@ -6657,19 +6658,9 @@ RUN touch -d '2030-01-01 12:34:56' /foo-2030.1
 RUN rm -f /foo.1
 RUN rm -f /foo-2010.1
 RUN rm -f /foo-2030.1
-
-# Limit the timestamp upper bound to SOURCE_DATE_EPOCH.
-# Workaround for https://github.com/moby/buildkit/issues/3180
-ARG SOURCE_DATE_EPOCH
-RUN find $( ls / | grep -E -v "^(dev|mnt|proc|sys)$" ) -newermt "@${SOURCE_DATE_EPOCH}" -writable -xdev | xargs touch --date="@${SOURCE_DATE_EPOCH}" --no-dereference
-
-# Squash the entire stage for resetting the whiteout timestamps.
-# Workaround for https://github.com/moby/buildkit/issues/3168
-FROM scratch
-COPY --from=0 / /
 `)
 
-	const expectedDigest = "sha256:d286483eccf4d57c313a3f389cdc196e668d914d319c574b15aabdf1963c5eeb"
+	const expectedDigest = "sha256:a4aebad4391470aa29fc52b7d69498366a57cebd86f0ef425065151a8b799344"
 
 	dir, err := integration.Tmpdir(
 		t,
diff --git a/go.mod b/go.mod
index d24346f08a99b..8d57912935886 100644
--- a/go.mod
+++ b/go.mod
@@ -5,6 +5,7 @@ go 1.20
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1
+	github.com/Masterminds/semver/v3 v3.1.0
 	github.com/Microsoft/go-winio v0.6.1
 	github.com/Microsoft/hcsshim v0.10.0-rc.8
 	github.com/agext/levenshtein v1.2.3
diff --git a/go.sum b/go.sum
index edc3ff44bb495..725183173cbaf 100644
--- a/go.sum
+++ b/go.sum
@@ -124,6 +124,7 @@ github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae
 github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.1.0 h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk=
 github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
diff --git a/solver/cachestorage.go b/solver/cachestorage.go
index 7f426fbedf54d..45b79b79aaa5f 100644
--- a/solver/cachestorage.go
+++ b/solver/cachestorage.go
@@ -48,6 +48,6 @@ type CacheInfoLink struct {
 type CacheResultStorage interface {
 	Save(Result, time.Time) (CacheResult, error)
 	Load(ctx context.Context, res CacheResult) (Result, error)
-	LoadRemotes(ctx context.Context, res CacheResult, compression *compression.Config, s session.Group) ([]*Remote, error)
+	LoadRemotes(ctx context.Context, res CacheResult, compression *compression.Config, s session.Group, sourceDateEpoch *time.Time) ([]*Remote, error)
 	Exists(ctx context.Context, id string) bool
 }
diff --git a/solver/exporter.go b/solver/exporter.go
index 78ce77c2d2f5e..a525be5e93790 100644
--- a/solver/exporter.go
+++ b/solver/exporter.go
@@ -116,7 +116,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach
 			return nil, err
 		}
 
-		remotes, err := cm.results.LoadRemotes(ctx, res, opt.CompressionOpt, opt.Session)
+		remotes, err := cm.results.LoadRemotes(ctx, res, opt.CompressionOpt, opt.Session, opt.SourceDateEpoch)
 		if err != nil {
 			return nil, err
 		}
diff --git a/solver/llbsolver/proc/provenance.go b/solver/llbsolver/proc/provenance.go
index 1af3af1960288..3a539d76cf5e1 100644
--- a/solver/llbsolver/proc/provenance.go
+++ b/solver/llbsolver/proc/provenance.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"strconv"
+	"time"
 
 	slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
@@ -15,7 +16,7 @@ import (
 )
 
 func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor {
-	return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) {
+	return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job, sourceDateEpoch *time.Time) (*llbsolver.Result, error) {
 		ps, err := exptypes.ParsePlatforms(res.Metadata)
 		if err != nil {
 			return nil, err
@@ -41,7 +42,7 @@ func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor {
 				return nil, errors.Errorf("could not find ref %s", p.ID)
 			}
 
-			pc, err := llbsolver.NewProvenanceCreator(ctx, cp, ref, attrs, j)
+			pc, err := llbsolver.NewProvenanceCreator(ctx, cp, ref, attrs, j, sourceDateEpoch)
 			if err != nil {
 				return nil, err
 			}
diff --git a/solver/llbsolver/proc/sbom.go b/solver/llbsolver/proc/sbom.go
index 37de1c4c3e78d..c2ba2d7c9bdd1 100644
--- a/solver/llbsolver/proc/sbom.go
+++ b/solver/llbsolver/proc/sbom.go
@@ -2,6 +2,7 @@ package proc
 
 import (
 	"context"
+	"time"
 
 	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
@@ -14,7 +15,7 @@ import (
 )
 
 func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor {
-	return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) {
+	return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job, sourceDateEpoch *time.Time) (*llbsolver.Result, error) {
 		// skip sbom generation if we already have an sbom
 		if sbom.HasSBOM(res.Result) {
 			return res, nil
diff --git a/solver/llbsolver/provenance.go b/solver/llbsolver/provenance.go
index d288b3fa0894c..830a15b7b1392 100644
--- a/solver/llbsolver/provenance.go
+++ b/solver/llbsolver/provenance.go
@@ -374,7 +374,7 @@ type ProvenanceCreator struct {
 	addLayers func() error
 }
 
-func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solver.ResultProxy, attrs map[string]string, j *solver.Job) (*ProvenanceCreator, error) {
+func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solver.ResultProxy, attrs map[string]string, j *solver.Job, sourceDateEpoch *time.Time) (*ProvenanceCreator, error) {
 	var reproducible bool
 	if v, ok := attrs["reproducible"]; ok {
 		b, err := strconv.ParseBool(v)
@@ -448,9 +448,10 @@ func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solve
 		}
 
 		if _, err := r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
-			ResolveRemotes: resolveRemotes,
-			Mode:           solver.CacheExportModeRemoteOnly,
-			ExportRoots:    true,
+			ResolveRemotes:  resolveRemotes(sourceDateEpoch),
+			Mode:            solver.CacheExportModeRemoteOnly,
+			ExportRoots:     true,
+			SourceDateEpoch: sourceDateEpoch,
 		}); err != nil {
 			return err
 		}
@@ -556,20 +557,22 @@ func (c *cacheRecord) AddResult(dgst digest.Digest, idx int, createdAt time.Time
 func (c *cacheRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
 }
 
-func resolveRemotes(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
-	ref, ok := res.Sys().(*worker.WorkerRef)
-	if !ok {
-		return nil, errors.Errorf("invalid result: %T", res.Sys())
-	}
+func resolveRemotes(sourceDateEpoch *time.Time) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
+	return func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
+		ref, ok := res.Sys().(*worker.WorkerRef)
+		if !ok {
+			return nil, errors.Errorf("invalid result: %T", res.Sys())
+		}
 
-	remotes, err := ref.GetRemotes(ctx, false, config.RefConfig{}, true, nil)
-	if err != nil {
-		if errors.Is(err, cache.ErrNoBlobs) {
-			return nil, nil
+		remotes, err := ref.GetRemotes(ctx, false, config.RefConfig{}, true, nil, sourceDateEpoch)
+		if err != nil {
+			if errors.Is(err, cache.ErrNoBlobs) {
+				return nil, nil
+			}
+			return nil, err
 		}
-		return nil, err
+		return remotes, nil
 	}
-	return remotes, nil
 }
 
 func AddBuildConfig(ctx context.Context, p *provenance.ProvenancePredicate, rp solver.ResultProxy) (map[digest.Digest]int, error) {
diff --git a/solver/llbsolver/result.go b/solver/llbsolver/result.go
index 718b1b09d301c..07df7bb61db99 100644
--- a/solver/llbsolver/result.go
+++ b/solver/llbsolver/result.go
@@ -2,6 +2,7 @@ package llbsolver
 
 import (
 	"context"
+	"time"
 
 	cacheconfig "github.com/moby/buildkit/cache/config"
 	"github.com/moby/buildkit/frontend"
@@ -19,13 +20,13 @@ type Result struct {
 
 type Attestation = frontend.Attestation
 
-func workerRefResolver(refCfg cacheconfig.RefConfig, all bool, g session.Group) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
+func workerRefResolver(refCfg cacheconfig.RefConfig, all bool, g session.Group, sourceDateEpoch *time.Time) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
 	return func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
 		ref, ok := res.Sys().(*worker.WorkerRef)
 		if !ok {
 			return nil, errors.Errorf("invalid result: %T", res.Sys())
 		}
 
-		return ref.GetRemotes(ctx, true, refCfg, all, g)
+		return ref.GetRemotes(ctx, true, refCfg, all, g, sourceDateEpoch)
 	}
 }
diff --git a/solver/llbsolver/solver.go b/solver/llbsolver/solver.go
index 3252982b06450..496cefdddbef8 100644
--- a/solver/llbsolver/solver.go
+++ b/solver/llbsolver/solver.go
@@ -91,7 +91,7 @@ type Solver struct {
 
 // Processor defines a processing function to be applied after solving, but
 // before exporting
-type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job) (*Result, error)
+type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job, sourceDateEpoch *time.Time) (*Result, error)
 
 func New(opt Opt) (*Solver, error) {
 	s := &Solver{
@@ -139,7 +139,7 @@ func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge {
 	return s.bridge(b)
 }
 
-func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, j *solver.Job) (func(*Result, exporter.DescriptorReference, error) error, error) {
+func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, j *solver.Job, sourceDateEpoch *time.Time) (func(*Result, exporter.DescriptorReference, error) error, error) {
 	var stopTrace func() []tracetest.SpanStub
 
 	if s := trace.SpanFromContext(ctx); s.SpanContext().IsValid() {
@@ -198,7 +198,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend
 	}
 
 	makeProvenance := func(res solver.ResultProxy, cap *provenance.Capture) (*controlapi.Descriptor, func(), error) {
-		prc, err := NewProvenanceCreator(ctx2, cap, res, attrs, j)
+		prc, err := NewProvenanceCreator(ctx2, cap, res, attrs, j, sourceDateEpoch)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -397,7 +397,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend
 	}, nil
 }
 
-func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement, post []Processor, internal bool, srcPol *spb.Policy) (_ *client.SolveResponse, err error) {
+func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement, post []Processor, internal bool, srcPol *spb.Policy, sourceDateEpoch *time.Time) (_ *client.SolveResponse, err error) {
 	j, err := s.solver.NewJob(id)
 	if err != nil {
 		return nil, err
@@ -451,7 +451,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 	}
 
 	if !internal {
-		rec, err1 := s.recordBuildHistory(ctx, id, req, exp, j)
+		rec, err1 := s.recordBuildHistory(ctx, id, req, exp, j, sourceDateEpoch)
 		if err1 != nil {
 			defer j.CloseProgress()
 			return nil, err1
@@ -508,7 +508,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 	}
 
 	for _, post := range post {
-		res2, err := post(ctx, resProv, s, j)
+		res2, err := post(ctx, resProv, s, j, sourceDateEpoch)
 		if err != nil {
 			return nil, err
 		}
@@ -537,7 +537,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 
 	var exporterResponse map[string]string
 	if e := exp.Exporter; e != nil {
-		meta, err := runInlineCacheExporter(ctx, e, inlineCacheExporter, j, cached)
+		meta, err := runInlineCacheExporter(ctx, e, inlineCacheExporter, j, cached, sourceDateEpoch)
 		if err != nil {
 			return nil, err
 		}
@@ -553,7 +553,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 		}
 	}
 
-	cacheExporterResponse, err := runCacheExporters(ctx, cacheExporters, j, cached, inp)
+	cacheExporterResponse, err := runCacheExporters(ctx, cacheExporters, j, cached, inp, sourceDateEpoch)
 	if err != nil {
 		return nil, err
 	}
@@ -578,7 +578,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 	}, nil
 }
 
-func runCacheExporters(ctx context.Context, exporters []RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult], inp *result.Result[cache.ImmutableRef]) (map[string]string, error) {
+func runCacheExporters(ctx context.Context, exporters []RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult], inp *result.Result[cache.ImmutableRef], sourceDateEpoch *time.Time) (map[string]string, error) {
 	eg, ctx := errgroup.WithContext(ctx)
 	g := session.NewGroup(j.SessionID)
 	var cacheExporterResponse map[string]string
@@ -597,10 +597,11 @@ func runCacheExporters(ctx context.Context, exporters []RemoteCacheExporter, j *
 			// all keys have same export chain so exporting others is not needed
 			_, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, exp, solver.CacheExportOpt{
-				ResolveRemotes: workerRefResolver(cacheconfig.RefConfig{Compression: compressionConfig}, false, g),
-				Mode:           exp.CacheExportMode,
-				Session:        g,
-				CompressionOpt: &compressionConfig,
+				ResolveRemotes:  workerRefResolver(cacheconfig.RefConfig{Compression: compressionConfig}, false, g, sourceDateEpoch),
+				Mode:            exp.CacheExportMode,
+				Session:         g,
+				CompressionOpt:  &compressionConfig,
+				SourceDateEpoch: sourceDateEpoch,
 			})
 			return err
 		}); err != nil {
@@ -630,14 +631,14 @@ func runCacheExporters(ctx context.Context, exporters []RemoteCacheExporter, j *
 	return cacheExporterResponse, nil
 }
 
-func runInlineCacheExporter(ctx context.Context, e exporter.ExporterInstance, inlineExporter *RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult]) (map[string][]byte, error) {
+func runInlineCacheExporter(ctx context.Context, e exporter.ExporterInstance, inlineExporter *RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult], sourceDateEpoch *time.Time) (map[string][]byte, error) {
 	meta := map[string][]byte{}
 	if inlineExporter == nil {
 		return nil, nil
 	}
 	if err := inBuilderContext(ctx, j, "preparing layers for inline cache", j.SessionID+"-cache-inline", func(ctx context.Context, _ session.Group) error {
 		if res := cached.Ref; res != nil {
-			dtic, err := inlineCache(ctx, inlineExporter.Exporter, res, e.Config().Compression(), session.NewGroup(j.SessionID))
+			dtic, err := inlineCache(ctx, inlineExporter.Exporter, res, e.Config().Compression(), session.NewGroup(j.SessionID), sourceDateEpoch)
 			if err != nil {
 				return err
 			}
@@ -646,7 +647,7 @@ func runInlineCacheExporter(ctx context.Context, e exporter.ExporterInstance, in
 		}
 		for k, res := range cached.Refs {
-			dtic, err := inlineCache(ctx, inlineExporter.Exporter, res, e.Config().Compression(), session.NewGroup(j.SessionID))
+			dtic, err := inlineCache(ctx, inlineExporter.Exporter, res, e.Config().Compression(), session.NewGroup(j.SessionID), sourceDateEpoch)
 			if err != nil {
 				return err
 			}
@@ -810,7 +811,7 @@ func asInlineCache(e remotecache.Exporter) (inlineCacheExporter, bool) {
 	return ie, ok
 }
 
-func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult, compressionopt compression.Config, g session.Group) ([]byte, error) {
+func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult, compressionopt compression.Config, g session.Group, sourceDateEpoch *time.Time) ([]byte, error) {
 	ie, ok := asInlineCache(e)
 	if !ok {
 		return nil, nil
@@ -820,7 +821,7 @@ func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedR
 		return nil, errors.Errorf("invalid reference: %T", res.Sys())
 	}
 
-	remotes, err := workerRef.GetRemotes(ctx, true, cacheconfig.RefConfig{Compression: compressionopt}, false, g)
+	remotes, err := workerRef.GetRemotes(ctx, true, cacheconfig.RefConfig{Compression: compressionopt}, false, g, sourceDateEpoch)
 	if err != nil || len(remotes) == 0 {
 		return nil, nil
 	}
@@ -834,10 +835,11 @@ func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedR
 	ctx = withDescHandlerCacheOpts(ctx, workerRef.ImmutableRef)
 	refCfg := cacheconfig.RefConfig{Compression: compressionopt}
 	if _, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
-		ResolveRemotes: workerRefResolver(refCfg, true, g), // load as many compression blobs as possible
-		Mode:           solver.CacheExportModeMin,
-		Session:        g,
-		CompressionOpt: &compressionopt, // cache possible compression variants
+		ResolveRemotes:  workerRefResolver(refCfg, true, g, sourceDateEpoch), // load as many compression blobs as possible
+		Mode:            solver.CacheExportModeMin,
+		Session:         g,
+		CompressionOpt:  &compressionopt, // cache possible compression variants
+		SourceDateEpoch: sourceDateEpoch,
 	}); err != nil {
 		return nil, err
 	}
diff --git a/solver/memorycachestorage.go b/solver/memorycachestorage.go
index 7fd1fa6268608..41afddb6843fb 100644
--- a/solver/memorycachestorage.go
+++ b/solver/memorycachestorage.go
@@ -299,7 +299,7 @@ func (s *inMemoryResultStore) Load(ctx context.Context, res CacheResult) (Result
 	return v.(Result), nil
 }
 
-func (s *inMemoryResultStore) LoadRemotes(_ context.Context, _ CacheResult, _ *compression.Config, _ session.Group) ([]*Remote, error) {
+func (s *inMemoryResultStore) LoadRemotes(_ context.Context, _ CacheResult, _ *compression.Config, _ session.Group, _ *time.Time) ([]*Remote, error) {
 	return nil, nil
 }
 
diff --git a/solver/types.go b/solver/types.go
index 01b344a3af905..0183ed59356f0 100644
--- a/solver/types.go
+++ b/solver/types.go
@@ -112,6 +112,8 @@ type CacheExportOpt struct {
 	CompressionOpt *compression.Config
 	// ExportRoots defines if records for root vertexes should be exported.
 	ExportRoots bool
+	// SourceDateEpoch is for resolving the cache
+	SourceDateEpoch *time.Time
 }
 
 // CacheExporter can export the artifacts of the build chain
diff --git a/util/overlay/overlay_linux.go b/util/overlay/overlay_linux.go
index 0d15b5989e9b4..fa7f28f8d8596 100644
--- a/util/overlay/overlay_linux.go
+++ b/util/overlay/overlay_linux.go
@@ -13,6 +13,7 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"time"
 
 	"github.com/containerd/containerd/archive"
 	"github.com/containerd/containerd/mount"
@@ -111,7 +112,7 @@ func GetOverlayLayers(m mount.Mount) ([]string, error) {
 
 // WriteUpperdir writes a layer tar archive into the specified writer, based on
 // the diff information stored in the upperdir.
-func WriteUpperdir(ctx context.Context, w io.Writer, upperdir string, lower []mount.Mount) error {
+func WriteUpperdir(ctx context.Context, w io.Writer, upperdir string, lower []mount.Mount, sourceDateEpoch *time.Time) error {
 	emptyLower, err := os.MkdirTemp("", "buildkit") // empty directory used for the lower of diff view
 	if err != nil {
 		return errors.Wrapf(err, "failed to create temp dir")
@@ -126,7 +127,14 @@ func WriteUpperdir(ctx context.Context, w io.Writer, upperdir string, lower []mo
 	}
 	return mount.WithTempMount(ctx, lower, func(lowerRoot string) error {
 		return mount.WithTempMount(ctx, upperView, func(upperViewRoot string) error {
-			cw := archive.NewChangeWriter(&cancellableWriter{ctx, w}, upperViewRoot)
+			var opts []archive.ChangeWriterOpt
+			if sourceDateEpoch != nil {
+				opts = append(opts,
+					archive.WithModTimeUpperBound(*sourceDateEpoch),
+					archive.WithWhiteoutTime(*sourceDateEpoch),
+				)
+			}
+			cw := archive.NewChangeWriter(&cancellableWriter{ctx, w}, upperViewRoot, opts...)
 			if err := Changes(ctx, cw.HandleChange, upperdir, upperViewRoot, lowerRoot); err != nil {
 				if err2 := cw.Close(); err2 != nil {
 					return errors.Wrapf(err, "failed to record upperdir changes (close error: %v)", err2)
diff --git a/util/testutil/integration/sandbox.go b/util/testutil/integration/sandbox.go
index c78e169be7143..d5910daef91ed 100644
--- a/util/testutil/integration/sandbox.go
+++ b/util/testutil/integration/sandbox.go
@@ -13,6 +13,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/Masterminds/semver/v3"
+	containerdpkg "github.com/containerd/containerd"
 	"github.com/google/shlex"
 	"github.com/moby/buildkit/util/bklog"
 	"github.com/pkg/errors"
@@ -367,3 +369,31 @@ func CheckFeatureCompat(t *testing.T, sb Sandbox, reason ...string) {
 		t.Skipf("%s worker can not currently run this test due to missing features (%s)", sb.Name(), strings.Join(ereasons, ", "))
 	}
 }
+
+func CheckContainerdVersion(t *testing.T, cdAddress, constraint string) {
+	t.Helper()
+	constraintSemVer, err := semver.NewConstraint(constraint)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cdClient, err := containerdpkg.New(cdAddress, containerdpkg.WithTimeout(60*time.Second))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer cdClient.Close()
+	ctx := context.TODO()
+	cdVersion, err := cdClient.Version(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cdVersionSemVer, err := semver.NewVersion(cdVersion.Version)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !constraintSemVer.Check(cdVersionSemVer) {
+		t.Skipf("containerd version %q does not satisfy the constraint %q", cdVersion.Version, constraint)
+	}
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore
new file mode 100644
index 0000000000000..6b061e6174b3e
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/.gitignore
@@ -0,0 +1 @@
+_fuzz/
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
new file mode 100644
index 0000000000000..fdbdf1448c366
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
@@ -0,0 +1,26 @@
+run:
+  deadline: 2m
+
+linters:
+  disable-all: true
+  enable:
+    - deadcode
+    - dupl
+    - errcheck
+    - gofmt
+    - goimports
+    - golint
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - nakedret
+    - structcheck
+    - unused
+    - varcheck
+
+linters-settings:
+  gofmt:
+    simplify: true
+  dupl:
+    threshold: 400
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
new file mode 100644
index 0000000000000..947210d37c5ac
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -0,0 +1,188 @@
+# Changelog
+
+## 3.1.0 (2020-04-15)
+
+### Added
+
+- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
+
+### Changed
+
+- #148: More accurate validation messages on constraints
+
+## 3.0.3 (2019-12-13)
+
+### Fixed
+
+- #141: Fixed issue with <= comparison
+
+## 3.0.2 (2019-11-14)
+
+### Fixed
+
+- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
+
+## 3.0.1 (2019-09-13)
+
+### Fixed
+
+- #125: Fixes issue with module path for v3
+
+## 3.0.0 (2019-09-12)
+
+This is a major release of the semver package which includes API changes. The Go
+API is compatible with ^1. The Go API was not changed because many people are using
+`go get` without Go modules for their applications and API breaking changes cause
+errors which we have or would need to support.
+
+The changes in this release are the handling based on the data passed into the
+functions. These are described in the added and changed sections below.
+
+### Added
+
+- StrictNewVersion function. This is similar to NewVersion but will return an
+  error if the version passed in is not a strict semantic version. For example,
+  1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
+  speaking semantic versions. This function is faster, performs fewer operations,
+  and uses fewer allocations than NewVersion.
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
+  The Makefile contains the operations used. For more information you can start
+  on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
+- Now using Go modules
+
+### Changed
+
+- NewVersion has proper prerelease and metadata validation with error messages
+  to signal an issue with either of them
+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
+  version is >=1 the ^ ranges work the same as v1. For major versions of 0 the
+  rules have changed. The minor version is treated as the stable version unless
+  a patch is specified and then it is equivalent to =. One difference from npm/js
+  is that prereleases there are only to a specific version (e.g. 1.2.3).
+  Prereleases here look over multiple versions and follow semantic version
+  ordering rules. This pattern now follows along with the expected and requested
+  handling of this package by numerous users.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console application
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+  properly.
+
+## Release 1.2.0 (2016-11-04)
+
+### Added
+
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+### Fixed
+
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+  might not satisfy the intended compatibility. The change here ignores pre-releases
+  on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+  constraint. For example, `^1.2.3` will ignore pre-releases while
+  `^1.2.3-alpha` will include them.
+
+## Release 1.1.1 (2016-06-30)
+
+### Changed
+
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+## Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a version failed a
+  constraint.
+
+## Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+
+## Release 1.0.0 (2015-10-20)
+
+- Initial release
diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
new file mode 100644
index 0000000000000..9ff7da9c48b67
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
new file mode 100644
index 0000000000000..eac19178fbd1b
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -0,0 +1,37 @@
+GOPATH=$(shell go env GOPATH)
+GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
+GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build
+GOFUZZ = $(GOPATH)/bin/go-fuzz
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT)
+	@echo "==> Linting codebase"
+	@$(GOLANGCI_LINT) run
+
+.PHONY: test
+test:
+	@echo "==> Running tests"
+	GO111MODULE=on go test -v
+
+.PHONY: test-cover
+test-cover:
+	@echo "==> Running Tests with coverage"
+	GO111MODULE=on go test -cover .
+
+.PHONY: fuzz
+fuzz: $(GOFUZZBUILD) $(GOFUZZ)
+	@echo "==> Fuzz testing"
+	$(GOFUZZBUILD)
+	$(GOFUZZ) -workdir=_fuzz
+
+$(GOLANGCI_LINT):
+	# Install golangci-lint. The configuration for it is in the .golangci.yml
The configuration for it is in the .golangci.yml
+	# file in the root of the repository
+	echo ${GOPATH}
+	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
+
+$(GOFUZZBUILD):
+	cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
+
+$(GOFUZZ):
+	cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
new file mode 100644
index 0000000000000..d8f54dcbd3c69
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -0,0 +1,244 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Package Versions
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the new stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the most widely used version with numerous tagged releases. This is the
+  previous stable release and is still maintained for bug fixes. The development, to fix
+  bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. 
For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+	v, err := semver.NewVersion(r)
+	if err != nil {
+		t.Errorf("Error parsing version: %s", err)
+	}
+
+	vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include prereleases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The variable a will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. 
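+
+A version only needs to satisfy one side of the OR. This is a minimal sketch
+using the compound constraint above together with `MustParse` and the standard
+`fmt` package:
+
+```go
+c, err := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+// 2.5.0 satisfies both AND comparisons in the first group.
+fmt.Println(c.Check(semver.MustParse("2.5.0"))) // true
+
+// 4.3.0 fails the first group but satisfies ">= 4.2.3" after the ||.
+fmt.Println(c.Check(semver.MustParse("4.3.0"))) // true
+
+// 3.1.0 fails both groups.
+fmt.Println(c.Check(semver.MustParse("3.1.0"))) // false
+```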
+ +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of prereleases include +development, alpha, beta, and release candidate releases. A prerelease may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, prereleases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification prereleases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons using constraints without a prerelease comparator will skip +prerelease versions. For example, `>=1.2.3` will skip prereleases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. 
For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 0000000000000..a78235895fdcf --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 0000000000000..7420823675b6b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,571 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. 
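+	// (For example, a hyphen range such as "1.2 - 1.4.5" is rewritten to
+	// ">= 1.2, <= 1.4.5" before the individual constraints are parsed.)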
+ c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := make([]string, 0, len(constraintOps)) + for k := range 
constraintOps { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + strings.Join(ops, "|"), + cvRegex)) + + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*\,?)+$`, + strings.Join(ops, "|"), + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
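+		// For example, checking "4.1.0-alpha" against "!= 4.x" returns false
+		// here with a prerelease error rather than treating them as unequal.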
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
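+	// For example, ">= 1.2.3" does not match "1.2.4-beta.1"; a constraint such
+	// as ">= 1.2.3-0" is needed for prerelease versions to be considered.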
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 0000000000000..391aa46b76df8 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +There are two functions that can parse semantic versions. 
The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+    vs := make([]*semver.Version, len(raw))
+    for i, r := range raw {
+        v, err := semver.NewVersion(r)
+        if err != nil {
+            t.Errorf("Error parsing version: %s", err)
+        }
+
+        vs[i] = v
+    }
+
+    sort.Sort(semver.Collection(vs))
+
+Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses Constraints. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include prereleases
+   within the comparison. It will provide an answer valid with the comparison
+   spec section at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+    c, err := semver.NewConstraint(">= 1.2.3")
+    if err != nil {
+        // Handle constraint not being parsable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parsable.
+    }
+    // Check if the version meets the constraints. The variable a will be true.
+    a := c.Check(v)
+
+Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. 
These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. This can also be written as +`">= 1.2, < 3.0.0 || >= 4.2.3"` + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the tilde operation. For example, + + * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + * `>= 1.2.x` is equivalent to `>= 1.2.0` + * `<= 2.x` is equivalent to `<= 3` + * `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` + * `~1` is equivalent to `>= 1, < 2` + * `~2.3` is equivalent to `>= 2.3 < 2.4` + * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + * `~1.x` is equivalent to `>= 1 < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` + * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` + * `^0.2` is equivalent to `>=0.2.0 <0.3.0` + * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` + * `^0.0` is equivalent to `>=0.0.0 <0.1.0` + * `^0` is equivalent to `>=0.0.0 <1.0.0` + +Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. 
+ a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go new file mode 100644 index 0000000000000..a242ad70587ce --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go @@ -0,0 +1,22 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + d := string(data) + + // Test NewVersion + _, _ = NewVersion(d) + + // Test StrictNewVersion + _, _ = StrictNewVersion(d) + + // Test NewConstraint + _, _ = NewConstraint(d) + + // The return value should be 0 normally, 1 if the priority in future tests + // should be increased, and -1 if future tests should skip passing in that + // data. We do not have a reason to change priority so 0 is always returned. + // There are example tests that do this. + return 0 +} diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go new file mode 100644 index 0000000000000..d6b9cda3eeb70 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -0,0 +1,606 @@ +package semver + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrEmptyString is returned when an empty string is passed in for parsing. + ErrEmptyString = errors.New("Version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("Invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. + ErrSegmentStartsZero = errors.New("Version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const num string = "0123456789" +const allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version, such as 1 or 1.2, and perse that as the 1.x +// releases of semver provided use the NewSemver() function. 
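+//
+// For example, "1.2.3" parses while "v1.2.3" and "1.2" return an error
+// because they are not strict semantic versions:
+//
+//	v, err := StrictNewVersion("v1.2.3") // err is non-nil here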
+func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. 
+// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. 
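+// For example, for a version parsed from "1.2.3", SetMetadata("b345") returns
+// a version whose String() method produces "1.2.3+b345".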
+func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. 
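+	// For example, comparing "alpha" with "alpha.1" iterates twice; the missing
+	// second part on the shorter side is treated as empty, and an empty part
+	// sorts lower, so "alpha" is less than "alpha.1".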
+ slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." 
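+//
+// For example, "exp.sha.5114f85" is valid metadata while "exp_sha" is not
+// because the underscore is not an allowed character.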
+func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index cbd52de0375b5..e7fbef83274bc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -57,6 +57,9 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/o github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version github.com/AzureAD/microsoft-authentication-library-for-go/apps/public +# github.com/Masterminds/semver/v3 v3.1.0 +## explicit; go 1.12 +github.com/Masterminds/semver/v3 # github.com/Microsoft/go-winio v0.6.1 ## explicit; go 1.17 github.com/Microsoft/go-winio diff --git a/worker/cacheresult.go b/worker/cacheresult.go index 50f7c936888a9..bf977a99af2f6 100644 --- a/worker/cacheresult.go +++ b/worker/cacheresult.go @@ -67,7 +67,7 @@ func (s *cacheResultStorage) load(ctx context.Context, id string, hidden bool) ( return NewWorkerRefResult(ref, w), nil } -func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopt *compression.Config, g session.Group) ([]*solver.Remote, error) { +func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopt *compression.Config, g session.Group, sourceDateEpoch *time.Time) ([]*solver.Remote, error) { w, refID, err := s.getWorkerRef(res.ID) if err != nil { return nil, err @@ -89,7 +89,7 @@ func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheRe refCfg := cacheconfig.RefConfig{ Compression: *compressionopt, } - remotes, err := wref.GetRemotes(ctx, false, refCfg, all, g) + remotes, err := wref.GetRemotes(ctx, false, refCfg, all, g, sourceDateEpoch) if err != nil { return nil, nil // ignore error. loadRemote is best effort } diff --git a/worker/result.go b/worker/result.go index 26054cf8c2060..d595e284634f7 100644 --- a/worker/result.go +++ b/worker/result.go @@ -2,6 +2,7 @@ package worker import ( "context" + "time" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" @@ -36,16 +37,16 @@ func (wr *WorkerRef) Release(ctx context.Context) error { // GetRemotes method abstracts ImmutableRef's GetRemotes to allow a Worker to override. // This is needed for moby integration. // Use this method instead of calling ImmutableRef.GetRemotes() directly. 
-func (wr *WorkerRef) GetRemotes(ctx context.Context, createIfNeeded bool, refCfg cacheconfig.RefConfig, all bool, g session.Group) ([]*solver.Remote, error) { +func (wr *WorkerRef) GetRemotes(ctx context.Context, createIfNeeded bool, refCfg cacheconfig.RefConfig, all bool, g session.Group, sourceDateEpoch *time.Time) ([]*solver.Remote, error) { if w, ok := wr.Worker.(interface { - GetRemotes(context.Context, cache.ImmutableRef, bool, cacheconfig.RefConfig, bool, session.Group) ([]*solver.Remote, error) + GetRemotes(context.Context, cache.ImmutableRef, bool, cacheconfig.RefConfig, bool, session.Group, *time.Time) ([]*solver.Remote, error) }); ok { - return w.GetRemotes(ctx, wr.ImmutableRef, createIfNeeded, refCfg, all, g) + return w.GetRemotes(ctx, wr.ImmutableRef, createIfNeeded, refCfg, all, g, sourceDateEpoch) } if wr.ImmutableRef == nil { return nil, nil } - return wr.ImmutableRef.GetRemotes(ctx, createIfNeeded, refCfg, all, g) + return wr.ImmutableRef.GetRemotes(ctx, createIfNeeded, refCfg, all, g, sourceDateEpoch) } type workerRefResult struct {