From 6deaffb3118425028b86fb1c6018e2d17c8dc560 Mon Sep 17 00:00:00 2001
From: asraa
Date: Tue, 7 Jun 2022 07:16:05 -0500
Subject: [PATCH] tuf: improve TUF client concurrency and caching (#1953)

* move rekor public key fetch inside GetRekorPubs

Signed-off-by: Asra Ali

* use in-memory metadata and targets, sync to disk on start and updates

Signed-off-by: Asra Ali

update

Signed-off-by: Asra Ali

* Use TUF singleton.

Co-authored-by: Ville Aikas
Signed-off-by: Asra Ali

* hayden comment, sync.Once used

Signed-off-by: Asra Ali

* return global error

Signed-off-by: Asra Ali

Co-authored-by: Ville Aikas
---
 .../cli/fulcio/fulcioroots/fulcioroots.go     |   1 -
 .../cli/fulcio/fulcioverifier/ctl/verify.go   |   1 -
 cmd/cosign/cli/verify/verify_blob.go          |  17 +-
 pkg/cosign/tlog.go                            |  64 +++---
 pkg/cosign/tlog_test.go                       |   2 +-
 pkg/cosign/tuf/client.go                      | 212 ++++++++++++------
 pkg/cosign/tuf/client_test.go                 |  63 ++++--
 pkg/cosign/tuf/testutils.go                   |   4 +
 pkg/cosign/verify.go                          |   8 +-
 pkg/cosign/verify_test.go                     |  16 +-
 10 files changed, 241 insertions(+), 147 deletions(-)

diff --git a/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go b/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go
index cba25ffe376..29989321bbb 100644
--- a/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go
+++ b/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go
@@ -116,7 +116,6 @@ func initRoots() (*x509.CertPool, *x509.CertPool, error) {
 	if err != nil {
 		return nil, nil, fmt.Errorf("initializing tuf: %w", err)
 	}
-	defer tufClient.Close()
 	// Retrieve from the embedded or cached TUF root. If expired, a network
 	// call is made to update the root.
 	targets, err := tufClient.GetTargetsByMeta(tuf.Fulcio, []string{fulcioTargetStr, fulcioV1TargetStr})
diff --git a/cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go b/cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go
index 4031d6ae87a..641088f375c 100644
--- a/cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go
+++ b/cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go
@@ -81,7 +81,6 @@ func VerifySCT(ctx context.Context, certPEM, chainPEM, rawSCT []byte) error {
 	if err != nil {
 		return err
 	}
-	defer tufClient.Close()
 
 	targets, err := tufClient.GetTargetsByMeta(tuf.CTFE, []string{ctPublicKeyStr})
 	if err != nil {
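The dropped Close() calls above are the visible consequence of the new singleton: callers now just initialize and fetch, and the process-wide instance owns the underlying stores. A minimal consuming sketch under that model, using the package's NewFromEnv and GetTargetsByMeta (the "rekor.pub" target name is illustrative, not taken from this patch):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/sigstore/cosign/pkg/cosign/tuf"
)

func main() {
	// NewFromEnv returns the shared TUF singleton; no Close() is needed.
	tufClient, err := tuf.NewFromEnv(context.Background())
	if err != nil {
		log.Fatalf("initializing tuf: %v", err)
	}
	// "rekor.pub" is a hypothetical target name used for illustration.
	targets, err := tufClient.GetTargetsByMeta(tuf.Rekor, []string{"rekor.pub"})
	if err != nil {
		log.Fatalf("fetching targets: %v", err)
	}
	for _, t := range targets {
		fmt.Printf("fetched %d bytes (status %v)\n", len(t.Target), t.Status)
	}
}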
diff --git a/cmd/cosign/cli/verify/verify_blob.go b/cmd/cosign/cli/verify/verify_blob.go
index b64119f0705..4823cbeb5ba 100644
--- a/cmd/cosign/cli/verify/verify_blob.go
+++ b/cmd/cosign/cli/verify/verify_blob.go
@@ -293,9 +293,16 @@ func payloadBytes(blobRef string) ([]byte, error) {
 }
 
 func verifyRekorEntry(ctx context.Context, ko options.KeyOpts, e *models.LogEntryAnon, pubKey signature.Verifier, cert *x509.Certificate, b64sig string, blobBytes []byte) error {
+	// TODO: This can be moved below offline bundle verification when SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY
+	// is removed.
+	rekorClient, err := rekor.NewClient(ko.RekorURL)
+	if err != nil {
+		return err
+	}
+
 	// If we have a bundle with a rekor entry, let's first try to verify offline
 	if ko.BundlePath != "" {
-		if err := verifyRekorBundle(ctx, ko.BundlePath, cert); err == nil {
+		if err := verifyRekorBundle(ctx, ko.BundlePath, cert, rekorClient); err == nil {
 			fmt.Fprintf(os.Stderr, "tlog entry verified offline\n")
 			return nil
 		}
@@ -304,10 +311,6 @@ func verifyRekorEntry(ctx context.Context, ko options.KeyOpts, e *models.LogEntr
 		return nil
 	}
 
-	rekorClient, err := rekor.NewClient(ko.RekorURL)
-	if err != nil {
-		return err
-	}
 	// Only fetch from rekor tlog if we don't already have the entry.
 	if e == nil {
 		var pubBytes []byte
@@ -346,7 +349,7 @@ func verifyRekorEntry(ctx context.Context, ko options.KeyOpts, e *models.LogEntr
 	return cosign.CheckExpiry(cert, time.Unix(*e.IntegratedTime, 0))
 }
 
-func verifyRekorBundle(ctx context.Context, bundlePath string, cert *x509.Certificate) error {
+func verifyRekorBundle(ctx context.Context, bundlePath string, cert *x509.Certificate, rekorClient *client.Rekor) error {
 	b, err := cosign.FetchLocalSignedPayloadFromPath(bundlePath)
 	if err != nil {
 		return err
@@ -354,7 +357,7 @@ func verifyRekorBundle(ctx context.Context, bundlePath string, cert *x509.Certif
 	if b.Bundle == nil {
 		return fmt.Errorf("rekor entry is not available")
 	}
-	publicKeys, err := cosign.GetRekorPubs(ctx)
+	publicKeys, err := cosign.GetRekorPubs(ctx, rekorClient)
 	if err != nil {
 		return fmt.Errorf("retrieving rekor public key: %w", err)
 	}
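verifyRekorEntry now builds the Rekor client up front so the same client reaches both the offline bundle path and the online fallback. A self-contained sketch of that offline-first ordering, with stub verifiers standing in for verifyRekorBundle and the tlog lookup (the stubs are hypothetical, not cosign APIs):

package main

import (
	"context"
	"errors"
	"fmt"
)

// verifyOffline and verifyOnline are illustrative stand-ins for
// verifyRekorBundle and the online tlog lookup in verify_blob.go.
func verifyOffline(ctx context.Context, bundlePath string) error {
	return errors.New("no usable bundle") // stub
}

func verifyOnline(ctx context.Context) error {
	return nil // stub
}

// verifyEntrySketch mirrors the order of operations above: try the
// local bundle first, and only fall back to the network on failure.
func verifyEntrySketch(ctx context.Context, bundlePath string) error {
	if bundlePath != "" {
		if err := verifyOffline(ctx, bundlePath); err == nil {
			fmt.Println("tlog entry verified offline")
			return nil
		}
	}
	return verifyOnline(ctx)
}

func main() {
	if err := verifyEntrySketch(context.Background(), ""); err != nil {
		fmt.Println("verification failed:", err)
	}
}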
diff --git a/pkg/cosign/tlog.go b/pkg/cosign/tlog.go
index 197e881b5e9..8d31cea8d89 100644
--- a/pkg/cosign/tlog.go
+++ b/pkg/cosign/tlog.go
@@ -77,10 +77,15 @@ func getLogID(pub crypto.PublicKey) (string, error) {
 
 // GetRekorPubs retrieves trusted Rekor public keys from the embedded or cached
 // TUF root. If expired, makes a network call to retrieve the updated targets.
-// There is an Env variable that can be used to override this behaviour:
+// A Rekor client may optionally be provided in case SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY
+// is used (see below).
+// There are two Env variables that can be used to override this behaviour:
 // SIGSTORE_REKOR_PUBLIC_KEY - If specified, location of the file that contains
 // the Rekor Public Key on local filesystem
-func GetRekorPubs(ctx context.Context) (map[string]RekorPubKey, error) {
+// SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY - If specified, fetches the Rekor public
+// key from the Rekor server using the provided rekorClient.
+// TODO: Rename SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY to be test-only or remove.
+func GetRekorPubs(ctx context.Context, rekorClient *client.Rekor) (map[string]RekorPubKey, error) {
 	publicKeys := make(map[string]RekorPubKey)
 	altRekorPub := os.Getenv(altRekorPublicKey)
 
@@ -104,7 +109,6 @@ func GetRekorPubs(ctx context.Context) (map[string]RekorPubKey, error) {
 		if err != nil {
 			return nil, err
 		}
-		defer tufClient.Close()
 		targets, err := tufClient.GetTargetsByMeta(tuf.Rekor, []string{rekorTargetStr})
 		if err != nil {
 			return nil, err
@@ -121,6 +125,26 @@ func GetRekorPubs(ctx context.Context) (map[string]RekorPubKey, error) {
 			publicKeys[keyID] = RekorPubKey{PubKey: rekorPubKey, Status: t.Status}
 		}
 	}
+
+	// If we have a Rekor client and we've been told to fetch the Public Key from Rekor,
+	// additionally fetch it here.
+	addRekorPublic := os.Getenv(addRekorPublicKeyFromRekor)
+	if addRekorPublic != "" && rekorClient != nil {
+		pubOK, err := rekorClient.Pubkey.GetPublicKey(nil)
+		if err != nil {
+			return nil, fmt.Errorf("unable to fetch rekor public key from rekor: %w", err)
+		}
+		pubFromAPI, err := PemToECDSAKey([]byte(pubOK.Payload))
+		if err != nil {
+			return nil, fmt.Errorf("error converting rekor PEM public key from rekor to ECDSAKey: %w", err)
+		}
+		keyID, err := getLogID(pubFromAPI)
+		if err != nil {
+			return nil, fmt.Errorf("error generating log ID: %w", err)
+		}
+		publicKeys[keyID] = RekorPubKey{PubKey: pubFromAPI, Status: tuf.Active}
+	}
+
 	if len(publicKeys) == 0 {
 		return nil, errors.New("none of the Rekor public keys have been found")
 	}
@@ -330,9 +354,7 @@ func FindTLogEntriesByPayload(ctx context.Context, rekorClient *client.Rekor, pa
 	return searchIndex.GetPayload(), nil
 }
 
-// VerityTLogEntry verifies a TLog entry. If the Env variable
-// SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY is specified, fetches the Rekor public
-// key from the Rekor server using the provided rekorClient.
+// VerifyTLogEntry verifies a TLog entry.
 func VerifyTLogEntry(ctx context.Context, rekorClient *client.Rekor, e *models.LogEntryAnon) error {
 	if e.Verification == nil || e.Verification.InclusionProof == nil {
 		return errors.New("inclusion proof not provided")
@@ -365,37 +387,11 @@ func VerifyTLogEntry(ctx context.Context, rekorClient *client.Rekor, e *models.L
 		LogID: *e.LogID,
 	}
 
-	// If we've been told to fetch the Public Key from Rekor, fetch it here
-	// first before using the TUF code below.
-	rekorPubKeys := make(map[string]RekorPubKey)
-	addRekorPublic := os.Getenv(addRekorPublicKeyFromRekor)
-	if addRekorPublic != "" {
-		pubOK, err := rekorClient.Pubkey.GetPublicKey(nil)
-		if err != nil {
-			return fmt.Errorf("unable to fetch rekor public key from rekor: %w", err)
-		}
-		pubFromAPI, err := PemToECDSAKey([]byte(pubOK.Payload))
-		if err != nil {
-			return fmt.Errorf("error converting rekor PEM public key from rekor to ECDSAKey: %w", err)
-		}
-		keyID, err := getLogID(pubFromAPI)
-		if err != nil {
-			return fmt.Errorf("error generating log ID: %w", err)
-		}
-		rekorPubKeys[keyID] = RekorPubKey{PubKey: pubFromAPI, Status: tuf.Active}
-	}
-
-	rekorPubKeysTuf, err := GetRekorPubs(ctx)
+	rekorPubKeys, err := GetRekorPubs(ctx, rekorClient)
 	if err != nil {
-		if len(rekorPubKeys) == 0 {
-			return fmt.Errorf("unable to fetch Rekor public keys from TUF repository, and not trusting the Rekor API for fetching public keys: %w", err)
-		}
-		fmt.Fprintf(os.Stderr, "**Warning** Failed to fetch Rekor public keys from TUF, using the public key from Rekor API because %s was specified", addRekorPublicKeyFromRekor)
+		return fmt.Errorf("unable to fetch Rekor public keys: %w", err)
 	}
 
-	for k, v := range rekorPubKeysTuf {
-		rekorPubKeys[k] = v
-	}
 	pubKey, ok := rekorPubKeys[payload.LogID]
 	if !ok {
 		return errors.New("rekor log public key not found for payload")
diff --git a/pkg/cosign/tlog_test.go b/pkg/cosign/tlog_test.go
index 03b54709796..fad76a7a2ba 100644
--- a/pkg/cosign/tlog_test.go
+++ b/pkg/cosign/tlog_test.go
@@ -20,7 +20,7 @@ import (
 )
 
 func TestGetRekorPubKeys(t *testing.T) {
-	keys, err := GetRekorPubs(context.Background())
+	keys, err := GetRekorPubs(context.Background(), nil)
 	if err != nil {
 		t.Errorf("Unexpected error calling GetRekorPubs, expected nil: %v", err)
 	}
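GetRekorPubs keys its result map by log ID, and the new Rekor-API branch derives that ID with getLogID before inserting the fetched key. A self-contained sketch of the conventional derivation this assumes, SHA-256 over the PKIX-encoded public key, hex-encoded (the helper name logID is illustrative):

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"fmt"
	"log"
)

// logID mirrors the assumed behavior of getLogID: hex(SHA-256(PKIX(pub))).
func logID(pub crypto.PublicKey) (string, error) {
	der, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		return "", err
	}
	digest := sha256.Sum256(der)
	return hex.EncodeToString(digest[:]), nil
}

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	id, err := logID(key.Public())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("log ID:", id) // 64 hex characters
}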
diff --git a/pkg/cosign/tuf/client.go b/pkg/cosign/tuf/client.go
index c5df317589f..14268ec21cf 100644
--- a/pkg/cosign/tuf/client.go
+++ b/pkg/cosign/tuf/client.go
@@ -32,6 +32,7 @@ import (
 	"runtime"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/theupdateframework/go-tuf/client"
@@ -46,10 +47,13 @@ const (
 	SigstoreNoCache = "SIGSTORE_NO_CACHE"
 )
 
-// Global in-memory targets to avoid re-downloading when there is no local cache.
-// TODO: Consider using this map even when local caching to avoid reading from disk
-// multiple times (e.g. when there are multiple signatures to verify in a single call).
-var memoryTargets = map[string][]byte{}
+var (
+	// singletonTUF holds a single instance of TUF that will get reused on
+	// subsequent invocations of initializeTUF
+	singletonTUF     *TUF
+	singletonTUFOnce = new(sync.Once)
+	singletonTUFErr  error
+)
 
 var GetRemoteRoot = func() string {
 	return DefaultRemoteRoot
@@ -105,6 +109,10 @@ type remoteCache struct {
 	Mirror string `json:"mirror"`
}
 
+func resetForTests() {
+	singletonTUFOnce = new(sync.Once)
+}
+
 func getExpiration(metadata []byte) (*time.Time, error) {
 	s := &data.Signed{}
 	if err := json.Unmarshal(metadata, s); err != nil {
@@ -213,15 +221,9 @@ func GetRootStatus(ctx context.Context) (*RootStatus, error) {
 	if err != nil {
 		return nil, err
 	}
-	defer t.Close()
 	return t.getRootStatus()
 }
 
-// Close closes the local TUF store. Should only be called once per client.
-func (t *TUF) Close() error {
-	return t.local.Close()
-}
-
 // initializeTUF creates a TUF client using the following params:
 // * embed: indicates using the embedded metadata and in-memory file updates.
 //   When this is false, this uses a filesystem cache.
@@ -233,60 +235,64 @@ func (t *TUF) Close() error {
 // * forceUpdate: indicates checking the remote for an update, even when the local
 //   timestamp.json is up to date.
 func initializeTUF(ctx context.Context, mirror string, root []byte, embedded fs.FS, forceUpdate bool) (*TUF, error) {
-	t := &TUF{
-		mirror:   mirror,
-		embedded: embedded,
-	}
+	singletonTUFOnce.Do(func() {
+		t := &TUF{
+			mirror:   mirror,
+			embedded: embedded,
+		}
 
-	t.targets = newFileImpl()
-	var err error
-	t.local, err = newLocalStore()
-	if err != nil {
-		return nil, err
-	}
+		t.targets = newFileImpl()
+		t.local, singletonTUFErr = newLocalStore()
+		if singletonTUFErr != nil {
+			return
+		}
 
-	t.remote, err = remoteFromMirror(ctx, t.mirror)
-	if err != nil {
-		t.Close()
-		return nil, err
-	}
+		t.remote, singletonTUFErr = remoteFromMirror(ctx, t.mirror)
+		if singletonTUFErr != nil {
+			return
+		}
 
-	t.client = client.NewClient(t.local, t.remote)
+		t.client = client.NewClient(t.local, t.remote)
 
-	trustedMeta, err := t.local.GetMeta()
-	if err != nil {
-		t.Close()
-		return nil, fmt.Errorf("getting trusted meta: %w", err)
-	}
-
-	// If the caller does not supply a root, then either use the root in the local store
-	// or default to the embedded one.
-	if root == nil {
-		root, err = getRoot(trustedMeta, t.embedded)
+		trustedMeta, err := t.local.GetMeta()
 		if err != nil {
-			t.Close()
-			return nil, fmt.Errorf("getting trusted root: %w", err)
+			singletonTUFErr = fmt.Errorf("getting trusted meta: %w", err)
+			return
 		}
-	}
 
-	if err := t.client.InitLocal(root); err != nil {
-		t.Close()
-		return nil, fmt.Errorf("unable to initialize client, local cache may be corrupt: %w", err)
-	}
+		// If the caller does not supply a root, then either use the root in the local store
+		// or default to the embedded one.
+		if root == nil {
+			root, err = getRoot(trustedMeta, t.embedded)
+			if err != nil {
+				singletonTUFErr = fmt.Errorf("getting trusted root: %w", err)
+				return
+			}
+		}
 
+		if err := t.client.InitLocal(root); err != nil {
+			singletonTUFErr = fmt.Errorf("unable to initialize client, local cache may be corrupt: %w", err)
+			return
+		}
 
-	// We may already have an up-to-date local store! Check to see if it needs to be updated.
-	trustedTimestamp, ok := trustedMeta["timestamp.json"]
-	if ok && !isExpiredTimestamp(trustedTimestamp) && !forceUpdate {
-		return t, nil
-	}
+		// We may already have an up-to-date local store! Check to see if it needs to be updated.
+		trustedTimestamp, ok := trustedMeta["timestamp.json"]
+		if ok && !isExpiredTimestamp(trustedTimestamp) && !forceUpdate {
+			// We're golden so stash the TUF object for later use
+			singletonTUF = t
+			return
+		}
 
-	// Update if local is not populated or out of date.
-	if err := t.updateMetadataAndDownloadTargets(); err != nil {
-		t.Close()
-		return nil, fmt.Errorf("updating local metadata and targets: %w", err)
-	}
+		// Update if local is not populated or out of date.
+		if err := t.updateMetadataAndDownloadTargets(); err != nil {
+			singletonTUFErr = fmt.Errorf("updating local metadata and targets: %w", err)
+			return
+		}
 
-	return t, err
+		// We're golden so stash the TUF object for later use
+		singletonTUF = t
+	})
+	return singletonTUF, singletonTUFErr
 }
 
 func NewFromEnv(ctx context.Context) (*TUF, error) {
@@ -305,12 +311,10 @@ func NewFromEnv(ctx context.Context) (*TUF, error) {
 }
 
 func Initialize(ctx context.Context, mirror string, root []byte) error {
-	// Initialize the client. Force an update.
-	t, err := initializeTUF(ctx, mirror, root, GetEmbedded(), true)
-	if err != nil {
+	// Initialize the client. Force an update with remote.
+	if _, err := initializeTUF(ctx, mirror, root, GetEmbedded(), true); err != nil {
 		return err
 	}
-	t.Close()
 
 	// Store the remote for later if we are caching.
 	if !noCache() {
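initializeTUF now funnels every caller through singletonTUFOnce.Do, stashing the constructed client in singletonTUF and any failure in singletonTUFErr, while resetForTests re-arms the Once so tests can re-run initialization. A minimal, runnable sketch of this resettable-singleton shape (names are illustrative, not the cosign ones):

package main

import (
	"fmt"
	"sync"
)

type Client struct{ name string }

var (
	singleton     *Client
	singletonOnce = new(sync.Once)
	singletonErr  error
)

// getClient runs the expensive setup at most once per process; every
// caller afterwards receives the same *Client (or the same error).
func getClient(name string) (*Client, error) {
	singletonOnce.Do(func() {
		// Failures are stashed in singletonErr because a closure
		// passed to Do cannot return values to the caller.
		if name == "" {
			singletonErr = fmt.Errorf("empty name")
			return
		}
		singleton = &Client{name: name}
	})
	return singleton, singletonErr
}

// resetForTests mirrors the patch's approach: swapping in a fresh
// sync.Once re-arms initialization for the next test.
func resetForTests() {
	singletonOnce = new(sync.Once)
	singleton, singletonErr = nil, nil
}

func main() {
	c1, _ := getClient("tuf")
	c2, _ := getClient("ignored") // setup does not run again
	fmt.Println(c1 == c2)         // true
}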
@@ -399,13 +403,12 @@ func (t *TUF) GetTargetsByMeta(usage UsageKind, fallbacks []string) ([]TargetFil
 	return matchedTargets, nil
 }
 
-func (t *TUF) updateMetadataAndDownloadTargets() error {
-	// Download updated targets and cache new metadata and targets in ${TUF_ROOT}.
-	// NOTE: This only returns *updated* targets.
-	targetFiles, err := t.client.Update()
+// updateClient() updates the TUF client and also caches new metadata, if needed.
+func (t *TUF) updateClient() (data.TargetFiles, error) {
+	targets, err := t.client.Update()
 	if err != nil {
-		// Get some extra information for debugging. What was the state of the metadata
-		// on the remote?
+		// Get some extra information for debugging. What was the state of the top-level
+		// metadata on the remote?
 		status := struct {
 			Mirror   string                    `json:"mirror"`
 			Metadata map[string]MetadataStatus `json:"metadata"`
@@ -432,9 +435,38 @@ func (t *TUF) updateMetadataAndDownloadTargets() error {
 		}
 		b, innerErr := json.MarshalIndent(status, "", "\t")
 		if innerErr != nil {
-			return innerErr
+			return nil, innerErr
 		}
-		return fmt.Errorf("error updating to TUF remote mirror: %w\nremote status:%s", err, string(b))
+		return nil, fmt.Errorf("error updating to TUF remote mirror: %w\nremote status:%s", err, string(b))
+	}
+	// Success! Cache new metadata, if needed.
+	if noCache() {
+		return targets, nil
+	}
+	// Sync the on-disk cache with the metadata from the in-memory store.
+	tufDB := filepath.FromSlash(filepath.Join(rootCacheDir(), "tuf.db"))
+	diskLocal, err := tuf_leveldbstore.FileLocalStore(tufDB)
+	defer func() {
+		if diskLocal != nil {
+			diskLocal.Close()
+		}
+	}()
+	if err != nil {
+		return nil, fmt.Errorf("creating cached local store: %w", err)
+	}
+	if err := syncLocalMeta(t.local, diskLocal); err != nil {
+		return nil, err
+	}
+	// Return updated targets.
+	return targets, nil
+}
+
+func (t *TUF) updateMetadataAndDownloadTargets() error {
+	// Download updated targets and cache new metadata and targets in ${TUF_ROOT}.
+	// NOTE: This only returns *updated* targets.
+	targetFiles, err := t.updateClient()
+	if err != nil {
+		return err
 	}
 
 	// Download **newly** updated targets.
@@ -528,16 +560,41 @@ func cachedTargetsDir(cacheRoot string) string {
 	return filepath.FromSlash(filepath.Join(cacheRoot, "targets"))
 }
 
+func syncLocalMeta(from client.LocalStore, to client.LocalStore) error {
+	// Copy trusted metadata in the from LocalStore into the to LocalStore.
+	tufLocalStoreMeta, err := from.GetMeta()
+	if err != nil {
+		return fmt.Errorf("getting metadata to sync: %w", err)
+	}
+	for k, v := range tufLocalStoreMeta {
+		if err := to.SetMeta(k, v); err != nil {
+			return fmt.Errorf("syncing local store for metadata %s: %w", k, err)
+		}
+	}
+	return nil
+}
+
 // Local store implementations
 func newLocalStore() (client.LocalStore, error) {
+	local := client.MemoryLocalStore()
 	if noCache() {
-		return client.MemoryLocalStore(), nil
+		return local, nil
 	}
 	tufDB := filepath.FromSlash(filepath.Join(rootCacheDir(), "tuf.db"))
-	local, err := tuf_leveldbstore.FileLocalStore(tufDB)
+	diskLocal, err := tuf_leveldbstore.FileLocalStore(tufDB)
+	defer func() {
+		if diskLocal != nil {
+			diskLocal.Close()
+		}
+	}()
 	if err != nil {
 		return nil, fmt.Errorf("creating cached local store: %w", err)
 	}
+	// Populate the in-memory local store with data fetched from the cache.
+	if err := syncLocalMeta(diskLocal, local); err != nil {
+		return nil, err
+	}
 	return local, nil
 }
 
@@ -555,10 +612,15 @@ type targetImpl interface {
 }
 
 func newFileImpl() targetImpl {
+	memTargets := &memoryCache{}
 	if noCache() {
-		return &memoryCache{targets: memoryTargets}
+		return memTargets
+	}
+	// Otherwise use a disk-cache with in-memory cached targets.
+	return &diskCache{
+		base:   cachedTargetsDir(rootCacheDir()),
+		memory: memTargets,
 	}
-	return &diskCache{base: cachedTargetsDir(rootCacheDir())}
 }
 
 // In-memory cache for targets
@@ -587,15 +649,25 @@ func (m *memoryCache) Get(p string) ([]byte, error) {
 
 // On-disk cache for targets
 type diskCache struct {
+	// Base directory for accessing targets.
 	base string
+	// An in-memory map of targets that are kept in sync.
+	memory *memoryCache
 }
 
 func (d *diskCache) Get(p string) ([]byte, error) {
+	// Read from the in-memory cache first.
+	if b, err := d.memory.Get(p); err == nil {
+		return b, nil
+	}
 	fp := filepath.FromSlash(filepath.Join(d.base, p))
 	return os.ReadFile(fp)
 }
 
 func (d *diskCache) Set(p string, b []byte) error {
+	if err := d.memory.Set(p, b); err != nil {
+		return err
+	}
 	if err := os.MkdirAll(d.base, 0700); err != nil {
 		return fmt.Errorf("creating targets dir: %w", err)
 	}
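The new diskCache pairs the on-disk targets directory with an in-memory map: Set writes through to both tiers and Get prefers memory, so repeated verifications in one process avoid disk reads. A compact, runnable sketch of the same write-through layering (types and names are illustrative, not the cosign ones):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// memCache is the fast tier; keys are target names.
type memCache struct{ m map[string][]byte }

func (c *memCache) get(k string) ([]byte, bool) { b, ok := c.m[k]; return b, ok }
func (c *memCache) set(k string, b []byte)      { c.m[k] = b }

// tieredCache writes through to disk and reads memory first,
// mirroring the diskCache in the patch above.
type tieredCache struct {
	mem  *memCache
	base string
}

func (t *tieredCache) Set(k string, b []byte) error {
	t.mem.set(k, b)
	if err := os.MkdirAll(t.base, 0o700); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(t.base, k), b, 0o600)
}

func (t *tieredCache) Get(k string) ([]byte, error) {
	if b, ok := t.mem.get(k); ok {
		return b, nil // hit: no disk read
	}
	return os.ReadFile(filepath.Join(t.base, k))
}

func main() {
	c := &tieredCache{mem: &memCache{m: map[string][]byte{}}, base: os.TempDir()}
	_ = c.Set("timestamp.json", []byte(`{}`))
	b, err := c.Get("timestamp.json")
	fmt.Println(string(b), err)
}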
diff --git a/pkg/cosign/tuf/client_test.go b/pkg/cosign/tuf/client_test.go
index e7ebb68faa8..7a692835786 100644
--- a/pkg/cosign/tuf/client_test.go
+++ b/pkg/cosign/tuf/client_test.go
@@ -29,6 +29,7 @@ import (
 	"reflect"
 	"sort"
 	"strings"
+	"sync"
 	"testing"
 	"testing/fstest"
 	"time"
@@ -61,7 +62,7 @@ func TestNewFromEnv(t *testing.T) {
 	}
 	checkTargetsAndMeta(t, tuf)
-	tuf.Close()
+	resetForTests()
 
 	// Now try with expired targets
 	forceExpiration(t, true)
@@ -70,7 +71,7 @@ func TestNewFromEnv(t *testing.T) {
 		t.Fatal(err)
 	}
 	checkTargetsAndMeta(t, tuf)
-	tuf.Close()
+	resetForTests()
 
 	if err := Initialize(ctx, DefaultRemoteRoot, nil); err != nil {
 		t.Error()
@@ -85,7 +86,7 @@ func TestNewFromEnv(t *testing.T) {
 		t.Fatal(err)
 	}
 	checkTargetsAndMeta(t, tuf)
-	tuf.Close()
+	resetForTests()
 }
 
 func TestNoCache(t *testing.T) {
@@ -101,7 +102,7 @@ func TestNoCache(t *testing.T) {
 		t.Fatal(err)
 	}
 	checkTargetsAndMeta(t, tuf)
-	tuf.Close()
+	resetForTests()
 
 	// Force expiration so we have some content to download
 	forceExpiration(t, true)
@@ -111,12 +112,13 @@ func TestNoCache(t *testing.T) {
 		t.Fatal(err)
 	}
 	checkTargetsAndMeta(t, tuf)
-	tuf.Close()
+	resetForTests()
 
 	// No filesystem writes when using SIGSTORE_NO_CACHE.
 	if l := dirLen(t, td); l != 0 {
 		t.Errorf("expected no filesystem writes, got %d entries", l)
 	}
+	resetForTests()
 }
 
 func TestCache(t *testing.T) {
@@ -137,7 +139,7 @@ func TestCache(t *testing.T) {
 		t.Fatal(err)
 	}
 	checkTargetsAndMeta(t, tuf)
-	tuf.Close()
+	resetForTests()
 
 	cachedDirLen := dirLen(t, td)
 	if cachedDirLen == 0 {
 		t.Errorf("expected filesystem writes, got %d entries", cachedDirLen)
@@ -145,11 +147,11 @@ func TestCache(t *testing.T) {
 
 	// Nothing should get downloaded if everything is up to date.
 	forceExpiration(t, false)
-	tuf, err = NewFromEnv(ctx)
+	_, err = NewFromEnv(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
-	tuf.Close()
+	resetForTests()
 
 	if l := dirLen(t, td); cachedDirLen != l {
 		t.Errorf("expected no filesystem writes, got %d entries", l-cachedDirLen)
@@ -166,7 +168,7 @@ func TestCache(t *testing.T) {
 		t.Errorf("expected filesystem writes, got %d entries", l)
 	}
 	checkTargetsAndMeta(t, tuf)
-	tuf.Close()
+	resetForTests()
 }
 
 func TestCustomRoot(t *testing.T) {
@@ -205,7 +207,7 @@ func TestCustomRoot(t *testing.T) {
 	if b, err := tufObj.GetTarget("foo.txt"); err != nil || !bytes.Equal(b, []byte("foo")) {
 		t.Fatal(err)
 	}
-	tufObj.Close()
+	resetForTests()
 
 	// Force expiration on the first timestamp and internal go-tuf verification.
 	forceExpirationVersion(t, 1)
@@ -214,9 +216,11 @@ func TestCustomRoot(t *testing.T) {
 		return true
 	}
 
+	// This should cause an error that remote metadata is expired.
 	if _, err = NewFromEnv(ctx); err == nil {
 		t.Errorf("expected expired timestamp from the remote")
 	}
+
 	// Let internal TUF verification succeed normally now.
 	verify.IsExpired = oldIsExpired
 
@@ -224,6 +228,7 @@ func TestCustomRoot(t *testing.T) {
 	updateTufRepo(t, td, r, "foo1")
 
 	// Use newTuf and successfully get updated metadata using the cached remote location.
+	resetForTests()
 	tufObj, err = NewFromEnv(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -231,7 +236,7 @@ func TestCustomRoot(t *testing.T) {
 	if b, err := tufObj.GetTarget("foo.txt"); err != nil || !bytes.Equal(b, []byte("foo1")) {
 		t.Fatal(err)
 	}
-	tufObj.Close()
+	resetForTests()
 }
 
 func TestGetTargetsByMeta(t *testing.T) {
@@ -266,7 +271,7 @@ func TestGetTargetsByMeta(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer tufObj.Close()
+	defer resetForTests()
 	// Fetch a target with no custom metadata.
 	targets, err := tufObj.GetTargetsByMeta(UnknownUsage, []string{"fooNoCustom.txt"})
 	if err != nil {
@@ -361,10 +366,6 @@ func TestUpdatedTargetNamesEmbedded(t *testing.T) {
 	origEmbedded := GetEmbedded
 	origDefaultRemote := GetRemoteRoot
-	defer func() {
-		GetEmbedded = origEmbedded
-		GetRemoteRoot = origDefaultRemote
-	}()
 
 	// Create an "expired" embedded repository that does not contain newTarget.
 	ctx := context.Background()
@@ -373,13 +374,18 @@ func TestUpdatedTargetNamesEmbedded(t *testing.T) {
 	mapfs := makeMapFS(repository)
 	GetEmbedded = func() fs.FS { return mapfs }
 
-	oldIsExpired := verify.IsExpired
+	oldIsExpired := isExpiredTimestamp
 	isExpiredTimestamp = func(metadata []byte) bool {
 		m, _ := store.GetMeta()
 		timestampExpires, _ := getExpiration(m["timestamp.json"])
 		metadataExpires, _ := getExpiration(metadata)
 		return metadataExpires.Sub(*timestampExpires) <= 0
 	}
+	defer func() {
+		GetEmbedded = origEmbedded
+		GetRemoteRoot = origDefaultRemote
+		isExpiredTimestamp = oldIsExpired
+	}()
 
 	// Assert that the embedded repository does not contain the newTarget.
 	newTarget := "fooNew.txt"
@@ -402,7 +408,7 @@ func TestUpdatedTargetNamesEmbedded(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer tufObj.Close()
+	defer resetForTests()
 
 	// Try to retrieve the newly added target.
 	targets, err := tufObj.GetTargetsByMeta(Fulcio, []string{"fooNoCustom.txt"})
@@ -418,7 +424,6 @@ func TestUpdatedTargetNamesEmbedded(t *testing.T) {
 		cmpopts.SortSlices(func(a, b string) bool { return a < b })) {
 		t.Fatalf("target data mismatched, expected: %v, got: %v", expectedTB, targetBytes)
 	}
-	verify.IsExpired = oldIsExpired
 }
 
 func checkTargetsAndMeta(t *testing.T, tuf *TUF) {
@@ -622,3 +627,23 @@ func updateTufRepo(t *testing.T, td string, r *tuf.Repo, targetData string) {
 		t.Error(err)
 	}
 }
+func TestConcurrentAccess(t *testing.T) {
+	var wg sync.WaitGroup
+
+	for i := 0; i < 20; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			tufObj, err := NewFromEnv(context.Background())
+			if err != nil {
+				t.Errorf("Failed to construct NewFromEnv: %s", err)
+			}
+			if tufObj == nil {
+				t.Error("Got back nil tufObj")
+			}
+			time.Sleep(1 * time.Second)
+		}()
+	}
+	wg.Wait()
+	resetForTests()
+}
diff --git a/pkg/cosign/tuf/testutils.go b/pkg/cosign/tuf/testutils.go
index 729af872db6..d4e76aa4f69 100644
--- a/pkg/cosign/tuf/testutils.go
+++ b/pkg/cosign/tuf/testutils.go
@@ -119,8 +119,12 @@ func NewSigstoreTufRepo(t *testing.T, root TestSigstoreRoot) (tuf.LocalStore, *t
 	if !ok {
 		t.Error(err)
 	}
+	resetForTests()
 	if err := Initialize(ctx, s.URL, rootBytes); err != nil {
 		t.Error(err)
 	}
+	t.Cleanup(func() {
+		resetForTests()
+	})
 	return remote, r
 }
diff --git a/pkg/cosign/verify.go b/pkg/cosign/verify.go
index 0df4a32063a..ddc22828214 100644
--- a/pkg/cosign/verify.go
+++ b/pkg/cosign/verify.go
@@ -526,7 +526,7 @@ func VerifyImageSignature(ctx context.Context, sig oci.Signature, h v1.Hash, co
 		}
 	}
 
-	bundleVerified, err = VerifyBundle(ctx, sig)
+	bundleVerified, err = VerifyBundle(ctx, sig, co.RekorClient)
 	if err != nil && co.RekorClient == nil {
 		return false, fmt.Errorf("unable to verify bundle: %w", err)
 	}
@@ -705,7 +705,7 @@ func verifyImageAttestations(ctx context.Context, atts oci.Signatures, h v1.Hash
 		}
 	}
 
-	verified, err := VerifyBundle(ctx, att)
+	verified, err := VerifyBundle(ctx, att, co.RekorClient)
 	if err != nil && co.RekorClient == nil {
 		return fmt.Errorf("unable to verify bundle: %w", err)
 	}
@@ -753,7 +753,7 @@ func CheckExpiry(cert *x509.Certificate, it time.Time) error {
 	return nil
 }
 
-func VerifyBundle(ctx context.Context, sig oci.Signature) (bool, error) {
+func VerifyBundle(ctx context.Context, sig oci.Signature, rekorClient *client.Rekor) (bool, error) {
 	bundle, err := sig.Bundle()
 	if err != nil {
 		return false, err
@@ -765,7 +765,7 @@ func VerifyBundle(ctx context.Context, sig oci.Signature) (bool, error) {
 		return false, err
 	}
 
-	publicKeys, err := GetRekorPubs(ctx)
+	publicKeys, err := GetRekorPubs(ctx, rekorClient)
 	if err != nil {
 		return false, fmt.Errorf("retrieving rekor public key: %w", err)
 	}
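TestConcurrentAccess above is the regression test for the singleton: twenty goroutines race into NewFromEnv and must all succeed with a non-nil client. The property it checks can be demonstrated in miniature against any Once-guarded constructor; a runnable sketch (run with go run -race to exercise the guarantee):

package main

import (
	"fmt"
	"sync"
)

var (
	instance *int
	once     = new(sync.Once)
)

func get() *int {
	once.Do(func() {
		v := 42 // expensive initialization happens exactly once
		instance = &v
	})
	return instance
}

func main() {
	var wg sync.WaitGroup
	results := make([]*int, 20)
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			results[i] = get()
		}(i)
	}
	wg.Wait()
	for _, r := range results {
		if r != results[0] {
			panic("saw two distinct instances")
		}
	}
	fmt.Println("all goroutines shared one instance:", *results[0])
}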
diff --git a/pkg/cosign/verify_test.go b/pkg/cosign/verify_test.go
index bfc54777af7..115aba52006 100644
--- a/pkg/cosign/verify_test.go
+++ b/pkg/cosign/verify_test.go
@@ -42,7 +42,6 @@ import (
 	"github.com/secure-systems-lab/go-securesystemslib/dsse"
 	"github.com/sigstore/cosign/internal/pkg/cosign/rekor/mock"
 	"github.com/sigstore/cosign/pkg/cosign/bundle"
-	ctuf "github.com/sigstore/cosign/pkg/cosign/tuf"
 	"github.com/sigstore/cosign/pkg/oci/static"
 	"github.com/sigstore/cosign/pkg/types"
 	"github.com/sigstore/cosign/test"
@@ -225,11 +224,6 @@ func TestVerifyImageSignatureWithNoChain(t *testing.T) {
 	if err != nil {
 		t.Fatalf("creating signer: %v", err)
 	}
-	testSigstoreRoot := ctuf.TestSigstoreRoot{
-		Rekor:             sv,
-		FulcioCertificate: rootCert,
-	}
-	_, _ = ctuf.NewSigstoreTufRepo(t, testSigstoreRoot)
 
 	leafCert, privKey, _ := test.GenerateLeafCert("subject", "oidc-issuer", rootCert, rootKey)
 	pemLeaf := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: leafCert.Raw})
@@ -250,12 +244,14 @@ func TestVerifyImageSignatureWithNoChain(t *testing.T) {
 	opts := []static.Option{static.WithCertChain(pemLeaf, []byte{}), static.WithBundle(rekorBundle)}
 	ociSig, _ := static.NewSignature(payload, base64.StdEncoding.EncodeToString(signature), opts...)
 
+	// TODO(asraa): Re-enable passing test when Rekor public keys can be set in CheckOpts,
+	// instead of relying on the singleton TUF instance.
 	verified, err := VerifyImageSignature(context.TODO(), ociSig, v1.Hash{}, &CheckOpts{RootCerts: rootPool})
-	if err != nil {
-		t.Fatalf("unexpected error while verifying signature, expected no error, got %v", err)
+	if err == nil {
+		t.Fatalf("expected error due to custom Rekor public key")
 	}
-	if verified == false {
-		t.Fatalf("expected verified=true, got verified=false")
+	if verified == true {
+		t.Fatalf("expected verified=false, got verified=true")
 	}
 }
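With the ctuf test repository removed, this test can no longer inject a custom Rekor key, hence the inverted expectations and the TODO about CheckOpts. Until that lands, the documented escape hatch is the SIGSTORE_REKOR_PUBLIC_KEY override described in tlog.go; a sketch of that path, assuming this patch's GetRekorPubs signature (the rekor.pub file path is hypothetical, and a nil Rekor client is fine because SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY is unset):

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/sigstore/cosign/pkg/cosign"
)

func main() {
	// SIGSTORE_REKOR_PUBLIC_KEY points GetRekorPubs at a local PEM file,
	// bypassing TUF entirely; "rekor.pub" here is a hypothetical path.
	os.Setenv("SIGSTORE_REKOR_PUBLIC_KEY", "rekor.pub")

	// Passing a nil Rekor client is fine unless
	// SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY is also set.
	keys, err := cosign.GetRekorPubs(context.Background(), nil)
	if err != nil {
		log.Fatalf("getting rekor public keys: %v", err)
	}
	for id, k := range keys {
		fmt.Printf("log ID %s (status %v)\n", id, k.Status)
	}
}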