From 12de7edda9e2ce7d4f8fc43920dcb35af42be682 Mon Sep 17 00:00:00 2001 From: jagathprakash <31057312+jagathprakash@users.noreply.github.com> Date: Mon, 13 Feb 2023 18:12:08 -0500 Subject: [PATCH] [TEP-0089] SPIRE for non-falsifiable provenance. This PR is a part of a larger set of PRs to provide non-falsifiable provenance through SPIRE. In particular this PR uses the SPIRE infrastructure which has already been merged to sign TaskRunStatus. It also has support to verify if TaskRunStatus has been modified by another workload between reconciles. Update pkg/pod/pod.go Co-authored-by: Jerop Kipruto Update pkg/pod/pod.go Co-authored-by: Jerop Kipruto Update pkg/pod/pod.go Co-authored-by: Jerop Kipruto --- docs/spire.md | 256 ++++++++++- .../v1beta1/pipelineruns/4808-regression.yaml | 2 +- pkg/apis/config/feature_flags.go | 5 + pkg/apis/config/feature_flags_test.go | 22 + pkg/pod/pod.go | 38 ++ pkg/pod/pod_test.go | 162 +++++++ pkg/pod/status.go | 87 +++- pkg/pod/status_test.go | 403 ++++++++++++++++- pkg/reconciler/taskrun/controller.go | 5 +- pkg/reconciler/taskrun/taskrun.go | 63 ++- pkg/reconciler/taskrun/taskrun_test.go | 425 ++++++++++++++++-- pkg/spire/controller.go | 26 +- pkg/spire/spire_mock.go | 10 +- pkg/spire/spire_test.go | 25 ++ pkg/spire/verify.go | 5 +- test/e2e-common.sh | 59 +++ test/e2e-tests.sh | 16 + test/featureflags.go | 33 ++ test/init_test.go | 42 ++ test/taskrun_test.go | 231 ++++++++++ .../patch/pipeline-controller-spire.json | 56 +++ test/testdata/spire/config-spire.yaml | 17 + test/testdata/spire/spiffe-csi-driver.yaml | 20 + test/testdata/spire/spire-agent.yaml | 208 +++++++++ test/testdata/spire/spire-server.yaml | 211 +++++++++ 25 files changed, 2363 insertions(+), 64 deletions(-) create mode 100644 test/testdata/patch/pipeline-controller-spire.json create mode 100644 test/testdata/spire/config-spire.yaml create mode 100644 test/testdata/spire/spiffe-csi-driver.yaml create mode 100644 test/testdata/spire/spire-agent.yaml create mode 100644 test/testdata/spire/spire-server.yaml diff --git a/docs/spire.md b/docs/spire.md index 31553910683..21ff5e76018 100644 --- a/docs/spire.md +++ b/docs/spire.md @@ -6,13 +6,58 @@ weight: 1660 --> ⚠️ This is a work in progress: SPIRE support is not yet functional -TaskRun result attestations is currently an alpha experimental feature. Currently all that is implemented is support for configuring Tekton to connect to SPIRE. See TEP-0089 for details on the overall design and feature set. +TaskRun result attestations is currently an alpha experimental feature. Currently all that is implemented is support for configuring Tekton to connect to SPIRE and enabling TaskRun to sign and verify the TaskRunStatus. See [TEP-0089](https://github.com/tektoncd/community/blob/main/teps/0089-nonfalsifiable-provenance-support.md) for details on the overall design and feature set. This being a large feature, this will be implemented in the following phases. This document will be updated as we implement new phases. 1. Add a client for SPIRE (done). -2. Add a configMap which initializes SPIRE (in progress). -3. Modify TaskRun to sign and verify TaskRun Results using SPIRE. -4. Modify Tekton Chains to verify the TaskRun Results. +2. Add a configMap which initializes SPIRE (done). +3. Modify TaskRun to sign and verify TaskRunStatus using SPIRE (done). +4. Enabling Chains to verify the TaskRun Results. 
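As a quick orientation before the details below: signing is gated on the `feature-flags` ConfigMap, and the controller code added in this PR checks that flag through the config store. The following is a minimal sketch that mirrors the feature-flag test added in `pkg/apis/config`; the ConfigMap contents are illustrative, and operators should follow the enabling steps later in this document rather than this snippet.

```go
package main

import (
	"context"
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/config"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"knative.dev/pkg/logging"
)

func main() {
	ctx := context.Background()

	// Simulate the "feature-flags" ConfigMap that operators edit.
	// Both flags are set together here, matching the tests in this PR.
	featureFlags := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "feature-flags"},
		Data: map[string]string{
			"enable-api-fields":         "alpha",
			"enforce-nonfalsifiability": config.EnforceNonfalsifiabilityWithSpire, // "spire"
		},
	}

	// The config store projects ConfigMaps into the context, as the controller
	// does at startup; reconcilers then consult config.IsSpireEnabled(ctx).
	store := config.NewStore(logging.FromContext(ctx).Named("config-store"))
	store.OnConfigChanged(featureFlags)
	ctx = store.ToContext(ctx)

	fmt.Println("SPIRE enabled:", config.IsSpireEnabled(ctx)) // true
}
```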
+ +When the TaskRun result attestations feature is [enabled](./spire.md#enabling-taskrun-result-attestations) all TaskRuns will produce a signature alongside its results, which can then be used to validate its provenance. For example, a TaskRun result that creates user-specified results `commit` and `url` would look like the following. `SVID`, `RESULT_MANIFEST`, `RESULT_MANIFEST.sig`, `commit.sig` and `url.sig` are generated attestations by the integration of SPIRE and Tekton Controller. + +Parsed, the fields would be: +``` +... + +... +📝 Results + + NAME VALUE + ∙ RESULT_MANIFEST commit,url,SVID,commit.sig,url.sig + ∙ RESULT_MANIFEST.sig MEUCIQD55MMII9SEk/esQvwNLGC43y7efNGZ+7fsTdq+9vXYFAIgNoRW7cV9WKriZkcHETIaAKqfcZVJfsKbEmaDyohDSm4= + ∙ SVID -----BEGIN CERTIFICATE----- +MIICGzCCAcGgAwIBAgIQH9VkLxKkYMidPIsofckRQTAKBggqhkjOPQQDAjAeMQsw +CQYDVQQGEwJVUzEPMA0GA1UEChMGU1BJRkZFMB4XDTIyMDIxMTE2MzM1MFoXDTIy +MDIxMTE3MzQwMFowHTELMAkGA1UEBhMCVVMxDjAMBgNVBAoTBVNQSVJFMFkwEwYH +KoZIzj0CAQYIKoZIzj0DAQcDQgAEBRdg3LdxVAELeH+lq8wzdEJd4Gnt+m9G0Qhy +NyWoPmFUaj9vPpvOyRgzxChYnW0xpcDWihJBkq/EbusPvQB8CKOB4TCB3jAOBgNV +HQ8BAf8EBAMCA6gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1Ud +EwEB/wQCMAAwHQYDVR0OBBYEFID7ARM5+vwzvnLPMO7Icfnj7l7hMB8GA1UdIwQY +MBaAFES3IzpGDqgV3QcQNgX8b/MBwyAtMF8GA1UdEQRYMFaGVHNwaWZmZTovL2V4 +YW1wbGUub3JnL25zL2RlZmF1bHQvdGFza3J1bi9jYWNoZS1pbWFnZS1waXBlbGlu +ZXJ1bi04ZHE5Yy1mZXRjaC1mcm9tLWdpdDAKBggqhkjOPQQDAgNIADBFAiEAi+LR +JkrZn93PZPslaFmcrQw3rVcEa4xKmPleSvQaBoACIF1QB+q1uwH6cNvWdbLK9g+W +T9Np18bK0xc6p5SuTM2C +-----END CERTIFICATE----- + ∙ commit aa79de59c4bae24e32f15fda467d02ae9cd94b01 + ∙ commit.sig MEQCIEJHk+8B+mCFozp0F52TQ1AadlhEo1lZNOiOnb/ht71aAiBCE0otKB1R0BktlPvweFPldfZfjG0F+NUSc2gPzhErzg== + ∙ url https://github.com/buildpacks/samples + ∙ url.sig MEUCIF0Fuxr6lv1MmkreqDKcPH3m+eXp+gY++VcxWgGCx7T1AiEA9U/tROrKuCGfKApLq2A9EModbdoGXyQXFOpAa0aMpOg= +``` + +However, the verification materials are removed from the final results as part of the TaskRun status. It is stored in the termination messages (more details below): + +``` +$ tkn tr describe cache-image-pipelinerun-8dq9c-fetch-from-git +... + +... +📝 Results + NAME VALUE + ∙ commit aa79de59c4bae24e32f15fda467d02ae9cd94b01 + ∙ url https://github.com/buildpacks/samples +``` ## Architecture Overview @@ -64,7 +109,7 @@ When a TaskRun is created: ## Enabling TaskRun result attestations To enable TaskRun attestations: -1. Make sure `enforce-nonfalsifiability` is set to `"spire"` in the `feature-flags` configmap, see [`install.md`](./install.md#customizing-the-pipelines-controller-behavior) for details +1. Make sure `enforce-nonfalsifiability` is set to `"spire"` in the `feature-flags` configmap, see [`additional-configs.md`](./additional-configs.md#customizing-the-pipelines-controller-behavior) for details 1. Create a SPIRE deployment containing a SPIRE server, SPIRE agents and the SPIRE CSI driver, for convenience, [this sample single cluster deployment](https://github.com/spiffe/spiffe-csi/tree/main/example/config) can be used. 1. Register the SPIRE workload entry for Tekton with the "Admin" flag, which will allow the Tekton controller to communicate with the SPIRE server to manage the TaskRun identities dynamically. ``` @@ -127,3 +172,204 @@ To enable TaskRun attestations: # spire-node-alias-prefix specifies the SPIRE node alias prefix to use. 
spire-node-alias-prefix: "/tekton-node/" ``` + +## Sample TaskRun attestation + +The following example shows how this feature works: + +```yaml +kind: TaskRun +apiVersion: tekton.dev/v1beta1 +metadata: + name: non-falsifiable-provenance +spec: + timeout: 60s + taskSpec: + steps: + - name: non-falsifiable + image: ubuntu + script: | + #!/usr/bin/env bash + printf "%s" "hello" > "$(results.foo.path)" + printf "%s" "world" > "$(results.bar.path)" + results: + - name: foo + - name: bar +``` + + +The termination message is: +``` +message: '[{"key":"RESULT_MANIFEST","value":"foo,bar","type":1},{"key":"RESULT_MANIFEST.sig","value":"MEQCIB4grfqBkcsGuVyoQd9KUVzNZaFGN6jQOKK90p5HWHqeAiB7yZerDA+YE3Af/ALG43DQzygiBpKhTt8gzWGmpvXJFw==","type":1},{"key":"SVID","value":"-----BEGIN + CERTIFICATE-----\nMIICCjCCAbCgAwIBAgIRALH94zAZZXdtPg97O5vG5M0wCgYIKoZIzj0EAwIwHjEL\nMAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMTQxNTUzNTlaFw0y\nMjAzMTQxNjU0MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG\nByqGSM49AgEGCCqGSM49AwEHA0IABPLzFTDY0RDpjKb+eZCIWgUw9DViu8/pM8q7\nHMTKCzlyGqhaU80sASZfpkZvmi72w+gLszzwVI1ZNU5e7aCzbtSjgc8wgcwwDgYD\nVR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV\nHRMBAf8EAjAAMB0GA1UdDgQWBBSsUvspy+/Dl24pA1f+JuNVJrjgmTAfBgNVHSME\nGDAWgBSOMyOHnyLLGxPSD9RRFL+Yhm/6qzBNBgNVHREERjBEhkJzcGlmZmU6Ly9l\neGFtcGxlLm9yZy9ucy9kZWZhdWx0L3Rhc2tydW4vbm9uLWZhbHNpZmlhYmxlLXBy\nb3ZlbmFuY2UwCgYIKoZIzj0EAwIDSAAwRQIhAM4/bPAH9dyhBEj3DbwtJKMyEI56\n4DVrP97ps9QYQb23AiBiXWrQkvRYl0h4CX0lveND2yfqLrGdVL405O5NzCcUrA==\n-----END + CERTIFICATE-----\n","type":1},{"key":"bar","value":"world","type":1},{"key":"bar.sig","value":"MEUCIQDOtg+aEP1FCr6/FsHX+bY1d5abSQn2kTiUMg4Uic2lVQIgTVF5bbT/O77VxESSMtQlpBreMyw2GmKX2hYJlaOEH1M=","type":1},{"key":"foo","value":"hello","type":1},{"key":"foo.sig","value":"MEQCIBr+k0i7SRSyb4h96vQE9hhxBZiZb/2PXQqReOKJDl/rAiBrjgSsalwOvN0zgQay0xQ7PRbm5YSmI8tvKseLR8Ryww==","type":1}]' +``` + +Parsed, the fields are: +- `RESULT_MANIFEST`: List of results that should be present, to prevent pick and choose attacks +- `RESULT_MANIFEST.sig`: The signature of the result manifest +- `SVID`: The x509 certificate that will be used to verify the signature trust chain to the authority +- `*.sig`: The signature of each individual result output +``` + ∙ RESULT_MANIFEST foo,bar + ∙ RESULT_MANIFEST.sig MEQCIB4grfqBkcsGuVyoQd9KUVzNZaFGN6jQOKK90p5HWHqeAiB7yZerDA+YE3Af/ALG43DQzygiBpKhTt8gzWGmpvXJFw== + ∙ SVID -----BEGIN CERTIFICATE----- +MIICCjCCAbCgAwIBAgIRALH94zAZZXdtPg97O5vG5M0wCgYIKoZIzj0EAwIwHjEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMTQxNTUzNTlaFw0y +MjAzMTQxNjU0MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABPLzFTDY0RDpjKb+eZCIWgUw9DViu8/pM8q7 +HMTKCzlyGqhaU80sASZfpkZvmi72w+gLszzwVI1ZNU5e7aCzbtSjgc8wgcwwDgYD +VR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV +HRMBAf8EAjAAMB0GA1UdDgQWBBSsUvspy+/Dl24pA1f+JuNVJrjgmTAfBgNVHSME +GDAWgBSOMyOHnyLLGxPSD9RRFL+Yhm/6qzBNBgNVHREERjBEhkJzcGlmZmU6Ly9l +eGFtcGxlLm9yZy9ucy9kZWZhdWx0L3Rhc2tydW4vbm9uLWZhbHNpZmlhYmxlLXBy +b3ZlbmFuY2UwCgYIKoZIzj0EAwIDSAAwRQIhAM4/bPAH9dyhBEj3DbwtJKMyEI56 +4DVrP97ps9QYQb23AiBiXWrQkvRYl0h4CX0lveND2yfqLrGdVL405O5NzCcUrA== +-----END CERTIFICATE----- + ∙ bar world + ∙ bar.sig MEUCIQDOtg+aEP1FCr6/FsHX+bY1d5abSQn2kTiUMg4Uic2lVQIgTVF5bbT/O77VxESSMtQlpBreMyw2GmKX2hYJlaOEH1M= + ∙ foo hello + ∙ foo.sig MEQCIBr+k0i7SRSyb4h96vQE9hhxBZiZb/2PXQqReOKJDl/rAiBrjgSsalwOvN0zgQay0xQ7PRbm5YSmI8tvKseLR8Ryww== +``` + + +However, the verification materials are removed from the results as part of the TaskRun 
status:
+```console
+$ tkn tr describe non-falsifiable-provenance
+Name:              non-falsifiable-provenance
+Namespace:         default
+Service Account:   default
+Timeout:           1m0s
+Labels:
+ app.kubernetes.io/managed-by=tekton-pipelines
+
+🌡️ Status
+
+STARTED          DURATION     STATUS
+38 seconds ago   36 seconds   Succeeded
+
+📝 Results
+
+ NAME    VALUE
+ ∙ bar   world
+ ∙ foo   hello
+
+🦶 Steps
+
+ NAME                STATUS
+ ∙ non-falsifiable   Completed
+```
+
+## How is the result being verified
+
+The signatures are verified by the Tekton controller. The verification proceeds as follows:
+
+- Verifying the SVID
+  - Obtain the trust bundle from the SPIRE server
+  - Verify the SVID against the trust bundle
+  - Verify that the SVID SPIFFE ID belongs to the correct TaskRun
+- Verifying the result manifest
+  - Verify the content of `RESULT_MANIFEST` against the `RESULT_MANIFEST.sig` field using the SVID public key
+  - Verify that there is a corresponding field for every item listed in `RESULT_MANIFEST` (besides the SVID and `*.sig` fields)
+- Verifying the individual result fields
+  - For each item in the results, verify its content against its associated `.sig` field
+
+## TaskRun Status attestations
+
+Each TaskRun status written by the tekton-pipelines-controller is signed to ensure that it cannot be tampered with externally.
+On each retrieval of the TaskRun, the tekton-pipelines-controller checks whether the status is initialized and whether the
+signature validates the current status.
+The signature and SVID are stored as annotations on the TaskRun status field and can be verified by a client.
+
+The verification is done on every consumption of the TaskRun, except when the TaskRun is uninitialized. When uninitialized, the
+tekton-pipelines-controller is not influenced by fields in the status and thus will not sign an incorrect reflection of the TaskRun.
+
+The spec and the TaskRun annotations/labels are not signed, because valid interactions with them come from other controllers or
+users (for example, cancelling a TaskRun). Editing the object annotations/labels or the spec will therefore not make the status field unverifiable.
+
+As the TaskRun progresses, the pipelines controller reconciles the TaskRun object and continually verifies the current hash against `tekton.dev/status-hash-sig` before updating the hash to match the new status and creating a new signature.
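To make the annotation scheme above concrete, here is a minimal client-side sketch of checking `tekton.dev/status-hash-sig` against `tekton.dev/status-hash` using the public key of the `tekton.dev/controller-svid` certificate (all three appear in the example annotations below). The signature algorithm (ECDSA, ASN.1-encoded, over the SHA-256 digest of the hash string) is an assumption made for illustration, and a real client would also validate the SVID itself against the SPIRE trust bundle; `pkg/spire/verify.go` in this PR is the authoritative implementation.

```go
package statuscheck

import (
	"crypto/ecdsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"errors"
)

// VerifyStatusHash checks that sigB64 (tekton.dev/status-hash-sig) is a valid
// signature over hash (tekton.dev/status-hash) under the public key of the
// PEM-encoded controller SVID (tekton.dev/controller-svid).
// Assumptions: ECDSA key and an ASN.1 signature over the SHA-256 digest of the
// hash string. It does not validate the SVID against the SPIRE trust bundle.
func VerifyStatusHash(controllerSVIDPEM, hash, sigB64 string) error {
	block, _ := pem.Decode([]byte(controllerSVIDPEM))
	if block == nil {
		return errors.New("controller SVID is not valid PEM")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}
	pub, ok := cert.PublicKey.(*ecdsa.PublicKey)
	if !ok {
		return errors.New("controller SVID does not carry an ECDSA public key")
	}
	sig, err := base64.StdEncoding.DecodeString(sigB64)
	if err != nil {
		return err
	}
	digest := sha256.Sum256([]byte(hash))
	if !ecdsa.VerifyASN1(pub, digest[:], sig) {
		return errors.New("status-hash-sig does not verify against status-hash")
	}
	return nil
}
```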
+ +An example TaskRun annotations would be: + +```console +$ tkn tr describe non-falsifiable-provenance -oyaml +apiVersion: tekton.dev/v1beta1 +kind: TaskRun +metadata: + annotations: + pipeline.tekton.dev/release: 3ee99ec + creationTimestamp: "2022-03-04T19:10:46Z" + generation: 1 + labels: + app.kubernetes.io/managed-by: tekton-pipelines + name: non-falsifiable-provenance + namespace: default + resourceVersion: "23088242" + uid: 548ebe99-d40b-4580-a9bc-afe80915e22e +spec: + serviceAccountName: default + taskSpec: + results: + - description: "" + name: foo + - description: "" + name: bar + steps: + - image: ubuntu + name: non-falsifiable + resources: {} + script: | + #!/usr/bin/env bash + sleep 30 + printf "%s" "hello" > "$(results.foo.path)" + printf "%s" "world" > "$(results.bar.path)" + timeout: 1m0s +status: + annotations: + tekton.dev/controller-svid: | + -----BEGIN CERTIFICATE----- + MIIB7jCCAZSgAwIBAgIRAI8/08uXSn9tyv7cRN87uvgwCgYIKoZIzj0EAwIwHjEL + MAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMDQxODU0NTlaFw0y + MjAzMDQxOTU1MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG + ByqGSM49AgEGCCqGSM49AwEHA0IABL+e9OjkMv+7XgMWYtrzq0ESzJi+znA/Pm8D + nvApAHg3/rEcNS8c5LgFFRzDfcs9fxGSSkL1JrELzoYul1Q13XejgbMwgbAwDgYD + VR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV + HRMBAf8EAjAAMB0GA1UdDgQWBBR+ma+yZfo092FKIM4F3yhEY8jgDDAfBgNVHSME + GDAWgBRKiCg5+YdTaQ+5gJmvt2QcDkQ6KjAxBgNVHREEKjAohiZzcGlmZmU6Ly9l + eGFtcGxlLm9yZy90ZWt0b24vY29udHJvbGxlcjAKBggqhkjOPQQDAgNIADBFAiEA + 8xVWrQr8+i6yMLDm9IUjtvTbz9ofjSsWL6c/+rxmmRYCIBTiJ/HW7di3inSfxwqK + 5DKyPrKoR8sq8Ne7flkhgbkg + -----END CERTIFICATE----- + tekton.dev/status-hash: 76692c9dcd362f8a6e4bda8ccb4c0937ad16b0d23149ae256049433192892511 + tekton.dev/status-hash-sig: MEQCIFv2bW0k4g0Azx+qaeZjUulPD8Ma3uCUn0tXQuuR1FaEAiBHQwN4XobOXmC2nddYm04AZ74YubUyNl49/vnbnR/HcQ== + completionTime: "2022-03-04T19:11:22Z" + conditions: + - lastTransitionTime: "2022-03-04T19:11:22Z" + message: All Steps have completed executing + reason: Succeeded + status: "True" + type: Succeeded + - lastTransitionTime: "2022-03-04T19:11:22Z" + message: Spire verified + reason: TaskRunResultsVerified + status: "True" + type: SignedResultsVerified + podName: non-falsifiable-provenance-pod + startTime: "2022-03-04T19:10:46Z" + steps: + ... + +``` + +## How is the status being verified + +The signature are being verified by the Tekton controller, the process of verification is as follows: + +- Verify status-hash fields + - verify `tekton.dev/status-hash` content against its associated `tekton.dev/status-hash-sig` field. If status hash does + not match invalidate the `tekton.dev/verified = no` annotation will be added + +## Further Details + +To learn more about SPIRE attestations, check out the [TEP](https://github.com/tektoncd/community/blob/main/teps/0089-nonfalsifiable-provenance-support.md). 
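Before moving from the documentation to the code changes, here is a stand-alone sketch of the result filtering described in "How is the result being verified": the termination message is a JSON array of key/value entries, and entries named `SVID`, `RESULT_MANIFEST`, or ending in `.sig` are verification material rather than user-facing results. This mirrors what `filterResultsAndResources` in `pkg/pod/status.go` does later in this patch; the `entry` type and the sample message below are illustrative stand-ins, not the real `v1beta1` types.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// entry mirrors the shape of one termination-message element,
// e.g. {"key":"foo","value":"hello","type":1}.
type entry struct {
	Key   string `json:"key"`
	Value string `json:"value"`
	Type  int    `json:"type,omitempty"`
}

func main() {
	// Abbreviated example of a signed termination message.
	msg := `[{"key":"RESULT_MANIFEST","value":"foo,bar","type":1},
		{"key":"foo","value":"hello","type":1},
		{"key":"foo.sig","value":"MEQCIBr...(truncated)","type":1},
		{"key":"bar","value":"world","type":1}]`

	var entries []entry
	if err := json.Unmarshal([]byte(msg), &entries); err != nil {
		panic(err)
	}

	var results, verification []entry
	for _, e := range entries {
		// SVID, RESULT_MANIFEST and *.sig entries are verification material;
		// only the remaining entries surface as TaskRun results.
		if e.Key == "SVID" || e.Key == "RESULT_MANIFEST" || strings.HasSuffix(e.Key, ".sig") {
			verification = append(verification, e)
			continue
		}
		results = append(results, e)
	}
	fmt.Println("results:", results)
	fmt.Println("verification material:", verification)
}
```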
diff --git a/examples/v1beta1/pipelineruns/4808-regression.yaml b/examples/v1beta1/pipelineruns/4808-regression.yaml index df4502a8a88..4ebf63c8fca 100644 --- a/examples/v1beta1/pipelineruns/4808-regression.yaml +++ b/examples/v1beta1/pipelineruns/4808-regression.yaml @@ -92,4 +92,4 @@ spec: name: result-test params: - name: RESULT_STRING_LENGTH - value: "3000" + value: "2000" diff --git a/pkg/apis/config/feature_flags.go b/pkg/apis/config/feature_flags.go index 305200b7fe9..09480544b6e 100644 --- a/pkg/apis/config/feature_flags.go +++ b/pkg/apis/config/feature_flags.go @@ -348,6 +348,11 @@ func CheckAlphaOrBetaAPIFields(ctx context.Context) bool { return cfg.FeatureFlags.EnableAPIFields == AlphaAPIFields || cfg.FeatureFlags.EnableAPIFields == BetaAPIFields } +// IsSpireEnabled checks if non-falsifiable provenance is enforced through SPIRE +func IsSpireEnabled(ctx context.Context) bool { + return FromContextOrDefaults(ctx).FeatureFlags.EnforceNonfalsifiability == EnforceNonfalsifiabilityWithSpire +} + func setEnableAPIFields(ctx context.Context, want string) context.Context { featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{ "enable-api-fields": want, diff --git a/pkg/apis/config/feature_flags_test.go b/pkg/apis/config/feature_flags_test.go index 2f85f14e98a..45ba583e8b3 100644 --- a/pkg/apis/config/feature_flags_test.go +++ b/pkg/apis/config/feature_flags_test.go @@ -318,6 +318,28 @@ func TestCheckAlphaOrBetaAPIFields(t *testing.T) { } } +func TestIsSpireEnabled(t *testing.T) { + ctx := context.Background() + if config.IsSpireEnabled(ctx) { + t.Errorf("IsSpireEnabled got true but expected to be false") + } + store := config.NewStore(logging.FromContext(ctx).Named("config-store")) + featureflags := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "feature-flags", + }, + Data: map[string]string{ + "enable-api-fields": "alpha", + "enforce-nonfalsifiability": config.EnforceNonfalsifiabilityWithSpire, + }, + } + store.OnConfigChanged(featureflags) + ctx = store.ToContext(ctx) + if !config.IsSpireEnabled(ctx) { + t.Errorf("IsSpireEnabled got false but expected to be true") + } +} + func verifyConfigFileWithExpectedFeatureFlagsConfig(t *testing.T, fileName string, expectedConfig *config.FeatureFlags) { t.Helper() cm := test.ConfigMapFromTestFile(t, fileName) diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go index 820aac62663..a5498cb6fb5 100644 --- a/pkg/pod/pod.go +++ b/pkg/pod/pod.go @@ -31,6 +31,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/internal/computeresources/tasklevel" "github.com/tektoncd/pipeline/pkg/names" + "github.com/tektoncd/pipeline/pkg/spire" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -132,6 +133,10 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec // Secrets, along with any arguments needed by Step entrypoints to process // those secrets. 
commonExtraEntrypointArgs := []string{} + // Entrypoint arg to enable or disable spire + if config.IsSpireEnabled(ctx) { + commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-enable_spire") + } credEntrypointArgs, credVolumes, credVolumeMounts, err := credsInit(ctx, taskRun.Spec.ServiceAccountName, taskRun.Namespace, b.KubeClient) if err != nil { return nil, err @@ -322,6 +327,39 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec return nil, err } + readonly := true + if config.IsSpireEnabled(ctx) { + // add SPIRE's CSI volume to the explicitly declared use volumes + volumes = append(volumes, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, + }) + + // mount SPIRE's CSI volume to each Step Container + for i := range stepContainers { + c := &stepContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }) + } + for i := range initContainers { + // mount SPIRE's CSI volume to each Init Container + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }) + } + } + mergedPodContainers := stepContainers // Merge sidecar containers with step containers. diff --git a/pkg/pod/pod_test.go b/pkg/pod/pod_test.go index 37f62924c00..961e81dbdef 100644 --- a/pkg/pod/pod_test.go +++ b/pkg/pod/pod_test.go @@ -30,6 +30,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/test/diff" "github.com/tektoncd/pipeline/test/names" corev1 "k8s.io/api/core/v1" @@ -2455,6 +2456,167 @@ func TestPodBuild_TaskLevelResourceRequirements(t *testing.T) { } } +func TestPodBuildwithSpireEnabled(t *testing.T) { + initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})} + readonly := true + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }) + } + + for _, c := range []struct { + desc string + trs v1beta1.TaskRunSpec + trAnnotation map[string]string + ts v1beta1.TaskSpec + want *corev1.PodSpec + wantAnnotations map[string]string + }{{ + desc: "simple", + ts: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Name: "name", + Image: "image", + Command: []string{"cmd"}, // avoid entrypoint lookup. 
+ }}, + }, + want: &corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: initContainers, + Containers: []corev1.Container{{ + Name: "step-name", + Image: "image", + Command: []string{"/tekton/bin/entrypoint"}, + Args: []string{ + "-wait_file", + "/tekton/downward/ready", + "-wait_file_content", + "-post_file", + "/tekton/run/0/out", + "-termination_path", + "/tekton/termination", + "-step_metadata_dir", + "/tekton/run/0/status", + "-enable_spire", + "-entrypoint", + "cmd", + "--", + }, + VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { + Name: "tekton-creds-init-home-0", + MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }}, implicitVolumeMounts...), + TerminationMessagePath: "/tekton/termination", + }}, + Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ + Name: "tekton-creds-init-home-0", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, + }), + ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + featureFlags := map[string]string{ + "enable-api-fields": "alpha", + "enforce-nonfalsifiability": "spire", + } + names.TestingSeed() + store := config.NewStore(logtesting.TestLogger(t)) + store.OnConfigChanged( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, + Data: featureFlags, + }, + ) + kubeclient := fakek8s.NewSimpleClientset( + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "service-account", Namespace: "default"}, + Secrets: []corev1.ObjectReference{{ + Name: "multi-creds", + }}, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-creds", + Namespace: "default", + Annotations: map[string]string{ + "tekton.dev/docker-0": "https://us.gcr.io", + "tekton.dev/docker-1": "https://docker.io", + "tekton.dev/git-0": "github.com", + "tekton.dev/git-1": "gitlab.com", + }}, + Type: "kubernetes.io/basic-auth", + Data: map[string][]byte{ + "username": []byte("foo"), + "password": []byte("BestEver"), + }, + }, + ) + var trAnnotations map[string]string + if c.trAnnotation == nil { + trAnnotations = map[string]string{ + ReleaseAnnotation: fakeVersion, + } + } else { + trAnnotations = c.trAnnotation + trAnnotations[ReleaseAnnotation] = fakeVersion + } + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "taskrun-name", + Namespace: "default", + Annotations: trAnnotations, + }, + Spec: c.trs, + } + + // No entrypoints should be looked up. 
+ entrypointCache := fakeCache{} + builder := Builder{ + Images: images, + KubeClient: kubeclient, + EntrypointCache: entrypointCache, + } + + got, err := builder.Build(store.ToContext(context.Background()), tr, c.ts) + if err != nil { + t.Fatalf("builder.Build: %v", err) + } + + want := kmeta.ChildName(tr.Name, "-pod") + if d := cmp.Diff(got.Name, want); d != "" { + t.Errorf("got %v; want %v", got.Name, want) + } + + if d := cmp.Diff(c.want, &got.Spec, resourceQuantityCmp, volumeSort, volumeMountSort); d != "" { + t.Errorf("Diff %s", diff.PrintWantGot(d)) + } + + if c.wantAnnotations != nil { + if d := cmp.Diff(c.wantAnnotations, got.ObjectMeta.Annotations, cmpopts.IgnoreMapEntries(ignoreReleaseAnnotation)); d != "" { + t.Errorf("Annotation Diff(-want, +got):\n%s", d) + } + } + }) + } +} + // verifyTaskLevelComputeResources verifies that the given TaskRun's containers have the expected compute resources. func verifyTaskLevelComputeResources(expectedComputeResources []ExpectedComputeResources, containers []corev1.Container) error { if len(expectedComputeResources) != len(containers) { diff --git a/pkg/pod/status.go b/pkg/pod/status.go index 233bf65b749..42b37ef205c 100644 --- a/pkg/pod/status.go +++ b/pkg/pod/status.go @@ -29,6 +29,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/termination" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -112,11 +113,15 @@ func SidecarsReady(podStatus corev1.PodStatus) bool { } // MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status. -func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod, kubeclient kubernetes.Interface, ts *v1beta1.TaskSpec) (v1beta1.TaskRunStatus, error) { +func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod, kubeclient kubernetes.Interface, ts *v1beta1.TaskSpec, spireEnabled bool, spireAPI spire.ControllerAPIClient) (v1beta1.TaskRunStatus, error) { trs := &tr.Status if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { // If the taskRunStatus doesn't exist yet, it's because we just started running markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + + if spireEnabled { + markStatusSignedResultsRunning(trs) + } } sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers) @@ -126,7 +131,7 @@ func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta if complete { updateCompletedTaskRunStatus(logger, trs, pod) } else { - updateIncompleteTaskRunStatus(trs, pod) + updateIncompleteTaskRunStatus(trs, pod, spireEnabled) } trs.PodName = pod.Name @@ -144,7 +149,7 @@ func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta } var merr *multierror.Error - if err := setTaskRunStatusBasedOnStepStatus(ctx, logger, stepStatuses, &tr, pod.Status.Phase, kubeclient, ts); err != nil { + if err := setTaskRunStatusBasedOnStepStatus(ctx, logger, stepStatuses, &tr, pod.Status.Phase, kubeclient, ts, spireEnabled, spireAPI); err != nil { merr = multierror.Append(merr, err) } @@ -155,7 +160,28 @@ func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta return *trs, merr.ErrorOrNil() } -func 
setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun, podPhase corev1.PodPhase, kubeclient kubernetes.Interface, ts *v1beta1.TaskSpec) *multierror.Error { +func setTaskRunStatusBasedOnSpireVerification(ctx context.Context, logger *zap.SugaredLogger, tr *v1beta1.TaskRun, trs *v1beta1.TaskRunStatus, + filteredResults []v1beta1.PipelineResourceResult, spireAPI spire.ControllerAPIClient) { + if tr.IsSuccessful() && spireAPI != nil && + ((tr.Status.TaskSpec != nil && len(tr.Status.TaskSpec.Results) >= 1) || len(filteredResults) >= 1) { + logger.Info("validating signed results with spire: ", trs.TaskRunResults) + if err := spireAPI.VerifyTaskRunResults(ctx, filteredResults, tr); err != nil { + logger.Errorf("failed to verify signed results with spire: %w", err) + markStatusSignedResultsFailure(trs, err.Error()) + } else { + logger.Info("successfully validated signed results with spire") + markStatusSignedResultsVerified(trs) + } + } + + // If no results and no results requested, set verified unless results were specified as part of task spec + if len(filteredResults) == 0 && (tr.Status.TaskSpec == nil || len(tr.Status.TaskSpec.Results) == 0) { + markStatusSignedResultsVerified(trs) + } +} + +func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun, podPhase corev1.PodPhase, kubeclient kubernetes.Interface, + ts *v1beta1.TaskSpec, spireEnabled bool, spireAPI spire.ControllerAPIClient) *multierror.Error { trs := &tr.Status var merr *multierror.Error @@ -178,7 +204,7 @@ func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredL } // populate task run CRD with results from sidecar logs - taskResults, pipelineResourceResults, _ := filterResultsAndResources(sidecarLogResults, specResults) + taskResults, pipelineResourceResults, _ := filterResultsAndResources(sidecarLogResults, specResults, spireEnabled) if tr.IsSuccessful() { trs.TaskRunResults = append(trs.TaskRunResults, taskResults...) trs.ResourcesResult = append(trs.ResourcesResult, pipelineResourceResults...) @@ -205,10 +231,13 @@ func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredL merr = multierror.Append(merr, err) } - taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results, specResults) + taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results, specResults, spireEnabled) if tr.IsSuccessful() { trs.TaskRunResults = append(trs.TaskRunResults, taskResults...) trs.ResourcesResult = append(trs.ResourcesResult, pipelineResourceResults...) 
+ if spireEnabled { + setTaskRunStatusBasedOnSpireVerification(ctx, logger, tr, trs, filteredResults, spireAPI) + } } msg, err = createMessageFromResults(filteredResults) if err != nil { @@ -258,7 +287,7 @@ func createMessageFromResults(results []v1beta1.PipelineResourceResult) (string, return string(bytes), nil } -func filterResultsAndResources(results []v1beta1.PipelineResourceResult, specResults []v1beta1.TaskResult) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) { +func filterResultsAndResources(results []v1beta1.PipelineResourceResult, specResults []v1beta1.TaskResult, spireEnabled bool) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) { var taskResults []v1beta1.TaskRunResult var pipelineResourceResults []v1beta1.PipelineResourceResult var filteredResults []v1beta1.PipelineResourceResult @@ -282,6 +311,12 @@ func filterResultsAndResources(results []v1beta1.PipelineResourceResult, specRes if err != nil { continue } + if spireEnabled { + if r.Key == spire.KeySVID || r.Key == spire.KeyResultManifest || strings.HasSuffix(r.Key, spire.KeySignatureSuffix) { + filteredResults = append(filteredResults, r) + continue + } + } taskRunResult = v1beta1.TaskRunResult{ Name: r.Key, Type: v1beta1.ResultsType(v.Type), @@ -362,10 +397,13 @@ func updateCompletedTaskRunStatus(logger *zap.SugaredLogger, trs *v1beta1.TaskRu trs.CompletionTime = &metav1.Time{Time: time.Now()} } -func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) { +func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod, spireEnabled bool) { switch pod.Status.Phase { case corev1.PodRunning: markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + if spireEnabled { + markStatusSignedResultsRunning(trs) + } case corev1.PodPending: switch { case IsPodExceedingNodeResources(pod): @@ -376,6 +414,9 @@ func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) markStatusRunning(trs, ReasonPullImageFailed, getWaitingMessage(pod)) default: markStatusRunning(trs, ReasonPending, getWaitingMessage(pod)) + if spireEnabled { + markStatusSignedResultsRunning(trs) + } } } } @@ -592,6 +633,36 @@ func markStatusSuccess(trs *v1beta1.TaskRunStatus) { }) } +// markStatusResultsVerified sets taskrun status to verified +func markStatusSignedResultsVerified(trs *v1beta1.TaskRunStatus) { + trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + Reason: v1beta1.TaskRunReasonResultsVerified.String(), + Message: "Successfully verified all spire signed taskrun results", + }) +} + +// markStatusFailure sets taskrun status to failure with specified reason +func markStatusSignedResultsFailure(trs *v1beta1.TaskRunStatus, message string) { + trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionFalse, + Reason: v1beta1.TaskRunReasonsResultsVerificationFailed.String(), + Message: message, + }) +} + +// markStatusRunning sets taskrun status to running +func markStatusSignedResultsRunning(trs *v1beta1.TaskRunStatus) { + trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionUnknown, + Reason: v1beta1.AwaitingTaskRunResults.String(), + Message: "Waiting upon TaskRun results and signatures to verify", + 
}) +} + // sortPodContainerStatuses reorders a pod's container statuses so that // they're in the same order as the step containers from the TaskSpec. func sortPodContainerStatuses(podContainerStatuses []corev1.ContainerStatus, podSpecContainers []corev1.Container) { diff --git a/pkg/pod/status_test.go b/pkg/pod/status_test.go index 78065ac6e14..d28470b9caa 100644 --- a/pkg/pod/status_test.go +++ b/pkg/pod/status_test.go @@ -18,7 +18,9 @@ package pod import ( "context" + "encoding/json" "fmt" + "sort" "strings" "testing" "time" @@ -29,6 +31,8 @@ import ( "github.com/tektoncd/pipeline/internal/sidecarlogresults" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" + "github.com/tektoncd/pipeline/pkg/termination" "github.com/tektoncd/pipeline/test/diff" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -88,7 +92,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) { logger, _ := logging.NewLogger("", "status") kubeclient := fakek8s.NewSimpleClientset() - merr := setTaskRunStatusBasedOnStepStatus(context.Background(), logger, c.ContainerStatuses, &tr, corev1.PodRunning, kubeclient, &v1beta1.TaskSpec{}) + merr := setTaskRunStatusBasedOnStepStatus(context.Background(), logger, c.ContainerStatuses, &tr, corev1.PodRunning, kubeclient, &v1beta1.TaskSpec{}, false, nil) if merr != nil { t.Errorf("setTaskRunStatusBasedOnStepStatus: %s", merr) } @@ -163,7 +167,7 @@ func TestSetTaskRunStatusBasedOnStepStatus_sidecar_logs(t *testing.T) { }) var wantErr *multierror.Error wantErr = multierror.Append(wantErr, c.wantErr) - merr := setTaskRunStatusBasedOnStepStatus(ctx, logger, []corev1.ContainerStatus{{}}, &tr, pod.Status.Phase, kubeclient, &v1beta1.TaskSpec{}) + merr := setTaskRunStatusBasedOnStepStatus(ctx, logger, []corev1.ContainerStatus{{}}, &tr, pod.Status.Phase, kubeclient, &v1beta1.TaskSpec{}, false, nil) if d := cmp.Diff(wantErr.Error(), merr.Error()); d != "" { t.Errorf("Got unexpected error %s", diff.PrintWantGot(d)) @@ -172,6 +176,395 @@ func TestSetTaskRunStatusBasedOnStepStatus_sidecar_logs(t *testing.T) { } } +func TestMakeTaskRunStatusVerify(t *testing.T) { + sc := &spire.MockClient{} + processConditions := cmp.Transformer("sortConditionsAndFilterMessages", func(in []apis.Condition) []apis.Condition { + for i := range in { + in[i].Message = "" + } + sort.Slice(in, func(i, j int) bool { + return in[i].Type < in[j].Type + }) + return in + }) + + terminationMessageTrans := cmp.Transformer("sortAndPrint", func(in *corev1.ContainerStateTerminated) *corev1.ContainerStateTerminated { + prs, err := termination.ParseMessage(nil, in.Message) + if err != nil { + return in + } + sort.Slice(prs, func(i, j int) bool { + return prs[i].Key < prs[j].Key + }) + + b, _ := json.Marshal(prs) + in.Message = string(b) + + return in + }) + + // test awaiting results - OK + // results + test signed termination message - OK + // results + test unsigned termination message - OK + + // no task results, no result + test signed termiantion message + // no task results, no result + test unsigned termiantion message + // force task result, no result + test unsigned termiantion message + + statusSRVUnknown := func() duckv1.Status { + status := statusRunning() + status.Conditions = append(status.Conditions, apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionUnknown, + Reason: v1beta1.AwaitingTaskRunResults.String(), + Message: "Waiting upon TaskRun 
results and signatures to verify", + }) + return status + } + + statusSRVVerified := func() duckv1.Status { + status := statusSuccess() + status.Conditions = append(status.Conditions, apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + Reason: v1beta1.TaskRunReasonResultsVerified.String(), + Message: "Successfully verified all spire signed taskrun results", + }) + return status + } + + statusSRVUnverified := func() duckv1.Status { + status := statusSuccess() + status.Conditions = append(status.Conditions, apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionFalse, + Reason: v1beta1.TaskRunReasonsResultsVerificationFailed.String(), + Message: "", + }) + return status + } + + for _, c := range []struct { + desc string + specifyTaskRunResult bool + resultOut []v1beta1.PipelineResourceResult + podStatus corev1.PodStatus + pod corev1.Pod + want v1beta1.TaskRunStatus + }{{ + // test awaiting results + desc: "running pod awaiting results", + podStatus: corev1.PodStatus{}, + + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnknown(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{}, + Sidecars: []v1beta1.SidecarState{}, + }, + }, + }, { + desc: "test result with pipeline result without signed termination message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234","resourceName":"source-image"}]`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnverified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"resultValue","type":1}]`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + ResourcesResult: []v1beta1.PipelineResourceResult{{ + Key: "digest", + Value: "sha256:1234", + ResourceName: "source-image", + }}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Type: v1beta1.ResultsTypeString, + Value: *v1beta1.NewStructuredValues("resultValue"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with pipeline result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{ + { + Key: "resultName", + Value: "resultValue", + ResultType: v1beta1.TaskRunResultType, + }, + }, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: ``, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + TaskRunResults: []v1beta1.TaskRunResult{{ + 
Name: "resultName", + Type: v1beta1.ResultsTypeString, + Value: *v1beta1.NewStructuredValues("resultValue"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test array result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{ + { + Key: "resultName", + Value: "[\"hello\",\"world\"]", + ResultType: v1beta1.TaskRunResultType, + }, + }, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: ``, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Type: v1beta1.ResultsTypeArray, + Value: *v1beta1.NewStructuredValues("hello", "world"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with no result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{}, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with no result without signed termination message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result (with task run result defined) with no result without signed termination message", + specifyTaskRunResult: true, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnverified(), + TaskRunStatusFields: 
v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + now := metav1.Now() + ctx := context.Background() + if cmp.Diff(c.pod, corev1.Pod{}) == "" { + c.pod = corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "foo", + CreationTimestamp: now, + }, + Status: c.podStatus, + } + } + + startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) + tr := v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task-run", + Namespace: "foo", + }, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + StartTime: &metav1.Time{Time: startTime}, + }, + }, + } + if c.specifyTaskRunResult { + // Specify result + tr.Status.TaskSpec = &v1beta1.TaskSpec{ + Results: []v1beta1.TaskResult{{ + Name: "some-task-result", + }}, + } + + c.want.TaskSpec = tr.Status.TaskSpec + } + + if err := sc.CreateEntries(ctx, &tr, &c.pod, 10000); err != nil { + t.Fatalf("unable to create entry for tr: %v", tr.Name) + } + + if c.resultOut != nil { + id := sc.GetIdentity(&tr) + for i := 0; i < 20; i++ { + sc.SignIdentities = append(sc.SignIdentities, id) + } + sigs, err := sc.Sign(ctx, c.resultOut) + if err != nil { + t.Fatalf("failed to sign: %v", err) + } + c.resultOut = append(c.resultOut, sigs...) + s, err := createMessageFromResults(c.resultOut) + if err != nil { + t.Fatalf("failed to create message from result: %v", err) + } + + c.podStatus.ContainerStatuses[0].State.Terminated.Message = s + c.want.TaskRunStatusFields.Steps[0].ContainerState.Terminated.Message = s + } + + logger, _ := logging.NewLogger("", "status") + kubeclient := fakek8s.NewSimpleClientset() + got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, kubeclient, &v1beta1.TaskSpec{}, true, sc) + if err != nil { + t.Errorf("MakeTaskRunResult: %s", err) + } + + // Common traits, set for test case brevity. 
+ c.want.PodName = "pod" + c.want.StartTime = &metav1.Time{Time: startTime} + + ensureTimeNotNil := cmp.Comparer(func(x, y *metav1.Time) bool { + if x == nil { + return y == nil + } + return y != nil + }) + if d := cmp.Diff(c.want, got, ignoreVolatileTime, ensureTimeNotNil, processConditions, terminationMessageTrans); d != "" { + t.Errorf("Diff %s", diff.PrintWantGot(d)) + } + if tr.Status.StartTime.Time != c.want.StartTime.Time { + t.Errorf("Expected TaskRun startTime to be unchanged but was %s", tr.Status.StartTime) + } + + if err := sc.DeleteEntry(ctx, &tr, &c.pod); err != nil { + t.Fatalf("unable to create entry for tr: %v", tr.Name) + } + }) + } +} + func TestMakeTaskRunStatus(t *testing.T) { for _, c := range []struct { desc string @@ -1232,7 +1625,7 @@ func TestMakeTaskRunStatus(t *testing.T) { } logger, _ := logging.NewLogger("", "status") kubeclient := fakek8s.NewSimpleClientset() - got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, kubeclient, &v1beta1.TaskSpec{}) + got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, kubeclient, &v1beta1.TaskSpec{}, false, nil) if err != nil { t.Errorf("MakeTaskRunResult: %s", err) } @@ -1530,7 +1923,7 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) { } logger, _ := logging.NewLogger("", "status") kubeclient := fakek8s.NewSimpleClientset() - got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, kubeclient, &c.taskSpec) + got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, kubeclient, &c.taskSpec, false, nil) if err != nil { t.Errorf("MakeTaskRunResult: %s", err) } @@ -1650,7 +2043,7 @@ func TestMakeRunStatusJSONError(t *testing.T) { logger, _ := logging.NewLogger("", "status") kubeclient := fakek8s.NewSimpleClientset() - gotTr, err := MakeTaskRunStatus(context.Background(), logger, tr, pod, kubeclient, &v1beta1.TaskSpec{}) + gotTr, err := MakeTaskRunStatus(context.Background(), logger, tr, pod, kubeclient, &v1beta1.TaskSpec{}, false, nil) if err == nil { t.Error("Expected error, got nil") } diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go index 040974f53e9..1d74960012c 100644 --- a/pkg/reconciler/taskrun/controller.go +++ b/pkg/reconciler/taskrun/controller.go @@ -32,6 +32,7 @@ import ( cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" resolution "github.com/tektoncd/pipeline/pkg/resolution/resource" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/taskrunmetrics" "go.opentelemetry.io/otel/trace" "k8s.io/client-go/tools/cache" @@ -55,7 +56,8 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock, tracerProvi limitrangeInformer := limitrangeinformer.Get(ctx) verificationpolicyInformer := verificationpolicyinformer.Get(ctx) resolutionInformer := resolutioninformer.Get(ctx) - configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger)) + spireControllerAPI := spire.GetControllerAPIClient(ctx) + configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger), spire.OnStore(ctx, logger)) configStore.WatchConfigs(cmw) entrypointCache, err := pod.NewEntrypointCache(kubeclientset) @@ -67,6 +69,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock, tracerProvi KubeClientSet: kubeclientset, PipelineClientSet: pipelineclientset, Images: opts.Images, + SpireClient: spireControllerAPI, Clock: clock, taskRunLister: 
taskRunInformer.Lister(), limitrangeLister: limitrangeInformer.Lister(), diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index b1117abbdae..13ced345bbb 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -46,6 +46,7 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" "github.com/tektoncd/pipeline/pkg/remote" resolution "github.com/tektoncd/pipeline/pkg/resolution/resource" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/taskrunmetrics" _ "github.com/tektoncd/pipeline/pkg/taskrunmetrics/fake" // Make sure the taskrunmetrics are setup "github.com/tektoncd/pipeline/pkg/trustedresources" @@ -76,6 +77,7 @@ type Reconciler struct { KubeClientSet kubernetes.Interface PipelineClientSet clientset.Interface Images pipeline.Images + SpireClient spire.ControllerAPIClient Clock clock.PassiveClock // listers index properties about resources @@ -104,6 +106,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg ctx = cloudevent.ToContext(ctx, c.cloudEventClient) ctx = initTracing(ctx, c.tracerProvider, tr) ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "TaskRun:ReconcileKind") + spireEnabled := config.IsSpireEnabled(ctx) defer span.End() span.SetAttributes(attribute.String("taskrun", tr.Name), attribute.String("namespace", tr.Namespace)) @@ -134,6 +137,23 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg // on the event to perform user facing initialisations, such has reset a CI check status afterCondition := tr.Status.GetCondition(apis.ConditionSucceeded) events.Emit(ctx, nil, afterCondition, tr) + } else if spireEnabled { + // Verify that the TaskRun Status has not been modified. + // The TaskRun status is signed only after the first reconcile. + // As such skip the verification for the first reconcile. 
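+		// If verification fails, the reconcile itself does not error out; the controller
+		// only records the outcome by setting the spire.VerifiedAnnotation on the status
+		// annotations (see below), so consumers of the TaskRun can detect the mismatch.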
+ var verified = false + if c.SpireClient != nil { + if err := c.SpireClient.VerifyStatusInternalAnnotation(ctx, tr, logger); err == nil { + verified = true + } + if !verified { + if tr.Status.Annotations == nil { + tr.Status.Annotations = map[string]string{} + } + tr.Status.Annotations[spire.VerifiedAnnotation] = "no" + } + logger.Infof("taskrun verification status: %t with hash %v \n", verified, tr.Status.Annotations[spire.TaskRunStatusHashAnnotation]) + } } // If the TaskRun is complete, run some post run fixtures when applicable @@ -305,13 +325,31 @@ func (c *Reconciler) finishReconcileUpdateEmitEvents(ctx context.Context, tr *v1 // Send k8s events and cloud events (when configured) events.Emit(ctx, beforeCondition, afterCondition, tr) - _, err := c.updateLabelsAndAnnotations(ctx, tr) + var err error + spireEnabled := config.IsSpireEnabled(ctx) + // Add status internal annotations hash only if it was verified + if spireEnabled && c.SpireClient != nil && c.SpireClient.CheckSpireVerifiedFlag(tr) { + if err := spire.CheckStatusInternalAnnotation(tr); err != nil { + err = c.SpireClient.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + logger.Warn("Failed to sign TaskRun internal status hash", zap.Error(err)) + events.EmitError(controller.GetEventRecorder(ctx), err, tr) + } else { + logger.Infof("Successfully signed TaskRun internal status with hash: %v", + tr.Status.Annotations[spire.TaskRunStatusHashAnnotation]) + } + } + } + + merr := multierror.Append(previousError, err).ErrorOrNil() + + _, err = c.updateLabelsAndAnnotations(ctx, tr) if err != nil { logger.Warn("Failed to update TaskRun labels/annotations", zap.Error(err)) events.EmitError(controller.GetEventRecorder(ctx), err, tr) } - merr := multierror.Append(previousError, err).ErrorOrNil() + merr = multierror.Append(merr, err).ErrorOrNil() if controller.IsPermanentError(previousError) { return controller.NewPermanentError(merr) } @@ -450,6 +488,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re // Get the TaskRun's Pod if it should have one. Otherwise, create the Pod. var pod *corev1.Pod + spireEnabled := config.IsSpireEnabled(ctx) if tr.Status.PodName != "" { pod, err = c.podLister.Pods(tr.Namespace).Get(tr.Status.PodName) @@ -525,6 +564,16 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re } if podconvert.SidecarsReady(pod.Status) { + if spireEnabled { + // TTL for the entry is in seconds + ttl := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute + if err = c.SpireClient.CreateEntries(ctx, tr, pod, ttl); err != nil { + logger.Errorf("Failed to create workload SPIFFE entry for taskrun %v: %v", tr.Name, err) + return err + } + logger.Infof("Created SPIFFE workload entry for %v/%v", tr.Namespace, tr.Name) + } + if err := podconvert.UpdateReady(ctx, c.KubeClientSet, *pod); err != nil { return err } @@ -534,7 +583,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re } // Convert the Pod's status to the equivalent TaskRun Status. 
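	// With SPIRE enabled, MakeTaskRunStatus (updated below to take spireEnabled and
	// c.SpireClient) also verifies the signed results from the termination message and
	// records the outcome in the SignedResultsVerified condition (see pkg/pod/status.go).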
- tr.Status, err = podconvert.MakeTaskRunStatus(ctx, logger, *tr, pod, c.KubeClientSet, rtr.TaskSpec) + tr.Status, err = podconvert.MakeTaskRunStatus(ctx, logger, *tr, pod, c.KubeClientSet, rtr.TaskSpec, spireEnabled, c.SpireClient) if err != nil { return err } @@ -544,6 +593,14 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re return err } + if spireEnabled && tr.IsDone() { + if err := c.SpireClient.DeleteEntry(ctx, tr, pod); err != nil { + logger.Infof("Failed to remove workload SPIFFE entry for taskrun %v: %v", tr.Name, err) + return err + } + logger.Infof("Deleted SPIFFE workload entry for %v/%v", tr.Namespace, tr.Name) + } + logger.Infof("Successfully reconciled taskrun %s/%s with status: %#v", tr.Name, tr.Namespace, tr.Status.GetCondition(apis.ConditionSucceeded)) return nil } diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 07cee342fb4..f4b285d02ab 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -45,6 +45,7 @@ import ( ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/trustedresources" "github.com/tektoncd/pipeline/pkg/workspace" "github.com/tektoncd/pipeline/test" @@ -65,6 +66,7 @@ import ( "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" cminformer "knative.dev/pkg/configmap/informer" "knative.dev/pkg/controller" "knative.dev/pkg/kmeta" @@ -293,6 +295,7 @@ var ( EmptyDir: &corev1.EmptyDirVolumeSource{}, }, } + mockSpire = &spire.MockClient{} ) const fakeVersion string = "unknown" @@ -356,10 +359,12 @@ func initializeTaskRunControllerAssets(t *testing.T, d test.Data, opts pipeline. 
t.Helper() ctx, _ := ttesting.SetupFakeContext(t) ctx = ttesting.SetupFakeCloudClientContext(ctx, d.ExpectedCloudEventCount) + ctx = spire.InjectClient(ctx, mockSpire) ctx, cancel := context.WithCancel(ctx) test.EnsureConfigurationConfigMapsExist(&d) c, informers := test.SeedTestData(t, ctx, d) configMapWatcher := cminformer.NewInformedWatcher(c.Kube, system.Namespace()) + ctl := NewController(&opts, testClock, trace.NewNoopTracerProvider())(ctx, configMapWatcher) if err := configMapWatcher.Start(ctx.Done()); err != nil { t.Fatalf("error starting configmap watcher: %v", err) @@ -426,7 +431,7 @@ spec: image: "foo", name: "simple-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "serviceaccount", taskRun: taskRunWithSaSuccess, @@ -434,7 +439,7 @@ spec: image: "foo", name: "sa-step", cmd: "/mycmd", - }}), + }}, false), }} { t.Run(tc.name, func(t *testing.T) { saName := tc.taskRun.Spec.ServiceAccountName @@ -757,7 +762,7 @@ spec: image: "foo", name: "simple-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "serviceaccount", taskRun: taskRunWithSaSuccess, @@ -769,7 +774,7 @@ spec: image: "foo", name: "sa-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "params", taskRun: taskRunSubstitution, @@ -805,7 +810,7 @@ spec: cmd: "/mycmd", args: []string{"--my-other-arg=https://foo.git"}, }, - }), + }, false), }, { name: "taskrun-with-taskspec", taskRun: taskRunWithTaskSpec, @@ -819,7 +824,7 @@ spec: image: "myimage", cmd: "/mycmd", }, - }), + }, false), }, { name: "success-with-cluster-task", taskRun: taskRunWithClusterTask, @@ -831,7 +836,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }, { name: "taskrun-with-pod", taskRun: taskRunWithPod, @@ -843,7 +848,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }, { name: "taskrun-with-credentials-variable-default-tekton-creds", taskRun: taskRunWithCredentialsVariable, @@ -855,7 +860,7 @@ spec: name: "mycontainer", image: "myimage", cmd: "/mycmd /tekton/creds", - }}), + }}, false), }, { name: "remote-task", taskRun: taskRunBundle, @@ -867,7 +872,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }} { t.Run(tc.name, func(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) @@ -929,6 +934,7 @@ spec: func TestAlphaReconcile(t *testing.T) { names.TestingSeed() + readonly := true taskRunWithOutputConfig := parse.MustParseV1beta1TaskRun(t, ` metadata: name: test-taskrun-with-output-config @@ -971,14 +977,13 @@ spec: cms := []*corev1.ConfigMap{{ ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, Data: map[string]string{ - "enable-api-fields": config.AlphaAPIFields, + "enable-api-fields": config.AlphaAPIFields, + "enforce-nonfalsifiability": config.EnforceNonfalsifiabilityWithSpire, }, }} d := test.Data{ - ConfigMaps: cms, - TaskRuns: taskruns, - Tasks: []*v1beta1.Task{simpleTask, saTask, templatedTask}, - ClusterTasks: []*v1beta1.ClusterTask{clustertask}, + ConfigMaps: cms, + TaskRuns: taskruns, } for _, tc := range []struct { name string @@ -992,12 +997,30 @@ spec: "Normal Started ", "Normal Running Not all Steps", }, - wantPod: expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{ - name: "mycontainer", - image: "myimage", - stdoutPath: "stdout.txt", - cmd: "/mycmd", - }}), + wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-pod", "", 
"test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, + []corev1.Volume{ + { + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, + }}, []stepForExpectedPod{{ + name: "mycontainer", + image: "myimage", + stdoutPath: "stdout.txt", + cmd: "/mycmd", + }}, true), + []corev1.VolumeMount{ + { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }, + }, + ), }, { name: "taskrun-with-output-config-ws", taskRun: taskRunWithOutputConfigAndWorkspace, @@ -1006,22 +1029,40 @@ spec: "Normal Running Not all Steps", }, wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-ws-pod", "", "test-taskrun-with-output-config-ws", "foo", config.DefaultServiceAccountValue, false, - []corev1.Volume{{ - Name: "ws-9l9zj", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + []corev1.Volume{ + { + Name: "ws-9l9zj", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, { + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, }, - }}, + }, []stepForExpectedPod{{ name: "mycontainer", image: "myimage", stdoutPath: "stdout.txt", cmd: "/mycmd", - }}), - []corev1.VolumeMount{{ - Name: "ws-9l9zj", - MountPath: "/workspace/data", - }}), + }}, true), + []corev1.VolumeMount{ + { + Name: "ws-9l9zj", + MountPath: "/workspace/data", + }, + { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }, + }, + ), }} { t.Run(tc.name, func(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) @@ -1082,12 +1123,186 @@ spec: } func addVolumeMounts(p *corev1.Pod, vms []corev1.VolumeMount) *corev1.Pod { - for i, vm := range vms { - p.Spec.Containers[i].VolumeMounts = append(p.Spec.Containers[i].VolumeMounts, vm) + for i := range p.Spec.Containers { + p.Spec.Containers[i].VolumeMounts = append(p.Spec.Containers[i].VolumeMounts, vms...) 
} return p } +func TestSpireFlowReconcile(t *testing.T) { + names.TestingSeed() + readonly := true + taskRunWithOutputConfig := parse.MustParseV1beta1TaskRun(t, ` +metadata: + name: test-taskrun-with-output-config + namespace: foo +spec: + taskSpec: + steps: + - command: + - /mycmd + image: myimage + name: mycontainer + stdoutConfig: + path: stdout.txt +`) + taskruns := []*v1beta1.TaskRun{ + taskRunWithOutputConfig, + } + + cms := []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-api-fields": config.AlphaAPIFields, + "enforce-nonfalsifiability": config.EnforceNonfalsifiabilityWithSpire, + }, + }} + d := test.Data{ + ConfigMaps: cms, + TaskRuns: taskruns, + } + for _, tc := range []struct { + name string + taskRun *v1beta1.TaskRun + wantPod *corev1.Pod + wantEvents []string + }{{ + name: "taskrun-with-output-config", + taskRun: taskRunWithOutputConfig, + wantEvents: []string{ + "Normal Started ", + "Normal Running Not all Steps", + }, + wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, + []corev1.Volume{ + { + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, + }}, []stepForExpectedPod{{ + name: "mycontainer", + image: "myimage", + stdoutPath: "stdout.txt", + cmd: "/mycmd", + }}, true), + []corev1.VolumeMount{ + { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }, + }, + ), + }} { + t.Run(tc.name, func(t *testing.T) { + testAssets, cancel := getTaskRunController(t, d) + defer cancel() + c := testAssets.Controller + clients := testAssets.Clients + saName := tc.taskRun.Spec.ServiceAccountName + createServiceAccount(t, testAssets, saName, tc.taskRun.Namespace) + + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)); err == nil { + t.Error("Wanted a wrapped requeue error, but got nil.") + } else if ok, _ := controller.IsRequeueKey(err); !ok { + t.Errorf("expected no error. Got error %v", err) + } + + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(testAssets.Ctx, tc.taskRun.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("getting updated taskrun: %v", err) + } + + if err := spire.CheckStatusInternalAnnotation(tr); err != nil { + t.Fatalf("Error %s in checking internal status after first reconcile.", err) + } + + if tr.Status.PodName == "" { + t.Fatalf("Reconcile didn't set pod name") + } + pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(testAssets.Ctx, tr.Status.PodName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to fetch build pod: %v", err) + } + pod.Status = corev1.PodStatus{ + Phase: corev1.PodRunning, + } + if _, err := clients.Kube.CoreV1().Pods(tr.Namespace).UpdateStatus(testAssets.Ctx, pod, metav1.UpdateOptions{}); err != nil { + t.Errorf("Unexpected error while updating build: %v", err) + } + // Before calling Reconcile again, we need to ensure that the informer's + // lister cache is update to reflect the result of the previous Reconcile. 
+ testAssets.Informers.TaskRun.Informer().GetIndexer().Add(tr) + + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tr)); err == nil { + t.Error("Wanted a wrapped requeue error, but got nil.") + } else if ok, _ := controller.IsRequeueKey(err); !ok { + t.Fatalf("Unexpected error when Reconcile(): %v", err) + } + + tr, err = clients.Pipeline.TektonV1beta1().TaskRuns(tr.Namespace).Get(testAssets.Ctx, tr.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", tr.Name, err) + } + if err := spire.CheckStatusInternalAnnotation(tr); err != nil { + t.Fatalf("Error %s in checking internal status after second reconcile.", err) + } + + spireTaskRunEntryID := fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name) + if _, err := mockSpire.Entries[spireTaskRunEntryID]; !err { + t.Fatalf("%s expected to be added as a SPIFFE id.", spireTaskRunEntryID) + } + + pod.Status = corev1.PodStatus{ + Phase: corev1.PodSucceeded, + } + if _, err := clients.Kube.CoreV1().Pods(tr.Namespace).UpdateStatus(testAssets.Ctx, pod, metav1.UpdateOptions{}); err != nil { + t.Errorf("Unexpected error while updating build: %v", err) + } + testAssets.Informers.TaskRun.Informer().GetIndexer().Add(tr) + + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tr)); err == nil { + t.Error("Wanted a wrapped requeue error, but got nil.") + } else if ok, _ := controller.IsRequeueKey(err); !ok { + t.Fatalf("Unexpected error when Reconcile(): %v", err) + } + + tr, err = clients.Pipeline.TektonV1beta1().TaskRuns(tr.Namespace).Get(testAssets.Ctx, tr.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", tr.Name, err) + } + if err := spire.CheckStatusInternalAnnotation(tr); err != nil { + t.Fatalf("Error %s in checking internal status after third reconcile.", err) + } + + if _, err := mockSpire.Entries[spireTaskRunEntryID]; err { + t.Fatalf("SPIFFE id %s expected to be deleted.", spireTaskRunEntryID) + } + + if d := cmp.Diff(tc.wantPod.ObjectMeta, pod.ObjectMeta, ignoreRandomPodNameSuffix); d != "" { + t.Errorf("Pod metadata doesn't match %s", diff.PrintWantGot(d)) + } + + pod.Name = tc.wantPod.Name // Ignore pod name differences, the pod name is generated and tested in pod_test.go + if d := cmp.Diff(tc.wantPod.Spec, pod.Spec, resourceQuantityCmp, volumeSort, volumeMountSort, ignoreEnvVarOrdering); d != "" { + t.Errorf("Pod spec doesn't match %s", diff.PrintWantGot(d)) + } + if d := cmp.Diff(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + Reason: v1beta1.TaskRunReasonSuccessful.String(), + Message: "All Steps have completed executing", + }, tr.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" { + t.Errorf("Did not get expected condition %s", diff.PrintWantGot(d)) + } + }) + } +} + // TestReconcileWithResolver checks that a TaskRun with a populated Resolver // field creates a ResolutionRequest object for that Resolver's type, and // that when the request is successfully resolved the TaskRun begins running. 
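
The three reconcile passes above exercise the full sign/verify round trip against the mock SPIRE client. The behaviour they rely on can be summarised in a small, self-contained sketch. This is an editor's illustration rather than part of the patch; it assumes that `AppendStatusInternalAnnotation` stores a hash of the status in the status annotations and that the package-level `CheckStatusInternalAnnotation` recomputes and compares that hash, as the tests above use them. The package and test names below are placeholders.

```go
// Illustration only: the TaskRunStatus hash round trip as used by the tests above.
package spiresketch

import (
	"context"
	"testing"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	"github.com/tektoncd/pipeline/pkg/spire"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestStatusHashRoundTrip(t *testing.T) {
	sc := &spire.MockClient{}
	tr := &v1beta1.TaskRun{ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}}

	// Signing is assumed to store a hash of tr.Status (plus a signature) in the
	// status annotations, as the reconciler does after each status update.
	if err := sc.AppendStatusInternalAnnotation(context.Background(), tr); err != nil {
		t.Fatalf("signing status: %v", err)
	}

	// The package-level check recomputes the hash and compares it to the annotation.
	if err := spire.CheckStatusInternalAnnotation(tr); err != nil {
		t.Fatalf("verifying freshly signed status: %v", err)
	}

	// Mutating the status afterwards is expected to invalidate the stored hash,
	// which is how out-of-band modifications between reconciles are detected.
	tr.Status.PodName = "tampered"
	if err := spire.CheckStatusInternalAnnotation(tr); err == nil {
		t.Fatal("expected verification to fail after the status was modified")
	}
}
```
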
@@ -4123,6 +4338,126 @@ status: } } +func TestReconcileOnTaskRunSign(t *testing.T) { + taskSt := &apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + Reason: "Build succeeded", + Message: "Build succeeded", + } + taskRunStartedUnsigned := &v1beta1.TaskRun{ + ObjectMeta: objectMeta("taskrun-started-unsigned", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: simpleTask.Name, + }, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{ + *taskSt, + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + StartTime: &metav1.Time{Time: now.Add(-15 * time.Second)}, + }, + }, + } + taskRunUnstarted := &v1beta1.TaskRun{ + ObjectMeta: objectMeta("taskrun-unstarted", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: simpleTask.Name, + }, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{ + *taskSt, + }, + }, + }, + } + taskRunStartedSigned := &v1beta1.TaskRun{ + ObjectMeta: objectMeta("taskrun-started-signed", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: simpleTask.Name, + }, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{ + *taskSt, + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + StartTime: &metav1.Time{Time: now.Add(-15 * time.Second)}, + }, + }, + } + if err := mockSpire.AppendStatusInternalAnnotation(context.Background(), taskRunStartedSigned); err != nil { + t.Fatal("failed to sign test taskrun") + } + + d := test.Data{ + ConfigMaps: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, + Data: map[string]string{ + "enable-api-fields": config.AlphaAPIFields, + "enforce-nonfalsifiability": config.EnforceNonfalsifiabilityWithSpire, + }, + }}, + + TaskRuns: []*v1beta1.TaskRun{ + taskRunStartedUnsigned, taskRunUnstarted, taskRunStartedSigned, + }, + Tasks: []*v1beta1.Task{simpleTask}, + } + testAssets, cancel := getTaskRunController(t, d) + defer cancel() + c := testAssets.Controller + clients := testAssets.Clients + + testCases := []struct { + name string + tr *v1beta1.TaskRun + verifiable bool + }{ + { + name: "sign/verify unstarted taskrun", + tr: taskRunUnstarted, + verifiable: true, + }, + { + name: "sign/verify signed started taskrun", + tr: taskRunStartedSigned, + verifiable: true, + }, + { + name: "sign/verify unsigned started taskrun should fail", + tr: taskRunStartedUnsigned, + verifiable: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.tr)); err != nil { + t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) + } + newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.tr.Namespace).Get(testAssets.Ctx, tc.tr.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", tc.tr.Name, err) + } + verified := mockSpire.CheckSpireVerifiedFlag(newTr) + if verified != tc.verifiable { + t.Fatalf("expected verifiable: %v, got %v", tc.verifiable, verified) + } + }) + } +} + func Test_validateTaskSpecRequestResources_ValidResources(t *testing.T) { tcs := []struct { name string @@ -4335,7 +4670,7 @@ func podVolumeMounts(idx, totalSteps int) []corev1.VolumeMount { return mnts } -func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []string, 
idx int) []string { +func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []string, idx int, enableSpire bool) []string { args := []string{ "-wait_file", } @@ -4352,6 +4687,9 @@ func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs [] "-step_metadata_dir", fmt.Sprintf("/tekton/run/%d/status", idx), ) + if enableSpire { + args = append(args, "-enable_spire") + } if stdoutPath != "" { args = append(args, "-stdout_path", stdoutPath) } @@ -4413,11 +4751,24 @@ type stepForExpectedPod struct { stderrPath string } -func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod) *corev1.Pod { +func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod, enableSpire bool) *corev1.Pod { stepNames := make([]string, 0, len(steps)) for _, s := range steps { stepNames = append(stepNames, fmt.Sprintf("step-%s", s.name)) } + + initContainers := []corev1.Container{placeToolsInitContainer(stepNames)} + if enableSpire { + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }) + } + } + p := &corev1.Pod{ ObjectMeta: podObjectMeta(podName, taskName, taskRunName, ns, isClusterTask), Spec: corev1.PodSpec{ @@ -4429,7 +4780,7 @@ func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTas binVolume, downwardVolume, }, - InitContainers: []corev1.Container{placeToolsInitContainer(stepNames)}, + InitContainers: initContainers, RestartPolicy: corev1.RestartPolicyNever, ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, ServiceAccountName: saName, @@ -4450,7 +4801,7 @@ func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTas VolumeMounts: podVolumeMounts(idx, len(steps)), TerminationMessagePath: "/tekton/termination", } - stepContainer.Args = podArgs(s.cmd, s.stdoutPath, s.stderrPath, s.args, idx) + stepContainer.Args = podArgs(s.cmd, s.stdoutPath, s.stderrPath, s.args, idx, enableSpire) for k, v := range s.envVars { stepContainer.Env = append(stepContainer.Env, corev1.EnvVar{ diff --git a/pkg/spire/controller.go b/pkg/spire/controller.go index 410c9c2ad63..dbf91ee9b22 100644 --- a/pkg/spire/controller.go +++ b/pkg/spire/controller.go @@ -27,8 +27,10 @@ import ( "github.com/spiffe/go-spiffe/v2/workloadapi" entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1" spiffetypes "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" + "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -45,6 +47,22 @@ func init() { // controllerKey is a way to associate the ControllerAPIClient from inside the context.Context type controllerKey struct{} +// OnStore stores the changed spire config into the SpireClientApi +func OnStore(ctx context.Context, logger *zap.SugaredLogger) func(name string, value interface{}) { + return func(name string, value interface{}) { + if name == config.GetSpireConfigName() { + cfg, ok := value.(*spireconfig.SpireConfig) + if !ok { + logger.Error("Failed to do type assertion for extracting SPIRE config") + return + } + controllerAPIClient := 
GetControllerAPIClient(ctx) + controllerAPIClient.Close() + controllerAPIClient.SetConfig(*cfg) + } + } +} + // GetControllerAPIClient extracts the ControllerAPIClient from the context. func GetControllerAPIClient(ctx context.Context) ControllerAPIClient { untyped := ctx.Value(controllerKey{}) @@ -52,7 +70,7 @@ func GetControllerAPIClient(ctx context.Context) ControllerAPIClient { logging.FromContext(ctx).Errorf("Unable to fetch client from context.") return nil } - return untyped.(*spireControllerAPIClient) + return untyped.(ControllerAPIClient) } func withControllerClient(ctx context.Context, cfg *rest.Config) context.Context { @@ -297,18 +315,24 @@ func (sc *spireControllerAPIClient) Close() error { if err != nil { return err } + sc.serverConn = nil } if sc.workloadAPI != nil { err = sc.workloadAPI.Close() if err != nil { return err } + sc.workloadAPI = nil } if sc.workloadConn != nil { err = sc.workloadConn.Close() if err != nil { return err } + sc.workloadConn = nil } + sc.entryClient = nil return nil } + +var _ ControllerAPIClient = (*spireControllerAPIClient)(nil) diff --git a/pkg/spire/spire_mock.go b/pkg/spire/spire_mock.go index a5de75b3a80..bde778ea4e6 100644 --- a/pkg/spire/spire_mock.go +++ b/pkg/spire/spire_mock.go @@ -37,7 +37,12 @@ func init() { } func withFakeControllerClient(ctx context.Context, cfg *rest.Config) context.Context { - return context.WithValue(ctx, controllerKey{}, &spireControllerAPIClient{}) + return context.WithValue(ctx, controllerKey{}, &MockClient{}) +} + +// InjectClient injects MockClient into the given context as SpireControllerApiClient. +func InjectClient(ctx context.Context, spireMock *MockClient) context.Context { + return context.WithValue(ctx, controllerKey{}, spireMock) } // MockClient is a client used for mocking the this package for unit testing @@ -300,3 +305,6 @@ func (*MockClient) Close() error { return nil } // SetConfig sets the spire configuration for MockClient func (*MockClient) SetConfig(spireconfig.SpireConfig) {} + +var _ ControllerAPIClient = (*MockClient)(nil) +var _ EntrypointerAPIClient = (*MockClient)(nil) diff --git a/pkg/spire/spire_test.go b/pkg/spire/spire_test.go index e109ac1d19f..98b780683a8 100644 --- a/pkg/spire/spire_test.go +++ b/pkg/spire/spire_test.go @@ -25,6 +25,7 @@ import ( "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/go-spiffe/v2/svid/x509svid" + pconf "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/pkg/spire/config" @@ -665,6 +666,30 @@ func TestTaskRunResultsSignTamper(t *testing.T) { } } +func TestOnStore(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) + logger := logging.FromContext(ctx) + ctx = context.WithValue(ctx, controllerKey{}, &spireControllerAPIClient{ + config: &config.SpireConfig{ + TrustDomain: "before_test_domain", + SocketPath: "before_test_socket_path", + ServerAddr: "before_test_server_path", + NodeAliasPrefix: "before_test_node_alias_prefix", + }, + }) + want := config.SpireConfig{ + TrustDomain: "after_test_domain", + SocketPath: "after_test_socket_path", + ServerAddr: "after_test_server_path", + NodeAliasPrefix: "after_test_node_alias_prefix", + } + OnStore(ctx, logger)(pconf.GetSpireConfigName(), &want) + got := *GetControllerAPIClient(ctx).(*spireControllerAPIClient).config + if got != want { + t.Fatalf("test TestOnStore expected %v but got %v", got, want) + } +} + func x509svids(ca *test.CA, ids 
...spiffeid.ID) []*x509svid.SVID { svids := []*x509svid.SVID{} for _, id := range ids { diff --git a/pkg/spire/verify.go b/pkg/spire/verify.go index 98d8d3372f6..639c952a673 100644 --- a/pkg/spire/verify.go +++ b/pkg/spire/verify.go @@ -32,9 +32,10 @@ import ( "strings" "github.com/pkg/errors" + "go.uber.org/zap" + "github.com/spiffe/go-spiffe/v2/workloadapi" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "go.uber.org/zap" ) // VerifyTaskRunResults ensures that the TaskRun results are valid and have not been tampered with @@ -96,7 +97,7 @@ func (sc *spireControllerAPIClient) VerifyStatusInternalAnnotation(ctx context.C } if !sc.CheckSpireVerifiedFlag(tr) { - return errors.New("annotation tekton.dev/not-verified = yes failed spire verification") + return errors.New("annotation tekton.dev/verified = no failed spire verification") } annotations := tr.Status.Annotations diff --git a/test/e2e-common.sh b/test/e2e-common.sh index 968dd31ec2c..d0936895174 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -48,6 +48,65 @@ function install_pipeline_crd_version() { verify_pipeline_installation } +function spire_apply() { + if [ $# -lt 2 -o "$1" != "-spiffeID" ]; then + echo "spire_apply requires a spiffeID as the first arg" >&2 + exit 1 + fi + show=$(kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry show $1 $2) + if [ "$show" != "Found 0 entries" ]; then + # delete to recreate + entryid=$(echo "$show" | grep "^Entry ID" | cut -f2 -d:) + kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry delete -entryID $entryid + fi + kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry create "$@" +} + +function install_spire() { + echo ">> Deploying Spire" + DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + + echo "Creating SPIRE namespace..." + kubectl create ns spire + + echo "Applying SPIFFE CSI Driver configuration..." + kubectl apply -f "$DIR"/testdata/spire/spiffe-csi-driver.yaml + + echo "Deploying SPIRE server" + kubectl apply -f "$DIR"/testdata/spire/spire-server.yaml + + echo "Deploying SPIRE agent" + kubectl apply -f "$DIR"/testdata/spire/spire-agent.yaml + + wait_until_pods_running spire || fail_test "SPIRE did not come up" + + spire_apply \ + -spiffeID spiffe://example.org/ns/spire/node/example \ + -selector k8s_psat:cluster:example-cluster \ + -selector k8s_psat:agent_ns:spire \ + -selector k8s_psat:agent_sa:spire-agent \ + -node + spire_apply \ + -spiffeID spiffe://example.org/ns/tekton-pipelines/sa/tekton-pipelines-controller \ + -parentID spiffe://example.org/ns/spire/node/example \ + -selector k8s:ns:tekton-pipelines \ + -selector k8s:pod-label:app:tekton-pipelines-controller \ + -selector k8s:sa:tekton-pipelines-controller \ + -admin +} + +function patch_pipline_spire() { + kubectl patch \ + deployment tekton-pipelines-controller \ + -n tekton-pipelines \ + --patch-file "$DIR"/testdata/patch/pipeline-controller-spire.json + + verify_pipeline_installation +} + function verify_pipeline_installation() { # Make sure that everything is cleaned up in the current namespace. 
  delete_pipeline_resources
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index 66d39911000..d4d41c8ecc3 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -40,6 +40,21 @@ header "Setting up environment"
 install_pipeline_crd
 failed=0
 
+function add_spire() {
+  local gate="$1"
+  if [ "$gate" != "alpha" ] && [ "$gate" != "stable" ] && [ "$gate" != "beta" ] ; then
+    printf "Invalid gate %s\n" "${gate}"
+    exit 255
+  fi
+  if [ "$gate" == "alpha" ] ; then
+    DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+    printf "Setting up environment for alpha features"
+    install_spire
+    patch_pipline_spire
+    kubectl apply -n tekton-pipelines -f "$DIR"/testdata/spire/config-spire.yaml
+    failed=0
+  fi
+}
 
 function set_feature_gate() {
   local gate="$1"
@@ -91,6 +106,7 @@ function run_e2e() {
   fi
 }
 
+add_spire "$PIPELINE_FEATURE_GATE"
 set_feature_gate "$PIPELINE_FEATURE_GATE"
 set_result_extraction_method "$RESULTS_FROM"
 run_e2e
diff --git a/test/featureflags.go b/test/featureflags.go
index 099c50ebbbc..aecd983b2cf 100644
--- a/test/featureflags.go
+++ b/test/featureflags.go
@@ -13,6 +13,39 @@ import (
 	"knative.dev/pkg/system"
 )
 
+var spireFeatureGates = map[string]string{
+	"enforce-nonfalsifiability": "spire",
+	"enable-api-fields":         "alpha",
+}
+
+func isSpireEnabled(ctx context.Context, t *testing.T, c *clients, namespace string) bool {
+	t.Helper()
+	isSpireEnabled, _ := hasAllFeatureGates(ctx, t, spireFeatureGates, c, namespace)
+	return isSpireEnabled
+}
+
+func hasAllFeatureGates(ctx context.Context, t *testing.T, gates map[string]string, c *clients, namespace string) (bool, string) {
+	t.Helper()
+	featureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, config.GetFeatureFlagsConfigName(), metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetFeatureFlagsConfigName(), err)
+	}
+	pairs := []string{}
+	for name, value := range gates {
+		actual, ok := featureFlagsCM.Data[name]
+		if !ok || value != actual {
+			pairs = append(pairs, fmt.Sprintf("%q is %q, want %s", name, actual, value))
+		}
+	}
+	if len(pairs) > 0 {
+		status := fmt.Sprintf(
+			"Some feature flags in namespace %q not matching %s\nExisting feature flag: %#v\n",
+			system.Namespace(), strings.Join(pairs, " and "), featureFlagsCM.Data)
+		return false, status
+	}
+	return true, ""
+}
+
 // requireAnyGate returns a setup func that will skip the current
 // test if none of the feature-flags in the given map match
 // what's in the feature-flags ConfigMap.
It will fatally fail diff --git a/test/init_test.go b/test/init_test.go index ea21a337fb0..060d647ff74 100644 --- a/test/init_test.go +++ b/test/init_test.go @@ -31,7 +31,9 @@ import ( "testing" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/names" + "github.com/tektoncd/pipeline/pkg/spire" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -324,3 +326,43 @@ func getCRDYaml(ctx context.Context, cs *clients, ns string) ([]byte, error) { return output, nil } + +// Verifies if the taskrun results should not be verified by spire +func spireShouldFailTaskRunResultsVerify(t *testing.T, tr *v1beta1.TaskRun) { + t.Helper() + if tr.IsTaskRunResultVerified() { + t.Errorf("Taskrun `%s` status condition should not be verified as taskrun failed", tr.Name) + } + t.Logf("Taskrun `%s` status results condition verified by spire as false, which is valid", tr.Name) +} + +// Verifies if the taskrun results are verified by spire +func spireShouldPassTaskRunResultsVerify(t *testing.T, tr *v1beta1.TaskRun) { + t.Helper() + if !tr.IsTaskRunResultVerified() { + t.Errorf("Taskrun `%s` status condition not verified. Spire taskrun results verification failure", tr.Name) + } else { + t.Logf("Taskrun `%s` status results condition verified by spire as true, which is valid", tr.Name) + } + t.Logf("Taskrun `%s` status results condition verified by spire as true, which is valid", tr.Name) +} + +// Verifies if the taskrun status annotation does not contain "not-verified" +func spireShouldPassSpireAnnotation(t *testing.T, tr *v1beta1.TaskRun) { + t.Helper() + if _, notVerified := tr.Status.Annotations[spire.VerifiedAnnotation]; notVerified { + t.Errorf("Taskrun `%s` status not verified. Spire annotation tekton.dev/spire-verified = no. 
Failed spire verification", tr.Name) + } + t.Logf("Taskrun `%s` status spire annotation verified", tr.Name) +} + +// Verifies if the taskrun status annotation does contain "not-verified" +func spireShouldFailSpireAnnotation(t *testing.T, tr *v1beta1.TaskRun) { + t.Helper() + _, notVerified := tr.Status.Annotations[spire.VerifiedAnnotation] + _, hash := tr.Status.Annotations[spire.TaskRunStatusHashAnnotation] + if !notVerified && hash { + t.Errorf("Taskrun `%s` status should be not verified missing spire Annotation tekton.dev/not-verified = yes", tr.Name) + } + t.Logf("Taskrun `%s` status spire annotation not verified, which is valid", tr.Name) +} diff --git a/test/taskrun_test.go b/test/taskrun_test.go index 3cf8762c3e9..dd35001f090 100644 --- a/test/taskrun_test.go +++ b/test/taskrun_test.go @@ -21,18 +21,24 @@ package test import ( "context" + "encoding/json" "fmt" "regexp" "strings" "testing" + jsonpatch "gomodules.xyz/jsonpatch/v2" + "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/pod" "github.com/tektoncd/pipeline/test/parse" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/system" knativetest "knative.dev/pkg/test" "knative.dev/pkg/test/helpers" ) @@ -92,6 +98,11 @@ spec: t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } + if isSpireEnabled(ctx, t, c, namespace) { + spireShouldFailTaskRunResultsVerify(t, taskrun) + spireShouldPassSpireAnnotation(t, taskrun) + } + expectedStepState := []v1beta1.StepState{{ ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ @@ -184,6 +195,11 @@ spec: t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } + if isSpireEnabled(ctx, t, c, namespace) { + spireShouldPassTaskRunResultsVerify(t, taskrun) + spireShouldPassSpireAnnotation(t, taskrun) + } + expectedStepState := []v1beta1.StepState{{ ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ @@ -209,3 +225,218 @@ spec: t.Fatalf("-got, +want: %v", d) } } + +// TestTaskRunModificationSpire is an exclusive test for SPIRE integration into taskrun. +// The test starts a taskrun which has a sleep. While the taskrun is "sleep"ing, +// the text modifies the taskrun results. +// This change is caught by the taskrun reconciler when it tries to verify the results. 
+func TestTaskRunModificationSpire(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var c *clients + var namespace string + var oldConfigMap map[string]string + var configSpire = map[string]string{ + "results-from": "termination-message", + "enforce-nonfalsifiability": "spire", + } + + c, namespace, oldConfigMap = setupWithFlags(ctx, t, configSpire, requireAllGates(spireFeatureGates)) + defer resetFlags(ctx, t, c, oldConfigMap) + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + taskRunName := "non-falsifiable-provenance-fail" + + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + task := parse.MustParseV1beta1Task(t, fmt.Sprintf(` +metadata: + name: non-falsifiable + namespace: %s +spec: + results: + - name: foo + - name: bar + steps: + - image: ubuntu + script: | + #!/usr/bin/env bash + sleep 20 + printf "hello" > "$(results.foo.path)" + printf "world" > "$(results.bar.path)" +`, namespace)) + if _, err := c.V1beta1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + taskRun := parse.MustParseV1beta1TaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + taskRef: + name: non-falsifiable +`, taskRunName, namespace)) + if _, err := c.V1beta1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to be in running state", namespace) + if err := WaitForTaskRunState(ctx, c, taskRunName, Running(taskRunName), "TaskRunRunning", v1beta1Version); err != nil { + t.Errorf("Error waiting for TaskRun to start running: %s", err) + } + + patches := []jsonpatch.JsonPatchOperation{{ + Operation: "replace", + Path: "/status/taskSpec/steps/0/image", + Value: "not-ubuntu", + }} + patchBytes, err := json.Marshal(patches) + if err != nil { + t.Fatalf("failed to marshal patch bytes in order to stop") + } + t.Logf("Patching TaskRun %s in namespace %s mid run for spire to catch the un-authorized changed", taskRunName, namespace) + if _, err := c.V1beta1TaskRunClient.Patch(ctx, taskRunName, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil { + t.Fatalf("Failed to patch taskrun `%s`: %s", taskRunName, err) + } + + t.Logf("Waiting for TaskRun %s in namespace %s to succeed", taskRunName, namespace) + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunFailed(taskRunName), "TaskRunFailed", v1beta1Version); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } + + taskrun, err := c.V1beta1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) + } + + if isSpireEnabled(ctx, t, c, namespace) { + spireShouldFailTaskRunResultsVerify(t, taskrun) + spireShouldFailSpireAnnotation(t, taskrun) + } + + expectedStepState := []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + }, + }, + Name: "unnamed-0", + ContainerName: "step-unnamed-0", + }} + + ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") + ignoreStepFields := cmpopts.IgnoreFields(v1beta1.StepState{}, "ImageID") + if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" { 
+ t.Fatalf("-got, +want: %v", d) + } +} + +func TestTaskRunSpire(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var c *clients + var namespace string + var oldConfigMap map[string]string + var configSpire = map[string]string{ + "results-from": "termination-message", + "enforce-nonfalsifiability": "spire", + } + + c, namespace, oldConfigMap = setupWithFlags(ctx, t, configSpire, requireAllGates(spireFeatureGates)) + defer resetFlags(ctx, t, c, oldConfigMap) + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + taskRunName := "non-falsifiable-provenance-pass" + + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + task := parse.MustParseV1beta1Task(t, fmt.Sprintf(` +metadata: + name: non-falsifiable + namespace: %s +spec: + results: + - name: foo + - name: bar + steps: + - image: ubuntu + script: | + #!/usr/bin/env bash + sleep 20 + printf "hello" > "$(results.foo.path)" + printf "world" > "$(results.bar.path)" +`, namespace)) + if _, err := c.V1beta1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + taskRun := parse.MustParseV1beta1TaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + taskRef: + name: non-falsifiable +`, taskRunName, namespace)) + if _, err := c.V1beta1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun %s in namespace %s to succeed", taskRunName, namespace) + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed", v1beta1Version); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } + + taskrun, err := c.V1beta1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) + } + + if isSpireEnabled(ctx, t, c, namespace) { + spireShouldPassTaskRunResultsVerify(t, taskrun) + spireShouldPassSpireAnnotation(t, taskrun) + } + + expectedStepState := []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + Reason: "Completed", + }, + }, + Name: "unnamed-0", + ContainerName: "step-unnamed-0", + }} + + ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") + ignoreStepFields := cmpopts.IgnoreFields(v1beta1.StepState{}, "ImageID") + if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" { + t.Fatalf("-got, +want: %v", d) + } +} + +func setupWithFlags(ctx context.Context, t *testing.T, configMapData map[string]string, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string, map[string]string) { + t.Helper() + c, ns := setup(ctx, t, fn...) 
+ featureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, config.GetFeatureFlagsConfigName(), metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetFeatureFlagsConfigName(), err) + } + if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil { + t.Fatal(err) + } + return c, ns, featureFlagsCM.Data +} + +func resetFlags(ctx context.Context, t *testing.T, c *clients, configMapData map[string]string) { + t.Helper() + if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil { + t.Fatal(err) + } +} diff --git a/test/testdata/patch/pipeline-controller-spire.json b/test/testdata/patch/pipeline-controller-spire.json new file mode 100644 index 00000000000..6c08f20dfe9 --- /dev/null +++ b/test/testdata/patch/pipeline-controller-spire.json @@ -0,0 +1,56 @@ +{ + "spec":{ + "template":{ + "spec":{ + "$setElementOrder/containers":[ + { + "name":"tekton-pipelines-controller" + } + ], + "$setElementOrder/volumes":[ + { + "name":"config-logging" + }, + { + "name":"config-registry-cert" + }, + { + "name":"spiffe-workload-api" + } + ], + "containers":[ + { + "$setElementOrder/volumeMounts":[ + { + "mountPath":"/etc/config-logging" + }, + { + "mountPath":"/etc/config-registry-cert" + }, + { + "mountPath":"/spiffe-workload-api" + } + ], + "name":"tekton-pipelines-controller", + "volumeMounts":[ + { + "mountPath":"/spiffe-workload-api", + "name":"spiffe-workload-api", + "readOnly":true + } + ] + } + ], + "volumes":[ + { + "csi":{ + "driver":"csi.spiffe.io", + "readOnly":true + }, + "name":"spiffe-workload-api" + } + ] + } + } + } +} diff --git a/test/testdata/spire/config-spire.yaml b/test/testdata/spire/config-spire.yaml new file mode 100644 index 00000000000..30837a0e65d --- /dev/null +++ b/test/testdata/spire/config-spire.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-spire + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + # spire-trust-domain specifies the SPIRE trust domain to use. + spire-trust-domain: "example.org" + # spire-socket-path specifies the SPIRE agent socket for SPIFFE workload API. + spire-socket-path: "unix:///spiffe-workload-api/spire-agent.sock" + # spire-server-addr specifies the SPIRE server address for workload/node registration. + spire-server-addr: "spire-server.spire.svc.cluster.local:8081" + # spire-node-alias-prefix specifies the SPIRE node alias prefix to use. + spire-node-alias-prefix: "/tekton-node/" \ No newline at end of file diff --git a/test/testdata/spire/spiffe-csi-driver.yaml b/test/testdata/spire/spiffe-csi-driver.yaml new file mode 100644 index 00000000000..e9d07bc5683 --- /dev/null +++ b/test/testdata/spire/spiffe-csi-driver.yaml @@ -0,0 +1,20 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "csi.spiffe.io" +spec: + # Only ephemeral, inline volumes are supported. There is no need for a + # controller to provision and attach volumes. + attachRequired: false + + # Request the pod information which the CSI driver uses to verify that an + # ephemeral mount was requested. + podInfoOnMount: true + + # Don't change ownership on the contents of the mount since the Workload API + # Unix Domain Socket is typically open to all (i.e. 0777). + fsGroupPolicy: None + + # Declare support for ephemeral volumes only. 
+ volumeLifecycleModes: + - Ephemeral diff --git a/test/testdata/spire/spire-agent.yaml b/test/testdata/spire/spire-agent.yaml new file mode 100644 index 00000000000..4e848a51388 --- /dev/null +++ b/test/testdata/spire/spire-agent.yaml @@ -0,0 +1,208 @@ +# ServiceAccount for the SPIRE agent +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire + +--- + +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods", "nodes", "nodes/proxy"] + verbs: ["get"] + +--- + +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io + + +--- + +# ConfigMap for the SPIRE agent featuring: +# 1) PSAT node attestation +# 2) K8S Workload Attestation over the secure kubelet port +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + socket_path = "/run/spire/sockets/spire-agent.sock" + trust_bundle_path = "/run/spire/bundle/bundle.crt" + trust_domain = "example.org" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "example-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + skip_kubelet_verification = true + } + } + } + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + updateStrategy: + type: RollingUpdate + template: + metadata: + namespace: spire + labels: + app: spire-agent + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + containers: + - name: spire-agent + image: ghcr.io/spiffe/spire-agent:1.1.1 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-bundle + mountPath: /run/spire/bundle + readOnly: true + - name: spire-token + mountPath: /var/run/secrets/tokens + - name: spire-agent-socket-dir + mountPath: /run/spire/sockets + # This is the container which runs the SPIFFE CSI driver. + - name: spiffe-csi-driver + image: ghcr.io/spiffe/spiffe-csi-driver:nightly + imagePullPolicy: IfNotPresent + args: [ + "-workload-api-socket-dir", "/spire-agent-socket", + "-csi-socket-path", "/spiffe-csi/csi.sock", + ] + env: + # The CSI driver needs a unique node ID. The node name can be + # used for this purpose. + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + # The volume containing the SPIRE agent socket. The SPIFFE CSI + # driver will mount this directory into containers. + - mountPath: /spire-agent-socket + name: spire-agent-socket-dir + readOnly: true + # The volume that will contain the CSI driver socket shared + # with the kubelet and the driver registrar. + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The volume containing mount points for containers. 
+ - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + securityContext: + privileged: true + # This container runs the CSI Node Driver Registrar which takes care + # of all the little details required to register a CSI driver with + # the kubelet. + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + imagePullPolicy: IfNotPresent + args: [ + "-csi-address", "/spiffe-csi/csi.sock", + "-kubelet-registration-path", "/var/lib/kubelet/plugins/csi.spiffe.io/csi.sock", + ] + volumeMounts: + # The registrar needs access to the SPIFFE CSI driver socket + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The registrar needs access to the Kubelet plugin registration + # directory + - name: kubelet-plugin-registration-dir + mountPath: /registration + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-bundle + configMap: + name: spire-bundle + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server + # This volume is used to share the Workload API socket between the CSI + # driver and SPIRE agent. Note, an emptyDir volume could also be used, + # however, this can lead to broken bind mounts in the workload + # containers if the agent pod is restarted (since the emptyDir + # directory on the node that was mounted into workload containers by + # the CSI driver belongs to the old pod instance and is no longer + # valid). + - name: spire-agent-socket-dir + hostPath: + path: /run/spire/agent-sockets + type: DirectoryOrCreate + # This volume is where the socket for kubelet->driver communication lives + - name: spiffe-csi-socket-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.spiffe.io + type: DirectoryOrCreate + # This volume is where the SPIFFE CSI driver mounts volumes + - name: mountpoint-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + # This volume is where the node-driver-registrar registers the plugin + # with kubelet + - name: kubelet-plugin-registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory diff --git a/test/testdata/spire/spire-server.yaml b/test/testdata/spire/spire-server.yaml new file mode 100644 index 00000000000..ceec824613d --- /dev/null +++ b/test/testdata/spire/spire-server.yaml @@ -0,0 +1,211 @@ +# ServiceAccount used by the SPIRE server. 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire + +--- + +# Required cluster role to allow spire-server to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role +rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + # allow TokenReview requests (to verify service account tokens for PSAT + # attestation) +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["get", "create"] + +--- + +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Role for the SPIRE server +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: spire + name: spire-server-role +rules: + # allow "get" access to pods (to resolve selectors for PSAT attestation) +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE + # agent bootstrapping, see the spire-bundle ConfigMap below) +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["spire-bundle"] + verbs: ["get", "patch"] + +--- + +# RoleBinding granting the spire-server-role to the SPIRE server +# service account. +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: Role + name: spire-server-role + apiGroup: rbac.authorization.k8s.io + +--- + +# ConfigMap containing the latest trust bundle for the trust domain. It is +# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount +# this config map and use the certificate to bootstrap trust with the SPIRE +# server during attestation. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-bundle + namespace: spire + +--- + +# ConfigMap containing the SPIRE server configuration. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_svid_ttl = "1h" + ca_ttl = "12h" + ca_subject { + country = ["US"] + organization = ["SPIFFE"] + common_name = "" + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "example-cluster" = { + service_account_allow_list = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + # This plugin updates the bundle.crt value in the spire:spire-bundle + # ConfigMap by default, so no additional configuration is necessary. 
+ } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: ghcr.io/spiffe/spire-server:1.1.1 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/server.conf"] + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + volumes: + - name: spire-config + configMap: + name: spire-server + +--- + +# Service definition for SPIRE server defining the gRPC port. +apiVersion: v1 +kind: Service +metadata: + name: spire-server + namespace: spire +spec: + type: NodePort + ports: + - name: grpc + port: 8081 + targetPort: 8081 + protocol: TCP + selector: + app: spire-server
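
One note on how the pieces above fit together: the `config-spire` ConfigMap applied by `add_spire` carries the four values that `OnStore` pushes into the controller's SPIRE client whenever the ConfigMap changes. Below is a minimal sketch of that mapping, assuming the ConfigMap keys correspond one-to-one to the `SpireConfig` fields exercised in `TestOnStore`; `parseSpireConfig` and the `main` wrapper are hypothetical helpers written for this illustration, not functions from the pipeline codebase.

```go
// Illustration only: how the config-spire keys are assumed to map onto SpireConfig.
package main

import (
	"fmt"

	spireconfig "github.com/tektoncd/pipeline/pkg/spire/config"
)

// parseSpireConfig is a hypothetical helper for this sketch; the real parsing
// lives in the pipeline's config package.
func parseSpireConfig(data map[string]string) spireconfig.SpireConfig {
	return spireconfig.SpireConfig{
		TrustDomain:     data["spire-trust-domain"],
		SocketPath:      data["spire-socket-path"],
		ServerAddr:      data["spire-server-addr"],
		NodeAliasPrefix: data["spire-node-alias-prefix"],
	}
}

func main() {
	// The same values as test/testdata/spire/config-spire.yaml above.
	cm := map[string]string{
		"spire-trust-domain":      "example.org",
		"spire-socket-path":       "unix:///spiffe-workload-api/spire-agent.sock",
		"spire-server-addr":       "spire-server.spire.svc.cluster.local:8081",
		"spire-node-alias-prefix": "/tekton-node/",
	}
	fmt.Printf("%+v\n", parseSpireConfig(cm))
}
```

The intent is only to make the relationship between the ConfigMap keys and the `SpireConfig` fields visible at a glance.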