From ae97acd3d2fb8c63deb4f7474fde499534632ed8 Mon Sep 17 00:00:00 2001
From: jagathprakash <31057312+jagathprakash@users.noreply.github.com>
Date: Thu, 3 Nov 2022 11:13:28 -0400
Subject: [PATCH] [TEP-0089] Enable SPIRE for signing taskrun results in alpha.
 Breaking down PR #4759, originally proposed by @pxp928 to address TEP-0089,
 according to @lumjjb's suggestions. The plan for breaking down the PR is:
 PR 1.1: api; PR 1.2: entrypointer (+ cmd line + test/entrypointer), where the
 entrypoint takes the results and signs them (termination message); PR 1.3:
 reconciler + pod + cmd/controller + integration tests, where the controller
 verifies the signed results. This commit corresponds to 1.3 above.

---
 cmd/controller/main.go                        |   8 +
 cmd/imagedigestexporter/main.go               |  20 +
 config/config-feature-flags.yaml              |   4 +
 docs/spire.md                                 | 285 ++++++++++++
 pkg/apis/pipeline/options.go                  |   7 +-
 pkg/pod/pod.go                                |  39 +-
 pkg/pod/pod_test.go                           | 219 +++++++++-
 pkg/pod/status.go                             |  93 +++-
 pkg/pod/status_test.go                        | 404 +++++++++++++++++-
 pkg/reconciler/taskrun/controller.go          |   4 +
 .../taskrun/resources/image_exporter.go       |  18 +-
 .../taskrun/resources/image_exporter_test.go  | 170 +++++++-
 pkg/reconciler/taskrun/taskrun.go             |  30 +-
 pkg/reconciler/taskrun/taskrun_test.go        | 134 ++++--
 test/e2e-common.sh                            |  59 +++
 test/e2e-tests.sh                             |  14 +
 test/embed_test.go                            |  30 +-
 test/entrypoint_test.go                       |  31 +-
 test/helm_task_test.go                        |  31 +-
 test/hermetic_taskrun_test.go                 |  35 +-
 test/ignore_step_error_test.go                |  23 +-
 test/init_test.go                             |  21 +
 test/kaniko_task_test.go                      |  23 +-
 test/pipelinefinally_test.go                  | 139 +++++-
 test/pipelinerun_test.go                      |  83 +++-
 test/status_test.go                           |  41 +-
 test/taskrun_test.go                          | 155 ++++++-
 .../patch/pipeline-controller-spire.json      |  55 +++
 test/testdata/spire/spiffe-csi-driver.yaml    |  20 +
 test/testdata/spire/spire-agent.yaml          | 208 +++++++++
 test/testdata/spire/spire-server.yaml         | 211 +++++++++
 31 files changed, 2537 insertions(+), 77 deletions(-)
 create mode 100644 docs/spire.md
 create mode 100644 test/testdata/patch/pipeline-controller-spire.json
 create mode 100644 test/testdata/spire/spiffe-csi-driver.yaml
 create mode 100644 test/testdata/spire/spire-agent.yaml
 create mode 100644 test/testdata/spire/spire-server.yaml

diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 38b3100fd6b..46df9c2c1b2 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -62,12 +62,20 @@ func main() {
 	flag.StringVar(&opts.Images.ImageDigestExporterImage, "imagedigest-exporter-image", "", "The container image containing our image digest exporter binary.")
 	flag.StringVar(&opts.Images.WorkingDirInitImage, "workingdirinit-image", "", "The container image containing our working dir init binary.")
 
+	flag.StringVar(&opts.SpireConfig.TrustDomain, "spire-trust-domain", "example.org", "Experimental: The SPIRE Trust domain to use.")
+	flag.StringVar(&opts.SpireConfig.SocketPath, "spire-socket-path", "unix:///spiffe-workload-api/spire-agent.sock", "Experimental: The SPIRE agent socket for SPIFFE workload API.")
+	flag.StringVar(&opts.SpireConfig.ServerAddr, "spire-server-addr", "spire-server.spire.svc.cluster.local:8081", "Experimental: The SPIRE server address for workload/node registration.")
+	flag.StringVar(&opts.SpireConfig.NodeAliasPrefix, "spire-node-alias-prefix", "/tekton-node/", "Experimental: The SPIRE node alias prefix to use.")
+
 	// This parses flags.
 	cfg := injection.ParseAndGetRESTConfigOrDie()
 
 	if err := opts.Images.Validate(); err != nil {
 		log.Fatal(err)
 	}
+	if err := opts.SpireConfig.Validate(); err != nil {
+		log.Fatal(err)
+	}
 	if cfg.QPS == 0 {
 		cfg.QPS = 2 * rest.DefaultQPS
 	}
diff --git a/cmd/imagedigestexporter/main.go b/cmd/imagedigestexporter/main.go
index 33496dab427..95309c963a0 100644
--- a/cmd/imagedigestexporter/main.go
+++ b/cmd/imagedigestexporter/main.go
@@ -17,9 +17,12 @@ limitations under the License.
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"flag"
 
+	"github.com/tektoncd/pipeline/pkg/spire"
+	"github.com/tektoncd/pipeline/pkg/spire/config"
 	"github.com/tektoncd/pipeline/pkg/termination"
 	"knative.dev/pkg/logging"
 
@@ -31,6 +34,8 @@ import (
 var (
 	images                 = flag.String("images", "", "List of images resources built by task in json format")
 	terminationMessagePath = flag.String("terminationMessagePath", "/tekton/termination", "Location of file containing termination message")
+	enableSpire            = flag.Bool("enable_spire", false, "If specified by configmap, this enables spire signing and verification")
+	socketPath             = flag.String("spire_socket_path", "unix:///spiffe-workload-api/spire-agent.sock", "Experimental: The SPIRE agent socket for SPIFFE workload API.")
 )
 
 /* The input of this go program will be a JSON string with all the output PipelineResources of type
@@ -76,6 +81,21 @@ func main() {
 
 	}
 
+	if enableSpire != nil && *enableSpire && socketPath != nil && *socketPath != "" {
+		ctx := context.Background()
+		spireConfig := config.SpireConfig{
+			SocketPath: *socketPath,
+		}
+
+		spireWorkloadAPI := spire.NewEntrypointerAPIClient(&spireConfig)
+		signed, err := spireWorkloadAPI.Sign(ctx, output)
+		if err != nil {
+			logger.Fatal(err)
+		}
+
+		output = append(output, signed...)
+	}
+
 	if err := termination.WriteMessage(*terminationMessagePath, output); err != nil {
 		logger.Fatalf("Unexpected error writing message %s to %s", *terminationMessagePath, err)
 	}
diff --git a/config/config-feature-flags.yaml b/config/config-feature-flags.yaml
index 43b3b4c72e9..b29361d37f9 100644
--- a/config/config-feature-flags.yaml
+++ b/config/config-feature-flags.yaml
@@ -85,3 +85,7 @@ data:
   # will fail the taskrun/pipelinerun. "warn" will only log the err message and "skip"
   # will skip the whole verification
   resource-verification-mode: "skip"
+  # Setting this flag to "true" enables SPIRE integration with the pipeline.
+  # This is an experimental feature and should therefore be considered
+  # an alpha feature.
+  enable-spire: "false"
diff --git a/docs/spire.md b/docs/spire.md
new file mode 100644
index 00000000000..5bc4ab2a2f7
--- /dev/null
+++ b/docs/spire.md
@@ -0,0 +1,285 @@
+<!--
+---
+linkTitle: "TaskRun Result Attestation"
+weight: 1660
+---
+-->
+# TaskRun Result Attestations
+
+TaskRun result attestation is currently an experimental alpha feature.
+
+The TaskRun result attestations feature provides the first part of non-falsifiable provenance for the build processes that run in the pipeline. It ensures that the results of Tekton pipeline executions originate from the build workloads themselves and that they have not been tampered with. The second part of non-falsifiable provenance is to ensure that no third party interfered with the build process. Using SPIRE, the TaskRun status is monitored for any activity or change not performed by the Tekton Pipeline Controller. If an unauthorized change is detected, the TaskRun is invalidated.
+
+When the TaskRun result attestations feature is enabled, every TaskRun produces a signature alongside its results, which can then be used to validate its provenance. For example, a TaskRun that creates the user-specified results `commit` and `url` would look like the following. `SVID`, `RESULT_MANIFEST`, `RESULT_MANIFEST.sig`, `commit.sig` and `url.sig` are attestations generated by the integration of SPIRE and the Tekton Controller.
+
+Parsed, the fields would be:
+```
+...
+<truncated>
+...
+πŸ“ Results
+
+ NAME                    VALUE
+ βˆ™ RESULT_MANIFEST       commit,url,SVID,commit.sig,url.sig
+ βˆ™ RESULT_MANIFEST.sig   MEUCIQD55MMII9SEk/esQvwNLGC43y7efNGZ+7fsTdq+9vXYFAIgNoRW7cV9WKriZkcHETIaAKqfcZVJfsKbEmaDyohDSm4=
+ βˆ™ SVID                  -----BEGIN CERTIFICATE-----
+MIICGzCCAcGgAwIBAgIQH9VkLxKkYMidPIsofckRQTAKBggqhkjOPQQDAjAeMQsw
+CQYDVQQGEwJVUzEPMA0GA1UEChMGU1BJRkZFMB4XDTIyMDIxMTE2MzM1MFoXDTIy
+MDIxMTE3MzQwMFowHTELMAkGA1UEBhMCVVMxDjAMBgNVBAoTBVNQSVJFMFkwEwYH
+KoZIzj0CAQYIKoZIzj0DAQcDQgAEBRdg3LdxVAELeH+lq8wzdEJd4Gnt+m9G0Qhy
+NyWoPmFUaj9vPpvOyRgzxChYnW0xpcDWihJBkq/EbusPvQB8CKOB4TCB3jAOBgNV
+HQ8BAf8EBAMCA6gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1Ud
+EwEB/wQCMAAwHQYDVR0OBBYEFID7ARM5+vwzvnLPMO7Icfnj7l7hMB8GA1UdIwQY
+MBaAFES3IzpGDqgV3QcQNgX8b/MBwyAtMF8GA1UdEQRYMFaGVHNwaWZmZTovL2V4
+YW1wbGUub3JnL25zL2RlZmF1bHQvdGFza3J1bi9jYWNoZS1pbWFnZS1waXBlbGlu
+ZXJ1bi04ZHE5Yy1mZXRjaC1mcm9tLWdpdDAKBggqhkjOPQQDAgNIADBFAiEAi+LR
+JkrZn93PZPslaFmcrQw3rVcEa4xKmPleSvQaBoACIF1QB+q1uwH6cNvWdbLK9g+W
+T9Np18bK0xc6p5SuTM2C
+-----END CERTIFICATE-----
+ βˆ™ commit       aa79de59c4bae24e32f15fda467d02ae9cd94b01
+ βˆ™ commit.sig   MEQCIEJHk+8B+mCFozp0F52TQ1AadlhEo1lZNOiOnb/ht71aAiBCE0otKB1R0BktlPvweFPldfZfjG0F+NUSc2gPzhErzg==
+ βˆ™ url          https://github.com/buildpacks/samples
+ βˆ™ url.sig      MEUCIF0Fuxr6lv1MmkreqDKcPH3m+eXp+gY++VcxWgGCx7T1AiEA9U/tROrKuCGfKApLq2A9EModbdoGXyQXFOpAa0aMpOg=
+```
+
+However, the verification materials are removed from the final results as part of the TaskRun status. They are stored in the termination messages (more details below):
+
+```
+$ tkn tr describe cache-image-pipelinerun-8dq9c-fetch-from-git
+...
+<truncated>
+...
+πŸ“ Results
+ NAME                    VALUE
+ βˆ™ commit       aa79de59c4bae24e32f15fda467d02ae9cd94b01
+ βˆ™ url          https://github.com/buildpacks/samples
+```
+
+## Architecture Overview
+
+This feature relies on a SPIRE installation. This is how it integrates into the architecture of Tekton:
+
+```
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”  Register TaskRun Workload Identity           β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚             β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Ίβ”‚          β”‚
+β”‚  Tekton     β”‚                                               β”‚  SPIRE   β”‚
+β”‚  Controller │◄───────────┐                                  β”‚  Server  β”‚
+β”‚             β”‚            β”‚ Listen on TaskRun                β”‚          β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”˜            β”‚                                  β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+ β–²           β”‚     β”Œβ”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”     β–²
+ β”‚           β”‚     β”‚           Tekton TaskRun              β”‚     β”‚
+ β”‚           β”‚     β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜     β”‚
+ β”‚  Configureβ”‚                                          β–²        β”‚ Attest
+ β”‚  Pod &    β”‚                                          β”‚        β”‚   +
+ β”‚  check    β”‚                                          β”‚        β”‚ Request
+ β”‚  ready    β”‚     β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”                        β”‚        β”‚ SVIDs
+ β”‚           └────►│  TaskRun  β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜        β”‚
+ β”‚                 β”‚  Pod      β”‚                                 β”‚
+ β”‚                 β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜     TaskRun Entrypointer        β”‚
+ β”‚                   β–²               Sign Result and update      β”‚
+ β”‚ Get               β”‚ Get SVID      TaskRun status with         β”‚
+ β”‚ SPIRE             β”‚               signature + cert            β”‚
+ β”‚ server            β”‚                                           β”‚
+ β”‚ Credentials       β”‚                                           β–Ό
+β”Œβ”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚                                                                          β”‚
+β”‚   SPIRE Agent    ( Runs as   )                                           β”‚
+β”‚   + CSI Driver   ( Daemonset )                                           β”‚
+β”‚                                                                          β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+```
+
+Initial Setup:
+1. As part of the SPIRE deployment, the SPIRE server attests the agents running on each node in the cluster.
+2. The Tekton Controller is configured with permission to create workload identity entries on the SPIRE server.
+3. As part of its operation, the Tekton Controller retrieves an identity that it can use to talk to the SPIRE server and register TaskRun workloads.
+
+When a TaskRun is created:
+1. The Tekton Controller creates a TaskRun pod and its associated resources.
+1. When the TaskRun pod is ready, the Tekton Controller registers an identity for the pod with the SPIRE server. This tells the SPIRE server which identity the TaskRun should use as well as how to attest the workload/pod.
+1. After the TaskRun steps complete, the entrypointer requests an SVID from the SPIFFE Workload API (via the SPIRE agent socket).
+1. The SPIRE agent attests the workload and requests an SVID.
+1. The entrypointer receives an x509 SVID, containing the x509 certificate and associated private key.
+1. The entrypointer signs the results of the TaskRun and emits the signatures and x509 certificate to the TaskRun results for later verification. A minimal sketch of this signing step follows the list.
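+
+The signing step can be pictured roughly as follows. This is only an illustrative sketch built on the go-spiffe v2 Workload API client; the real implementation lives in `pkg/spire`, and the package, function and parameter names used here (`spiresketch`, `signResults`, `socketPath`, `results`) are hypothetical.
+
+```go
+package spiresketch
+
+import (
+	"context"
+	"crypto"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+
+	"github.com/spiffe/go-spiffe/v2/workloadapi"
+)
+
+// signResults sketches the entrypointer's signing step: fetch an x509 SVID
+// from the SPIFFE Workload API (served by the SPIRE agent over the mounted
+// socket) and sign each result value with the SVID's private key.
+func signResults(ctx context.Context, socketPath string, results map[string]string) (map[string]string, error) {
+	source, err := workloadapi.NewX509Source(ctx,
+		workloadapi.WithClientOptions(workloadapi.WithAddr(socketPath)))
+	if err != nil {
+		return nil, fmt.Errorf("connecting to workload API: %w", err)
+	}
+	defer source.Close()
+
+	svid, err := source.GetX509SVID() // certificate chain + private key
+	if err != nil {
+		return nil, fmt.Errorf("fetching SVID: %w", err)
+	}
+
+	sigs := map[string]string{}
+	for name, value := range results {
+		// Sign the SHA-256 digest of each result value with the SVID key.
+		digest := sha256.Sum256([]byte(value))
+		sig, err := svid.PrivateKey.Sign(rand.Reader, digest[:], crypto.SHA256)
+		if err != nil {
+			return nil, fmt.Errorf("signing result %q: %w", name, err)
+		}
+		sigs[name+".sig"] = base64.StdEncoding.EncodeToString(sig)
+	}
+	return sigs, nil
+}
+```
+
+In the actual feature the manifest of result names (`RESULT_MANIFEST`) is signed as well, so that a verifier can detect omitted results.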
+
+## Enabling TaskRun result attestations
+
+To enable TaskRun attestations:
+1. Make sure `enable-spire` is set to `"true"` in the `feature-flags` configmap; see [`install.md`](./install.md#customizing-the-pipelines-controller-behavior) for details.
+1. Create a SPIRE deployment containing a SPIRE server, SPIRE agents and the SPIRE CSI driver. For convenience, [this sample single-cluster deployment](https://github.com/spiffe/spiffe-csi/tree/main/example/config) can be used.
+1. Register the SPIRE workload entry for Tekton with the "Admin" flag, which will allow the Tekton controller to communicate with the SPIRE server to manage the TaskRun identities dynamically.
+    ```
+    # This example is assuming use of the above SPIRE deployment
+    # Example where trust domain is "example.org" and cluster name is "example-cluster"
+    
+    # Register a node alias for all nodes on which the Tekton Controller may reside
+    kubectl -n spire exec -it \
+        deployment/spire-server -- \
+        /opt/spire/bin/spire-server entry create \
+            -node \
+            -spiffeID spiffe://example.org/allnodes \
+            -selector k8s_psat:cluster:example-cluster
+    
+    # Register the Tekton controller workload so that it can create entries in the SPIRE server
+    kubectl -n spire exec -it \
+        deployment/spire-server -- \
+        /opt/spire/bin/spire-server entry create \
+            -admin \
+            -spiffeID spiffe://example.org/tekton/controller \
+            -parentID spiffe://example.org/allnodes \
+            -selector k8s:ns:tekton-pipelines \
+            -selector k8s:pod-label:app:tekton-pipelines-controller \
+            -selector k8s:sa:tekton-pipelines-controller
+    
+    ```
+1. Modify the controller (`config/controller.yaml`) to provide access to the SPIRE agent socket.
+    ```yaml
+    # Add the following to the volumeMounts of the "tekton-pipelines-controller" container
+    - name: spiffe-workload-api
+      mountPath: /spiffe-workload-api
+      readOnly: true
+    
+    # Add the following to the volumes of the controller pod
+    - name: spiffe-workload-api
+      csi:
+        driver: "csi.spiffe.io"
+    ```
+1. (Optional) Modify the controller (`config/controller.yaml`) to configure non-default SPIRE options by adding arguments to the CLI.
+    ```yaml
+          containers:
+          - name: tekton-pipelines-controller
+            image: ko://github.com/tektoncd/pipeline/cmd/controller
+            args: [
+              # These images are built on-demand by `ko resolve` and are replaced
+              # by image references by digest.
+              "-kubeconfig-writer-image", "ko://github.com/tektoncd/pipeline/cmd/kubeconfigwriter",
+              "-git-image", "ko://github.com/tektoncd/pipeline/cmd/git-init",
+              "-entrypoint-image", "ko://github.com/tektoncd/pipeline/cmd/entrypoint",
+              "-nop-image", "ko://github.com/tektoncd/pipeline/cmd/nop",
+              "-imagedigest-exporter-image", "ko://github.com/tektoncd/pipeline/cmd/imagedigestexporter",
+              "-pr-image", "ko://github.com/tektoncd/pipeline/cmd/pullrequest-init",
+              "-workingdirinit-image", "ko://github.com/tektoncd/pipeline/cmd/workingdirinit",
+    
+              # Configure optional SPIRE arguments
+    +         "-spire-trust-domain", "example.org",
+    +         "-spire-socket-path", "/spiffe-workload-api/spire-agent.sock",
+    +         "spire-server-addr", "spire-server.spire.svc.cluster.local:8081"
+    +         "spire-node-alias-prefix", "/tekton-node/",
+    
+              # This is gcr.io/google.com/cloudsdktool/cloud-sdk:302.0.0-slim
+              "-gsutil-image", "gcr.io/google.com/cloudsdktool/cloud-sdk@sha256:27b2c22bf259d9bc1a291e99c63791ba0c27a04d2db0a43241ba0f1f20f4067f",
+              # The shell image must be root in order to create directories and copy files to PVCs.
+              # gcr.io/distroless/base:debug as of October 21, 2021
+              # the image shall not contain a tag, so it will be supported on a runtime like cri-o
+              "-shell-image", "gcr.io/distroless/base@sha256:cfdc553400d41b47fd231b028403469811fcdbc0e69d66ea8030c5a0b5fbac2b",
+              # for script mode to work with windows we need a powershell image
+              # pinning to nanoserver tag as of July 15 2021
+              "-shell-image-win", "mcr.microsoft.com/powershell:nanoserver@sha256:b6d5ff841b78bdf2dfed7550000fd4f3437385b8fa686ec0f010be24777654d6",
+            ]
+    ```
+
+## Sample TaskRun attestation
+
+The following example shows how this feature works:
+
+```yaml
+kind: TaskRun
+apiVersion: tekton.dev/v1beta1
+metadata:
+  name: non-falsifiable-provenance
+spec:
+  timeout: 60s
+  taskSpec:
+    steps:
+    - name: non-falsifiable
+      image: ubuntu
+      script: |
+        #!/usr/bin/env bash
+        printf "%s" "hello" > "$(results.foo.path)"
+        printf "%s" "world" > "$(results.bar.path)"
+    results:
+    - name: foo
+    - name: bar
+```
+
+
+The termination message is:
+```
+message: '[{"key":"RESULT_MANIFEST","value":"foo,bar","type":1},{"key":"RESULT_MANIFEST.sig","value":"MEQCIB4grfqBkcsGuVyoQd9KUVzNZaFGN6jQOKK90p5HWHqeAiB7yZerDA+YE3Af/ALG43DQzygiBpKhTt8gzWGmpvXJFw==","type":1},{"key":"SVID","value":"-----BEGIN
+        CERTIFICATE-----\nMIICCjCCAbCgAwIBAgIRALH94zAZZXdtPg97O5vG5M0wCgYIKoZIzj0EAwIwHjEL\nMAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMTQxNTUzNTlaFw0y\nMjAzMTQxNjU0MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG\nByqGSM49AgEGCCqGSM49AwEHA0IABPLzFTDY0RDpjKb+eZCIWgUw9DViu8/pM8q7\nHMTKCzlyGqhaU80sASZfpkZvmi72w+gLszzwVI1ZNU5e7aCzbtSjgc8wgcwwDgYD\nVR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV\nHRMBAf8EAjAAMB0GA1UdDgQWBBSsUvspy+/Dl24pA1f+JuNVJrjgmTAfBgNVHSME\nGDAWgBSOMyOHnyLLGxPSD9RRFL+Yhm/6qzBNBgNVHREERjBEhkJzcGlmZmU6Ly9l\neGFtcGxlLm9yZy9ucy9kZWZhdWx0L3Rhc2tydW4vbm9uLWZhbHNpZmlhYmxlLXBy\nb3ZlbmFuY2UwCgYIKoZIzj0EAwIDSAAwRQIhAM4/bPAH9dyhBEj3DbwtJKMyEI56\n4DVrP97ps9QYQb23AiBiXWrQkvRYl0h4CX0lveND2yfqLrGdVL405O5NzCcUrA==\n-----END
+        CERTIFICATE-----\n","type":1},{"key":"bar","value":"world","type":1},{"key":"bar.sig","value":"MEUCIQDOtg+aEP1FCr6/FsHX+bY1d5abSQn2kTiUMg4Uic2lVQIgTVF5bbT/O77VxESSMtQlpBreMyw2GmKX2hYJlaOEH1M=","type":1},{"key":"foo","value":"hello","type":1},{"key":"foo.sig","value":"MEQCIBr+k0i7SRSyb4h96vQE9hhxBZiZb/2PXQqReOKJDl/rAiBrjgSsalwOvN0zgQay0xQ7PRbm5YSmI8tvKseLR8Ryww==","type":1}]'
+```
+
+Parsed, the fields are:
+- `RESULT_MANIFEST`: List of results that should be present, to prevent pick-and-choose attacks (a minimal decoding and completeness-check sketch follows the listing below)
+- `RESULT_MANIFEST.sig`: The signature of the result manifest
+- `SVID`: The x509 certificate that will be used to verify the signature trust chain to the authority
+- `*.sig`: The signature of each individual result output
+```
+ βˆ™ RESULT_MANIFEST       foo,bar
+ βˆ™ RESULT_MANIFEST.sig   MEQCIB4grfqBkcsGuVyoQd9KUVzNZaFGN6jQOKK90p5HWHqeAiB7yZerDA+YE3Af/ALG43DQzygiBpKhTt8gzWGmpvXJFw==
+ βˆ™ SVID                  -----BEGIN CERTIFICATE-----
+MIICCjCCAbCgAwIBAgIRALH94zAZZXdtPg97O5vG5M0wCgYIKoZIzj0EAwIwHjEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMTQxNTUzNTlaFw0y
+MjAzMTQxNjU0MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG
+ByqGSM49AgEGCCqGSM49AwEHA0IABPLzFTDY0RDpjKb+eZCIWgUw9DViu8/pM8q7
+HMTKCzlyGqhaU80sASZfpkZvmi72w+gLszzwVI1ZNU5e7aCzbtSjgc8wgcwwDgYD
+VR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV
+HRMBAf8EAjAAMB0GA1UdDgQWBBSsUvspy+/Dl24pA1f+JuNVJrjgmTAfBgNVHSME
+GDAWgBSOMyOHnyLLGxPSD9RRFL+Yhm/6qzBNBgNVHREERjBEhkJzcGlmZmU6Ly9l
+eGFtcGxlLm9yZy9ucy9kZWZhdWx0L3Rhc2tydW4vbm9uLWZhbHNpZmlhYmxlLXBy
+b3ZlbmFuY2UwCgYIKoZIzj0EAwIDSAAwRQIhAM4/bPAH9dyhBEj3DbwtJKMyEI56
+4DVrP97ps9QYQb23AiBiXWrQkvRYl0h4CX0lveND2yfqLrGdVL405O5NzCcUrA==
+-----END CERTIFICATE-----
+ βˆ™ bar       world
+ βˆ™ bar.sig   MEUCIQDOtg+aEP1FCr6/FsHX+bY1d5abSQn2kTiUMg4Uic2lVQIgTVF5bbT/O77VxESSMtQlpBreMyw2GmKX2hYJlaOEH1M=
+ βˆ™ foo       hello
+ βˆ™ foo.sig   MEQCIBr+k0i7SRSyb4h96vQE9hhxBZiZb/2PXQqReOKJDl/rAiBrjgSsalwOvN0zgQay0xQ7PRbm5YSmI8tvKseLR8Ryww==
+```
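+
+To make the layout concrete, the following hypothetical sketch decodes such a termination message and checks that every result named in `RESULT_MANIFEST` has both a value and a matching `.sig` entry. It is not the controller's actual verification code; the `result` struct and `checkManifest` function are made up for illustration.
+
+```go
+package spiresketch
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// result mirrors one entry of the termination message JSON array.
+type result struct {
+	Key   string `json:"key"`
+	Value string `json:"value"`
+}
+
+// checkManifest decodes the termination message and verifies that every
+// result listed in RESULT_MANIFEST has both a value and a ".sig" entry,
+// guarding against "pick and choose" omission of results.
+func checkManifest(message string) error {
+	var entries []result
+	if err := json.Unmarshal([]byte(message), &entries); err != nil {
+		return fmt.Errorf("decoding termination message: %w", err)
+	}
+
+	byKey := map[string]string{}
+	for _, e := range entries {
+		byKey[e.Key] = e.Value
+	}
+
+	manifest, ok := byKey["RESULT_MANIFEST"]
+	if !ok {
+		return fmt.Errorf("RESULT_MANIFEST missing")
+	}
+	for _, name := range strings.Split(manifest, ",") {
+		if name == "SVID" || strings.HasSuffix(name, ".sig") {
+			continue // verification materials are checked separately
+		}
+		if _, ok := byKey[name]; !ok {
+			return fmt.Errorf("result %q listed in manifest but missing", name)
+		}
+		if _, ok := byKey[name+".sig"]; !ok {
+			return fmt.Errorf("signature for result %q missing", name)
+		}
+	}
+	return nil
+}
+```
+
+The controller performs an equivalent completeness check as part of the verification described below.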
+
+
+However, the verification materials are removed from the results as part of the TaskRun status:
+```console
+$ tkn tr describe non-falsifiable-provenance
+Name:              non-falsifiable-provenance
+Namespace:         default
+Service Account:   default
+Timeout:           1m0s
+Labels:
+ app.kubernetes.io/managed-by=tekton-pipelines
+
+🌑️  Status
+
+STARTED          DURATION     STATUS
+38 seconds ago   36 seconds   Succeeded
+
+πŸ“ Results
+
+ NAME        VALUE
+ βˆ™ bar       world
+ βˆ™ foo       hello
+
+🦢 Steps
+
+ NAME                STATUS
+ βˆ™ non-falsifiable   Completed
+```
+
+## How the results are verified
+
+The signatures are verified by the Tekton controller. The verification process is as follows (a minimal sketch follows the list):
+
+- Verifying the SVID
+  - Obtain the trust bundle from the SPIRE server
+  - Verify the SVID with the trust bundle
+  - Verify that the SVID SPIFFE ID is for the correct TaskRun
+- Verifying the result manifest
+  - Verify the content of `RESULT_MANIFEST` against the field `RESULT_MANIFEST.sig` using the SVID public key
+  - Verify that there is a corresponding field for all items listed in `RESULT_MANIFEST` (besides the SVID and `*.sig` fields)
+- Verify individual result fields
+  - For each of the items in the results, verify its content against its associated `.sig` field
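+
+As an illustration of the last two bullets, this hypothetical sketch verifies one result value against its `.sig` field using the public key of the SVID certificate; the trust-bundle and SPIFFE ID checks are omitted, `verifyResult` is a made-up name, and the controller's real implementation lives in `pkg/spire`. It assumes ECDSA signatures over the SHA-256 digest of the result value, matching the EC keys of the example SVID certificates shown above.
+
+```go
+package spiresketch
+
+import (
+	"crypto/ecdsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"fmt"
+)
+
+// verifyResult checks a single result value against its base64-encoded
+// signature using the public key of the PEM-encoded SVID certificate.
+// Trust-bundle and SPIFFE ID validation are intentionally left out.
+func verifyResult(svidPEM, value, sigB64 string) error {
+	block, _ := pem.Decode([]byte(svidPEM))
+	if block == nil {
+		return fmt.Errorf("no PEM block found in SVID")
+	}
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		return fmt.Errorf("parsing SVID certificate: %w", err)
+	}
+	pub, ok := cert.PublicKey.(*ecdsa.PublicKey)
+	if !ok {
+		return fmt.Errorf("SVID public key is not ECDSA")
+	}
+
+	sig, err := base64.StdEncoding.DecodeString(sigB64)
+	if err != nil {
+		return fmt.Errorf("decoding signature: %w", err)
+	}
+	digest := sha256.Sum256([]byte(value))
+	if !ecdsa.VerifyASN1(pub, digest[:], sig) {
+		return fmt.Errorf("signature does not match result value")
+	}
+	return nil
+}
+```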
+
+
+## Further Details
+
+To learn more about SPIRE TaskRun attestations, check out the [TEP](https://github.com/tektoncd/community/blob/main/teps/0089-nonfalsifiable-provenance-support.md).
\ No newline at end of file
diff --git a/pkg/apis/pipeline/options.go b/pkg/apis/pipeline/options.go
index 2e75adca4c1..6c15c86f365 100644
--- a/pkg/apis/pipeline/options.go
+++ b/pkg/apis/pipeline/options.go
@@ -16,8 +16,13 @@ limitations under the License.
 
 package pipeline
 
+import (
+	spireconfig "github.com/tektoncd/pipeline/pkg/spire/config"
+)
+
 // Options holds options passed to the Tekton Pipeline controllers
 // typically via command-line flags.
 type Options struct {
-	Images Images
+	Images      Images
+	SpireConfig spireconfig.SpireConfig
 }
diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go
index 30bf9d8fa1c..fa489b0edd0 100644
--- a/pkg/pod/pod.go
+++ b/pkg/pod/pod.go
@@ -30,6 +30,7 @@ import (
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
 	"github.com/tektoncd/pipeline/pkg/internal/computeresources/tasklevel"
 	"github.com/tektoncd/pipeline/pkg/names"
+	"github.com/tektoncd/pipeline/pkg/spire"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -120,6 +121,12 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
 	featureFlags := config.FromContextOrDefaults(ctx).FeatureFlags
 	alphaAPIEnabled := featureFlags.EnableAPIFields == config.AlphaAPIFields
 
+	// Entrypoint arg to enable or disable spire
+	var commonExtraEntrypointArgs []string
+	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire {
+		commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-enable_spire")
+	}
+
 	// Add our implicit volumes first, so they can be overridden by the user if they prefer.
 	volumes = append(volumes, implicitVolumes...)
 	volumeMounts = append(volumeMounts, implicitVolumeMounts...)
@@ -190,11 +197,13 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
 	}
 
 	readyImmediately := isPodReadyImmediately(*featureFlags, taskSpec.Sidecars)
+	// Append credEntrypointArgs to the common entrypoint args, which already contain the SPIRE flag when it is enabled via the configmap
+	commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, credEntrypointArgs...)
 
 	if alphaAPIEnabled {
-		stepContainers, err = orderContainers(credEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately)
+		stepContainers, err = orderContainers(commonExtraEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately)
 	} else {
-		stepContainers, err = orderContainers(credEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately)
+		stepContainers, err = orderContainers(commonExtraEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately)
 	}
 	if err != nil {
 		return nil, err
@@ -275,6 +284,32 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
 		return nil, err
 	}
 
+	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire {
+		volumes = append(volumes, corev1.Volume{
+			Name: spire.WorkloadAPI,
+			VolumeSource: corev1.VolumeSource{
+				CSI: &corev1.CSIVolumeSource{
+					Driver: "csi.spiffe.io",
+				},
+			},
+		})
+
+		for i := range stepContainers {
+			c := &stepContainers[i]
+			c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
+				Name:      spire.WorkloadAPI,
+				MountPath: spire.VolumeMountPath,
+			})
+		}
+		for i := range initContainers {
+			c := &initContainers[i]
+			c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
+				Name:      spire.WorkloadAPI,
+				MountPath: spire.VolumeMountPath,
+			})
+		}
+	}
+
 	mergedPodContainers := stepContainers
 
 	// Merge sidecar containers with step containers.
diff --git a/pkg/pod/pod_test.go b/pkg/pod/pod_test.go
index ee9bcc31e71..790175f54ff 100644
--- a/pkg/pod/pod_test.go
+++ b/pkg/pod/pod_test.go
@@ -35,6 +35,7 @@ import (
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	"github.com/tektoncd/pipeline/pkg/spire"
 	"github.com/tektoncd/pipeline/test/diff"
 	"github.com/tektoncd/pipeline/test/names"
 	corev1 "k8s.io/api/core/v1"
@@ -87,6 +88,15 @@ func TestPodBuild(t *testing.T) {
 	priorityClassName := "system-cluster-critical"
 	taskRunName := "taskrun-name"
 
+	initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})}
+	for i := range initContainers {
+		c := &initContainers[i]
+		c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
+			Name:      spire.WorkloadAPI,
+			MountPath: spire.VolumeMountPath,
+		})
+	}
+
 	for _, c := range []struct {
 		desc            string
 		trs             v1beta1.TaskRunSpec
@@ -1522,7 +1532,7 @@ _EOF_
 			},
 			want: &corev1.PodSpec{
 				RestartPolicy:  corev1.RestartPolicyNever,
-				InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})},
+				InitContainers: initContainers,
 				Containers: []corev1.Container{{
 					Name:    "step-name",
 					Image:   "image",
@@ -1537,6 +1547,7 @@ _EOF_
 						"/tekton/termination",
 						"-step_metadata_dir",
 						"/tekton/run/0/status",
+						"-enable_spire",
 						"-entrypoint",
 						"cmd",
 						"--",
@@ -1544,6 +1555,9 @@ _EOF_
 					VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, {
 						Name:      "tekton-creds-init-home-0",
 						MountPath: "/tekton/creds",
+					}, {
+						Name:      spire.WorkloadAPI,
+						MountPath: spire.VolumeMountPath,
 					}}, implicitVolumeMounts...),
 					TerminationMessagePath: "/tekton/termination",
 					Env: []corev1.EnvVar{
@@ -1553,6 +1567,13 @@ _EOF_
 				Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{
 					Name:         "tekton-creds-init-home-0",
 					VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}},
+				}, corev1.Volume{
+					Name: spire.WorkloadAPI,
+					VolumeSource: corev1.VolumeSource{
+						CSI: &corev1.CSIVolumeSource{
+							Driver: "csi.spiffe.io",
+						},
+					},
 				}),
 				ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds,
 			},
@@ -1572,7 +1593,7 @@ _EOF_
 			},
 			want: &corev1.PodSpec{
 				RestartPolicy:  corev1.RestartPolicyNever,
-				InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})},
+				InitContainers: initContainers,
 				Containers: []corev1.Container{{
 					Name:    "step-name",
 					Image:   "image",
@@ -1587,6 +1608,7 @@ _EOF_
 						"/tekton/termination",
 						"-step_metadata_dir",
 						"/tekton/run/0/status",
+						"-enable_spire",
 						"-entrypoint",
 						"cmd",
 						"--",
@@ -1594,6 +1616,9 @@ _EOF_
 					VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, {
 						Name:      "tekton-creds-init-home-0",
 						MountPath: "/tekton/creds",
+					}, {
+						Name:      spire.WorkloadAPI,
+						MountPath: spire.VolumeMountPath,
 					}}, implicitVolumeMounts...),
 					TerminationMessagePath: "/tekton/termination",
 					Env: []corev1.EnvVar{
@@ -1605,6 +1630,13 @@ _EOF_
 				Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{
 					Name:         "tekton-creds-init-home-0",
 					VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}},
+				}, corev1.Volume{
+					Name: spire.WorkloadAPI,
+					VolumeSource: corev1.VolumeSource{
+						CSI: &corev1.CSIVolumeSource{
+							Driver: "csi.spiffe.io",
+						},
+					},
 				}),
 				ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds,
 			},
@@ -1930,9 +1962,21 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7
 `},
 	}
 
+	initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}}), placeScriptsContainer}
+	for i := range initContainers {
+		c := &initContainers[i]
+		c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
+			Name:      spire.WorkloadAPI,
+			MountPath: spire.VolumeMountPath,
+		})
+	}
+
 	containersVolumeMounts := append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, {
 		Name:      "tekton-creds-init-home-0",
 		MountPath: "/tekton/creds",
+	}, {
+		Name:      spire.WorkloadAPI,
+		MountPath: spire.VolumeMountPath,
 	}}, implicitVolumeMounts...)
 	containersVolumeMounts = append(containersVolumeMounts, debugScriptsVolumeMount)
 	containersVolumeMounts = append(containersVolumeMounts, corev1.VolumeMount{
@@ -1963,7 +2007,7 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7
 		},
 		want: &corev1.PodSpec{
 			RestartPolicy:  corev1.RestartPolicyNever,
-			InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}}), placeScriptsContainer},
+			InitContainers: initContainers,
 			Containers: []corev1.Container{{
 				Name:    "step-name",
 				Image:   "image",
@@ -1978,6 +2022,7 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7
 					"/tekton/termination",
 					"-step_metadata_dir",
 					"/tekton/run/0/status",
+					"-enable_spire",
 					"-breakpoint_on_failure",
 					"-entrypoint",
 					"cmd",
@@ -1989,6 +2034,13 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7
 			Volumes: append(implicitVolumes, debugScriptsVolume, debugInfoVolume, binVolume, scriptsVolume, runVolume(0), downwardVolume, corev1.Volume{
 				Name:         "tekton-creds-init-home-0",
 				VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}},
+			}, corev1.Volume{
+				Name: spire.WorkloadAPI,
+				VolumeSource: corev1.VolumeSource{
+					CSI: &corev1.CSIVolumeSource{
+						Driver: "csi.spiffe.io",
+					},
+				},
 			}),
 			ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds,
 		},
@@ -2273,6 +2325,167 @@ func TestPodBuild_TaskLevelResourceRequirements(t *testing.T) {
 	}
 }
 
+func TestPodBuildwithSpireEnabled(t *testing.T) {
+	initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})}
+	for i := range initContainers {
+		c := &initContainers[i]
+		c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
+			Name:      spire.WorkloadAPI,
+			MountPath: spire.VolumeMountPath,
+		})
+	}
+
+	for _, c := range []struct {
+		desc            string
+		trs             v1beta1.TaskRunSpec
+		trAnnotation    map[string]string
+		ts              v1beta1.TaskSpec
+		want            *corev1.PodSpec
+		wantAnnotations map[string]string
+	}{{
+		desc: "simple with debug breakpoint onFailure",
+		trs: v1beta1.TaskRunSpec{
+			Debug: &v1beta1.TaskRunDebug{
+				Breakpoint: []string{breakpointOnFailure},
+			},
+		},
+		ts: v1beta1.TaskSpec{
+			Steps: []v1beta1.Step{{
+				Name:    "name",
+				Image:   "image",
+				Command: []string{"cmd"}, // avoid entrypoint lookup.
+			}},
+		},
+		want: &corev1.PodSpec{
+			RestartPolicy:  corev1.RestartPolicyNever,
+			InitContainers: initContainers,
+			Containers: []corev1.Container{{
+				Name:    "step-name",
+				Image:   "image",
+				Command: []string{"/tekton/bin/entrypoint"},
+				Args: []string{
+					"-wait_file",
+					"/tekton/downward/ready",
+					"-wait_file_content",
+					"-post_file",
+					"/tekton/run/0/out",
+					"-termination_path",
+					"/tekton/termination",
+					"-step_metadata_dir",
+					"/tekton/run/0/status",
+					"-enable_spire",
+					"-entrypoint",
+					"cmd",
+					"--",
+				},
+				VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, {
+					Name:      "tekton-creds-init-home-0",
+					MountPath: "/tekton/creds",
+				}, {
+					Name:      spire.WorkloadAPI,
+					MountPath: spire.VolumeMountPath,
+				}}, implicitVolumeMounts...),
+				TerminationMessagePath: "/tekton/termination",
+			}},
+			Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{
+				Name:         "tekton-creds-init-home-0",
+				VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}},
+			}, corev1.Volume{
+				Name: spire.WorkloadAPI,
+				VolumeSource: corev1.VolumeSource{
+					CSI: &corev1.CSIVolumeSource{
+						Driver: "csi.spiffe.io",
+					},
+				},
+			}),
+			ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds,
+		},
+	}} {
+		t.Run(c.desc, func(t *testing.T) {
+			featureFlags := map[string]string{
+				"enable-spire": "true",
+			}
+			names.TestingSeed()
+			store := config.NewStore(logtesting.TestLogger(t))
+			store.OnConfigChanged(
+				&corev1.ConfigMap{
+					ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()},
+					Data:       featureFlags,
+				},
+			)
+			kubeclient := fakek8s.NewSimpleClientset(
+				&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}},
+				&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "service-account", Namespace: "default"},
+					Secrets: []corev1.ObjectReference{{
+						Name: "multi-creds",
+					}},
+				},
+				&corev1.Secret{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "multi-creds",
+						Namespace: "default",
+						Annotations: map[string]string{
+							"tekton.dev/docker-0": "https://us.gcr.io",
+							"tekton.dev/docker-1": "https://docker.io",
+							"tekton.dev/git-0":    "github.com",
+							"tekton.dev/git-1":    "gitlab.com",
+						}},
+					Type: "kubernetes.io/basic-auth",
+					Data: map[string][]byte{
+						"username": []byte("foo"),
+						"password": []byte("BestEver"),
+					},
+				},
+			)
+			var trAnnotations map[string]string
+			if c.trAnnotation == nil {
+				trAnnotations = map[string]string{
+					ReleaseAnnotation: fakeVersion,
+				}
+			} else {
+				trAnnotations = c.trAnnotation
+				trAnnotations[ReleaseAnnotation] = fakeVersion
+			}
+			tr := &v1beta1.TaskRun{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "taskrun-name",
+					Namespace:   "default",
+					Annotations: trAnnotations,
+				},
+				Spec: c.trs,
+			}
+
+			// No entrypoints should be looked up.
+			entrypointCache := fakeCache{}
+			builder := Builder{
+				Images:          images,
+				KubeClient:      kubeclient,
+				EntrypointCache: entrypointCache,
+			}
+
+			got, err := builder.Build(store.ToContext(context.Background()), tr, c.ts)
+			if err != nil {
+				t.Fatalf("builder.Build: %v", err)
+			}
+
+			expectedName := kmeta.ChildName(tr.Name, "-pod")
+			if d := cmp.Diff(expectedName, got.Name); d != "" {
+				t.Errorf("Pod name does not match: %q", d)
+			}
+
+			if d := cmp.Diff(c.want, &got.Spec, resourceQuantityCmp, volumeSort, volumeMountSort); d != "" {
+				t.Errorf("Diff %s", diff.PrintWantGot(d))
+			}
+
+			if c.wantAnnotations != nil {
+				if d := cmp.Diff(c.wantAnnotations, got.ObjectMeta.Annotations, cmpopts.IgnoreMapEntries(ignoreReleaseAnnotation)); d != "" {
+					t.Errorf("Annotation Diff(-want, +got):\n%s", d)
+				}
+			}
+		})
+	}
+}
+
 // verifyTaskLevelComputeResources verifies that the given TaskRun's containers have the expected compute resources.
 func verifyTaskLevelComputeResources(expectedComputeResources []ExpectedComputeResources, containers []corev1.Container) error {
 	if len(expectedComputeResources) != len(containers) {
diff --git a/pkg/pod/status.go b/pkg/pod/status.go
index 0a0894981eb..66e1b0f7854 100644
--- a/pkg/pod/status.go
+++ b/pkg/pod/status.go
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"strconv"
@@ -25,6 +26,7 @@ import (
 
 	"github.com/hashicorp/go-multierror"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	"github.com/tektoncd/pipeline/pkg/spire"
 	"github.com/tektoncd/pipeline/pkg/termination"
 	"go.uber.org/zap"
 	corev1 "k8s.io/api/core/v1"
@@ -104,11 +106,16 @@ func SidecarsReady(podStatus corev1.PodStatus) bool {
 }
 
 // MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status.
-func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod) (v1beta1.TaskRunStatus, error) {
+func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod, spireEnabled bool,
+	spireAPI spire.ControllerAPIClient) (v1beta1.TaskRunStatus, error) {
 	trs := &tr.Status
 	if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown {
 		// If the taskRunStatus doesn't exist yet, it's because we just started running
 		markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
+
+		if spireEnabled {
+			markStatusSignedResultsRunning(trs)
+		}
 	}
 
 	sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers)
@@ -118,7 +125,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev
 	if complete {
 		updateCompletedTaskRunStatus(logger, trs, pod)
 	} else {
-		updateIncompleteTaskRunStatus(trs, pod)
+		updateIncompleteTaskRunStatus(trs, pod, spireEnabled)
 	}
 
 	trs.PodName = pod.Name
@@ -136,7 +143,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev
 	}
 
 	var merr *multierror.Error
-	if err := setTaskRunStatusBasedOnStepStatus(logger, stepStatuses, &tr); err != nil {
+	if err := setTaskRunStatusBasedOnStepStatus(ctx, logger, stepStatuses, &tr, spireEnabled, spireAPI); err != nil {
 		merr = multierror.Append(merr, err)
 	}
 
@@ -147,7 +154,30 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev
 	return *trs, merr.ErrorOrNil()
 }
 
-func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun) *multierror.Error {
+func setTaskRunStatusBasedOnSpireVerification(ctx context.Context, logger *zap.SugaredLogger, tr *v1beta1.TaskRun, trs *v1beta1.TaskRunStatus,
+	filteredResults []v1beta1.PipelineResourceResult, spireAPI spire.ControllerAPIClient) {
+
+	if tr.IsSuccessful() && spireAPI != nil &&
+		((tr.Status.TaskSpec != nil && len(tr.Status.TaskSpec.Results) >= 1) || len(filteredResults) >= 1) {
+		logger.Info("validating signed results with spire: ", trs.TaskRunResults)
+		if err := spireAPI.VerifyTaskRunResults(ctx, filteredResults, tr); err != nil {
+			logger.Errorf("failed to verify signed results with spire: %w", err)
+			markStatusSignedResultsFailure(trs, err.Error())
+			logger.Errorf("failed to verify signed results with spire: %v", err)
+			logger.Info("successfully validated signed results with spire")
+			markStatusSignedResultsVerified(trs)
+		}
+	}
+
+	// If there are no signed results and the task spec does not declare any results, mark the signed-results condition as verified
+	if len(filteredResults) == 0 && (tr.Status.TaskSpec == nil || len(tr.Status.TaskSpec.Results) == 0) {
+		markStatusSignedResultsVerified(trs)
+	}
+}
+
+func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun,
+	spireEnabled bool, spireAPI spire.ControllerAPIClient) *multierror.Error {
+
 	trs := &tr.Status
 	var merr *multierror.Error
 
@@ -170,10 +200,13 @@ func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses [
 					logger.Errorf("error extracting the exit code of step %q in taskrun %q: %v", s.Name, tr.Name, err)
 					merr = multierror.Append(merr, err)
 				}
-				taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results)
+				taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results, spireEnabled)
 				if tr.IsSuccessful() {
 					trs.TaskRunResults = append(trs.TaskRunResults, taskResults...)
 					trs.ResourcesResult = append(trs.ResourcesResult, pipelineResourceResults...)
+					if spireEnabled {
+						setTaskRunStatusBasedOnSpireVerification(ctx, logger, tr, trs, filteredResults, spireAPI)
+					}
 				}
 				msg, err = createMessageFromResults(filteredResults)
 				if err != nil {
@@ -224,7 +257,8 @@ func createMessageFromResults(results []v1beta1.PipelineResourceResult) (string,
 	return string(bytes), nil
 }
 
-func filterResultsAndResources(results []v1beta1.PipelineResourceResult) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) {
+func filterResultsAndResources(results []v1beta1.PipelineResourceResult, spireEnabled bool) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) {
+
 	var taskResults []v1beta1.TaskRunResult
 	var pipelineResourceResults []v1beta1.PipelineResourceResult
 	var filteredResults []v1beta1.PipelineResourceResult
@@ -236,6 +270,15 @@ func filterResultsAndResources(results []v1beta1.PipelineResourceResult) ([]v1be
 			if err != nil {
 				continue
 			}
+			// TODO(#4723): Validate that the type we inferred from aos is matching the
+			// TaskResult Type before setting it to the taskRunResult.
+			// TODO(#4723): Validate the taskrun results against taskresults for object val
+			if spireEnabled {
+				if r.Key == spire.KeySVID || r.Key == spire.KeyResultManifest || strings.HasSuffix(r.Key, spire.KeySignatureSuffix) {
+					filteredResults = append(filteredResults, r)
+					continue
+				}
+			}
 			taskRunResult := v1beta1.TaskRunResult{
 				Name:  r.Key,
 				Type:  v1beta1.ResultsType(v.Type),
@@ -317,10 +360,13 @@ func updateCompletedTaskRunStatus(logger *zap.SugaredLogger, trs *v1beta1.TaskRu
 	trs.CompletionTime = &metav1.Time{Time: time.Now()}
 }
 
-func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) {
+func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod, spireEnabled bool) {
 	switch pod.Status.Phase {
 	case corev1.PodRunning:
 		markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
+		if spireEnabled {
+			markStatusSignedResultsRunning(trs)
+		}
 	case corev1.PodPending:
 		switch {
 		case IsPodExceedingNodeResources(pod):
@@ -331,6 +377,9 @@ func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod)
 			markStatusRunning(trs, ReasonPullImageFailed, getWaitingMessage(pod))
 		default:
 			markStatusRunning(trs, ReasonPending, getWaitingMessage(pod))
+			if spireEnabled {
+				markStatusSignedResultsRunning(trs)
+			}
 		}
 	}
 }
@@ -508,6 +557,36 @@ func markStatusSuccess(trs *v1beta1.TaskRunStatus) {
 	})
 }
 
+// markStatusSignedResultsVerified marks the taskrun signed-results condition as verified
+func markStatusSignedResultsVerified(trs *v1beta1.TaskRunStatus) {
+	trs.SetCondition(&apis.Condition{
+		Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+		Status:  corev1.ConditionTrue,
+		Reason:  v1beta1.TaskRunReasonResultsVerified.String(),
+		Message: "Successfully verified all spire signed taskrun results",
+	})
+}
+
+// markStatusSignedResultsFailure marks the taskrun signed-results condition as failed with the specified message
+func markStatusSignedResultsFailure(trs *v1beta1.TaskRunStatus, message string) {
+	trs.SetCondition(&apis.Condition{
+		Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+		Status:  corev1.ConditionFalse,
+		Reason:  v1beta1.TaskRunReasonsResultsVerificationFailed.String(),
+		Message: message,
+	})
+}
+
+// markStatusSignedResultsRunning marks the taskrun signed-results condition as awaiting results
+func markStatusSignedResultsRunning(trs *v1beta1.TaskRunStatus) {
+	trs.SetCondition(&apis.Condition{
+		Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+		Status:  corev1.ConditionUnknown,
+		Reason:  v1beta1.AwaitingTaskRunResults.String(),
+		Message: "Waiting upon TaskRun results and signatures to verify",
+	})
+}
+
 // sortPodContainerStatuses reorders a pod's container statuses so that
 // they're in the same order as the step containers from the TaskSpec.
 func sortPodContainerStatuses(podContainerStatuses []corev1.ContainerStatus, podSpecContainers []corev1.Container) {
diff --git a/pkg/pod/status_test.go b/pkg/pod/status_test.go
index abb4842badc..dbd6eea5ff1 100644
--- a/pkg/pod/status_test.go
+++ b/pkg/pod/status_test.go
@@ -17,6 +17,9 @@ limitations under the License.
 package pod
 
 import (
+	"context"
+	"encoding/json"
+	"sort"
 	"strings"
 	"testing"
 	"time"
@@ -24,6 +27,8 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	"github.com/tektoncd/pipeline/pkg/spire"
+	"github.com/tektoncd/pipeline/pkg/termination"
 	"github.com/tektoncd/pipeline/test/diff"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -66,6 +71,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) {
 			}},
 	}} {
 		t.Run(c.desc, func(t *testing.T) {
+			ctx := context.Background()
 			startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC)
 			tr := v1beta1.TaskRun{
 				ObjectMeta: metav1.ObjectMeta{
@@ -80,7 +86,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) {
 			}
 
 			logger, _ := logging.NewLogger("", "status")
-			merr := setTaskRunStatusBasedOnStepStatus(logger, c.ContainerStatuses, &tr)
+			merr := setTaskRunStatusBasedOnStepStatus(ctx, logger, c.ContainerStatuses, &tr, false, nil)
 			if merr != nil {
 				t.Errorf("setTaskRunStatusBasedOnStepStatus: %s", merr)
 			}
@@ -89,6 +95,396 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) {
 	}
 }
 
+func TestMakeTaskRunStatusVerify(t *testing.T) {
+	sc := &spire.MockClient{}
+	processConditions := cmp.Transformer("sortConditionsAndFilterMessages", func(in []apis.Condition) []apis.Condition {
+		for i := range in {
+			in[i].Message = ""
+		}
+		sort.Slice(in, func(i, j int) bool {
+			return in[i].Type < in[j].Type
+		})
+		return in
+	})
+
+	terminationMessageTrans := cmp.Transformer("sortAndPrint", func(in *corev1.ContainerStateTerminated) *corev1.ContainerStateTerminated {
+		prs, err := termination.ParseMessage(nil, in.Message)
+		if err != nil {
+			return in
+		}
+		sort.Slice(prs, func(i, j int) bool {
+			return prs[i].Key < prs[j].Key
+		})
+
+		b, _ := json.Marshal(prs)
+		in.Message = string(b)
+
+		return in
+	})
+
+	// test awaiting results - OK
+	// results + test signed termination message - OK
+	// results + test unsigned termination message - OK
+
+	// no task results, no result + test signed termination message
+	// no task results, no result + test unsigned termination message
+	// force task result, no result + test unsigned termination message
+
+	statusSRVUnknown := func() duckv1beta1.Status {
+		status := statusRunning()
+		status.Conditions = append(status.Conditions, apis.Condition{
+			Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+			Status:  corev1.ConditionUnknown,
+			Reason:  v1beta1.AwaitingTaskRunResults.String(),
+			Message: "Waiting upon TaskRun results and signatures to verify",
+		})
+		return status
+	}
+
+	statusSRVVerified := func() duckv1beta1.Status {
+		status := statusSuccess()
+		status.Conditions = append(status.Conditions, apis.Condition{
+			Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+			Status:  corev1.ConditionTrue,
+			Reason:  v1beta1.TaskRunReasonResultsVerified.String(),
+			Message: "Successfully verified all spire signed taskrun results",
+		})
+		return status
+	}
+
+	statusSRVUnverified := func() duckv1beta1.Status {
+		status := statusSuccess()
+		status.Conditions = append(status.Conditions, apis.Condition{
+			Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+			Status:  corev1.ConditionFalse,
+			Reason:  v1beta1.TaskRunReasonsResultsVerificationFailed.String(),
+			Message: "",
+		})
+		return status
+	}
+
+	for _, c := range []struct {
+		desc                 string
+		specifyTaskRunResult bool
+		resultOut            []v1beta1.PipelineResourceResult
+		podStatus            corev1.PodStatus
+		pod                  corev1.Pod
+		want                 v1beta1.TaskRunStatus
+	}{{
+		// test awaiting results
+		desc:      "running pod awaiting results",
+		podStatus: corev1.PodStatus{},
+
+		want: v1beta1.TaskRunStatus{
+			Status: statusSRVUnknown(),
+			TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+				Steps:    []v1beta1.StepState{},
+				Sidecars: []v1beta1.SidecarState{},
+			},
+		},
+	}, {
+		desc: "test result with pipeline result without signed termination message",
+		podStatus: corev1.PodStatus{
+			Phase: corev1.PodSucceeded,
+			ContainerStatuses: []corev1.ContainerStatus{{
+				Name: "step-bar",
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234","resourceName":"source-image"}]`,
+					},
+				},
+			}},
+		},
+		want: v1beta1.TaskRunStatus{
+			Status: statusSRVUnverified(),
+			TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+				Steps: []v1beta1.StepState{{
+					ContainerState: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"resultValue","type":1}]`,
+						}},
+					Name:          "bar",
+					ContainerName: "step-bar",
+				}},
+				Sidecars: []v1beta1.SidecarState{},
+				ResourcesResult: []v1beta1.PipelineResourceResult{{
+					Key:          "digest",
+					Value:        "sha256:1234",
+					ResourceName: "source-image",
+				}},
+				TaskRunResults: []v1beta1.TaskRunResult{{
+					Name:  "resultName",
+					Type:  v1beta1.ResultsTypeString,
+					Value: *v1beta1.NewStructuredValues("resultValue"),
+				}},
+				// We don't actually care about the time, just that it's not nil
+				CompletionTime: &metav1.Time{Time: time.Now()},
+			},
+		},
+	}, {
+		desc: "test result with pipeline result with signed termination message",
+		resultOut: []v1beta1.PipelineResourceResult{
+			{
+				Key:        "resultName",
+				Value:      "resultValue",
+				ResultType: v1beta1.TaskRunResultType,
+			},
+		},
+		podStatus: corev1.PodStatus{
+			Phase: corev1.PodSucceeded,
+			ContainerStatuses: []corev1.ContainerStatus{{
+				Name: "step-bar",
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						Message: `<to override by signing test routine>`,
+					},
+				},
+			}},
+		},
+		want: v1beta1.TaskRunStatus{
+			Status: statusSRVVerified(),
+			TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+				Steps: []v1beta1.StepState{{
+					ContainerState: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							Message: `to be overridden by signing`,
+						}},
+					Name:          "bar",
+					ContainerName: "step-bar",
+				}},
+				Sidecars: []v1beta1.SidecarState{},
+				TaskRunResults: []v1beta1.TaskRunResult{{
+					Name:  "resultName",
+					Type:  v1beta1.ResultsTypeString,
+					Value: *v1beta1.NewStructuredValues("resultValue"),
+				}},
+				// We don't actually care about the time, just that it's not nil
+				CompletionTime: &metav1.Time{Time: time.Now()},
+			},
+		},
+	}, {
+		desc: "test array result with signed termination message",
+		resultOut: []v1beta1.PipelineResourceResult{
+			{
+				Key:        "resultName",
+				Value:      "[\"hello\",\"world\"]",
+				ResultType: v1beta1.TaskRunResultType,
+			},
+		},
+		podStatus: corev1.PodStatus{
+			Phase: corev1.PodSucceeded,
+			ContainerStatuses: []corev1.ContainerStatus{{
+				Name: "step-bar",
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						Message: `<to override by signing test routine>`,
+					},
+				},
+			}},
+		},
+		want: v1beta1.TaskRunStatus{
+			Status: statusSRVVerified(),
+			TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+				Steps: []v1beta1.StepState{{
+					ContainerState: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							Message: `to be overridden by signing`,
+						}},
+					Name:          "bar",
+					ContainerName: "step-bar",
+				}},
+				Sidecars: []v1beta1.SidecarState{},
+				TaskRunResults: []v1beta1.TaskRunResult{{
+					Name:  "resultName",
+					Type:  v1beta1.ResultsTypeArray,
+					Value: *v1beta1.NewStructuredValues("hello", "world"),
+				}},
+				// We don't actually care about the time, just that it's not nil
+				CompletionTime: &metav1.Time{Time: time.Now()},
+			},
+		},
+	}, {
+		desc:      "test result with no result with signed termination message",
+		resultOut: []v1beta1.PipelineResourceResult{},
+		podStatus: corev1.PodStatus{
+			Phase: corev1.PodSucceeded,
+			ContainerStatuses: []corev1.ContainerStatus{{
+				Name: "step-bar",
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						Message: `to be overridden by signing`,
+					},
+				},
+			}},
+		},
+		want: v1beta1.TaskRunStatus{
+			Status: statusSRVVerified(),
+			TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+				Steps: []v1beta1.StepState{{
+					ContainerState: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							Message: `to be overridden by signing`,
+						}},
+					Name:          "bar",
+					ContainerName: "step-bar",
+				}},
+				Sidecars: []v1beta1.SidecarState{},
+				// We don't actually care about the time, just that it's not nil
+				CompletionTime: &metav1.Time{Time: time.Now()},
+			},
+		},
+	}, {
+		desc: "test result with no result without signed termination message",
+		podStatus: corev1.PodStatus{
+			Phase: corev1.PodSucceeded,
+			ContainerStatuses: []corev1.ContainerStatus{{
+				Name: "step-bar",
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						Message: "[]",
+					},
+				},
+			}},
+		},
+		want: v1beta1.TaskRunStatus{
+			Status: statusSRVVerified(),
+			TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+				Steps: []v1beta1.StepState{{
+					ContainerState: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							Message: "[]",
+						}},
+					Name:          "bar",
+					ContainerName: "step-bar",
+				}},
+				Sidecars: []v1beta1.SidecarState{},
+				// We don't actually care about the time, just that it's not nil
+				CompletionTime: &metav1.Time{Time: time.Now()},
+			},
+		},
+	}, {
+		desc:                 "test result (with task run result defined) with no result without signed termination message",
+		specifyTaskRunResult: true,
+		podStatus: corev1.PodStatus{
+			Phase: corev1.PodSucceeded,
+			ContainerStatuses: []corev1.ContainerStatus{{
+				Name: "step-bar",
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						Message: "[]",
+					},
+				},
+			}},
+		},
+		want: v1beta1.TaskRunStatus{
+			Status: statusSRVUnverified(),
+			TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+				Steps: []v1beta1.StepState{{
+					ContainerState: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							Message: "[]",
+						}},
+					Name:          "bar",
+					ContainerName: "step-bar",
+				}},
+				Sidecars: []v1beta1.SidecarState{},
+				// We don't actually care about the time, just that it's not nil
+				CompletionTime: &metav1.Time{Time: time.Now()},
+			},
+		},
+	}} {
+		t.Run(c.desc, func(t *testing.T) {
+			now := metav1.Now()
+			ctx := context.Background()
+			if cmp.Diff(c.pod, corev1.Pod{}) == "" {
+				c.pod = corev1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:              "pod",
+						Namespace:         "foo",
+						CreationTimestamp: now,
+					},
+					Status: c.podStatus,
+				}
+			}
+
+			startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC)
+			tr := v1beta1.TaskRun{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "task-run",
+					Namespace: "foo",
+				},
+				Status: v1beta1.TaskRunStatus{
+					TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+						StartTime: &metav1.Time{Time: startTime},
+					},
+				},
+			}
+
+			if c.specifyTaskRunResult {
+				// Specify result
+				tr.Status.TaskSpec = &v1beta1.TaskSpec{
+					Results: []v1beta1.TaskResult{{
+						Name: "some-task-result",
+					}},
+				}
+
+				c.want.TaskSpec = tr.Status.TaskSpec
+			}
+
+			if err := sc.CreateEntries(ctx, &tr, &c.pod, 10000); err != nil {
+				t.Fatalf("unable to create entry for tr: %v", tr.Name)
+			}
+
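+			// Sign the expected results with the test SPIRE client and embed the signed
+			// payload in the step's termination message so verification can succeed.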
+			if c.resultOut != nil {
+				id := sc.GetIdentity(&tr)
+				for i := 0; i < 20; i++ {
+					sc.SignIdentities = append(sc.SignIdentities, id)
+				}
+				sigs, err := sc.Sign(ctx, c.resultOut)
+				if err != nil {
+					t.Fatalf("failed to sign: %v", err)
+				}
+				c.resultOut = append(c.resultOut, sigs...)
+				s, err := createMessageFromResults(c.resultOut)
+				if err != nil {
+					t.Fatalf("failed to create message from result: %v", err)
+				}
+
+				c.podStatus.ContainerStatuses[0].State.Terminated.Message = s
+				c.want.TaskRunStatusFields.Steps[0].ContainerState.Terminated.Message = s
+			}
+
+			logger, _ := logging.NewLogger("", "status")
+			got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, true, sc)
+			if err != nil {
+				t.Errorf("MakeTaskRunResult: %s", err)
+			}
+
+			// Common traits, set for test case brevity.
+			c.want.PodName = "pod"
+			c.want.StartTime = &metav1.Time{Time: startTime}
+
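+			// Compare timestamps only for nil-ness, since the exact completion time differs on every run.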
+			ensureTimeNotNil := cmp.Comparer(func(x, y *metav1.Time) bool {
+				if x == nil {
+					return y == nil
+				}
+				return y != nil
+			})
+			if d := cmp.Diff(c.want, got, ignoreVolatileTime, ensureTimeNotNil, processConditions, terminationMessageTrans); d != "" {
+				t.Errorf("Diff %s", diff.PrintWantGot(d))
+			}
+			if tr.Status.StartTime.Time != c.want.StartTime.Time {
+				t.Errorf("Expected TaskRun startTime to be unchanged but was %s", tr.Status.StartTime)
+			}
+
+			if err := sc.DeleteEntry(ctx, &tr, &c.pod); err != nil {
+				t.Fatalf("unable to delete entry for tr: %v", tr.Name)
+			}
+
+		})
+	}
+}
+
 func TestMakeTaskRunStatus(t *testing.T) {
 	for _, c := range []struct {
 		desc      string
@@ -1061,7 +1457,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
 				},
 			}
 			logger, _ := logging.NewLogger("", "status")
-			got, err := MakeTaskRunStatus(logger, tr, &c.pod)
+			got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, false, nil)
 			if err != nil {
 				t.Errorf("MakeTaskRunResult: %s", err)
 			}
@@ -1275,7 +1671,7 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) {
 				},
 			}
 			logger, _ := logging.NewLogger("", "status")
-			got, err := MakeTaskRunStatus(logger, tr, &c.pod)
+			got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, false, nil)
 			if err != nil {
 				t.Errorf("MakeTaskRunResult: %s", err)
 			}
@@ -1396,7 +1792,7 @@ func TestMakeRunStatusJSONError(t *testing.T) {
 	}
 
 	logger, _ := logging.NewLogger("", "status")
-	gotTr, err := MakeTaskRunStatus(logger, tr, pod)
+	gotTr, err := MakeTaskRunStatus(context.Background(), logger, tr, pod, false, nil)
 	if err == nil {
 		t.Error("Expected error, got nil")
 	}
diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go
index 9e2b07a9044..599e22ed781 100644
--- a/pkg/reconciler/taskrun/controller.go
+++ b/pkg/reconciler/taskrun/controller.go
@@ -32,6 +32,7 @@ import (
 	cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
 	"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
 	resolution "github.com/tektoncd/pipeline/pkg/resolution/resource"
+	"github.com/tektoncd/pipeline/pkg/spire"
 	"github.com/tektoncd/pipeline/pkg/taskrunmetrics"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/utils/clock"
@@ -54,6 +55,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex
 		resourceInformer := resourceinformer.Get(ctx)
 		limitrangeInformer := limitrangeinformer.Get(ctx)
 		resolutionInformer := resolutioninformer.Get(ctx)
+		spireControllerAPI := spire.GetControllerAPIClient(ctx)
 		configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger))
 		configStore.WatchConfigs(cmw)
 
@@ -66,6 +68,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex
 			KubeClientSet:       kubeclientset,
 			PipelineClientSet:   pipelineclientset,
 			Images:              opts.Images,
+			SpireClient:         spireControllerAPI,
 			Clock:               clock,
 			taskRunLister:       taskRunInformer.Lister(),
 			resourceLister:      resourceInformer.Lister(),
@@ -77,6 +80,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex
 			pvcHandler:          volumeclaim.NewPVCHandler(kubeclientset, logger),
 			resolutionRequester: resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()),
 		}
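+		// Propagate the SPIRE configuration from the controller options to the SPIRE API client.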
+		c.SpireClient.SetConfig(opts.SpireConfig)
 		impl := taskrunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {
 			return controller.Options{
 				AgentName:   pipeline.TaskRunControllerName,
diff --git a/pkg/reconciler/taskrun/resources/image_exporter.go b/pkg/reconciler/taskrun/resources/image_exporter.go
index b03b98a277d..ee80870beb7 100644
--- a/pkg/reconciler/taskrun/resources/image_exporter.go
+++ b/pkg/reconciler/taskrun/resources/image_exporter.go
@@ -33,7 +33,7 @@ func AddOutputImageDigestExporter(
 	imageDigestExporterImage string,
 	tr *v1beta1.TaskRun,
 	taskSpec *v1beta1.TaskSpec,
-	gr GetResource,
+	gr GetResource, spireEnabled bool,
 ) error {
 
 	output := []*image.Resource{}
@@ -80,7 +80,7 @@ func AddOutputImageDigestExporter(
 			}
 
 			augmentedSteps = append(augmentedSteps, taskSpec.Steps...)
-			augmentedSteps = append(augmentedSteps, imageDigestExporterStep(imageDigestExporterImage, imagesJSON))
+			augmentedSteps = append(augmentedSteps, imageDigestExporterStep(imageDigestExporterImage, imagesJSON, spireEnabled))
 
 			taskSpec.Steps = augmentedSteps
 		}
@@ -89,13 +89,19 @@ func AddOutputImageDigestExporter(
 	return nil
 }
 
-func imageDigestExporterStep(imageDigestExporterImage string, imagesJSON []byte) v1beta1.Step {
+func imageDigestExporterStep(imageDigestExporterImage string, imagesJSON []byte, spireEnabled bool) v1beta1.Step {
+	// Add the -enable_spire entrypoint arg only when SPIRE is enabled
+	commonExtraEntrypointArgs := []string{
+		"-images", string(imagesJSON),
+	}
+	if spireEnabled {
+		commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-enable_spire")
+	}
+
 	return v1beta1.Step{
 		Name:    names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(imageDigestExporterContainerName),
 		Image:   imageDigestExporterImage,
 		Command: []string{"/ko-app/imagedigestexporter"},
-		Args: []string{
-			"-images", string(imagesJSON),
-		},
+		Args:    commonExtraEntrypointArgs,
 	}
 }
diff --git a/pkg/reconciler/taskrun/resources/image_exporter_test.go b/pkg/reconciler/taskrun/resources/image_exporter_test.go
index aba62bf4ab3..66b752edbb1 100644
--- a/pkg/reconciler/taskrun/resources/image_exporter_test.go
+++ b/pkg/reconciler/taskrun/resources/image_exporter_test.go
@@ -183,7 +183,175 @@ func TestAddOutputImageDigestExporter(t *testing.T) {
 					},
 				}, nil
 			}
-			err := AddOutputImageDigestExporter("override-with-imagedigest-exporter-image:latest", c.taskRun, &c.task.Spec, gr)
+			err := AddOutputImageDigestExporter("override-with-imagedigest-exporter-image:latest", c.taskRun, &c.task.Spec, gr, false)
+			if err != nil {
+				t.Fatalf("Failed to declare output resources for test %q: error %v", c.desc, err)
+			}
+
+			if d := cmp.Diff(c.task.Spec.Steps, c.wantSteps); d != "" {
+				t.Fatalf("post build steps mismatch %s", diff.PrintWantGot(d))
+			}
+		})
+	}
+}
+
+func TestAddOutputImageDigestExporterWithSpire(t *testing.T) {
+	for _, c := range []struct {
+		desc      string
+		task      *v1beta1.Task
+		taskRun   *v1beta1.TaskRun
+		wantSteps []v1beta1.Step
+	}{{
+		desc: "image resource declared as both input and output",
+		task: &v1beta1.Task{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "task1",
+				Namespace: "marshmallow",
+			},
+			Spec: v1beta1.TaskSpec{
+				Steps: []v1beta1.Step{{
+					Name: "step1",
+				}},
+				Resources: &v1beta1.TaskResources{
+					Inputs: []v1beta1.TaskResource{{
+						ResourceDeclaration: v1beta1.ResourceDeclaration{
+							Name: "source-image",
+							Type: "image",
+						},
+					}},
+					Outputs: []v1beta1.TaskResource{{
+						ResourceDeclaration: v1beta1.ResourceDeclaration{
+							Name: "source-image",
+							Type: "image",
+						},
+					}},
+				},
+			},
+		},
+		taskRun: &v1beta1.TaskRun{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "test-taskrun-run-output-steps",
+				Namespace: "marshmallow",
+			},
+			Spec: v1beta1.TaskRunSpec{
+				Resources: &v1beta1.TaskRunResources{
+					Inputs: []v1beta1.TaskResourceBinding{{
+						PipelineResourceBinding: v1beta1.PipelineResourceBinding{
+							Name: "source-image",
+							ResourceRef: &v1beta1.PipelineResourceRef{
+								Name: "source-image-1",
+							},
+						},
+					}},
+					Outputs: []v1beta1.TaskResourceBinding{{
+						PipelineResourceBinding: v1beta1.PipelineResourceBinding{
+							Name: "source-image",
+							ResourceRef: &v1beta1.PipelineResourceRef{
+								Name: "source-image-1",
+							},
+						},
+					}},
+				},
+			},
+		},
+		wantSteps: []v1beta1.Step{{
+			Name: "step1",
+		}, {
+			Name:    "image-digest-exporter-9l9zj",
+			Image:   "override-with-imagedigest-exporter-image:latest",
+			Command: []string{"/ko-app/imagedigestexporter"},
+			Args:    []string{"-images", "[{\"name\":\"source-image\",\"type\":\"image\",\"url\":\"gcr.io/some-image-1\",\"digest\":\"\",\"OutputImageDir\":\"/workspace/output/source-image\"}]", "-enable_spire"},
+		}},
+	}, {
+		desc: "image resource in task with multiple steps",
+		task: &v1beta1.Task{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "task1",
+				Namespace: "marshmallow",
+			},
+			Spec: v1beta1.TaskSpec{
+				Steps: []v1beta1.Step{{
+					Name: "step1",
+				}, {
+					Name: "step2",
+				}},
+				Resources: &v1beta1.TaskResources{
+					Inputs: []v1beta1.TaskResource{{
+						ResourceDeclaration: v1beta1.ResourceDeclaration{
+							Name: "source-image",
+							Type: "image",
+						},
+					}},
+					Outputs: []v1beta1.TaskResource{{
+						ResourceDeclaration: v1beta1.ResourceDeclaration{
+							Name: "source-image",
+							Type: "image",
+						},
+					}},
+				},
+			},
+		},
+		taskRun: &v1beta1.TaskRun{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "test-taskrun-run-output-steps",
+				Namespace: "marshmallow",
+			},
+			Spec: v1beta1.TaskRunSpec{
+				Resources: &v1beta1.TaskRunResources{
+					Inputs: []v1beta1.TaskResourceBinding{{
+						PipelineResourceBinding: v1beta1.PipelineResourceBinding{
+							Name: "source-image",
+							ResourceRef: &v1beta1.PipelineResourceRef{
+								Name: "source-image-1",
+							},
+						},
+					}},
+					Outputs: []v1beta1.TaskResourceBinding{{
+						PipelineResourceBinding: v1beta1.PipelineResourceBinding{
+							Name: "source-image",
+							ResourceRef: &v1beta1.PipelineResourceRef{
+								Name: "source-image-1",
+							},
+						},
+					}},
+				},
+			},
+		},
+		wantSteps: []v1beta1.Step{{
+			Name: "step1",
+		}, {
+			Name: "step2",
+		}, {
+			Name:    "image-digest-exporter-9l9zj",
+			Image:   "override-with-imagedigest-exporter-image:latest",
+			Command: []string{"/ko-app/imagedigestexporter"},
+			Args:    []string{"-images", "[{\"name\":\"source-image\",\"type\":\"image\",\"url\":\"gcr.io/some-image-1\",\"digest\":\"\",\"OutputImageDir\":\"/workspace/output/source-image\"}]", "-enable_spire"},
+		}},
+	}} {
+		t.Run(c.desc, func(t *testing.T) {
+			names.TestingSeed()
+			gr := func(n string) (*resourcev1alpha1.PipelineResource, error) {
+				return &resourcev1alpha1.PipelineResource{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "source-image-1",
+						Namespace: "marshmallow",
+					},
+					Spec: resourcev1alpha1.PipelineResourceSpec{
+						Type: "image",
+						Params: []v1beta1.ResourceParam{{
+							Name:  "url",
+							Value: "gcr.io/some-image-1",
+						}, {
+							Name:  "digest",
+							Value: "",
+						}, {
+							Name:  "OutputImageDir",
+							Value: "/workspace/source-image-1/index.json",
+						}},
+					},
+				}, nil
+			}
+			err := AddOutputImageDigestExporter("override-with-imagedigest-exporter-image:latest", c.taskRun, &c.task.Spec, gr, true)
 			if err != nil {
 				t.Fatalf("Failed to declare output resources for test %q: error %v", c.desc, err)
 			}
diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go
index acde4d95d39..41702798509 100644
--- a/pkg/reconciler/taskrun/taskrun.go
+++ b/pkg/reconciler/taskrun/taskrun.go
@@ -45,6 +45,7 @@ import (
 	"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
 	"github.com/tektoncd/pipeline/pkg/remote"
 	resolution "github.com/tektoncd/pipeline/pkg/resolution/resource"
+	"github.com/tektoncd/pipeline/pkg/spire"
 	"github.com/tektoncd/pipeline/pkg/taskrunmetrics"
 	_ "github.com/tektoncd/pipeline/pkg/taskrunmetrics/fake" // Make sure the taskrunmetrics are setup
 	"github.com/tektoncd/pipeline/pkg/trustedresources"
@@ -72,6 +73,7 @@ type Reconciler struct {
 	KubeClientSet     kubernetes.Interface
 	PipelineClientSet clientset.Interface
 	Images            pipeline.Images
+	SpireClient       spire.ControllerAPIClient
 	Clock             clock.PassiveClock
 
 	// listers index properties about resources
@@ -452,10 +454,11 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re
 	defer c.durationAndCountMetrics(ctx, tr)
 	logger := logging.FromContext(ctx)
 	recorder := controller.GetEventRecorder(ctx)
-	var err error
 
 	// Get the TaskRun's Pod if it should have one. Otherwise, create the Pod.
 	var pod *corev1.Pod
+	var err error
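+	// Resolve the SPIRE feature flag once for this reconcile pass.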
+	spireEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire
 
 	if tr.Status.PodName != "" {
 		pod, err = c.podLister.Pods(tr.Namespace).Get(tr.Status.PodName)
@@ -531,6 +534,16 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re
 	}
 
 	if podconvert.SidecarsReady(pod.Status) {
+		if spireEnabled {
+			// TTL for the entry is in seconds
+			// Create the SPIRE entry with a TTL derived from the default taskrun timeout
+			if err = c.SpireClient.CreateEntries(ctx, tr, pod, ttl); err != nil {
+				logger.Errorf("Failed to create workload SPIFFE entry for taskrun %v: %v", tr.Name, err)
+				return err
+			}
+			logger.Infof("Created SPIFFE workload entry for %v/%v", tr.Namespace, tr.Name)
+		}
+
 		if err := podconvert.UpdateReady(ctx, c.KubeClientSet, *pod); err != nil {
 			return err
 		}
@@ -540,7 +553,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re
 	}
 
 	// Convert the Pod's status to the equivalent TaskRun Status.
-	tr.Status, err = podconvert.MakeTaskRunStatus(logger, *tr, pod)
+	tr.Status, err = podconvert.MakeTaskRunStatus(ctx, logger, *tr, pod, spireEnabled, c.SpireClient)
 	if err != nil {
 		return err
 	}
@@ -550,6 +563,14 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re
 		return err
 	}
 
+	if spireEnabled && tr.IsDone() {
+		if err := c.SpireClient.DeleteEntry(ctx, tr, pod); err != nil {
+			logger.Errorf("Failed to remove workload SPIFFE entry for taskrun %v: %v", tr.Name, err)
+			return err
+		}
+		logger.Infof("Deleted SPIFFE workload entry for %v/%v", tr.Namespace, tr.Name)
+	}
+
 	logger.Infof("Successfully reconciled taskrun %s/%s with status: %#v", tr.Name, tr.Namespace, tr.Status.GetCondition(apis.ConditionSucceeded))
 	return nil
 }
@@ -715,8 +736,11 @@ func (c *Reconciler) createPod(ctx context.Context, ts *v1beta1.TaskSpec, tr *v1
 		return nil, err
 	}
 
+	// Check whether SPIRE is enabled so the flag can be passed to the ImageDigestExporter step
+	spireEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire
+
 	// Get actual resource
-	err = resources.AddOutputImageDigestExporter(c.Images.ImageDigestExporterImage, tr, ts, c.resourceLister.PipelineResources(tr.Namespace).Get)
+	err = resources.AddOutputImageDigestExporter(c.Images.ImageDigestExporterImage, tr, ts, c.resourceLister.PipelineResources(tr.Namespace).Get, spireEnabled)
 	if err != nil {
 		logger.Errorf("Failed to create a pod for taskrun: %s due to output image resource error %v", tr.Name, err)
 		return nil, err
diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go
index eba6ab473fe..c799b6e546a 100644
--- a/pkg/reconciler/taskrun/taskrun_test.go
+++ b/pkg/reconciler/taskrun/taskrun_test.go
@@ -44,6 +44,8 @@ import (
 	ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
 	"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
 	resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
+	"github.com/tektoncd/pipeline/pkg/spire"
+	spireconfig "github.com/tektoncd/pipeline/pkg/spire/config"
 	"github.com/tektoncd/pipeline/pkg/workspace"
 	"github.com/tektoncd/pipeline/test"
 	"github.com/tektoncd/pipeline/test/diff"
@@ -92,6 +94,7 @@ var (
 		PRImage:                  "override-with-pr:latest",
 		ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest",
 	}
+	spireConfig              = spireconfig.SpireConfig{MockSpire: true}
 	now                      = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
 	ignoreLastTransitionTime = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time")
 	// Pods are created with a random 5-character suffix that we want to
@@ -472,7 +475,7 @@ func getRunName(tr *v1beta1.TaskRun) string {
 func getTaskRunController(t *testing.T, d test.Data) (test.Assets, func()) {
 	t.Helper()
 	names.TestingSeed()
-	return initializeTaskRunControllerAssets(t, d, pipeline.Options{Images: images})
+	return initializeTaskRunControllerAssets(t, d, pipeline.Options{Images: images, SpireConfig: spireConfig})
 }
 
 func initializeTaskRunControllerAssets(t *testing.T, d test.Data, opts pipeline.Options) (test.Assets, func()) {
@@ -547,7 +550,7 @@ spec:
 			image: "foo",
 			name:  "simple-step",
 			cmd:   "/mycmd",
-		}}),
+		}}, false),
 	}, {
 		name:    "serviceaccount",
 		taskRun: taskRunWithSaSuccess,
@@ -555,7 +558,7 @@ spec:
 			image: "foo",
 			name:  "sa-step",
 			cmd:   "/mycmd",
-		}}),
+		}}, false),
 	}} {
 		t.Run(tc.name, func(t *testing.T) {
 			saName := tc.taskRun.Spec.ServiceAccountName
@@ -955,7 +958,7 @@ spec:
 			image: "foo",
 			name:  "simple-step",
 			cmd:   "/mycmd",
-		}}),
+		}}, false),
 	}, {
 		name:    "serviceaccount",
 		taskRun: taskRunWithSaSuccess,
@@ -967,7 +970,7 @@ spec:
 			image: "foo",
 			name:  "sa-step",
 			cmd:   "/mycmd",
-		}}),
+		}}, false),
 	}, {
 		name:    "params",
 		taskRun: taskRunSubstitution,
@@ -1032,7 +1035,7 @@ spec:
 					"[{\"name\":\"myimage\",\"type\":\"image\",\"url\":\"gcr.io/kristoff/sven\",\"digest\":\"\",\"OutputImageDir\":\"/workspace/output/myimage\"}]",
 				},
 			},
-		}),
+		}, false),
 	}, {
 		name:    "taskrun-with-taskspec",
 		taskRun: taskRunWithTaskSpec,
@@ -1062,7 +1065,7 @@ spec:
 					"--my-arg=foo",
 				},
 			},
-		}),
+		}, false),
 	}, {
 		name:    "success-with-cluster-task",
 		taskRun: taskRunWithClusterTask,
@@ -1074,7 +1077,7 @@ spec:
 			name:  "simple-step",
 			image: "foo",
 			cmd:   "/mycmd",
-		}}),
+		}}, false),
 	}, {
 		name:    "taskrun-with-resource-spec-task-spec",
 		taskRun: taskRunWithResourceSpecAndTaskSpec,
@@ -1103,7 +1106,7 @@ spec:
 				image: "ubuntu",
 				cmd:   "/mycmd",
 			},
-		}),
+		}, false),
 	}, {
 		name:    "taskrun-with-pod",
 		taskRun: taskRunWithPod,
@@ -1115,7 +1118,7 @@ spec:
 			name:  "simple-step",
 			image: "foo",
 			cmd:   "/mycmd",
-		}}),
+		}}, false),
 	}, {
 		name:    "taskrun-with-credentials-variable-default-tekton-creds",
 		taskRun: taskRunWithCredentialsVariable,
@@ -1127,7 +1130,7 @@ spec:
 			name:  "mycontainer",
 			image: "myimage",
 			cmd:   "/mycmd /tekton/creds",
-		}}),
+		}}, false),
 	}, {
 		name:    "remote-task",
 		taskRun: taskRunBundle,
@@ -1139,7 +1142,7 @@ spec:
 			name:  "simple-step",
 			image: "foo",
 			cmd:   "/mycmd",
-		}}),
+		}}, false),
 	}} {
 		t.Run(tc.name, func(t *testing.T) {
 			testAssets, cancel := getTaskRunController(t, d)
@@ -1265,12 +1268,28 @@ spec:
 			"Normal Started ",
 			"Normal Running Not all Steps",
 		},
-		wantPod: expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
-			name:       "mycontainer",
-			image:      "myimage",
-			stdoutPath: "stdout.txt",
-			cmd:        "/mycmd",
-		}}),
+		wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false,
+			[]corev1.Volume{
+				{
+					Name: spire.WorkloadAPI,
+					VolumeSource: corev1.VolumeSource{
+						CSI: &corev1.CSIVolumeSource{
+							Driver: "csi.spiffe.io",
+						},
+					},
+				}}, []stepForExpectedPod{{
+				name:       "mycontainer",
+				image:      "myimage",
+				stdoutPath: "stdout.txt",
+				cmd:        "/mycmd",
+			}}, true),
+			[]corev1.VolumeMount{
+				{
+					Name:      spire.WorkloadAPI,
+					MountPath: spire.VolumeMountPath,
+				},
+			},
+		),
 	}, {
 		name:    "taskrun-with-output-config-ws",
 		taskRun: taskRunWithOutputConfigAndWorkspace,
@@ -1279,22 +1298,38 @@ spec:
 			"Normal Running Not all Steps",
 		},
 		wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-ws-pod", "", "test-taskrun-with-output-config-ws", "foo", config.DefaultServiceAccountValue, false,
-			[]corev1.Volume{{
-				Name: "ws-9l9zj",
-				VolumeSource: corev1.VolumeSource{
-					EmptyDir: &corev1.EmptyDirVolumeSource{},
+			[]corev1.Volume{
+				{
+					Name: "ws-9l9zj",
+					VolumeSource: corev1.VolumeSource{
+						EmptyDir: &corev1.EmptyDirVolumeSource{},
+					},
+				}, {
+					Name: spire.WorkloadAPI,
+					VolumeSource: corev1.VolumeSource{
+						CSI: &corev1.CSIVolumeSource{
+							Driver: "csi.spiffe.io",
+						},
+					},
 				},
-			}},
+			},
 			[]stepForExpectedPod{{
 				name:       "mycontainer",
 				image:      "myimage",
 				stdoutPath: "stdout.txt",
 				cmd:        "/mycmd",
-			}}),
-			[]corev1.VolumeMount{{
-				Name:      "ws-9l9zj",
-				MountPath: "/workspace/data",
-			}}),
+			}}, true),
+			[]corev1.VolumeMount{
+				{
+					Name:      "ws-9l9zj",
+					MountPath: "/workspace/data",
+				},
+				{
+					Name:      spire.WorkloadAPI,
+					MountPath: spire.VolumeMountPath,
+				},
+			},
+		),
 	}} {
 		t.Run(tc.name, func(t *testing.T) {
 			testAssets, cancel := getTaskRunController(t, d)
@@ -1355,8 +1390,8 @@ spec:
 }
 
 func addVolumeMounts(p *corev1.Pod, vms []corev1.VolumeMount) *corev1.Pod {
-	for i, vm := range vms {
-		p.Spec.Containers[i].VolumeMounts = append(p.Spec.Containers[i].VolumeMounts, vm)
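+	// Add every extra mount to each step container in the pod.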
+	for i := range p.Spec.Containers {
+		p.Spec.Containers[i].VolumeMounts = append(p.Spec.Containers[i].VolumeMounts, vms...)
 	}
 	return p
 }
@@ -1376,7 +1411,15 @@ spec:
   serviceAccountName: default
 `)
 
+	cms := []*corev1.ConfigMap{{
+		ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()},
+		Data: map[string]string{
+			"enable-api-fields": config.AlphaAPIFields,
+		},
+	}}
+
 	d := test.Data{
+		ConfigMaps: cms,
 		TaskRuns: []*v1beta1.TaskRun{tr},
 		ServiceAccounts: []*corev1.ServiceAccount{{
 			ObjectMeta: metav1.ObjectMeta{Name: tr.Spec.ServiceAccountName, Namespace: "foo"},
@@ -1476,7 +1519,15 @@ spec:
   serviceAccountName: default
 `)
 
+	cms := []*corev1.ConfigMap{{
+		ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()},
+		Data: map[string]string{
+			"enable-api-fields": config.AlphaAPIFields,
+		},
+	}}
+
 	d := test.Data{
+		ConfigMaps: cms,
 		TaskRuns: []*v1beta1.TaskRun{tr},
 		ServiceAccounts: []*corev1.ServiceAccount{{
 			ObjectMeta: metav1.ObjectMeta{Name: tr.Spec.ServiceAccountName, Namespace: "foo"},
@@ -4595,7 +4646,7 @@ func podVolumeMounts(idx, totalSteps int) []corev1.VolumeMount {
 	return mnts
 }
 
-func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []string, idx int) []string {
+func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []string, idx int, alpha bool) []string {
 	args := []string{
 		"-wait_file",
 	}
@@ -4612,6 +4663,9 @@ func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []
 		"-step_metadata_dir",
 		fmt.Sprintf("/tekton/run/%d/status", idx),
 	)
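+	// SPIRE is an alpha feature, so the -enable_spire entrypoint flag is only appended when alpha is set.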
+	if alpha {
+		args = append(args, "-enable_spire")
+	}
 	if stdoutPath != "" {
 		args = append(args, "-stdout_path", stdoutPath)
 	}
@@ -4673,11 +4727,23 @@ type stepForExpectedPod struct {
 	stderrPath      string
 }
 
-func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod) *corev1.Pod {
+func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod, alpha bool) *corev1.Pod {
 	stepNames := make([]string, 0, len(steps))
 	for _, s := range steps {
 		stepNames = append(stepNames, fmt.Sprintf("step-%s", s.name))
 	}
+
+	initContainers := []corev1.Container{placeToolsInitContainer(stepNames)}
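+	// With alpha (SPIRE) enabled, the init container also mounts the SPIFFE workload API volume.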
+	if alpha {
+		for i := range initContainers {
+			c := &initContainers[i]
+			c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
+				Name:      spire.WorkloadAPI,
+				MountPath: spire.VolumeMountPath,
+			})
+		}
+	}
+
 	p := &corev1.Pod{
 		ObjectMeta: podObjectMeta(podName, taskName, taskRunName, ns, isClusterTask),
 		Spec: corev1.PodSpec{
@@ -4689,7 +4755,7 @@ func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTas
 				binVolume,
 				downwardVolume,
 			},
-			InitContainers:        []corev1.Container{placeToolsInitContainer(stepNames)},
+			InitContainers:        initContainers,
 			RestartPolicy:         corev1.RestartPolicyNever,
 			ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds,
 			ServiceAccountName:    saName,
@@ -4710,7 +4776,7 @@ func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTas
 			VolumeMounts:           podVolumeMounts(idx, len(steps)),
 			TerminationMessagePath: "/tekton/termination",
 		}
-		stepContainer.Args = podArgs(s.cmd, s.stdoutPath, s.stderrPath, s.args, idx)
+		stepContainer.Args = podArgs(s.cmd, s.stdoutPath, s.stderrPath, s.args, idx, alpha)
 
 		for k, v := range s.envVars {
 			stepContainer.Env = append(stepContainer.Env, corev1.EnvVar{
diff --git a/test/e2e-common.sh b/test/e2e-common.sh
index 5ef47f77e81..fdbf914df06 100755
--- a/test/e2e-common.sh
+++ b/test/e2e-common.sh
@@ -46,6 +46,65 @@ function install_pipeline_crd_version() {
   verify_pipeline_installation
 }
 
+function spire_apply() {
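+  # Create a SPIRE registration entry for the given spiffeID and selectors, replacing any existing entry.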
+  if [ $# -lt 2 ] || [ "$1" != "-spiffeID" ]; then
+    echo "spire_apply requires a spiffeID as the first arg" >&2
+    exit 1
+  fi
+  show=$(kubectl exec -n spire deployment/spire-server -- \
+    /opt/spire/bin/spire-server entry show $1 $2)
+  if [ "$show" != "Found 0 entries" ]; then
+    # delete to recreate
+    entryid=$(echo "$show" | grep "^Entry ID" | cut -f2 -d:)
+    kubectl exec -n spire deployment/spire-server -- \
+      /opt/spire/bin/spire-server entry delete -entryID $entryid
+  fi
+  kubectl exec -n spire deployment/spire-server -- \
+    /opt/spire/bin/spire-server entry create "$@"
+}
+
+function install_spire() {
+  echo ">> Deploying Spire"
+  DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+
+  echo "Creating SPIRE namespace..."
+  kubectl create ns spire
+
+  echo "Applying SPIFFE CSI Driver configuration..."
+  kubectl apply -f "$DIR"/testdata/spire/spiffe-csi-driver.yaml
+
+  echo "Deploying SPIRE server"
+  kubectl apply -f "$DIR"/testdata/spire/spire-server.yaml
+
+  echo "Deploying SPIRE agent"
+  kubectl apply -f "$DIR"/testdata/spire/spire-agent.yaml
+
+  wait_until_pods_running spire || fail_test "SPIRE did not come up"
+
+  spire_apply \
+    -spiffeID spiffe://example.org/ns/spire/node/example \
+    -selector k8s_psat:cluster:example-cluster \
+    -selector k8s_psat:agent_ns:spire \
+    -selector k8s_psat:agent_sa:spire-agent \
+    -node
+  spire_apply \
+    -spiffeID spiffe://example.org/ns/tekton-pipelines/sa/tekton-pipelines-controller \
+    -parentID spiffe://example.org/ns/spire/node/example \
+    -selector k8s:ns:tekton-pipelines \
+    -selector k8s:pod-label:app:tekton-pipelines-controller \
+    -selector k8s:sa:tekton-pipelines-controller \
+    -admin
+}
+
+function patch_pipeline_spire() {
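+  # Patch the Tekton pipelines controller deployment with the SPIRE settings from pipeline-controller-spire.json.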
+  kubectl patch \
+      deployment tekton-pipelines-controller \
+      -n tekton-pipelines \
+      --patch-file "$DIR"/testdata/patch/pipeline-controller-spire.json
+
+  verify_pipeline_installation
+}
+
 function verify_pipeline_installation() {
   # Make sure that everything is cleaned up in the current namespace.
   delete_pipeline_resources
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index 44c1c2ed94b..f99a518f12c 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -40,6 +40,19 @@ header "Setting up environment"
 install_pipeline_crd
 
 failed=0
+function alpha_gate() {
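+  # For the alpha gate, deploy SPIRE and patch the controller before the tests run.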
+  local gate="$1"
+  if [ "$gate" != "alpha" ] && [ "$gate" != "stable" ] && [ "$gate" != "beta" ] ; then
+    printf "Invalid gate %s\n" "${gate}"
+    exit 255
+  fi
+  if [ "$gate" == "alpha" ] ; then
+    printf "Setting up environment for alpha features\n"
+    install_spire
+    patch_pipeline_spire
+    failed=0
+  fi
+}
 
 function set_feature_gate() {
   local gate="$1"
@@ -91,6 +104,7 @@ function run_e2e() {
   fi
 }
 
+alpha_gate "$PIPELINE_FEATURE_GATE"
 set_feature_gate "$PIPELINE_FEATURE_GATE"
 set_embedded_status "$EMBEDDED_STATUS_GATE"
 run_e2e
diff --git a/test/embed_test.go b/test/embed_test.go
index 4b15b002e67..0d473370d99 100644
--- a/test/embed_test.go
+++ b/test/embed_test.go
@@ -41,10 +41,29 @@ const (
 // TestTaskRun_EmbeddedResource is an integration test that will verify a very simple "hello world" TaskRun can be
 // executed with an embedded resource spec.
 func TestTaskRun_EmbeddedResource(t *testing.T) {
+	embeddedResourceTest(t, false)
+}
+
+// TestTaskRun_EmbeddedResourceWithSpire is an integration test with spire enabled that will verify a very simple "hello world" TaskRun can be
+// executed with an embedded resource spec.
+func TestTaskRun_EmbeddedResourceWithSpire(t *testing.T) {
+	embeddedResourceTest(t, true)
+}
+
+func embeddedResourceTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	t.Parallel()
 
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
@@ -68,6 +87,15 @@ func TestTaskRun_EmbeddedResource(t *testing.T) {
 
 	// TODO(#127) Currently we have no reliable access to logs from the TaskRun so we'll assume successful
 	// completion of the TaskRun means the TaskRun did what it was intended.
+
+	if spireEnabled {
+		tr, err := c.TaskRunClient.Get(ctx, embedTaskRunName, metav1.GetOptions{})
+		if err != nil {
+			t.Errorf("Error retrieving taskrun: %s", err)
+		}
+		spireShouldPassTaskRunResultsVerify(tr, t)
+	}
+
 }
 
 func getEmbeddedTask(t *testing.T, taskName, namespace string, args []string) *v1beta1.Task {
diff --git a/test/entrypoint_test.go b/test/entrypoint_test.go
index 16828c1af7c..dbf84606fef 100644
--- a/test/entrypoint_test.go
+++ b/test/entrypoint_test.go
@@ -36,10 +36,31 @@ import (
 // that doesn't have a cmd defined. In addition to making sure the steps
 // are executed in the order specified
 func TestEntrypointRunningStepsInOrder(t *testing.T) {
+	entryPointerTest(t, false)
+}
+
+// TestEntrypointRunningStepsInOrderWithSpire is an integration test with spire enabled that will
+// verify an attempt to get the entrypoint of a container image
+// that doesn't have a cmd defined. In addition to making sure the steps
+// are executed in the order specified
+func TestEntrypointRunningStepsInOrderWithSpire(t *testing.T) {
+	entryPointerTest(t, true)
+}
+
+func entryPointerTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	t.Parallel()
 
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
@@ -70,4 +91,12 @@ spec:
 		t.Errorf("Error waiting for TaskRun to finish successfully: %s", err)
 	}
 
+	if spireEnabled {
+		tr, err := c.TaskRunClient.Get(ctx, epTaskRunName, metav1.GetOptions{})
+		if err != nil {
+			t.Errorf("Error retrieving taskrun: %s", err)
+		}
+		spireShouldPassTaskRunResultsVerify(tr, t)
+	}
+
 }
diff --git a/test/helm_task_test.go b/test/helm_task_test.go
index 577247a9350..6f46d19d3cd 100644
--- a/test/helm_task_test.go
+++ b/test/helm_task_test.go
@@ -42,11 +42,30 @@ var (
 // TestHelmDeployPipelineRun is an integration test that will verify a pipeline build an image
 // and then using helm to deploy it
 func TestHelmDeployPipelineRun(t *testing.T) {
+	helmDeployTest(t, false)
+}
+
+// TestHelmDeployPipelineRunWithSpire is an integration test with spire enabled that will verify a pipeline that builds an image
+// and then deploys it using helm
+func TestHelmDeployPipelineRunWithSpire(t *testing.T) {
+	helmDeployTest(t, true)
+}
+
+func helmDeployTest(t *testing.T, spireEnabled bool) {
 	repo := ensureDockerRepo(t)
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	setupClusterBindingForHelm(ctx, c, t, namespace)
 
 	var (
@@ -103,6 +122,16 @@ func TestHelmDeployPipelineRun(t *testing.T) {
 		t.Fatalf("PipelineRun execution failed; helm may or may not have been installed :(")
 	}
 
+	if spireEnabled {
+		taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + helmDeployPipelineRunName})
+		if err != nil {
+			t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", helmDeployPipelineRunName, err)
+		}
+		for _, taskrunItem := range taskrunList.Items {
+			spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+		}
+	}
+
 	// cleanup task to remove helm releases from cluster and cluster role bindings, will not fail the test if it fails, just log
 	knativetest.CleanupOnInterrupt(func() { helmCleanup(ctx, c, t, namespace) }, t.Logf)
 	defer helmCleanup(ctx, c, t, namespace)
diff --git a/test/hermetic_taskrun_test.go b/test/hermetic_taskrun_test.go
index 5a861053bfb..79727b48931 100644
--- a/test/hermetic_taskrun_test.go
+++ b/test/hermetic_taskrun_test.go
@@ -34,11 +34,30 @@ import (
 // it does this by first running the TaskRun normally to make sure it passes
 // Then, it enables hermetic mode and makes sure the same TaskRun fails because it no longer has access to a network.
 func TestHermeticTaskRun(t *testing.T) {
+	hermeticTest(t, false)
+}
+
+// TestHermeticTaskRunWithSpire (with spire enabled) makes sure that the hermetic execution mode actually drops network access from a TaskRun step
+// it does this by first running the TaskRun normally to make sure it passes
+// Then, it enables hermetic mode and makes sure the same TaskRun fails because it no longer has access to a network.
+func TestHermeticTaskRunWithSpire(t *testing.T) {
+	hermeticTest(t, true)
+}
+
+func hermeticTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	c, namespace := setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "alpha"}))
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "alpha"}))
+	}
+
 	t.Parallel()
 	defer tearDown(ctx, t, c, namespace)
 
@@ -67,6 +86,13 @@ func TestHermeticTaskRun(t *testing.T) {
 			if err := WaitForTaskRunState(ctx, c, regularTaskRunName, Succeed(regularTaskRunName), "TaskRunCompleted"); err != nil {
 				t.Errorf("Error waiting for TaskRun %s to finish: %s", regularTaskRunName, err)
 			}
+			if spireEnabled {
+				tr, err := c.TaskRunClient.Get(ctx, regularTaskRunName, metav1.GetOptions{})
+				if err != nil {
+					t.Errorf("Error retrieving taskrun: %s", err)
+				}
+				spireShouldPassTaskRunResultsVerify(tr, t)
+			}
 
 			// now, run the task mode with hermetic mode
 			// it should fail, since it shouldn't be able to access any network
@@ -79,6 +105,13 @@ func TestHermeticTaskRun(t *testing.T) {
 			if err := WaitForTaskRunState(ctx, c, hermeticTaskRunName, Failed(hermeticTaskRunName), "Failed"); err != nil {
 				t.Errorf("Error waiting for TaskRun %s to fail: %s", hermeticTaskRunName, err)
 			}
+			if spireEnabled {
+				tr, err := c.TaskRunClient.Get(ctx, hermeticTaskRunName, metav1.GetOptions{})
+				if err != nil {
+					t.Errorf("Error retrieving taskrun: %s", err)
+				}
+				spireShouldFailTaskRunResultsVerify(tr, t)
+			}
 		})
 	}
 }
diff --git a/test/ignore_step_error_test.go b/test/ignore_step_error_test.go
index bc77fdcc8f0..b6522f0735e 100644
--- a/test/ignore_step_error_test.go
+++ b/test/ignore_step_error_test.go
@@ -33,10 +33,27 @@ import (
 )
 
 func TestMissingResultWhenStepErrorIsIgnored(t *testing.T) {
+	stepErrorTest(t, false)
+}
+
+func TestMissingResultWhenStepErrorIsIgnoredWithSpire(t *testing.T) {
+	stepErrorTest(t, true)
+}
+
+func stepErrorTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
 
@@ -99,6 +116,10 @@ spec:
 		t.Fatalf("task1 should have produced a result before failing the step")
 	}
 
+	if spireEnabled {
+		spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+	}
+
 	for _, r := range taskrunItem.Status.TaskRunResults {
 		if r.Name == "result1" && r.Value.StringVal != "123" {
 			t.Fatalf("task1 should have initialized a result \"result1\" to \"123\"")
diff --git a/test/init_test.go b/test/init_test.go
index 4158ac8f622..6094905a788 100644
--- a/test/init_test.go
+++ b/test/init_test.go
@@ -46,6 +46,11 @@ import (
 	"sigs.k8s.io/yaml"
 )
 
+var spireFeatureGates = map[string]string{
+	"enable-spire":      "true",
+	"enable-api-fields": "alpha",
+}
+
 var initMetrics sync.Once
 var skipRootUserTests = false
 
@@ -274,3 +279,19 @@ func getCRDYaml(ctx context.Context, cs *clients, ns string) ([]byte, error) {
 
 	return output, nil
 }
+
+// spireShouldFailTaskRunResultsVerify asserts that the taskrun results were not verified by SPIRE
+func spireShouldFailTaskRunResultsVerify(tr *v1beta1.TaskRun, t *testing.T) {
+	if tr.IsTaskRunResultVerified() {
+		t.Errorf("Taskrun `%s` status condition should not be verified as taskrun failed", tr.Name)
+	}
+	t.Logf("Taskrun `%s` status results condition verified by spire as false, which is valid", tr.Name)
+}
+
+// spireShouldPassTaskRunResultsVerify asserts that the taskrun results were verified by SPIRE
+func spireShouldPassTaskRunResultsVerify(tr *v1beta1.TaskRun, t *testing.T) {
+	if !tr.IsTaskRunResultVerified() {
+		t.Errorf("Taskrun `%s` status condition not verified. Spire taskrun results verification failure", tr.Name)
+	}
+	t.Logf("Taskrun `%s` status results condition verified by spire as true, which is valid", tr.Name)
+}
diff --git a/test/kaniko_task_test.go b/test/kaniko_task_test.go
index 4062e88eb9f..45e152c6d06 100644
--- a/test/kaniko_task_test.go
+++ b/test/kaniko_task_test.go
@@ -42,6 +42,15 @@ const (
 
 // TestTaskRun is an integration test that will verify a TaskRun using kaniko
 func TestKanikoTaskRun(t *testing.T) {
+	kanikoTest(t, false)
+}
+
+// TestKanikoTaskRunWithSpire is an integration test that will verify a TaskRun using kaniko with Spire enabled
+func TestKanikoTaskRunWithSpire(t *testing.T) {
+	kanikoTest(t, true)
+}
+
+func kanikoTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
@@ -50,7 +59,15 @@ func TestKanikoTaskRun(t *testing.T) {
 		t.Skip("Skip test as skipRootUserTests set to true")
 	}
 
-	c, namespace := setup(ctx, t, withRegistry)
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, withRegistry, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t, withRegistry)
+	}
+
 	t.Parallel()
 
 	repo := fmt.Sprintf("registry.%s:5000/kanikotasktest", namespace)
@@ -123,6 +140,10 @@ func TestKanikoTaskRun(t *testing.T) {
 		t.Fatalf("Expected remote commit to match local revision: %s, %s", commit, revision)
 	}
 
+	if spireEnabled {
+		spireShouldPassTaskRunResultsVerify(tr, t)
+	}
+
 	// match the local digest, which is first capture group against the remote image
 	remoteDigest, err := getRemoteDigest(t, c, namespace, repo)
 	if err != nil {
diff --git a/test/pipelinefinally_test.go b/test/pipelinefinally_test.go
index c5afe638c09..c7867660ec3 100644
--- a/test/pipelinefinally_test.go
+++ b/test/pipelinefinally_test.go
@@ -44,10 +44,27 @@ var requireAlphaFeatureFlags = requireAnyGate(map[string]string{
 })
 
 func TestPipelineLevelFinally_OneDAGTaskFailed_InvalidTaskResult_Failure(t *testing.T) {
+	pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t, false)
+}
+
+func TestPipelineLevelFinally_OneDAGTaskFailed_InvalidTaskResult_FailureWithSpire(t *testing.T) {
+	pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t, true)
+}
+
+func pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
 
@@ -260,27 +277,46 @@ spec:
 			if !isFailed(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("dag task %s should have failed", n)
 			}
+			if spireEnabled {
+				spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+			}
 			dagTask1EndTime = taskrunItem.Status.CompletionTime
 		case n == "dagtask2":
 			if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil {
 				t.Errorf("Error waiting for TaskRun to succeed: %v", err)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 			dagTask2EndTime = taskrunItem.Status.CompletionTime
 		case n == "dagtask4":
+			if spireEnabled {
+				// This task should have been skipped, so its results should not have been verified by SPIRE
+				spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+			}
 			t.Fatalf("task %s should have skipped due to when expression", n)
 		case n == "dagtask5":
 			if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil {
 				t.Errorf("Error waiting for TaskRun to succeed: %v", err)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case n == "finaltask1":
 			if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil {
 				t.Errorf("Error waiting for TaskRun to succeed: %v", err)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 			finalTaskStartTime = taskrunItem.Status.StartTime
 		case n == "finaltask2":
 			if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil {
 				t.Errorf("Error waiting for TaskRun to succeed: %v", err)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 			for _, p := range taskrunItem.Spec.Params {
 				switch param := p.Name; param {
 				case "dagtask1-status":
@@ -306,6 +342,9 @@ spec:
 			if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil {
 				t.Errorf("Error waiting for TaskRun to succeed: %v", err)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 			for _, p := range taskrunItem.Spec.Params {
 				if p.Name == "dagtask-result" && p.Value.StringVal != "Hello" {
 					t.Errorf("Error resolving task result reference in a finally task %s", n)
@@ -315,13 +354,27 @@ spec:
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("final task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case n == "guardedfinaltaskusingdagtask5status1":
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("final task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case n == "guardedfinaltaskusingdagtask5result2":
+			if spireEnabled {
+				// This task should have been skipped, so its results should not have been verified by SPIRE
+				spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+			}
 			t.Fatalf("final task %s should have skipped due to when expression evaluating to false", n)
 		case n == "finaltaskconsumingdagtask1" || n == "finaltaskconsumingdagtask4" || n == "guardedfinaltaskconsumingdagtask4":
+			if spireEnabled {
+				// This task should have been skipped, so its results should not have been verified by SPIRE
+				spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+			}
 			t.Fatalf("final task %s should have skipped due to missing task result reference", n)
 		default:
 			t.Fatalf("Found unexpected taskRun %s", n)
@@ -394,10 +447,27 @@ spec:
 }
 
 func TestPipelineLevelFinally_OneFinalTaskFailed_Failure(t *testing.T) {
+	pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t, false)
+}
+
+func TestPipelineLevelFinally_OneFinalTaskFailed_FailureWithSpire(t *testing.T) {
+	pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t, true)
+}
+
+func pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
 
@@ -451,10 +521,16 @@ spec:
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("dag task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case n == "finaltask1":
 			if !isFailed(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("final task %s should have failed", n)
 			}
+			if spireEnabled {
+				spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+			}
 		default:
 			t.Fatalf("TaskRuns were not found for both final and dag tasks")
 		}
@@ -462,10 +538,27 @@ spec:
 }
 
 func TestPipelineLevelFinally_OneFinalTask_CancelledRunFinally(t *testing.T) {
+	pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t, false)
+}
+
+func TestPipelineLevelFinally_OneFinalTask_CancelledRunFinallyWithSpire(t *testing.T) {
+	pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t, true)
+}
+
+func pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t, requireAlphaFeatureFlags)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t, requireAlphaFeatureFlags)
+	}
+
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
 
@@ -562,13 +655,25 @@ spec:
 			if !isCancelled(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("dag task %s should have been cancelled", n)
 			}
+			if spireEnabled {
+				spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case "dagtask2":
+			if spireEnabled {
+				spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+			}
 			t.Fatalf("second dag task %s should be skipped as it depends on the result from cancelled 'dagtask1'", n)
 		case "finaltask1":
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("first final task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case "finaltask2":
+			if spireEnabled {
+				spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+			}
 			t.Fatalf("second final task %s should be skipped as it depends on the result from cancelled 'dagtask1'", n)
 		default:
 			t.Fatalf("TaskRuns were not found for both final and dag tasks")
@@ -577,10 +682,27 @@ spec:
 }
 
 func TestPipelineLevelFinally_OneFinalTask_StoppedRunFinally(t *testing.T) {
+	pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t, false)
+}
+
+func TestPipelineLevelFinally_OneFinalTask_StoppedRunFinallyWithSpire(t *testing.T) {
+	pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t, true)
+}
+
+func pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t, requireAlphaFeatureFlags)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t, requireAlphaFeatureFlags)
+	}
+
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
 
@@ -677,14 +799,23 @@ spec:
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("dag task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case "finaltask1":
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("first final task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case "finaltask2":
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("second final task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		default:
 			t.Fatalf("TaskRuns were not found for both final and dag tasks")
 		}
diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go
index 6f11fd59413..2f59d10a450 100644
--- a/test/pipelinerun_test.go
+++ b/test/pipelinerun_test.go
@@ -176,6 +176,15 @@ spec:
 }
 
 func TestPipelineRun(t *testing.T) {
+	pipelineTestWithOptions(t, false)
+}
+
+// A shorter test name is used here because helpers.ObjectNameForTest(t) would exceed the name length limit and crash
+func TestWithSpirePR(t *testing.T) {
+	pipelineTestWithOptions(t, true)
+}
+
+func pipelineTestWithOptions(t *testing.T, spireEnabled bool) {
 	t.Parallel()
 	type tests struct {
 		name                   string
@@ -315,7 +324,15 @@ spec:
 			ctx := context.Background()
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
-			c, namespace := setup(ctx, t)
+
+			var c *clients
+			var namespace string
+
+			if spireEnabled {
+				c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+			} else {
+				c, namespace = setup(ctx, t)
+			}
 
 			knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 			defer tearDown(ctx, t, c, namespace)
@@ -347,6 +364,9 @@ spec:
 					if strings.HasPrefix(actualTaskRunItem.Name, taskRunName) {
 						taskRunName = actualTaskRunItem.Name
 					}
+					if spireEnabled {
+						spireShouldPassTaskRunResultsVerify(&actualTaskRunItem, t)
+					}
 				}
 				expectedTaskRunNames = append(expectedTaskRunNames, taskRunName)
 				r, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
@@ -448,10 +468,28 @@ spec:
 // TestPipelineRunRefDeleted tests that a running PipelineRun doesn't fail when the Pipeline
 // it references is deleted.
 func TestPipelineRunRefDeleted(t *testing.T) {
+	pipelineRunRefDeletedTestWithOptions(t, false)
+}
+
+// TestPipelineRunRefDeletedWithSpire tests (with spire enabled) that a running PipelineRun doesn't fail when the Pipeline
+// it references is deleted.
+func TestPipelineRunRefDeletedWithSpire(t *testing.T) {
+	pipelineRunRefDeletedTestWithOptions(t, true)
+}
+
+func pipelineRunRefDeletedTestWithOptions(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
 
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
@@ -515,6 +553,16 @@ spec:
 		t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err)
 	}
 
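+	// With SPIRE enabled, the results of every TaskRun created by this PipelineRun
+	// are expected to pass signature verification.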
+	if spireEnabled {
+		taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + prName})
+		if err != nil {
+			t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err)
+		}
+		for _, taskrunItem := range taskrunList.Items {
+			spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+		}
+	}
+
 }
 
 // TestPipelineRunPending tests that a Pending PipelineRun is not run until the pending
@@ -522,10 +570,30 @@ spec:
 // transition PipelineRun states during the test, which the TestPipelineRun suite does not
 // support.
 func TestPipelineRunPending(t *testing.T) {
+	pipelineRunPendingTestWithOptions(t, false)
+}
+
+// TestPipelineRunPendingWithSpire tests (with spire) that a Pending PipelineRun is not run until the pending
+// status is cleared. This is separate from the TestPipelineRun suite because it has to
+// transition PipelineRun states during the test, which the TestPipelineRun suite does not
+// support.
+func TestPipelineRunPendingWithSpire(t *testing.T) {
+	pipelineRunPendingTestWithOptions(t, true)
+}
+
+func pipelineRunPendingTestWithOptions(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
 
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
@@ -601,6 +669,15 @@ spec:
 	if err := WaitForPipelineRunState(ctx, c, prName, timeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil {
 		t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err)
 	}
+	if spireEnabled {
+		taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + prName})
+		if err != nil {
+			t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err)
+		}
+		for _, taskrunItem := range taskrunList.Items {
+			spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+		}
+	}
 }
 
 func getFanInFanOutTasks(t *testing.T, namespace string) map[string]*v1beta1.Task {
diff --git a/test/status_test.go b/test/status_test.go
index 551dff1d858..d53d2a40c33 100644
--- a/test/status_test.go
+++ b/test/status_test.go
@@ -35,10 +35,30 @@ import (
 // verify a very simple "hello world" TaskRun and PipelineRun failure
 // execution lead to the correct TaskRun status.
 func TestTaskRunPipelineRunStatus(t *testing.T) {
+	taskRunPipelineRunStatus(t, false)
+}
+
+// TestTaskRunPipelineRunStatusWithSpire is an integration test with spire enabled that will
+// verify that a very simple "hello world" TaskRun and PipelineRun failure
+// execution leads to the correct TaskRun status.
+func TestTaskRunPipelineRunStatusWithSpire(t *testing.T) {
+	taskRunPipelineRunStatus(t, true)
+}
+
+func taskRunPipelineRunStatus(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	t.Parallel()
 
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
@@ -72,6 +92,14 @@ spec:
 		t.Errorf("Error waiting for TaskRun to finish: %s", err)
 	}
 
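+	// The TaskRun above fails, so with SPIRE enabled its results must not pass verification.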
+	if spireEnabled {
+		tr, err := c.TaskRunClient.Get(ctx, taskRun.Name, metav1.GetOptions{})
+		if err != nil {
+			t.Errorf("Error retrieving taskrun: %s", err)
+		}
+		spireShouldFailTaskRunResultsVerify(tr, t)
+	}
+
 	pipeline := parse.MustParsePipeline(t, fmt.Sprintf(`
 metadata:
   name: %s
@@ -98,4 +126,15 @@ spec:
 	if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, PipelineRunFailed(pipelineRun.Name), "BuildValidationFailed"); err != nil {
 		t.Errorf("Error waiting for TaskRun to finish: %s", err)
 	}
+
+	if spireEnabled {
+		taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRun.Name})
+		if err != nil {
+			t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err)
+		}
+		for _, taskrunItem := range taskrunList.Items {
+			spireShouldFailTaskRunResultsVerify(&taskrunItem, t)
+		}
+	}
+
 }
diff --git a/test/taskrun_test.go b/test/taskrun_test.go
index 45b65648308..9c9d4bd9e71 100644
--- a/test/taskrun_test.go
+++ b/test/taskrun_test.go
@@ -39,11 +39,27 @@ import (
 )
 
 func TestTaskRunFailure(t *testing.T) {
+	taskrunFailureTest(t, false)
+}
+
+func TestTaskRunFailureWithSpire(t *testing.T) {
+	taskrunFailureTest(t, true)
+}
+
+func taskrunFailureTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	c, namespace := setup(ctx, t)
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	t.Parallel()
 
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
@@ -93,6 +109,10 @@ spec:
 		t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
 	}
 
+	if spireEnabled {
+		spireShouldFailTaskRunResultsVerify(taskrun, t)
+	}
+
 	expectedStepState := []v1beta1.StepState{{
 		ContainerState: corev1.ContainerState{
 			Terminated: &corev1.ContainerStateTerminated{
@@ -136,10 +156,27 @@ spec:
 }
 
 func TestTaskRunStatus(t *testing.T) {
+	taskrunStatusTest(t, false)
+}
+
+func TestTaskRunStatusWithSpire(t *testing.T) {
+	taskrunStatusTest(t, true)
+}
+
+func taskrunStatusTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	t.Parallel()
 
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
@@ -185,6 +222,10 @@ spec:
 		t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
 	}
 
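+	// With SPIRE enabled, the signed results of the successful TaskRun are expected to pass verification.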
+	if spireEnabled {
+		spireShouldPassTaskRunResultsVerify(taskrun, t)
+	}
+
 	expectedStepState := []v1beta1.StepState{{
 		ContainerState: corev1.ContainerState{
 			Terminated: &corev1.ContainerStateTerminated{
@@ -210,3 +251,113 @@ spec:
 		t.Fatalf("-got, +want: %v", d)
 	}
 }
+
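+// TestTaskRunModification patches a TaskRun's status while it is still running.
+// With SPIRE enabled, the tampered TaskRun is expected to fail results verification.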
+func TestTaskRunModification(t *testing.T) {
+	taskrunModificationTest(t, false)
+}
+
+func TestTaskRunModificationWithSpire(t *testing.T) {
+	taskrunModificationTest(t, true)
+}
+
+func taskrunModificationTest(t *testing.T, spireEnabled bool) {
+	ctx := context.Background()
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
+	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
+	defer tearDown(ctx, t, c, namespace)
+
+	taskRunName := "non-falsifiable-provenance"
+
+	t.Logf("Creating Task and TaskRun in namespace %s", namespace)
+	task := parse.MustParseTask(t, fmt.Sprintf(`
+metadata:
+  name: non-falsifiable
+  namespace: %s
+spec:
+  results:
+  - name: foo
+  - name: bar
+  steps:
+  - image: ubuntu
+    script: |
+      #!/usr/bin/env bash
+      sleep 20
+      printf "hello" > "$(results.foo.path)"
+      printf "world" > "$(results.bar.path)"
+`, namespace))
+	if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create Task: %s", err)
+	}
+	taskRun := parse.MustParseTaskRun(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  taskRef:
+    name: non-falsifiable
+`, taskRunName, namespace))
+	if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create TaskRun: %s", err)
+	}
+
+	t.Logf("Waiting for TaskRun in namespace %s to be in running state", namespace)
+	if err := WaitForTaskRunState(ctx, c, taskRunName, Running(taskRunName), "TaskRunRunning"); err != nil {
+		t.Errorf("Error waiting for TaskRun to start running: %s", err)
+	}
+
+	patches := []jsonpatch.JsonPatchOperation{{
+		Operation: "replace",
+		Path:      "/status/taskSpec/steps/0/image",
+		Value:     "not-ubuntu",
+	}}
+	patchBytes, err := json.Marshal(patches)
+	if err != nil {
+		t.Fatalf("failed to marshal patch bytes: %s", err)
+	}
+	t.Logf("Patching TaskRun %s in namespace %s mid-run so that spire catches the unauthorized change", taskRunName, namespace)
+	if _, err := c.TaskRunClient.Patch(ctx, taskRunName, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil {
+		t.Fatalf("Failed to patch taskrun `%s`: %s", taskRunName, err)
+	}
+
+	t.Logf("Waiting for TaskRun %s in namespace %s to fail", taskRunName, namespace)
+	if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunFailed(taskRunName), "TaskRunFailed"); err != nil {
+		t.Errorf("Error waiting for TaskRun to finish: %s", err)
+	}
+
+	taskrun, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
+	}
+
+	if spireEnabled {
+		spireShouldFailTaskRunResultsVerify(taskrun, t)
+	}
+
+	expectedStepState := []v1beta1.StepState{{
+		ContainerState: corev1.ContainerState{
+			Terminated: &corev1.ContainerStateTerminated{
+				ExitCode: 1,
+				Reason:   "Error",
+			},
+		},
+		Name:          "unnamed-0",
+		ContainerName: "step-unnamed-0",
+	}}
+
+	ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID")
+	ignoreStepFields := cmpopts.IgnoreFields(v1beta1.StepState{}, "ImageID")
+	if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" {
+		t.Fatalf("-got, +want: %v", d)
+	}
+}
diff --git a/test/testdata/patch/pipeline-controller-spire.json b/test/testdata/patch/pipeline-controller-spire.json
new file mode 100644
index 00000000000..c137f675cb0
--- /dev/null
+++ b/test/testdata/patch/pipeline-controller-spire.json
@@ -0,0 +1,55 @@
+{
+   "spec":{
+      "template":{
+         "spec":{
+            "$setElementOrder/containers":[
+               {
+                  "name":"tekton-pipelines-controller"
+               }
+            ],
+            "$setElementOrder/volumes":[
+               {
+                  "name":"config-logging"
+               },
+               {
+                  "name":"config-registry-cert"
+               },
+               {
+                  "name":"spiffe-workload-api"
+               }
+            ],
+            "containers":[
+               {
+                  "$setElementOrder/volumeMounts":[
+                     {
+                        "mountPath":"/etc/config-logging"
+                     },
+                     {
+                        "mountPath":"/etc/config-registry-cert"
+                     },
+                     {
+                        "mountPath":"/spiffe-workload-api"
+                     }
+                  ],
+                  "name":"tekton-pipelines-controller",
+                  "volumeMounts":[
+                     {
+                        "mountPath":"/spiffe-workload-api",
+                        "name":"spiffe-workload-api",
+                        "readOnly":true
+                     }
+                  ]
+               }
+            ],
+            "volumes":[
+               {
+                  "csi":{
+                     "driver":"csi.spiffe.io"
+                  },
+                  "name":"spiffe-workload-api"
+               }
+            ]
+         }
+      }
+   }
+}
diff --git a/test/testdata/spire/spiffe-csi-driver.yaml b/test/testdata/spire/spiffe-csi-driver.yaml
new file mode 100644
index 00000000000..e9d07bc5683
--- /dev/null
+++ b/test/testdata/spire/spiffe-csi-driver.yaml
@@ -0,0 +1,20 @@
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+  name: "csi.spiffe.io"
+spec:
+  # Only ephemeral, inline volumes are supported. There is no need for a
+  # controller to provision and attach volumes.
+  attachRequired: false
+
+  # Request the pod information which the CSI driver uses to verify that an
+  # ephemeral mount was requested.
+  podInfoOnMount: true
+
+  # Don't change ownership on the contents of the mount since the Workload API
+  # Unix Domain Socket is typically open to all (i.e. 0777).
+  fsGroupPolicy: None
+
+  # Declare support for ephemeral volumes only.
+  volumeLifecycleModes:
+    - Ephemeral
diff --git a/test/testdata/spire/spire-agent.yaml b/test/testdata/spire/spire-agent.yaml
new file mode 100644
index 00000000000..4e848a51388
--- /dev/null
+++ b/test/testdata/spire/spire-agent.yaml
@@ -0,0 +1,208 @@
+# ServiceAccount for the SPIRE agent
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: spire-agent
+  namespace: spire
+
+---
+
+# Required cluster role to allow spire-agent to query k8s API server
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: spire-agent-cluster-role
+rules:
+- apiGroups: [""]
+  resources: ["pods", "nodes", "nodes/proxy"]
+  verbs: ["get"]
+
+---
+
+# Binds above cluster role to spire-agent service account
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: spire-agent-cluster-role-binding
+subjects:
+- kind: ServiceAccount
+  name: spire-agent
+  namespace: spire
+roleRef:
+  kind: ClusterRole
+  name: spire-agent-cluster-role
+  apiGroup: rbac.authorization.k8s.io
+
+
+---
+
+# ConfigMap for the SPIRE agent featuring:
+# 1) PSAT node attestation
+# 2) K8S Workload Attestation over the secure kubelet port
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: spire-agent
+  namespace: spire
+data:
+  agent.conf: |
+    agent {
+      data_dir = "/run/spire"
+      log_level = "DEBUG"
+      server_address = "spire-server"
+      server_port = "8081"
+      socket_path = "/run/spire/sockets/spire-agent.sock"
+      trust_bundle_path = "/run/spire/bundle/bundle.crt"
+      trust_domain = "example.org"
+    }
+
+    plugins {
+      NodeAttestor "k8s_psat" {
+        plugin_data {
+          cluster = "example-cluster"
+        }
+      }
+
+      KeyManager "memory" {
+        plugin_data {
+        }
+      }
+
+      WorkloadAttestor "k8s" {
+        plugin_data {
+          skip_kubelet_verification = true
+        }
+      }
+    }
+
+---
+
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: spire-agent
+  namespace: spire
+  labels:
+    app: spire-agent
+spec:
+  selector:
+    matchLabels:
+      app: spire-agent
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      namespace: spire
+      labels:
+        app: spire-agent
+    spec:
+      hostPID: true
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      serviceAccountName: spire-agent
+      containers:
+        - name: spire-agent
+          image: ghcr.io/spiffe/spire-agent:1.1.1
+          imagePullPolicy: IfNotPresent
+          args: ["-config", "/run/spire/config/agent.conf"]
+          volumeMounts:
+            - name: spire-config
+              mountPath: /run/spire/config
+              readOnly: true
+            - name: spire-bundle
+              mountPath: /run/spire/bundle
+              readOnly: true
+            - name: spire-token
+              mountPath: /var/run/secrets/tokens
+            - name: spire-agent-socket-dir
+              mountPath: /run/spire/sockets
+        # This is the container which runs the SPIFFE CSI driver.
+        - name: spiffe-csi-driver
+          image: ghcr.io/spiffe/spiffe-csi-driver:nightly
+          imagePullPolicy: IfNotPresent
+          args: [
+            "-workload-api-socket-dir", "/spire-agent-socket",
+            "-csi-socket-path", "/spiffe-csi/csi.sock",
+          ]
+          env:
+            # The CSI driver needs a unique node ID. The node name can be
+            # used for this purpose.
+            - name: MY_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            # The volume containing the SPIRE agent socket. The SPIFFE CSI
+            # driver will mount this directory into containers.
+            - mountPath: /spire-agent-socket
+              name: spire-agent-socket-dir
+              readOnly: true
+            # The volume that will contain the CSI driver socket shared
+            # with the kubelet and the driver registrar.
+            - mountPath: /spiffe-csi
+              name: spiffe-csi-socket-dir
+            # The volume containing mount points for containers.
+            - mountPath: /var/lib/kubelet/pods
+              mountPropagation: Bidirectional
+              name: mountpoint-dir
+          securityContext:
+            privileged: true
+        # This container runs the CSI Node Driver Registrar which takes care
+        # of all the little details required to register a CSI driver with
+        # the kubelet.
+        - name: node-driver-registrar
+          image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1
+          imagePullPolicy: IfNotPresent
+          args: [
+            "-csi-address", "/spiffe-csi/csi.sock",
+            "-kubelet-registration-path", "/var/lib/kubelet/plugins/csi.spiffe.io/csi.sock",
+          ]
+          volumeMounts:
+            # The registrar needs access to the SPIFFE CSI driver socket
+            - mountPath: /spiffe-csi
+              name: spiffe-csi-socket-dir
+            # The registrar needs access to the Kubelet plugin registration
+            # directory
+            - name: kubelet-plugin-registration-dir
+              mountPath: /registration
+      volumes:
+        - name: spire-config
+          configMap:
+            name: spire-agent
+        - name: spire-bundle
+          configMap:
+            name: spire-bundle
+        - name: spire-token
+          projected:
+            sources:
+            - serviceAccountToken:
+                path: spire-agent
+                expirationSeconds: 7200
+                audience: spire-server
+        # This volume is used to share the Workload API socket between the CSI
+        # driver and SPIRE agent. Note, an emptyDir volume could also be used,
+        # however, this can lead to broken bind mounts in the workload
+        # containers if the agent pod is restarted (since the emptyDir
+        # directory on the node that was mounted into workload containers by
+        # the CSI driver belongs to the old pod instance and is no longer
+        # valid).
+        - name: spire-agent-socket-dir
+          hostPath:
+            path: /run/spire/agent-sockets
+            type: DirectoryOrCreate
+        # This volume is where the socket for kubelet->driver communication lives
+        - name: spiffe-csi-socket-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/csi.spiffe.io
+            type: DirectoryOrCreate
+        # This volume is where the SPIFFE CSI driver mounts volumes
+        - name: mountpoint-dir
+          hostPath:
+            path: /var/lib/kubelet/pods
+            type: Directory
+        # This volume is where the node-driver-registrar registers the plugin
+        # with kubelet
+        - name: kubelet-plugin-registration-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins_registry
+            type: Directory
diff --git a/test/testdata/spire/spire-server.yaml b/test/testdata/spire/spire-server.yaml
new file mode 100644
index 00000000000..ceec824613d
--- /dev/null
+++ b/test/testdata/spire/spire-server.yaml
@@ -0,0 +1,211 @@
+# ServiceAccount used by the SPIRE server.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: spire-server
+  namespace: spire
+
+---
+
+# Required cluster role to allow spire-server to query k8s API server
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: spire-server-cluster-role
+rules:
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["get"]
+  # allow TokenReview requests (to verify service account tokens for PSAT
+  # attestation)
+- apiGroups: ["authentication.k8s.io"]
+  resources: ["tokenreviews"]
+  verbs: ["get", "create"]
+
+---
+
+# Binds above cluster role to spire-server service account
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: spire-server-cluster-role-binding
+  namespace: spire
+subjects:
+- kind: ServiceAccount
+  name: spire-server
+  namespace: spire
+roleRef:
+  kind: ClusterRole
+  name: spire-server-cluster-role
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
+# Role for the SPIRE server
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  namespace: spire
+  name: spire-server-role
+rules:
+  # allow "get" access to pods (to resolve selectors for PSAT attestation)
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get"]
+  # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE
+  # agent bootstrapping, see the spire-bundle ConfigMap below)
+- apiGroups: [""]
+  resources: ["configmaps"]
+  resourceNames: ["spire-bundle"]
+  verbs: ["get", "patch"]
+
+---
+
+# RoleBinding granting the spire-server-role to the SPIRE server
+# service account.
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: spire-server-role-binding
+  namespace: spire
+subjects:
+- kind: ServiceAccount
+  name: spire-server
+  namespace: spire
+roleRef:
+  kind: Role
+  name: spire-server-role
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
+# ConfigMap containing the latest trust bundle for the trust domain. It is
+# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount
+# this config map and use the certificate to bootstrap trust with the SPIRE
+# server during attestation.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: spire-bundle
+  namespace: spire
+
+---
+
+# ConfigMap containing the SPIRE server configuration.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: spire-server
+  namespace: spire
+data:
+  server.conf: |
+    server {
+      bind_address = "0.0.0.0"
+      bind_port = "8081"
+      trust_domain = "example.org"
+      data_dir = "/run/spire/data"
+      log_level = "DEBUG"
+      default_svid_ttl = "1h"
+      ca_ttl = "12h"
+      ca_subject {
+        country = ["US"]
+        organization = ["SPIFFE"]
+        common_name = ""
+      }
+    }
+
+    plugins {
+      DataStore "sql" {
+        plugin_data {
+          database_type = "sqlite3"
+          connection_string = "/run/spire/data/datastore.sqlite3"
+        }
+      }
+
+      NodeAttestor "k8s_psat" {
+        plugin_data {
+          clusters = {
+            "example-cluster" = {
+              service_account_allow_list = ["spire:spire-agent"]
+            }
+          }
+        }
+      }
+
+      KeyManager "disk" {
+        plugin_data {
+          keys_path = "/run/spire/data/keys.json"
+        }
+      }
+
+      Notifier "k8sbundle" {
+        plugin_data {
+          # This plugin updates the bundle.crt value in the spire:spire-bundle
+          # ConfigMap by default, so no additional configuration is necessary.
+        }
+      }
+    }
+
+    health_checks {
+      listener_enabled = true
+      bind_address = "0.0.0.0"
+      bind_port = "8080"
+      live_path = "/live"
+      ready_path = "/ready"
+    }
+
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: spire-server
+  namespace: spire
+  labels:
+    app: spire-server
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: spire-server
+  template:
+    metadata:
+      namespace: spire
+      labels:
+        app: spire-server
+    spec:
+      serviceAccountName: spire-server
+      shareProcessNamespace: true
+      containers:
+        - name: spire-server
+          image: ghcr.io/spiffe/spire-server:1.1.1
+          imagePullPolicy: IfNotPresent
+          args: ["-config", "/run/spire/config/server.conf"]
+          ports:
+            - containerPort: 8081
+          volumeMounts:
+            - name: spire-config
+              mountPath: /run/spire/config
+              readOnly: true
+      volumes:
+        - name: spire-config
+          configMap:
+            name: spire-server
+
+---
+
+# Service definition for SPIRE server defining the gRPC port.
+apiVersion: v1
+kind: Service
+metadata:
+  name: spire-server
+  namespace: spire
+spec:
+  type: NodePort
+  ports:
+    - name: grpc
+      port: 8081
+      targetPort: 8081
+      protocol: TCP
+  selector:
+    app: spire-server