diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b52d3e77e7e..7b870428648 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -39,19 +39,19 @@ Examples of user facing changes: For pull requests with a release note: - ```release-note - Your release note here - ``` +```release-note +Your release note here +``` For pull requests that require additional action from users switching to the new release, include the string "action required" (case insensitive) in the release note: - ```release-note - action required: your release note here - ``` +```release-note +action required: your release note here +``` For pull requests that don't need to be mentioned at release time, use the `/release-note-none` Prow command to add the `release-note-none` label to the PR. You can also write the string "NONE" as a release note in your PR description: - ```release-note - NONE - ``` +```release-note +NONE +``` --> diff --git a/.ko.yaml b/.ko.yaml index a922d22cdc0..7dadee87eec 100644 --- a/.ko.yaml +++ b/.ko.yaml @@ -4,11 +4,9 @@ baseImageOverrides: # They are produced from ./images/Dockerfile github.com/tektoncd/pipeline/cmd/creds-init: gcr.io/tekton-nightly/github.com/tektoncd/pipeline/build-base:latest github.com/tektoncd/pipeline/cmd/git-init: gcr.io/tekton-nightly/github.com/tektoncd/pipeline/build-base:latest + # GCS fetcher needs root due to workspace permissions github.com/tektoncd/pipeline/vendor/github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/cmd/gcs-fetcher: gcr.io/distroless/static:latest # PullRequest resource needs root because in output mode it needs to access pr.json # which might have been copied or written with any level of permissions. github.com/tektoncd/pipeline/cmd/pullrequest-init: gcr.io/distroless/static:latest - - # Our entrypoint image does not need root, it simply needs to be able to 'cp' the binary into a shared location. 
- github.com/tektoncd/pipeline/cmd/entrypoint: gcr.io/distroless/base:debug-nonroot diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 588a11ec7d7..33bef6bbb4c 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -6,9 +6,8 @@ 1. Create [a GitHub account](https://github.com/join) 1. Setup [GitHub access via SSH](https://help.github.com/articles/connecting-to-github-with-ssh/) +1. Set up your [development environment](#environment-setup) 1. [Create and checkout a repo fork](#checkout-your-fork) -1. Set up your [shell environment](#environment-setup) -1. Install [requirements](#requirements) 1. [Set up a Kubernetes cluster](#kubernetes-cluster) 1. [Configure kubectl to use your cluster](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) 1. [Set up a docker repository you can push to](https://github.com/knative/serving/blob/4a8c859741a4454bdd62c2b60069b7d05f5468e7/docs/setting-up-a-docker-registry.md) @@ -45,6 +44,86 @@ At this point, you may find it useful to return to these `Tekton Pipeline` docs: - [Tekton Pipeline "Hello World" tutorial](https://github.com/tektoncd/pipeline/blob/master/docs/tutorial.md) - Define `Tasks`, `Pipelines`, and `PipelineResources`, see what happens when they are run + +## Environment Setup + +You must install these tools: + +1. [`git`](https://help.github.com/articles/set-up-git/): For source control + +1. [`go`](https://golang.org/doc/install): The language Tekton Pipelines is + built in. You need go version [v1.15](https://golang.org/dl/) or higher. + +Your [`$GOPATH`] setting is critical for `ko apply` to function properly: a +successful run will typically involve building pushing images instead of only +configuring Kubernetes resources. + +To [run your controllers with `ko`](#install-pipeline) you'll need to set these +environment variables (we recommend adding them to your `.bashrc`): + +1. 
`GOPATH`: If you don't have one, simply pick a directory and add `export + GOPATH=...` +1. `$GOPATH/bin` on `PATH`: This is so that tooling installed via `go get` will + work properly. +1. `KO_DOCKER_REPO`: The docker repository to which developer images should be + pushed (e.g. `gcr.io/[gcloud-project]`). You can also + [run a local registry](https://docs.docker.com/registry/deploying/) and set + `KO_DOCKER_REPO` to reference the registry (e.g. at + `localhost:5000/mypipelineimages`). + +`.bashrc` example: + +```shell +export GOPATH="$HOME/go" +export PATH="${PATH}:${GOPATH}/bin" +export KO_DOCKER_REPO='gcr.io/my-gcloud-project-name' +``` + +Make sure to configure +[authentication](https://cloud.google.com/container-registry/docs/advanced-authentication#standalone_docker_credential_helper) +for your `KO_DOCKER_REPO` if required. To be able to push images to +`gcr.io/`, you need to run this once: + +```shell +gcloud auth configure-docker +``` + +After setting `GOPATH` and putting `$GOPATH/bin` on your `PATH`, you must then install these tools: + +3. [`ko`](https://github.com/google/ko): For development. `ko` version v0.5.1 or + higher is required for `pipeline` to work correctly. + +4. [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/): For + interacting with your kube cluster + +The user you are using to interact with your k8s cluster must be a cluster admin +to create role bindings: + +```shell +# Using gcloud to get your current user +USER=$(gcloud config get-value core/account) +# Make that user a cluster admin +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole=cluster-admin \ + --user="${USER}" +``` + +### Install in custom namespace + +1. 
To install into a different namespace you can use this script : + +```shell +#!/usr/bin/env bash +set -e + +# Set your target namespace here +TARGET_NAMESPACE=new-target-namespace + +ko resolve -f config | sed -e '/kind: Namespace/!b;n;n;s/:.*/: '"${TARGET_NAMESPACE}"'/' | \ + sed "s/namespace: tekton-pipelines$/namespace: ${TARGET_NAMESPACE}/" | \ + kubectl apply -f- +kubectl set env deployments --all SYSTEM_NAMESPACE=${TARGET_NAMESPACE} -n ${TARGET_NAMESPACE} +``` ### Checkout your fork @@ -70,22 +149,6 @@ git remote set-url --push upstream no_push _Adding the `upstream` remote sets you up nicely for regularly [syncing your fork](https://help.github.com/articles/syncing-a-fork/)._ -### Requirements - -You must install these tools: - -1. [`go`](https://golang.org/doc/install): The language Tekton Pipelines is - built in -1. [`git`](https://help.github.com/articles/set-up-git/): For source control -1. [`ko`](https://github.com/google/ko): For development. `ko` version v0.5.1 or - higher is required for `pipeline` to work correctly. -1. [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/): For - interacting with your kube cluster - -Your [`$GOPATH`] setting is critical for `ko apply` to function properly: a -successful run will typically involve building pushing images instead of only -configuring Kubernetes resources. - ## Kubernetes cluster The recommended configuration is: @@ -170,67 +233,6 @@ To enable the Kubernetes that comes with Docker Desktop: --user=$(gcloud config get-value core/account) ``` -## Environment Setup - -To [run your controllers with `ko`](#install-pipeline) you'll need to set these -environment variables (we recommend adding them to your `.bashrc`): - -1. `GOPATH`: If you don't have one, simply pick a directory and add `export - GOPATH=...` -1. `$GOPATH/bin` on `PATH`: This is so that tooling installed via `go get` will - work properly. -1. 
`KO_DOCKER_REPO`: The docker repository to which developer images should be - pushed (e.g. `gcr.io/[gcloud-project]`). You can also - [run a local registry](https://docs.docker.com/registry/deploying/) and set - `KO_DOCKER_REPO` to reference the registry (e.g. at - `localhost:5000/mypipelineimages`). - -`.bashrc` example: - -```shell -export GOPATH="$HOME/go" -export PATH="${PATH}:${GOPATH}/bin" -export KO_DOCKER_REPO='gcr.io/my-gcloud-project-name' -``` - -Make sure to configure -[authentication](https://cloud.google.com/container-registry/docs/advanced-authentication#standalone_docker_credential_helper) -for your `KO_DOCKER_REPO` if required. To be able to push images to -`gcr.io/`, you need to run this once: - -```shell -gcloud auth configure-docker -``` - -The user you are using to interact with your k8s cluster must be a cluster admin -to create role bindings: - -```shell -# Using gcloud to get your current user -USER=$(gcloud config get-value core/account) -# Make that user a cluster admin -kubectl create clusterrolebinding cluster-admin-binding \ - --clusterrole=cluster-admin \ - --user="${USER}" -``` - -### Install in custom namespace - -1. 
To install into a different namespace you can use this script : - -```shell -#!/usr/bin/env bash -set -e - -# Set your target namespace here -TARGET_NAMESPACE=new-target-namespace - -ko resolve -f config | sed -e '/kind: Namespace/!b;n;n;s/:.*/: '"${TARGET_NAMESPACE}"'/' | \ - sed "s/namespace: tekton-pipelines$/namespace: ${TARGET_NAMESPACE}/" | \ - kubectl apply -f- -kubectl set env deployments --all SYSTEM_NAMESPACE=${TARGET_NAMESPACE} -n ${TARGET_NAMESPACE} -``` - ## Iterating While iterating on the project, you may need to: diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 8eac3d2863a..a9f958965b4 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -25,6 +25,8 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler/taskrun" "github.com/tektoncd/pipeline/pkg/version" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "knative.dev/pkg/controller" "knative.dev/pkg/injection" "knative.dev/pkg/injection/sharedmain" "knative.dev/pkg/signals" @@ -48,6 +50,12 @@ var ( imageDigestExporterImage = flag.String("imagedigest-exporter-image", "", "The container image containing our image digest exporter binary.") namespace = flag.String("namespace", corev1.NamespaceAll, "Namespace to restrict informer to. Optional, defaults to all namespaces.") versionGiven = flag.String("version", "devel", "Version of Tekton running") + qps = flag.Int("kube-api-qps", int(rest.DefaultQPS), "Maximum QPS to the master from this client") + burst = flag.Int("kube-api-burst", rest.DefaultBurst, "Maximum burst for throttle") + threadsPerController = flag.Int("threads-per-controller", controller.DefaultThreadsPerController, "Threads (goroutines) to create per controller") + disableHighAvailability = flag.Bool("disable-ha", false, "Whether to disable high-availability functionality for this component. 
This flag will be deprecated "+ + "and removed when we have promoted this feature to stable, so do not pass it without filing an "+ + "issue upstream!") ) func main() { @@ -68,7 +76,18 @@ func main() { if err := images.Validate(); err != nil { log.Fatal(err) } - sharedmain.MainWithContext(injection.WithNamespaceScope(signals.NewContext(), *namespace), ControllerLogKey, + controller.DefaultThreadsPerController = *threadsPerController + + cfg := sharedmain.ParseAndGetConfigOrDie() + // multiply by 2, no of controllers being created + cfg.QPS = 2 * float32(*qps) + cfg.Burst = 2 * *burst + + ctx := injection.WithNamespaceScope(signals.NewContext(), *namespace) + if *disableHighAvailability { + ctx = sharedmain.WithHADisabled(ctx) + } + sharedmain.MainWithConfig(ctx, ControllerLogKey, cfg, taskrun.NewController(*namespace, images), pipelinerun.NewController(*namespace, images), ) diff --git a/cmd/entrypoint/README.md b/cmd/entrypoint/README.md index 5faf5aa447d..883e9527731 100644 --- a/cmd/entrypoint/README.md +++ b/cmd/entrypoint/README.md @@ -1,10 +1,14 @@ # entrypoint This binary is used to override the entrypoint of a container by -wrapping it. In `tektoncd/pipeline` this is used to make sure `Task`'s -steps are executed in order, or for sidecars. +wrapping it and executing original entrypoint command in a subprocess. -The following flags are available : +Tekton uses this to make sure `TaskRun`s' steps are executed in order, only +after sidecars are ready and previous steps have completed successfully. + +## Flags + +The following flags are available: - `-entrypoint`: "original" command to be executed (as entrypoint). This will be executed as a sub-process on `entrypoint` @@ -16,20 +20,97 @@ The following flags are available : will either execute the sub-process (in case of `{{wait_file}}`) or skip the execution, write to `{{post_file}}.err` and return an error (`exitCode` >= 0) -- `-wait_file_content`: excepts the `wait_file` to add actual - content. 
It will continue watching for `wait_file` until it has +- `-wait_file_content`: expects the `wait_file` to contain actual + contents. It will continue watching for `wait_file` until it has content. +Any extra positional arguments are passed to the original entrypoint command. + +## Example + The following example of usage for `entrypoint` waits for -`/tekton/downward/ready` file to exist and have some content before -executing `/ko-app/bash -- -args mkdir -p /workspace/git-resource`, -and will write to `/tekton/tools/0` in case of success, or -`/tekton/tools/0.err` in case of failure. +`/tekton/tools/3` file to exist and executes the command `bash` with args +`echo` and `hello`, then writes the file `/tekton/tools/4`, or +`/tekton/tools/4.err` in case the command fails. ```shell entrypoint \ - -wait_file /tekton/downward/ready \ - -post_file /tekton/tools/0" \ - -wait_file_content \ - -entrypoint /ko-app/bash -- -args mkdir -p /workspace/git-resource + -wait_file /tekton/tools/3 \ + -post_file /tekton/tools/4 \ + -entrypoint bash -- \ + echo hello +``` + +## Waiting for Sidecars + +In cases where the TaskRun's Pod has sidecar containers -- including, possibly, +injected sidecars that Tekton itself didn't specify -- the first step should +also wait until all those sidecars have reported as ready. Starting before +sidecars are ready could lead to flaky errors if steps rely on the sidecar +being ready to succeed. + +To account for this, the Tekton controller starts TaskRun Pods with the first +step's entrypoint binary configured to wait for a special file provided by the +[Kubernetes Downward +API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#the-downward-api). +This allows Tekton to write a Pod annotation when all sidecars report as ready, +and for the value of that annotation to appear to the Pod as a file in a +Volume. 
To the Pod, that file always exists, but without content until the +annotation is set, so we instruct the entrypoint to wait for the `-wait_file` +to contain contents before proceeding. + +### Example + +The following example of usage for `entrypoint` waits for +`/tekton/downward/ready` file to exist and contain actual contents +(`-wait_file_content`), and executes the command `bash` with args +`echo` and `hello`, then writes the file `/tekton/tools/1`, or +`/tekton/tools/1.err` in case the command fails. + +```shell +entrypoint \ + -wait_file /tekton/downward/ready \ + -wait_file_content \ + -post_file /tekton/tools/1 \ + -entrypoint bash -- \ + echo hello +``` + +## `cp` Mode + +In order to make the `entrypoint` binary available to the user's steps, it gets +copied to a Volume that's shared with all the steps' containers. This is done +in an `initContainer` pre-step, that runs before steps start. + +To reduce external dependencies, the `entrypoint` binary actually copies +_itself_ to the shared Volume. When executed with the positional args of `cp + `, the `entrypoint` binary copies the `` file to `` and +exits. + +It's executed as an `initContainer` in the TaskRun's Pod like: + +``` +initContainers: +- image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/entrypoint + args: + - cp + - /ko-app/entrypoint # <-- path to the entrypoint binary inside the image + - /tekton/tools/entrypoint + volumeMounts: + - name: tekton-internal-tools + mountPath: /tekton/tools + +containers: +- image: user-image + command: + - /tekton/tools/entrypoint + ... args to entrypoint ... 
+ volumeMounts: + - name: tekton-internal-tools + mountPath: /tekton/tools + +volumes: +- name: tekton-internal-tools + volumeSource: + emptyDir: {} ``` diff --git a/cmd/entrypoint/main.go b/cmd/entrypoint/main.go index d675fcd8763..c8437a8a4e4 100644 --- a/cmd/entrypoint/main.go +++ b/cmd/entrypoint/main.go @@ -18,6 +18,7 @@ package main import ( "flag" + "io" "log" "os" "os/exec" @@ -42,6 +43,25 @@ var ( waitPollingInterval = time.Second ) +func cp(src, dst string) error { + s, err := os.Open(src) + if err != nil { + return err + } + defer s.Close() + + // Owner has permission to write and execute, and anybody has + // permission to execute. + d, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, 0311) + if err != nil { + return err + } + defer d.Close() + + _, err = io.Copy(d, s) + return err +} + func main() { // Add credential flags originally used in creds-init. gitcreds.AddFlags(flag.CommandLine) @@ -49,6 +69,19 @@ func main() { flag.Parse() + // If invoked in "cp mode" (`entrypoint cp `), simply copy + // the src path to the dst path. This is used to place the entrypoint + // binary in the tools directory, without requiring the cp command to + // exist in the base image. + if len(flag.Args()) == 3 && flag.Args()[0] == "cp" { + src, dst := flag.Args()[1], flag.Args()[2] + if err := cp(src, dst); err != nil { + log.Fatal(err) + } + log.Println("Copied", src, "to", dst) + return + } + // Copy creds-init credentials from secret volume mounts to /tekton/creds // This is done to support the expansion of a variable, $(credentials.path), that // resolves to a single place with all the stored credentials. 
diff --git a/cmd/git-init/main.go b/cmd/git-init/main.go index 9bc81dd7bc3..afbb43ab81b 100644 --- a/cmd/git-init/main.go +++ b/cmd/git-init/main.go @@ -62,7 +62,7 @@ func main() { { Key: "commit", Value: commit, - ResourceRef: v1beta1.PipelineResourceRef{ + ResourceRef: &v1beta1.PipelineResourceRef{ Name: resourceName, }, ResourceName: resourceName, @@ -70,7 +70,7 @@ func main() { { Key: "url", Value: fetchSpec.URL, - ResourceRef: v1beta1.PipelineResourceRef{ + ResourceRef: &v1beta1.PipelineResourceRef{ Name: resourceName, }, ResourceName: resourceName, diff --git a/cmd/imagedigestexporter/main.go b/cmd/imagedigestexporter/main.go index a33fd28f678..53db04d9a6a 100644 --- a/cmd/imagedigestexporter/main.go +++ b/cmd/imagedigestexporter/main.go @@ -67,7 +67,7 @@ func main() { Key: "digest", Value: digest.String(), ResourceName: imageResource.Name, - ResourceRef: v1beta1.PipelineResourceRef{ + ResourceRef: &v1beta1.PipelineResourceRef{ Name: imageResource.Name, }, }) @@ -75,7 +75,7 @@ func main() { Key: "url", Value: imageResource.URL, ResourceName: imageResource.Name, - ResourceRef: v1beta1.PipelineResourceRef{ + ResourceRef: &v1beta1.PipelineResourceRef{ Name: imageResource.Name, }, }) diff --git a/config/300-clustertask.yaml b/config/300-clustertask.yaml index 9d6616f817e..c985e6ab7b2 100644 --- a/config/300-clustertask.yaml +++ b/config/300-clustertask.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clustertasks.tekton.dev @@ -24,23 +24,28 @@ metadata: spec: group: tekton.dev preserveUnknownFields: false - validation: - openAPIV3Schema: - type: object - # One can use x-kubernetes-preserve-unknown-fields: true - # at the root of the schema (and inside any properties, additionalProperties) - # to get the traditional CRD behaviour that nothing is pruned, despite - # setting spec.preserveUnknownProperties: false. - # - # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ - # See issue: https://github.com/knative/serving/issues/912 - x-kubernetes-preserve-unknown-fields: true versions: - - name: v1alpha1 + - &version + name: v1alpha1 served: true storage: false - - name: v1beta1 - served: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - <<: *version + name: v1beta1 storage: true names: kind: ClusterTask @@ -49,13 +54,11 @@ spec: - tekton - tekton-pipelines scope: Cluster - # Opt into the status subresource so metadata.generation - # starts to increment - subresources: - status: {} conversion: strategy: Webhook - webhookClientConfig: - service: - name: tekton-pipelines-webhook - namespace: tekton-pipelines + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines diff --git a/config/300-condition.yaml b/config/300-condition.yaml index 7acde734738..13001edbd58 100644 --- a/config/300-condition.yaml +++ b/config/300-condition.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: conditions.tekton.dev @@ -23,6 +23,25 @@ metadata: version: "devel" spec: group: tekton.dev + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} names: kind: Condition plural: conditions @@ -30,8 +49,3 @@ spec: - tekton - tekton-pipelines scope: Namespaced - # Opt into the status subresource so metadata.generation - # starts to increment - subresources: - status: {} - version: v1alpha1 diff --git a/config/300-pipeline.yaml b/config/300-pipeline.yaml index 39c75cf39f0..b8f6ed5e8f9 100644 --- a/config/300-pipeline.yaml +++ b/config/300-pipeline.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: pipelines.tekton.dev @@ -24,23 +24,28 @@ metadata: spec: group: tekton.dev preserveUnknownFields: false - validation: - openAPIV3Schema: - type: object - # One can use x-kubernetes-preserve-unknown-fields: true - # at the root of the schema (and inside any properties, additionalProperties) - # to get the traditional CRD behaviour that nothing is pruned, despite - # setting spec.preserveUnknownProperties: false. 
- # - # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ - # See issue: https://github.com/knative/serving/issues/912 - x-kubernetes-preserve-unknown-fields: true versions: - - name: v1alpha1 + - &version + name: v1alpha1 served: true storage: false - - name: v1beta1 - served: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + - <<: *version + name: v1beta1 storage: true names: kind: Pipeline @@ -49,13 +54,11 @@ spec: - tekton - tekton-pipelines scope: Namespaced - # Opt into the status subresource so metadata.generation - # starts to increment - subresources: - status: {} conversion: strategy: Webhook - webhookClientConfig: - service: - name: tekton-pipelines-webhook - namespace: tekton-pipelines + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines diff --git a/config/300-pipelinerun.yaml b/config/300-pipelinerun.yaml index 1028cd62e9a..98f8798f4d1 100644 --- a/config/300-pipelinerun.yaml +++ b/config/300-pipelinerun.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: pipelineruns.tekton.dev @@ -24,23 +24,41 @@ metadata: spec: group: tekton.dev preserveUnknownFields: false - validation: - openAPIV3Schema: - type: object - # One can use x-kubernetes-preserve-unknown-fields: true - # at the root of the schema (and inside any properties, additionalProperties) - # to get the traditional CRD behaviour that nothing is pruned, despite - # setting spec.preserveUnknownProperties: false. - # - # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ - # See issue: https://github.com/knative/serving/issues/912 - x-kubernetes-preserve-unknown-fields: true versions: - - name: v1alpha1 + - &version + name: v1alpha1 served: true storage: false - - name: v1beta1 - served: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Succeeded + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" + - name: StartTime + type: date + jsonPath: .status.startTime + - name: CompletionTime + type: date + jsonPath: .status.completionTime + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - <<: *version + name: v1beta1 storage: true names: kind: PipelineRun @@ -52,26 +70,11 @@ spec: - pr - prs scope: Namespaced - additionalPrinterColumns: - - name: Succeeded - type: string - JSONPath: ".status.conditions[?(@.type==\"Succeeded\")].status" - - name: Reason - type: string - JSONPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" - - name: StartTime - type: date - JSONPath: .status.startTime - - name: CompletionTime - type: date - JSONPath: .status.completionTime - # Opt into the status subresource so metadata.generation - # starts to increment - subresources: - status: {} conversion: strategy: Webhook - webhookClientConfig: - service: - name: tekton-pipelines-webhook - namespace: tekton-pipelines + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines diff --git a/config/300-resource.yaml b/config/300-resource.yaml index c4a4d8d4d11..f85b3747fdb 100644 --- a/config/300-resource.yaml +++ b/config/300-resource.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: pipelineresources.tekton.dev @@ -23,6 +23,25 @@ metadata: version: "devel" spec: group: tekton.dev + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} names: kind: PipelineResource plural: pipelineresources @@ -30,8 +49,3 @@ spec: - tekton - tekton-pipelines scope: Namespaced - # Opt into the status subresource so metadata.generation - # starts to increment - subresources: - status: {} - version: v1alpha1 diff --git a/config/300-run.yaml b/config/300-run.yaml index dda73eaf357..7dd65222c68 100644 --- a/config/300-run.yaml +++ b/config/300-run.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: runs.tekton.dev @@ -24,21 +24,38 @@ metadata: spec: group: tekton.dev preserveUnknownFields: false - validation: - openAPIV3Schema: - type: object - # One can use x-kubernetes-preserve-unknown-fields: true - # at the root of the schema (and inside any properties, additionalProperties) - # to get the traditional CRD behaviour that nothing is pruned, despite - # setting spec.preserveUnknownProperties: false. 
- # - # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ - # See issue: https://github.com/knative/serving/issues/912 - x-kubernetes-preserve-unknown-fields: true versions: - name: v1alpha1 served: true storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Succeeded + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" + - name: StartTime + type: date + jsonPath: .status.startTime + - name: CompletionTime + type: date + jsonPath: .status.completionTime + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} names: kind: Run plural: runs @@ -46,26 +63,3 @@ spec: - tekton - tekton-pipelines scope: Namespaced - additionalPrinterColumns: - - name: Succeeded - type: string - JSONPath: ".status.conditions[?(@.type==\"Succeeded\")].status" - - name: Reason - type: string - JSONPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" - - name: StartTime - type: date - JSONPath: .status.startTime - - name: CompletionTime - type: date - JSONPath: .status.completionTime - # Opt into the status subresource so metadata.generation - # starts to increment - subresources: - status: {} - conversion: - strategy: Webhook - webhookClientConfig: - service: - name: tekton-pipelines-webhook - namespace: tekton-pipelines diff --git a/config/300-task.yaml b/config/300-task.yaml index 
4e3ef17c45b..69c3c506270 100644 --- a/config/300-task.yaml +++ b/config/300-task.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: tasks.tekton.dev @@ -24,23 +24,28 @@ metadata: spec: group: tekton.dev preserveUnknownFields: false - validation: - openAPIV3Schema: - type: object - # One can use x-kubernetes-preserve-unknown-fields: true - # at the root of the schema (and inside any properties, additionalProperties) - # to get the traditional CRD behaviour that nothing is pruned, despite - # setting spec.preserveUnknownProperties: false. - # - # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ - # See issue: https://github.com/knative/serving/issues/912 - x-kubernetes-preserve-unknown-fields: true versions: - - name: v1alpha1 + - &version + name: v1alpha1 served: true storage: false - - name: v1beta1 - served: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - <<: *version + name: v1beta1 storage: true names: kind: Task @@ -49,13 +54,11 @@ spec: - tekton - tekton-pipelines scope: Namespaced - # Opt into the status subresource so metadata.generation - # starts to increment - subresources: - status: {} conversion: strategy: Webhook - webhookClientConfig: - service: - name: tekton-pipelines-webhook - namespace: tekton-pipelines + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines diff --git a/config/300-taskrun.yaml b/config/300-taskrun.yaml index ccf434e1b5a..4ab0c9300a5 100644 --- a/config/300-taskrun.yaml +++ b/config/300-taskrun.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: taskruns.tekton.dev @@ -24,23 +24,41 @@ metadata: spec: group: tekton.dev preserveUnknownFields: false - validation: - openAPIV3Schema: - type: object - # One can use x-kubernetes-preserve-unknown-fields: true - # at the root of the schema (and inside any properties, additionalProperties) - # to get the traditional CRD behaviour that nothing is pruned, despite - # setting spec.preserveUnknownProperties: false. 
- # - # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ - # See issue: https://github.com/knative/serving/issues/912 - x-kubernetes-preserve-unknown-fields: true versions: - - name: v1alpha1 + - &version + name: v1alpha1 served: true storage: false - - name: v1beta1 - served: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Succeeded + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" + - name: StartTime + type: date + jsonPath: .status.startTime + - name: CompletionTime + type: date + jsonPath: .status.completionTime + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - <<: *version + name: v1beta1 storage: true names: kind: TaskRun @@ -52,26 +70,11 @@ spec: - tr - trs scope: Namespaced - additionalPrinterColumns: - - name: Succeeded - type: string - JSONPath: ".status.conditions[?(@.type==\"Succeeded\")].status" - - name: Reason - type: string - JSONPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" - - name: StartTime - type: date - JSONPath: .status.startTime - - name: CompletionTime - type: date - JSONPath: .status.completionTime - # Opt into the status subresource so metadata.generation - # starts to increment - subresources: - status: {} conversion: strategy: Webhook - webhookClientConfig: - service: - name: tekton-pipelines-webhook - namespace: tekton-pipelines 
+ webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines diff --git a/config/config-feature-flags.yaml b/config/config-feature-flags.yaml index 9313eeec2f1..0c45aca300b 100644 --- a/config/config-feature-flags.yaml +++ b/config/config-feature-flags.yaml @@ -58,3 +58,9 @@ data: # # See https://github.com/tektoncd/pipeline/issues/2080 for more info. running-in-environment-with-injected-sidecars: "true" + # Setting this flag to "true" will require that any Git SSH Secret + # offered to Tekton must have known_hosts included. + # + # See https://github.com/tektoncd/pipeline/issues/2981 for more + # info. + require-git-ssh-secret-known-hosts: "false" diff --git a/config/controller.yaml b/config/controller.yaml index 0d54a4ccf66..1c77940b719 100644 --- a/config/controller.yaml +++ b/config/controller.yaml @@ -69,8 +69,8 @@ spec: "-pr-image", "ko://github.com/tektoncd/pipeline/cmd/pullrequest-init", "-build-gcs-fetcher-image", "ko://github.com/tektoncd/pipeline/vendor/github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/cmd/gcs-fetcher", - # This is google/cloud-sdk:302.0.0-slim - "-gsutil-image", "google/cloud-sdk@sha256:27b2c22bf259d9bc1a291e99c63791ba0c27a04d2db0a43241ba0f1f20f4067f", + # This is gcr.io/google.com/cloudsdktool/cloud-sdk:302.0.0-slim + "-gsutil-image", "gcr.io/google.com/cloudsdktool/cloud-sdk@sha256:27b2c22bf259d9bc1a291e99c63791ba0c27a04d2db0a43241ba0f1f20f4067f", # The shell image must be root in order to create directories and copy files to PVCs. 
# gcr.io/distroless/base:debug-nonroot as of July 23, 2020 "-shell-image", "gcr.io/distroless/base@sha256:60f5ffe6fc481e9102747b043b3873a01893a5a8138f970c5f5fc06fb7494656" diff --git a/docs/api-spec.md b/docs/api-spec.md new file mode 100644 index 00000000000..ee1a4a4cd7c --- /dev/null +++ b/docs/api-spec.md @@ -0,0 +1,321 @@ +# Tekton Pipelines API Specification + + +- [Abstract](#abstract) +- [Background](#background) +- [Modifying This Specification](#modifying-this-specification) +- [Resource Overview](#resource-overview) + * [`TaskRun`](#-taskrun-) +- [Detailed Resources - v1beta1](#detailed-resources---v1beta1) + * [`TaskRun`](#-taskrun--1) + + [Metadata](#metadata) + + [Spec](#spec) + + [Status](#status) +- [Status Signalling](#status-signalling) +- [Listing Resources](#listing-resources) +- [Detailed Resource Types - v1beta1](#detailed-resource-types---v1beta1) + * [`ArrayOrString`](#arrayorstring) + * [`ContainerStateRunning`](#containerstaterunning) + * [`ContainerStateWaiting`](#containerstatewaiting) + * [`ContainerStateTerminated`](#containerstateterminated) + * [`EnvVar`](#envvar) + * [`Param`](#param) + * [`ParamSpec`](#paramspec) + * [`Step`](#step) + * [`StepState`](#stepstate) + * [`TaskResult`](#taskresult) + * [`TaskRunResult`](#taskrunresult) + * [`TaskSpec`](#taskspec) + * [`WorkspaceBinding`](#workspacebinding) + * [`WorkspaceDeclaration`](#workspacedeclaration) + + +## Abstract + +The Tekton Pipelines platform provides common abstractions for describing and executing container-based, run-to-completion workflows, typically in service of CI/CD scenarios. This document describes the structure, lifecycle and management of Tekton resources in the context of the [Kubernetes Resource Model](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/resource-management.md). 
An understanding of the Kubernetes API interface and the capabilities of [Kubernetes Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) is assumed. + +This document does not define the [runtime contract](https://tekton.dev/docs/pipelines/container-contract/) nor prescribe specific implementations of supporting services such as access control, observability, or resource management. + +This document makes reference in a few places to different profiles for Tekton installations. A profile in this context is a set of operations, resources, and fields that are accessible to a developer interacting with a Tekton installation. Currently, only a single (minimal) profile for Tekton Pipelines is defined, but additional profiles may be defined in the future to standardize advanced functionality. A minimal profile is one that implements all of the “MUST”, “MUST NOT”, and “REQUIRED” conditions of this document. + +## Background + +The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “NOT RECOMMENDED”, “MAY”, and “OPTIONAL” are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). + +There is no formal specification of the Kubernetes API and Resource Model. This document assumes Kubernetes 1.16 behavior; this behavior will typically be supported by many future Kubernetes versions. Additionally, this document may reference specific core Kubernetes resources; these references may be illustrative (i.e. an implementation on Kubernetes) or descriptive (i.e. this Kubernetes resource MUST be exposed). References to these core Kubernetes resources will be annotated as either illustrative or descriptive. + +## Modifying This Specification + +This spec is a living document, meaning new resources and fields may be added, and may transition from being OPTIONAL to RECOMMENDED to REQUIRED over time. 
In general a resource or field should not be added as REQUIRED directly, as this may cause unsuspecting previously-conformant implementations to suddenly no longer be conformant. These should be first OPTIONAL or RECOMMENDED, then change to be REQUIRED once a survey of conformant implementations indicates that doing so will not cause undue burden on any implementation. + +## Resource Overview + +The Tekton Pipelines API provides a set of API resources to manage run-to-completion workflows. Those are expressed as [Kubernetes Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) + +### `TaskRun` + +A `TaskRun` represents an instantiation of a single execution of a `Task`. It can describe the steps of the Task directly. + +Its HTTP API endpoint is `/apis/tekton.dev/v1beta1/[parent]/taskruns`, where `[parent]` can refer to any string identifying a grouping of `TaskRun`s. + +For example, in the Kubernetes implementation `[parent]` refers to a Kubernetes namespace. Other implementations could interpret the string differently, including enabling hierarchies of resources containing `TaskRun`s, for example the API endpoint `/apis/tekton.dev/v1beta1/folders/my-folder/project/my-project/taskruns` could represent a hierarchy of "projects" that own `TaskRun`s. + +| HTTP Verb | Requirement | +|---------------------------|------------------| +| Create (POST) | REQUIRED | +| Patch (PATCH)* | RECOMMENDED | +| Replace (PUT)** | RECOMMENDED | +| Delete (DELETE) | OPTIONAL | +| Read (GET) | REQUIRED | +| List (GET) | REQUIRED | +| Watch (GET) | OPTIONAL | +| DeleteCollection (DELETE) | OPTIONAL | + +\* Kubernetes only allows JSON merge patch for CRD types. It is recommended that if allowed, at least JSON Merge patch be made available. [JSON Merge Patch Spec (RFC 7386)](https://tools.ietf.org/html/rfc7386) + +\** NB: Support for cancellation depends on `Replace`. 
+ +## Detailed Resources - v1beta1 + +The following schema defines a set of REQUIRED or RECOMMENDED resource fields on the Tekton resource types. Whether a field is REQUIRED or RECOMMENDED is denoted in the "Requirement" column. + +Additional fields MAY be provided by particular implementations, however it is expected that most extensions will be accomplished via the `metadata.labels` and `metadata.annotations` fields, as Tekton implementations MAY validate supplied resources against these fields and refuse resources which specify unknown fields. + +Tekton implementations MUST NOT require `spec` fields outside this specification; to do so would break interoperability between such implementations and implementations which implement validation of field names. + +**NB:** All fields and resources not listed below are assumed to be **OPTIONAL**, not RECOMMENDED or REQUIRED. For example, at this time, support for `PipelineRun`s and for CRUD operations on `Task`s or `Pipeline`s is **OPTIONAL**. + +### `TaskRun` + +#### Metadata + +Standard Kubernetes [meta.v1/ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#objectmeta-v1-meta) resource. + +| Field Name | Field Type | Requirement | +|----------------------|--------------------|-------------| +| `name` | string | REQUIRED | +| `labels` | map | REQUIRED | +| `annotations` | map | REQUIRED | +| `creationTimestamp`* | string | REQUIRED | +| `uid` | string | RECOMMENDED | +| `resourceVersion` | string | RECOMMENDED | +| `generation` | int64 | RECOMMENDED | +| `namespace` | string | RECOMMENDED | +| `generateName`** | string | RECOMMENDED | + +\* `creationTimestamp` MUST be populated by the implementation, in [RFC3339](https://tools.ietf.org/html/rfc3339). + +** If `generateName` is supported by the implementation, when it is specified at creation, it MUST be prepended to a random string and set as the `name`, and not set on the subsequent response. 
+ +#### Spec + +| Field Name | Field Type | Requirement | +|-----------------------|----------------------|-------------| +| `params` | `[]Param` | REQUIRED | +| `taskSpec` | `TaskSpec` | REQUIRED | +| `workspaces` | `[]WorkspaceBinding` | REQUIRED | +| `status` | Enum:
- `""` (default)
- `"TaskRunCancelled"` | RECOMMENDED | +| `timeout` | string (duration) | RECOMMENDED | +| `serviceAccountName`^ | string | RECOMMENDED | + +^ In the Kubernetes implementation, `serviceAccountName` refers to a Kubernetes `ServiceAccount` resource that is assumed to exist in the same namespace. Other implementations MAY interpret this string differently, and impose other requirements on specified values. + +#### Status + +| Field Name | Field Type | Requirement | +|-----------------------|------------------------|-------------| +| `conditions` | see [#error-signaling] | REQUIRED | +| `startTime` | string | REQUIRED | +| `completionTime`* | string | REQUIRED | +| `steps` | `[]StepState` | REQUIRED | +| `observedGeneration` | int64 | RECOMMENDED | + +\* `startTime` and `completionTime` MUST be populated by the implementation, in [RFC3339](https://tools.ietf.org/html/rfc3339). + +## Status Signalling + +The Tekton Pipelines API uses the [Kubernetes Conditions convention](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties) to communicate status and errors to the user. + +`TaskRun`'s `status` field MUST have a `conditions` field, which must be a list of `Condition` objects of the following form: + +| Field | Type | Requirement | +|-----------------------|--------|-------------| +| `type` | string | REQUIRED | +| `status` | Enum:
- `"True"`
- `"False"`
- `"Unknown"` (default) | REQUIRED | +| `reason` | string | REQUIRED | +| `message` | string | REQUIRED | +| `severity` | Enum:
- `""` (default)
- `"Warning"`
- `"Info"` | REQUIRED | +| `lastTransitionTime`* | string | OPTIONAL | + +\* If `lastTransitionTime` is populated by the implementation, it must be in [RFC3339](https://tools.ietf.org/html/rfc3339). + +Additionally, the resource's `status.conditions` field MUST be managed as follows to enable clients to present useful diagnostic and error information to the user. + +If a resource describes that it must report a Condition of the `type` `Succeeded`, then it must report it in the following manner: + +* If the `status` field is `"True"`, that means the execution finished successfully. +* If the `status` field is `"False"`, that means the execution finished unsuccessfully -- the Condition's `reason` and `message` MUST include further diagnostic information. +* If the `status` field is `"Unknown"`, that means the execution is still ongoing, and clients can check again later until the Condition's `status` reports either `"True"` or `"False"`. + +Resources MAY report Conditions with other `type`s, but none are REQUIRED or RECOMMENDED. + +## Listing Resources + +Requests to list resources specify the following fields (based on [`meta.v1/ListOptions`](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ListOptions)): + +| Field Name | Field Type | Requirement | +|-------------------|------------|-------------| +| `continue` | string | REQUIRED | +| `limit` | int64 | REQUIRED | + +List responses have the following fields (based on [`meta.v1/ListMeta`](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ListMeta)): + +| Field Name | Field Type | Requirement | +|----------------------|------------|-------------| +| `continue` | string | REQUIRED | +| `items` | `[]Object` | REQUIRED | +| `remainingItemCount` | int64 | REQUIRED | +| `apiVersion` | string
(`"tekton.dev/v1beta1"`) | REQUIRED | +| `kind` | string
(e.g., `"ObjectList"`) | REQUIRED | +
+**NB:** All other fields inherited from [`ListOptions`] or [`ListMeta`] supported by the Kubernetes implementation (e.g., `fieldSelector`, `labelSelector`) are **OPTIONAL** for the purposes of this spec. + +**NB:** The sort order of items returned by list requests is unspecified at this time. + +## Detailed Resource Types - v1beta1 + +### `ArrayOrString` + +| Field Name | Field Type | Requirement | +|-------------|------------|-------------| +| `type` | Enum:
- `"string"` (default)
- `"array"` | REQUIRED | +| `stringVal` | string | REQUIRED | +| `arrayVal` | []string | REQUIRED | + +### `ContainerStateRunning` + +| Field Name | Field Type | Requirement | +|--------------|------------|-------------| +| `startedAt`* | string | REQUIRED | + +\* `startedAt` MUST be populated by the implementation, in [RFC3339](https://tools.ietf.org/html/rfc3339). + +### `ContainerStateWaiting` + +| Field Name | Field Type | Requirement | +|------------|------------|-------------| +| `reason` | string | REQUIRED | +| `message` | string | REQUIRED | + +### `ContainerStateTerminated` + +| Field Name | Field Type | Requirement | +|---------------|------------|-------------| +| `exitCode` | int32 | REQUIRED | +| `reason` | string | REQUIRED | +| `message` | string | REQUIRED | +| `startedAt`* | string | REQUIRED | +| `finishedAt`* | string | REQUIRED | + +\* `startedAt` and `finishedAt` MUST be populated by the implementation, in [RFC3339](https://tools.ietf.org/html/rfc3339). + +### `EnvVar` + +| Field Name | Field Type | Requirement | +|------------|------------|-------------| +| `name` | string | REQUIRED | +| `value` | string | REQUIRED | + +**NB:** All other [EnvVar](https://godoc.org/k8s.io/api/core/v1#EnvVar) types inherited from [core.v1/EnvVar](https://godoc.org/k8s.io/api/core/v1#EnvVar) and supported by the Kubernetes implementation (e.g., `valueFrom`) are **OPTIONAL** for the purposes of this spec. + +### `Param` + +| Field Name | Field Type | Requirement | +|------------|-----------------|-------------| +| `name` | string | REQUIRED | +| `value` | `ArrayOrString` | REQUIRED | + +### `ParamSpec` + +| Field Name | Field Type | Requirement | +|---------------|------------|-------------| +| `name` | string | REQUIRED | +| `description` | string | REQUIRED | +| `type` | Enum:
- `"string"` (default)
- `"array"` | REQUIRED | +| `default` | `ArrayOrString` | REQUIRED | + +### `Step` + +| Field Name | Field Type | Requirement | +|--------------|------------|-------------| +| `name` | string | REQUIRED | +| `image` | string | REQUIRED | +| `args` | []string | REQUIRED | +| `command` | []string | REQUIRED | +| `workingDir` | string | REQUIRED | +| `env` | `[]EnvVar` | REQUIRED | +| `script` | string | REQUIRED | + +**NB:** All other fields inherited from the [core.v1/Container](https://godoc.org/k8s.io/api/core/v1#Container) type supported by the Kubernetes implementation are **OPTIONAL** for the purposes of this spec. + +### `StepState` + +| Field Name | Field Type | Requirement | +|---------------|----------------------------|-------------| +| `name` | string | REQUIRED | +| `imageID` | string | REQUIRED | +| `waiting`* | `ContainerStateWaiting` | REQUIRED | +| `running`* | `ContainerStateRunning` | REQUIRED | +| `terminated`* | `ContainerStateTerminated` | REQUIRED | +| `taskResults` | `[]TaskRunResult` | REQUIRED | +| `taskSpec` | `TaskSpec` | REQUIRED | +
\* Only one of `waiting`, `running` or `terminated` can be returned at a time. 
+ +### `TaskResult` + +| Field Name | Field Type | Requirement | +|--------------|------------|-------------| +| `name` | string | REQUIRED | +| `description` | string | REQUIRED | + +### `TaskRunResult` + +| Field Name | Field Type | Requirement | +|------------|------------|-------------| +| `name` | string | REQUIRED | +| `value` | string | REQUIRED | + +### `TaskSpec` + +| Field Name | Field Type | Requirement | +|---------------|--------------------------|-------------| +| `params` | `[]ParamSpec` | REQUIRED | +| `steps` | `[]Step` | REQUIRED | +| `results` | `[]TaskResult` | REQUIRED | +| `workspaces` | `[]WorkspaceDeclaration` | REQUIRED | +| `description` | string | REQUIRED | + +### `WorkspaceBinding` + +| Field Name | Field Type | Requirement | +|------------|--------------|-------------| +| `name` | string | REQUIRED | +| `emptyDir` | empty struct | REQUIRED | + +**NB:** All other Workspace types supported by the Kubernetes implementation are **OPTIONAL** for the purposes of this spec. + +### `WorkspaceDeclaration` + +| Field Name | Field Type | Requirement | +|---------------|------------|-------------| +| `name` | string | REQUIRED | +| `description` | string | REQUIRED | +| `mountPath` | string | REQUIRED | +| `readOnly` | boolean | REQUIRED | diff --git a/docs/auth.md b/docs/auth.md index fc053de61bc..6b6eabfc5c1 100644 --- a/docs/auth.md +++ b/docs/auth.md @@ -121,7 +121,7 @@ The following are considerations for executing `Runs` as a non-root user: Specifying a UID that has no valid home directory results in authentication failure. - Since SSH authentication ignores the `$HOME` environment variable, you must either move or symlink the appropriate `Secret` files from the `$HOME` directory defined by Tekton (`/tekton/home`) to - the the non-root user's valid home directory to use SSH authentication for either Git or Docker. 
+ the non-root user's valid home directory to use SSH authentication for either Git or Docker. For an example of configuring SSH authentication in a non-root `securityContext`, see [`authenticating-git-commands`](../examples/v1beta1/taskruns/authenticating-git-commands.yaml). @@ -147,7 +147,7 @@ This section describes how to configure the following authentication schemes for ### Configuring `basic-auth` authentication for Git -This section descibes how to configure a `basic-auth` type `Secret` for use with Git. In the example below, +This section describes how to configure a `basic-auth` type `Secret` for use with Git. In the example below, before executing any `Steps` in the `Run`, Tekton creates a `~/.gitconfig` file containing the credentials specified in the `Secret`. When the `Steps` execute, Tekton uses those credentials to retrieve `PipelineResources` specified in the `Run`. @@ -218,7 +218,7 @@ specified in the `Secret`. When the `Steps` execute, Tekton uses those credentia ### Configuring `ssh-auth` authentication for Git -This section descibes how to configure an `ssh-auth` type `Secret` for use with Git. In the example below, +This section describes how to configure an `ssh-auth` type `Secret` for use with Git. In the example below, before executing any `Steps` in the `Run`, Tekton creates a `~/.ssh/config` file containing the SSH key specified in the `Secret`. When the `Steps` execute, Tekton uses this key to retrieve `PipelineResources` specified in the `Run`. @@ -504,9 +504,12 @@ https://user2:pass2@url2.com Given hostnames, private keys, and `known_hosts` of the form: `url{n}.com`, `key{n}`, and `known_hosts{n}`, Tekton generates the following. -If no value is specified for `known_hosts`, Tekton configures SSH to accept +By default, if no value is specified for `known_hosts`, Tekton configures SSH to accept **any public key** returned by the server on first query. 
Tekton does this by setting Git's `core.sshCommand` variable to `ssh -o StrictHostKeyChecking=accept-new`. +This behaviour can be prevented +[using a feature-flag: `require-git-ssh-secret-known-hosts`](./install.md#customizing-the-pipelines-controller-behavior). +Set this flag to `true` and all Git SSH Secrets _must_ include a `known_hosts`. ``` === ~/.ssh/id_key1 === diff --git a/docs/container-contract.md b/docs/container-contract.md index 4c6cddce7da..4052ee5631c 100644 --- a/docs/container-contract.md +++ b/docs/container-contract.md @@ -28,7 +28,7 @@ embed a script within a `Step`, **do not** specify a `command` value. For exampl If you do not specify a `command` value, the Pipelines controller performs a lookup for the `entrypoint` value in the associated remote container registry. If the image is in a private registry, you must include an [`ImagePullSecret`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) -value in the the service account definition used by the `Task`. +value in the service account definition used by the `Task`. The Pipelines controller uses this value unless the service account is not defined, at which point it assumes the value of `default`. diff --git a/docs/install.md b/docs/install.md index 1f1897b2ac2..fe7c1a8bb36 100644 --- a/docs/install.md +++ b/docs/install.md @@ -271,7 +271,7 @@ The example below customizes the following: - the default service account from `default` to `tekton`. - the default timeout from 60 minutes to 20 minutes. - the default `app.kubernetes.io/managed-by` label is applied to all Pods created to execute `TaskRuns`. -- the default Pod template to include a node selector to select the node where the Pod will be scheduled by default. +- the default Pod template to include a node selector to select the node where the Pod will be scheduled by default. 
A list of supported fields is available [here](https://github.com/tektoncd/pipeline/blob/master/docs/podtemplates.md#supported-fields). For more information, see [`PodTemplate` in `TaskRuns`](./taskruns.md#specifying-a-pod-template) or [`PodTemplate` in `PipelineRuns`](./pipelineruns.md#specifying-a-pod-template). - the default `Workspace` configuration can be set for any `Workspaces` that a Task declares but that a TaskRun does not explicitly provide @@ -327,6 +327,12 @@ TaskRuns with no Sidecars specified. Enabling this option should decrease the ti start running. However, for clusters that use injected sidecars e.g. istio enabling this option can lead to unexpected behavior. +- `require-git-ssh-secret-known-hosts`: set this flag to `"true"` to require that +Git SSH Secrets include a `known_hosts` field. This ensures that a git remote server's +key is validated before data is accepted from it when authenticating over SSH. Secrets +that don't include a `known_hosts` will result in the TaskRun failing validation and +not running. + For example: ```yaml diff --git a/docs/pipelines.md b/docs/pipelines.md index b3c5d8f01b3..bb38fa97bc3 100644 --- a/docs/pipelines.md +++ b/docs/pipelines.md @@ -115,6 +115,7 @@ spec: For more information, see: - [Using `Workspaces` in `Pipelines`](workspaces.md#using-workspaces-in-pipelines) - The [`Workspaces` in a `PipelineRun`](../examples/v1beta1/pipelineruns/workspaces.yaml) code example +- The [variables available in a `PipelineRun`](variables.md#variables-available-in-a-pipeline), including `workspaces..bound`. ## Specifying `Parameters` @@ -324,13 +325,13 @@ To run a `Task` only when certain conditions are met, it is possible to _guard_ The components of `WhenExpressions` are `Input`, `Operator` and `Values`: - `Input` is the input for the `WhenExpression` which can be static inputs or variables ([`Parameters`](#specifying-parameters) or [`Results`](#using-results)). 
If the `Input` is not provided, it defaults to an empty string. - `Operator` represents an `Input`'s relationship to a set of `Values`. A valid `Operator` must be provided, which can be either `in` or `notin`. -- `Values` is an array of string values. The `Values` array must be provided and be non-empty. It can contain static values or variables ([`Parameters`](#specifying-parameters) or [`Results`](#using-results)). +- `Values` is an array of string values. The `Values` array must be provided and be non-empty. It can contain static values or variables ([`Parameters`](#specifying-parameters), [`Results`](#using-results) or [a Workspaces's `bound` state](#specifying-workspaces)). The [`Parameters`](#specifying-parameters) are read from the `Pipeline` and [`Results`](#using-results) are read directly from previous [`Tasks`](#adding-tasks-to-the-pipeline). Using [`Results`](#using-results) in a `WhenExpression` in a guarded `Task` introduces a resource dependency on the previous `Task` that produced the `Result`. The declared `WhenExpressions` are evaluated before the `Task` is run. If all the `WhenExpressions` evaluate to `True`, the `Task` is run. If any of the `WhenExpressions` evaluate to `False`, the `Task` is not run and the `Task` is listed in the [`Skipped Tasks` section of the `PipelineRunStatus`](pipelineruns.md#monitoring-execution-status). -In these examples, `first-create-file` task will only be executed if the `path` parameter is `README.md` and `echo-file-exists` task will only be executed if the `exists` result from `check-file` task is `yes`. +In these examples, `first-create-file` task will only be executed if the `path` parameter is `README.md`, `echo-file-exists` task will only be executed if the `exists` result from `check-file` task is `yes` and `run-lint` task will only be executed if the `lint-config` optional workspace has been provided by a PipelineRun. 
```yaml tasks: @@ -350,6 +351,15 @@ tasks: values: ["yes"] taskRef: name: echo-file-exists +--- +tasks: + - name: run-lint + when: + - input: "$(workspaces.lint-config.bound)" + operator: in + values: ["true"] + taskRef: + name: lint-source ``` For an end-to-end example, see [PipelineRun with WhenExpressions](../examples/v1beta1/pipelineruns/pipelinerun-with-when-expressions.yaml). @@ -362,6 +372,7 @@ There are a lot of scenarios where `WhenExpressions` can be really useful. Some - Checking if a git file has changed in the previous commits - Checking if an image exists in the registry - Checking if the name of a CI job matches +- Checking if an optional Workspace has been provided ### Guard `Task` execution using `Conditions` diff --git a/docs/tasks.md b/docs/tasks.md index a9f020f934a..17d74264b86 100644 --- a/docs/tasks.md +++ b/docs/tasks.md @@ -403,6 +403,8 @@ a `results` field but it's the responsibility of the `Task` to generate its cont It's important to note that Tekton does not perform any processing on the contents of results; they are emitted verbatim from your Task including any leading or trailing whitespace characters. Make sure to write only the precise string you want returned from your `Task` into the `/tekton/results/` files that your `Task` creates. +You can use [`$(results.name.path)`](https://github.com/tektoncd/pipeline/blob/master/docs/variables.md#variables-available-in-a-task) +to avoid having to hardcode this path. In the example below, the `Task` specifies two files in the `results` field: `current-date-unix-timestamp` and `current-date-human-readable`. 
@@ -426,12 +428,12 @@ spec: image: bash:latest script: | #!/usr/bin/env bash - date +%s | tee /tekton/results/current-date-unix-timestamp + date +%s | tee $(results.current-date-unix-timestamp.path) - name: print-date-human-readable image: bash:latest script: | #!/usr/bin/env bash - date | tee /tekton/results/current-date-human-readable + date | tee $(results.current-date-human-readable.path) ``` The stored results can be used [at the `Task` level](./pipelines.md#configuring-execution-results-at-the-task-level) diff --git a/docs/variables.md b/docs/variables.md index 06d7fcc38fe..2c75b907737 100644 --- a/docs/variables.md +++ b/docs/variables.md @@ -7,6 +7,7 @@ weight: 15 # Variable Substitutions Supported by `Tasks` and `Pipelines` This page documents the variable substitutions supported by `Tasks` and `Pipelines`. +**Note:** Tekton does not escape the contents of variables. Task authors are responsible for properly escaping a variable's value according to the shell, image or scripting language that the variable will be used in. ## Variables available in a `Pipeline` @@ -14,6 +15,7 @@ This page documents the variable substitutions supported by `Tasks` and `Pipelin | -------- | ----------- | | `params.` | The value of the parameter at runtime. | | `tasks..results.` | The value of the `Task's` result. Can alter `Task` execution order within a `Pipeline`.) | +| `workspaces..bound` | Whether a `Workspace` has been bound or not. "false" if the `Workspace` declaration has `optional: true` and the Workspace binding was omitted by the PipelineRun. | | `context.pipelineRun.name` | The name of the `PipelineRun` that this `Pipeline` is running in. | | `context.pipelineRun.namespace` | The namespace of the `PipelineRun` that this `Pipeline` is running in. | | `context.pipelineRun.uid` | The uid of the `PipelineRun` that this `Pipeline` is running in. 
| @@ -28,7 +30,8 @@ This page documents the variable substitutions supported by `Tasks` and `Pipelin | `resources.inputs..path` | The path to the input resource's directory. | | `resources.outputs..path` | The path to the output resource's directory. | | `results..path` | The path to the file where the `Task` writes its results data. | -| `workspaces..path` | The path to the mounted `Workspace`. | +| `workspaces..path` | The path to the mounted `Workspace`. Empty string if an optional `Workspace` has not been provided by the TaskRun. | +| `workspaces..bound` | Whether a `Workspace` has been bound or not. "false" if an optional `Workspace` has not been provided by the TaskRun. | | `workspaces..claim` | The name of the `PersistentVolumeClaim` specified as a volume source for the `Workspace`. Empty string for other volume types. | | `workspaces..volume` | The name of the volume populating the `Workspace`. | | `credentials.path` | The path to credentials injected from Secrets with matching annotations. | @@ -155,6 +158,7 @@ variable via `resources.inputs..` or | `Task` | `spec.sidecars[].volumemounts.name` | | `Task` | `spec.sidecars[].volumemounts.mountpath` | | `Task` | `spec.sidecars[].volumemounts.subpath` | +| `Task` | `spec.sidecars[].script` | | `Pipeline` | `spec.tasks[].params[].value` | | `Pipeline` | `spec.tasks[].conditions[].params[].value` | -| `Pipeline` | `spec.results[].value` | \ No newline at end of file +| `Pipeline` | `spec.results[].value` | diff --git a/docs/workspaces.md b/docs/workspaces.md index ea348079609..5644caaacf2 100644 --- a/docs/workspaces.md +++ b/docs/workspaces.md @@ -72,6 +72,18 @@ specific `Volume` information to use for the `Workspaces` used by each `Pipeline `PipelineRuns` have the added responsibility of ensuring that whatever `Volume` type they provide can be safely and correctly shared across multiple `Tasks`. +### Optional `Workspaces` + +Both Tasks and Pipelines can declare a Workspace "optional". 
When an optional Workspace +is declared the TaskRun or PipelineRun may omit a Workspace Binding for that Workspace. +The Task or Pipeline behaviour may change when the Binding is omitted. This feature has +many uses: + +- A Task may optionally accept credentials to run authenticated commands. +- A Pipeline may accept optional configuration that changes the linting or compilation +parameters used. +- An optional build cache may be provided to speed up compile times. + ## Configuring `Workspaces` This section describes how to configure one or more `Workspaces` in a `TaskRun`. @@ -82,14 +94,15 @@ To configure one or more `Workspaces` in a `Task`, add a `workspaces` list with - `name` - (**required**) A **unique** string identifier that can be used to refer to the workspace - `description` - An informative string describing the purpose of the `Workspace` -- `readOnly` - A boolean declaring whether the `Task` will write to the `Workspace`. +- `readOnly` - A boolean declaring whether the `Task` will write to the `Workspace`. Defaults to `false`. +- `optional` - A boolean indicating whether a TaskRun can omit the `Workspace`. Defaults to `false`. - `mountPath` - A path to a location on disk where the workspace will be available to `Steps`. Relative paths will be prepended with `/workspace`. If a `mountPath` is not provided the workspace will be placed by default at `/workspace/` where `` is the workspace's unique name. - + Note the following: - + - A `Task` definition can include as many `Workspaces` as it needs. It is recommended that `Tasks` use **at most** one _writeable_ `Workspace`. - A `readOnly` `Workspace` will have its volume mounted as read-only. Attempting to write @@ -98,9 +111,7 @@ Note the following: start with the name of a directory. For example, a `mountPath` of `"/foobar"` is absolute and exposes the `Workspace` at `/foobar` inside the `Task's` `Steps`, but a `mountPath` of `"foobar"` is relative and exposes the `Workspace` at `/workspace/foobar`. 
-- A default `Workspace` configuration can be set for any `Workspaces` that a Task declares but that a TaskRun - does not explicitly provide. It can be set in the `config-defaults` ConfigMap in `default-task-run-workspace-binding`. - + Below is an example `Task` definition that includes a `Workspace` called `messages` to which the `Task` writes a message: ```yaml @@ -111,19 +122,44 @@ spec: script: | #!/usr/bin/env bash set -xe - echo hello! > $(workspaces.messages.path)/message + if [ "$(workspaces.messages.bound)" == "true" ] ; then + echo hello! > $(workspaces.messages.path)/message + fi workspaces: - name: messages - description: The folder where we write the message to + description: | + The folder where we write the message to. If no workspace + is provided then the message will not be written. + optional: true mountPath: /custom/path/relative/to/root ``` +#### Setting a Default TaskRun Workspace Binding + +An organization may want to specify default Workspace configuration for TaskRuns. This allows users to +use Tasks without having to know the specifics of Workspaces - they can simply rely on the platform +to use the default configuration when a Workspace is missing. To support this Tekton allows a default +Workspace Binding to be specified for TaskRuns. When the TaskRun executes, any Workspaces that a Task +requires but which are not provided by the TaskRun will be bound with the default configuration. + +The configuration for the default Workspace Binding is added to the `config-defaults` ConfigMap, under +the `default-task-run-workspace-binding` key. For an example, see the [Customizing basic execution +parameters](./install.md#customizing-basic-execution-parameters) section of the install doc. + +**Note:** the default configuration is used for any _required_ Workspace declared by a Task. Optional +Workspaces are not populated with the default binding. This is because a Task's behaviour will typically +differ slightly when an optional Workspace is bound. 
+ #### Using `Workspace` variables in `Tasks` The following variables make information about `Workspaces` available to `Tasks`: - `$(workspaces..path)` - specifies the path to a `Workspace` - where `` is the name of the `Workspace`. + where `` is the name of the `Workspace`. This will be an + empty string when a Workspace is declared optional and not provided + by a TaskRun. +- `$(workspaces..bound)` - either `true` or `false`, specifies + whether a workspace was bound. Always `true` if the workspace is required. - `$(workspaces..claim)` - specifies the name of the `PersistentVolumeClaim` used as a volume source for the `Workspace` where `` is the name of the `Workspace`. If a volume source other than `PersistentVolumeClaim` is used, an empty string is returned. - `$(workspaces..volume)`- specifies the name of the `Volume` @@ -139,7 +175,7 @@ its own `workspaces` list. Each entry in the list contains the following fields: - `subPath` - An optional subdirectory on the `Volume` to store data for that `Workspace` The entry must also include one `VolumeSource`. See [Specifying `VolumeSources` in `Workspaces`](#specifying-volumesources-in-workspaces) for more information. - + **Caution:** - The `Workspaces` declared in a `Task` must be available when executing the associated `TaskRun`. Otherwise, the `TaskRun` will fail. @@ -187,6 +223,8 @@ data within that `Workspace`. 
spec: workspaces: - name: pipeline-ws1 # Name of the workspace in the Pipeline + - name: pipeline-ws2 + optional: true tasks: - name: use-ws-from-pipeline taskRef: diff --git a/examples/v1beta1/pipelineruns/optional-workspaces.yaml b/examples/v1beta1/pipelineruns/optional-workspaces.yaml new file mode 100644 index 00000000000..e3aa86743ce --- /dev/null +++ b/examples/v1beta1/pipelineruns/optional-workspaces.yaml @@ -0,0 +1,86 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-source-code-configmap +data: + main.js: | + console.log("Hello, World!"); +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: run-js +spec: + workspaces: + - name: prelaunch + optional: true + - name: launch + steps: + - image: node:lts-alpine3.11 + script: | + #!/usr/bin/env sh + if [ $(workspaces.prelaunch.bound) == "true" ] ; then + node "$(workspaces.prelaunch.path)/init.js" + else + echo "Skipping prelaunch." + fi + if [ -f "$(workspaces.launch.path)/main.js" ] ; then + node "$(workspaces.launch.path)/main.js" + else + echo "Error: missing main.js file in launch workspace!" + exit 1 + fi +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: echo +spec: + params: + - name: values + type: array + steps: + - image: alpine + command: ["echo"] + args: ["$(params.values[*])"] +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: optional-workspaces- +spec: + workspaces: + - name: launch + configMap: + name: example-source-code-configmap + pipelineSpec: + workspaces: + - name: launch + description: | + The program to run. Must provide a main.js at its root. + - name: prelaunch + optional: true + description: | + Prelaunch program to run before the launch program. Use this + to set up any environment-specific requirements. Must provide + an init.js file at its root. + tasks: + - name: print-bound-state + taskRef: + name: echo + params: + - name: values + value: + - "Was a prelaunch workspace provided? 
" + - $(workspaces.prelaunch.bound) + - "\n" + - "Was a launch workspace provided? " + - "$(workspaces.launch.bound)" + - "\n" + - name: run-js + runAfter: [print-bound-state] + workspaces: + - name: launch + workspace: launch + taskRef: + name: run-js diff --git a/examples/v1beta1/pipelineruns/using-optional-workspaces-in-when-expressions.yaml b/examples/v1beta1/pipelineruns/using-optional-workspaces-in-when-expressions.yaml new file mode 100644 index 00000000000..b79c7f98589 --- /dev/null +++ b/examples/v1beta1/pipelineruns/using-optional-workspaces-in-when-expressions.yaml @@ -0,0 +1,66 @@ +# This example demonstrates using the workspaces..bound variable +# in a when expression to selectively run different portions of a Pipeline +# based on the presence of an optional workspace. +# +# In the PipelineRun below an optional message-of-the-day workspace is accepted +# by the Pipeline. If that workspace is provided then the print-motd task is +# executed. If that workspace is not provided then a print-default-motd task +# is run instead. We supply a ConfigMap for the workspace and so the print-motd +# task ends up running and printing the contents of each entry in the ConfigMap. +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-motd +data: + message_1: "Hello, good morning!" + message_2: "Hello, good evening!" +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: optional-workspace-when- +spec: + serviceAccountName: 'default' + workspaces: + - name: message-of-the-day + configMap: + name: test-motd + pipelineSpec: + workspaces: + - name: message-of-the-day + optional: true + description: | + If a workspace is provided here then every file at the root of the workspace + will be printed. 
+ tasks: + - name: print-motd + when: + - input: "$(workspaces.message-of-the-day.bound)" + operator: in + values: ["true"] + workspaces: + - name: message-of-the-day + workspace: message-of-the-day + taskSpec: + workspaces: + - name: message-of-the-day + steps: + - image: alpine + script: | + #!/usr/bin/env ash + for f in "$(workspaces.message-of-the-day.path)"/* ; do + echo "Message from $f:" + cat "$f" + echo "" # add newline + done + - name: print-default-motd + when: + - input: "$(workspaces.message-of-the-day.bound)" + operator: in + values: ["false"] + taskSpec: + steps: + - name: print-default + image: alpine + script: | + echo "No message-of-the-day workspace was provided. This is the default MOTD instead!" diff --git a/examples/v1beta1/taskruns/optional-workspaces.yaml b/examples/v1beta1/taskruns/optional-workspaces.yaml new file mode 100644 index 00000000000..6fe9071da29 --- /dev/null +++ b/examples/v1beta1/taskruns/optional-workspaces.yaml @@ -0,0 +1,25 @@ +apiVersion: tekton.dev/v1beta1 +kind: TaskRun +metadata: + generateName: optional-workspaces- +spec: + workspaces: + - name: source-code + emptyDir: {} + taskSpec: + workspaces: + - name: source-code + optional: true + - name: extra-config + optional: true + steps: + - name: check-workspaces + image: alpine:3.12.0 + script: | + if [ "$(workspaces.source-code.bound)" == "true" ]; then + printf "Source code workspace was provided at %s!\n" "$(workspaces.source-code.path)" + fi + if [ "$(workspaces.extra-config.bound)" == "true" ]; then + printf "Unexpected extra configuration mounted at %s\n" "$(workspaces.extra-config.path)" + exit 1 + fi diff --git a/internal/builder/v1alpha1/condition.go b/internal/builder/v1alpha1/condition.go index ccef5a9bee2..c5d25f6b398 100644 --- a/internal/builder/v1alpha1/condition.go +++ b/internal/builder/v1alpha1/condition.go @@ -17,10 +17,9 @@ limitations under the License. 
package builder import ( + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" ) // ConditionOp is an operation which modifies a Condition struct. diff --git a/internal/builder/v1alpha1/pipeline.go b/internal/builder/v1alpha1/pipeline.go index 9c72df3a35d..fbad8813645 100644 --- a/internal/builder/v1alpha1/pipeline.go +++ b/internal/builder/v1alpha1/pipeline.go @@ -465,26 +465,6 @@ func PipelineRunNodeSelector(values map[string]string) PipelineRunSpecOp { } } -// PipelineRunTolerations sets the Node selector to the PipelineRunSpec. -func PipelineRunTolerations(values []corev1.Toleration) PipelineRunSpecOp { - return func(prs *v1alpha1.PipelineRunSpec) { - if prs.PodTemplate == nil { - prs.PodTemplate = &v1alpha1.PodTemplate{} - } - prs.PodTemplate.Tolerations = values - } -} - -// PipelineRunAffinity sets the affinity to the PipelineRunSpec. -func PipelineRunAffinity(affinity *corev1.Affinity) PipelineRunSpecOp { - return func(prs *v1alpha1.PipelineRunSpec) { - if prs.PodTemplate == nil { - prs.PodTemplate = &v1alpha1.PodTemplate{} - } - prs.PodTemplate.Affinity = affinity - } -} - // PipelineRunPipelineSpec adds a PipelineSpec to the PipelineRunSpec. // Any number of PipelineSpec modifiers can be passed to transform it. func PipelineRunPipelineSpec(ops ...PipelineSpecOp) PipelineRunSpecOp { diff --git a/internal/builder/v1alpha1/step.go b/internal/builder/v1alpha1/step.go index b489ce71b7f..695f4671b8a 100644 --- a/internal/builder/v1alpha1/step.go +++ b/internal/builder/v1alpha1/step.go @@ -16,7 +16,6 @@ package builder import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" ) // StepOp is an operation which modifies a Container struct. 
@@ -87,64 +86,3 @@ func StepScript(script string) StepOp { step.Script = script } } - -// StepResources adds ResourceRequirements to the Container (step). -func StepResources(ops ...ResourceRequirementsOp) StepOp { - return func(step *v1alpha1.Step) { - rr := &corev1.ResourceRequirements{} - for _, op := range ops { - op(rr) - } - step.Resources = *rr - } -} - -// StepLimits adds Limits to the ResourceRequirements. -func StepLimits(ops ...ResourceListOp) ResourceRequirementsOp { - return func(rr *corev1.ResourceRequirements) { - limits := corev1.ResourceList{} - for _, op := range ops { - op(limits) - } - rr.Limits = limits - } -} - -// StepRequests adds Requests to the ResourceRequirements. -func StepRequests(ops ...ResourceListOp) ResourceRequirementsOp { - return func(rr *corev1.ResourceRequirements) { - requests := corev1.ResourceList{} - for _, op := range ops { - op(requests) - } - rr.Requests = requests - } -} - -// StepCPU sets the CPU resource on the ResourceList. -func StepCPU(val string) ResourceListOp { - return func(r corev1.ResourceList) { - r[corev1.ResourceCPU] = resource.MustParse(val) - } -} - -// StepMemory sets the memory resource on the ResourceList. -func StepMemory(val string) ResourceListOp { - return func(r corev1.ResourceList) { - r[corev1.ResourceMemory] = resource.MustParse(val) - } -} - -// StepEphemeralStorage sets the ephemeral storage resource on the ResourceList. -func StepEphemeralStorage(val string) ResourceListOp { - return func(r corev1.ResourceList) { - r[corev1.ResourceEphemeralStorage] = resource.MustParse(val) - } -} - -// StepTerminationMessagePath sets the source of the termination message. 
-func StepTerminationMessagePath(terminationMessagePath string) StepOp { - return func(step *v1alpha1.Step) { - step.TerminationMessagePath = terminationMessagePath - } -} diff --git a/internal/builder/v1alpha1/task.go b/internal/builder/v1alpha1/task.go index 42314b000c6..d45da4cab91 100644 --- a/internal/builder/v1alpha1/task.go +++ b/internal/builder/v1alpha1/task.go @@ -313,19 +313,6 @@ func TaskResourcesOutput(name string, resourceType v1alpha1.PipelineResourceType } } -// TaskResultsOutput adds a TaskResult as Outputs to the TaskResources -func TaskResultsOutput(name, desc string, ops ...TaskResultOp) TaskResultOp { - return func(result *v1beta1.TaskResult) { - r := &v1beta1.TaskResult{ - Name: name, - Description: desc, - } - for _, op := range ops { - op(r) - } - } -} - // TaskInputs sets inputs to the TaskSpec. // Any number of Inputs modifier can be passed to transform it. func TaskInputs(ops ...InputsOp) TaskSpecOp { @@ -551,36 +538,6 @@ func TaskRunNodeSelector(values map[string]string) TaskRunSpecOp { } } -// TaskRunTolerations sets the Tolerations to the TaskRunSpec. -func TaskRunTolerations(values []corev1.Toleration) TaskRunSpecOp { - return func(spec *v1alpha1.TaskRunSpec) { - if spec.PodTemplate == nil { - spec.PodTemplate = &v1alpha1.PodTemplate{} - } - spec.PodTemplate.Tolerations = values - } -} - -// TaskRunAffinity sets the Affinity to the TaskRunSpec. -func TaskRunAffinity(affinity *corev1.Affinity) TaskRunSpecOp { - return func(spec *v1alpha1.TaskRunSpec) { - if spec.PodTemplate == nil { - spec.PodTemplate = &v1alpha1.PodTemplate{} - } - spec.PodTemplate.Affinity = affinity - } -} - -// TaskRunPodSecurityContext sets the SecurityContext to the TaskRunSpec (through PodTemplate). 
-func TaskRunPodSecurityContext(context *corev1.PodSecurityContext) TaskRunSpecOp { - return func(spec *v1alpha1.TaskRunSpec) { - if spec.PodTemplate == nil { - spec.PodTemplate = &v1alpha1.PodTemplate{} - } - spec.PodTemplate.SecurityContext = context - } -} - // StateTerminated sets Terminated to the StepState. func StateTerminated(exitcode int) StepStateOp { return func(s *v1alpha1.StepState) { @@ -819,16 +776,6 @@ func TaskRunInputs(ops ...TaskRunInputsOp) TaskRunSpecOp { } } -// TaskRunInputsParam add a param, with specified name and value, to the TaskRunInputs. -func TaskRunInputsParam(name, value string, additionalValues ...string) TaskRunInputsOp { - return func(i *v1alpha1.TaskRunInputs) { - i.Params = append(i.Params, v1alpha1.Param{ - Name: name, - Value: *v1beta1.NewArrayOrString(value, additionalValues...), - }) - } -} - // TaskRunInputsResource adds a resource, with specified name, to the TaskRunInputs. // Any number of TaskResourceBinding modifier can be passed to transform it. func TaskRunInputsResource(name string, ops ...TaskResourceBindingOp) TaskRunInputsOp { diff --git a/internal/builder/v1beta1/pipeline.go b/internal/builder/v1beta1/pipeline.go index 27e869e51ab..edf9df3b235 100644 --- a/internal/builder/v1beta1/pipeline.go +++ b/internal/builder/v1beta1/pipeline.go @@ -518,26 +518,6 @@ func PipelineRunNodeSelector(values map[string]string) PipelineRunSpecOp { } } -// PipelineRunTolerations sets the Node selector to the PipelineRunSpec. -func PipelineRunTolerations(values []corev1.Toleration) PipelineRunSpecOp { - return func(prs *v1beta1.PipelineRunSpec) { - if prs.PodTemplate == nil { - prs.PodTemplate = &v1beta1.PodTemplate{} - } - prs.PodTemplate.Tolerations = values - } -} - -// PipelineRunAffinity sets the affinity to the PipelineRunSpec. 
-func PipelineRunAffinity(affinity *corev1.Affinity) PipelineRunSpecOp { - return func(prs *v1beta1.PipelineRunSpec) { - if prs.PodTemplate == nil { - prs.PodTemplate = &v1beta1.PodTemplate{} - } - prs.PodTemplate.Affinity = affinity - } -} - // PipelineRunPipelineSpec adds a PipelineSpec to the PipelineRunSpec. // Any number of PipelineSpec modifiers can be passed to transform it. func PipelineRunPipelineSpec(ops ...PipelineSpecOp) PipelineRunSpecOp { diff --git a/internal/builder/v1beta1/pod.go b/internal/builder/v1beta1/pod.go index 508ae3dc138..dd04195cf64 100644 --- a/internal/builder/v1beta1/pod.go +++ b/internal/builder/v1beta1/pod.go @@ -17,8 +17,6 @@ limitations under the License. package builder import ( - "time" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -164,29 +162,3 @@ func PodVolumes(volumes ...corev1.Volume) PodSpecOp { spec.Volumes = volumes } } - -// PodCreationTimestamp sets the creation time of the pod -func PodCreationTimestamp(t time.Time) PodOp { - return func(p *corev1.Pod) { - p.CreationTimestamp = metav1.Time{Time: t} - } -} - -// PodStatus creates a PodStatus with default values. -// Any number of PodStatus modifiers can be passed to transform it. -func PodStatus(ops ...PodStatusOp) PodOp { - return func(pod *corev1.Pod) { - podStatus := &pod.Status - for _, op := range ops { - op(podStatus) - } - pod.Status = *podStatus - } -} - -// PodStatusConditions adds a Conditions (set) to the Pod status. 
-func PodStatusConditions(cond corev1.PodCondition) PodStatusOp { - return func(status *corev1.PodStatus) { - status.Conditions = append(status.Conditions, cond) - } -} diff --git a/internal/builder/v1beta1/step.go b/internal/builder/v1beta1/step.go index a2f10ddf364..e9c428c8d60 100644 --- a/internal/builder/v1beta1/step.go +++ b/internal/builder/v1beta1/step.go @@ -16,7 +16,6 @@ package builder import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" ) // StepOp is an operation which modifies a Container struct. @@ -87,64 +86,3 @@ func StepScript(script string) StepOp { step.Script = script } } - -// StepResources adds ResourceRequirements to the Container (step). -func StepResources(ops ...ResourceRequirementsOp) StepOp { - return func(step *v1beta1.Step) { - rr := &corev1.ResourceRequirements{} - for _, op := range ops { - op(rr) - } - step.Resources = *rr - } -} - -// StepLimits adds Limits to the ResourceRequirements. -func StepLimits(ops ...ResourceListOp) ResourceRequirementsOp { - return func(rr *corev1.ResourceRequirements) { - limits := corev1.ResourceList{} - for _, op := range ops { - op(limits) - } - rr.Limits = limits - } -} - -// StepRequests adds Requests to the ResourceRequirements. -func StepRequests(ops ...ResourceListOp) ResourceRequirementsOp { - return func(rr *corev1.ResourceRequirements) { - requests := corev1.ResourceList{} - for _, op := range ops { - op(requests) - } - rr.Requests = requests - } -} - -// StepCPU sets the CPU resource on the ResourceList. -func StepCPU(val string) ResourceListOp { - return func(r corev1.ResourceList) { - r[corev1.ResourceCPU] = resource.MustParse(val) - } -} - -// StepMemory sets the memory resource on the ResourceList. 
-func StepMemory(val string) ResourceListOp { - return func(r corev1.ResourceList) { - r[corev1.ResourceMemory] = resource.MustParse(val) - } -} - -// StepEphemeralStorage sets the ephemeral storage resource on the ResourceList. -func StepEphemeralStorage(val string) ResourceListOp { - return func(r corev1.ResourceList) { - r[corev1.ResourceEphemeralStorage] = resource.MustParse(val) - } -} - -// StepTerminationMessagePath sets the source of the termination message. -func StepTerminationMessagePath(terminationMessagePath string) StepOp { - return func(step *v1beta1.Step) { - step.TerminationMessagePath = terminationMessagePath - } -} diff --git a/internal/builder/v1beta1/task.go b/internal/builder/v1beta1/task.go index 59cf340d906..06b5d5156fc 100644 --- a/internal/builder/v1beta1/task.go +++ b/internal/builder/v1beta1/task.go @@ -305,19 +305,6 @@ func TaskResourcesOutput(name string, resourceType resource.PipelineResourceType } } -// TaskResultsOutput adds a TaskResult as Outputs to the TaskResources -func TaskResultsOutput(name, desc string, ops ...TaskResultOp) TaskResultOp { - return func(result *v1beta1.TaskResult) { - r := &v1beta1.TaskResult{ - Name: name, - Description: desc, - } - for _, op := range ops { - op(r) - } - } -} - // ResourceOptional marks a TaskResource as optional. func ResourceOptional(optional bool) TaskResourceOp { return func(r *v1beta1.TaskResource) { @@ -474,36 +461,6 @@ func TaskRunNodeSelector(values map[string]string) TaskRunSpecOp { } } -// TaskRunTolerations sets the Tolerations to the TaskRunSpec. -func TaskRunTolerations(values []corev1.Toleration) TaskRunSpecOp { - return func(spec *v1beta1.TaskRunSpec) { - if spec.PodTemplate == nil { - spec.PodTemplate = &v1beta1.PodTemplate{} - } - spec.PodTemplate.Tolerations = values - } -} - -// TaskRunAffinity sets the Affinity to the TaskRunSpec. 
-func TaskRunAffinity(affinity *corev1.Affinity) TaskRunSpecOp { - return func(spec *v1beta1.TaskRunSpec) { - if spec.PodTemplate == nil { - spec.PodTemplate = &v1beta1.PodTemplate{} - } - spec.PodTemplate.Affinity = affinity - } -} - -// TaskRunPodSecurityContext sets the SecurityContext to the TaskRunSpec (through PodTemplate). -func TaskRunPodSecurityContext(context *corev1.PodSecurityContext) TaskRunSpecOp { - return func(spec *v1beta1.TaskRunSpec) { - if spec.PodTemplate == nil { - spec.PodTemplate = &v1beta1.PodTemplate{} - } - spec.PodTemplate.SecurityContext = context - } -} - // StateTerminated sets Terminated to the StepState. func StateTerminated(exitcode int) StepStateOp { return func(s *v1beta1.StepState) { diff --git a/pkg/apis/config/default.go b/pkg/apis/config/default.go index d939de82cd5..c7d09bc96bb 100644 --- a/pkg/apis/config/default.go +++ b/pkg/apis/config/default.go @@ -32,6 +32,7 @@ const ( NoTimeoutDuration = 0 * time.Minute defaultTimeoutMinutesKey = "default-timeout-minutes" defaultServiceAccountKey = "default-service-account" + DefaultServiceAccountValue = "default" defaultManagedByLabelValueKey = "default-managed-by-label-value" DefaultManagedByLabelValue = "tekton-pipelines" defaultPodTemplateKey = "default-pod-template" @@ -82,6 +83,7 @@ func (cfg *Defaults) Equals(other *Defaults) bool { func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { tc := Defaults{ DefaultTimeoutMinutes: DefaultTimeoutMinutes, + DefaultServiceAccount: DefaultServiceAccountValue, DefaultManagedByLabelValue: DefaultManagedByLabelValue, DefaultCloudEventsSink: DefaultCloudEventSinkValue, } diff --git a/pkg/apis/config/default_test.go b/pkg/apis/config/default_test.go index 9eafd840bc1..adc6f0b7aed 100644 --- a/pkg/apis/config/default_test.go +++ b/pkg/apis/config/default_test.go @@ -81,6 +81,7 @@ func TestNewDefaultsFromEmptyConfigMap(t *testing.T) { expectedConfig := &config.Defaults{ DefaultTimeoutMinutes: 60, DefaultManagedByLabelValue: 
"tekton-pipelines", + DefaultServiceAccount: "default", } verifyConfigFileWithExpectedConfig(t, DefaultsConfigEmptyName, expectedConfig) } @@ -205,6 +206,7 @@ func TestEquals(t *testing.T) { } func verifyConfigFileWithExpectedConfig(t *testing.T, fileName string, expectedConfig *config.Defaults) { + t.Helper() cm := test.ConfigMapFromTestFile(t, fileName) if Defaults, err := config.NewDefaultsFromConfigMap(cm); err == nil { if d := cmp.Diff(Defaults, expectedConfig); d != "" { diff --git a/pkg/apis/config/feature_flags.go b/pkg/apis/config/feature_flags.go index 97c507dcfe3..6c42ed8abd3 100644 --- a/pkg/apis/config/feature_flags.go +++ b/pkg/apis/config/feature_flags.go @@ -29,10 +29,12 @@ const ( disableWorkingDirOverwriteKey = "disable-working-directory-overwrite" disableAffinityAssistantKey = "disable-affinity-assistant" runningInEnvWithInjectedSidecarsKey = "running-in-environment-with-injected-sidecars" + requireGitSSHSecretKnownHostsKey = "require-git-ssh-secret-known-hosts" // nolint: gosec DefaultDisableHomeEnvOverwrite = false DefaultDisableWorkingDirOverwrite = false DefaultDisableAffinityAssistant = false DefaultRunningInEnvWithInjectedSidecars = true + DefaultRequireGitSSHSecretKnownHosts = false ) // FeatureFlags holds the features configurations @@ -42,6 +44,7 @@ type FeatureFlags struct { DisableWorkingDirOverwrite bool DisableAffinityAssistant bool RunningInEnvWithInjectedSidecars bool + RequireGitSSHSecretKnownHosts bool } // GetFeatureFlagsConfigName returns the name of the configmap containing all @@ -81,6 +84,9 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setFeature(runningInEnvWithInjectedSidecarsKey, DefaultRunningInEnvWithInjectedSidecars, &tc.RunningInEnvWithInjectedSidecars); err != nil { return nil, err } + if err := setFeature(requireGitSSHSecretKnownHostsKey, DefaultRequireGitSSHSecretKnownHosts, &tc.RequireGitSSHSecretKnownHosts); err != nil { + return nil, err + } return &tc, nil } diff 
--git a/pkg/apis/config/feature_flags_test.go b/pkg/apis/config/feature_flags_test.go index 3d26ad7f31a..08b9b61512e 100644 --- a/pkg/apis/config/feature_flags_test.go +++ b/pkg/apis/config/feature_flags_test.go @@ -45,6 +45,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) { DisableWorkingDirOverwrite: true, DisableAffinityAssistant: true, RunningInEnvWithInjectedSidecars: false, + RequireGitSSHSecretKnownHosts: true, }, fileName: "feature-flags-all-flags-set", }, diff --git a/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml b/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml index c940a69e5f7..ad2b4618d9e 100644 --- a/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml +++ b/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml @@ -22,3 +22,4 @@ data: disable-working-directory-overwrite: "true" disable-affinity-assistant: "true" running-in-environment-with-injected-sidecars: "false" + require-git-ssh-secret-known-hosts: "true" diff --git a/pkg/apis/config/testdata/feature-flags.yaml b/pkg/apis/config/testdata/feature-flags.yaml index 2dcd768dde0..3e033b0c7f3 100644 --- a/pkg/apis/config/testdata/feature-flags.yaml +++ b/pkg/apis/config/testdata/feature-flags.yaml @@ -22,3 +22,4 @@ data: disable-working-directory-overwrite: "false" disable-affinity-assistant: "false" running-in-environment-with-injected-sidecars: "true" + require-git-ssh-secret-known-hosts: "false" diff --git a/pkg/apis/pipeline/v1alpha1/pipeline_validation_test.go b/pkg/apis/pipeline/v1alpha1/pipeline_validation_test.go index c757737d94a..61315899761 100644 --- a/pkg/apis/pipeline/v1alpha1/pipeline_validation_test.go +++ b/pkg/apis/pipeline/v1alpha1/pipeline_validation_test.go @@ -21,7 +21,6 @@ import ( "strings" "testing" - tb "github.com/tektoncd/pipeline/internal/builder/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" @@ -29,7 +28,7 @@ import ( ) 
func TestPipeline_Validate(t *testing.T) { - tests := []struct { + for _, tt := range []struct { name string p *v1alpha1.Pipeline failureExpected bool @@ -214,244 +213,616 @@ func TestPipeline_Validate(t *testing.T) { }, { // Adding this case because `task.Resources` is a pointer, explicitly making sure this is handled name: "task without resources", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("wonderful-resource", v1alpha1.PipelineResourceTypeImage), - tb.PipelineTask("bar", "bar-task"), - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskInputResource("wow-image", "wonderful-resource")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: "wonderful-resource", + Type: v1alpha1.PipelineResourceTypeImage, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "wow-image", Resource: "wonderful-resource", + }}, + }, + }, { + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + }}, + }, + }, failureExpected: false, }, { name: "valid resource declarations and usage", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("great-resource", v1alpha1.PipelineResourceTypeGit), - tb.PipelineDeclaredResource("wonderful-resource", v1alpha1.PipelineResourceTypeImage), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskInputResource("some-workspace", "great-resource"), - tb.PipelineTaskOutputResource("some-image", "wonderful-resource"), - tb.PipelineTaskCondition("some-condition", - tb.PipelineTaskConditionResource("some-workspace", "great-resource"))), - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskInputResource("wow-image", "wonderful-resource", tb.From("bar")), - tb.PipelineTaskCondition("some-condition-2", - 
tb.PipelineTaskConditionResource("wow-image", "wonderful-resource", "bar"))), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: "great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }, { + Name: "wonderful-resource", + Type: v1alpha1.PipelineResourceTypeImage, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "wow-image", Resource: "wonderful-resource", From: []string{"bar"}, + }}, + }, + Conditions: []v1alpha1.PipelineTaskCondition{{ + ConditionRef: "some-condition-2", + Resources: []v1alpha1.PipelineTaskInputResource{{ + Name: "wow-image", Resource: "wonderful-resource", From: []string{"bar"}, + }}, + }}, + }, { + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "some-workspace", Resource: "great-resource", + }}, + Outputs: []v1alpha1.PipelineTaskOutputResource{{ + Name: "some-image", Resource: "wonderful-resource", + }}, + }, + Conditions: []v1alpha1.PipelineTaskCondition{{ + ConditionRef: "some-condition", + Resources: []v1alpha1.PipelineTaskInputResource{{ + Name: "some-workspace", Resource: "great-resource", + }}, + }}, + }}, + }, + }, failureExpected: false, }, { name: "valid condition only resource", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("great-resource", v1alpha1.PipelineResourceTypeGit), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskCondition("some-condition", - tb.PipelineTaskConditionResource("some-workspace", "great-resource"))), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: 
"great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + Conditions: []v1alpha1.PipelineTaskCondition{{ + ConditionRef: "some-condition", + Resources: []v1alpha1.PipelineTaskInputResource{{ + Name: "some-workspace", Resource: "great-resource", + }}, + }}, + }}, + }, + }, failureExpected: false, }, { name: "valid parameter variables", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeString), - tb.PipelineParamSpec("foo-is-baz", v1alpha1.ParamTypeString), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskParam("a-param", "$(baz) and $(foo-is-baz)")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamTypeString, + }, { + Name: "foo-is-baz", + Type: v1alpha1.ParamTypeString, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("$(baz) and $(foo-is-baz)"), + }}, + }}, + }, + }, failureExpected: false, }, { name: "valid array parameter variables", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeArray, tb.ParamSpecDefault("some", "default")), - tb.PipelineParamSpec("foo-is-baz", v1alpha1.ParamTypeArray), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskParam("a-param", "$(baz)", "and", "$(foo-is-baz)")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamTypeArray, + Default: v1beta1.NewArrayOrString("some", "default"), + }, { + Name: "foo-is-baz", + Type: v1alpha1.ParamTypeArray, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: 
"bar-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("$(baz)", "and", "$(foo-is-baz)"), + }}, + }}, + }, + }, failureExpected: false, }, { name: "valid star array parameter variables", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeArray, tb.ParamSpecDefault("some", "default")), - tb.PipelineParamSpec("foo-is-baz", v1alpha1.ParamTypeArray), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskParam("a-param", "$(baz[*])", "and", "$(foo-is-baz[*])")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamTypeArray, + Default: v1beta1.NewArrayOrString("some", "default"), + }, { + Name: "foo-is-baz", + Type: v1alpha1.ParamTypeArray, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("$(baz[*])", "and", "$(foo-is-baz[*])"), + }}, + }}, + }, + }, failureExpected: false, }, { name: "pipeline parameter nested in task parameter", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeString), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskParam("a-param", "$(input.workspace.$(baz))")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamTypeString, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("$(input.workspace.$(baz))"), + }}, + }}, + }, + }, failureExpected: false, }, { - name: "from is on first task", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("great-resource", 
v1alpha1.PipelineResourceTypeGit), - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskInputResource("the-resource", "great-resource", tb.From("bar"))), - )), + name: "from is on only task", + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: "great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "the-resource", Resource: "great-resource", From: []string{"bar"}, + }}, + }, + }}, + }, + }, failureExpected: true, }, { name: "from task doesnt exist", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("great-resource", v1alpha1.PipelineResourceTypeGit), - tb.PipelineTask("baz", "baz-task"), - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskInputResource("the-resource", "great-resource", tb.From("bar"))), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: "great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "the-resource", Resource: "great-resource", From: []string{"bazzz"}, + }}, + }, + }, { + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + }}, + }, + }, failureExpected: true, }, { name: "duplicate resource declaration", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("duplicate-resource", v1alpha1.PipelineResourceTypeGit), - tb.PipelineDeclaredResource("duplicate-resource", v1alpha1.PipelineResourceTypeGit), - tb.PipelineTask("foo", "foo-task", - 
tb.PipelineTaskInputResource("the-resource", "duplicate-resource")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: "great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }, { + Name: "great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "the-resource", Resource: "great-resource", + }}, + }, + }}, + }, + }, failureExpected: true, }, { name: "output resources missing from declaration", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("great-resource", v1alpha1.PipelineResourceTypeGit), - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskInputResource("the-resource", "great-resource"), - tb.PipelineTaskOutputResource("the-magic-resource", "missing-resource")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: "great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "the-resource", Resource: "great-resource", + }}, + Outputs: []v1alpha1.PipelineTaskOutputResource{{ + Name: "the-magic-resource", Resource: "missing-resource", + }}, + }, + }}, + }, + }, failureExpected: true, }, { name: "input resources missing from declaration", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("great-resource", v1alpha1.PipelineResourceTypeGit), - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskInputResource("the-resource", "missing-resource"), - 
tb.PipelineTaskOutputResource("the-magic-resource", "great-resource")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: "great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "the-resource", Resource: "missing-resource", + }}, + Outputs: []v1alpha1.PipelineTaskOutputResource{{ + Name: "the-magic-resource", Resource: "great-resource", + }}, + }, + }}, + }, + }, failureExpected: true, }, { name: "invalid condition only resource", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskCondition("some-condition", - tb.PipelineTaskConditionResource("some-workspace", "missing-resource"))), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Tasks: []v1alpha1.PipelineTask{{ + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + Conditions: []v1alpha1.PipelineTaskCondition{{ + ConditionRef: "some-condition", + Resources: []v1alpha1.PipelineTaskInputResource{{ + Name: "some-workspace", Resource: "missing-resource", + }}, + }}, + }}, + }, + }, failureExpected: true, }, { name: "invalid from in condition", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineTask("foo", "foo-task"), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskCondition("some-condition", - tb.PipelineTaskConditionResource("some-workspace", "missing-resource", "foo"))), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + }, { + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + 
Conditions: []v1alpha1.PipelineTaskCondition{{ + ConditionRef: "some-condition", + Resources: []v1alpha1.PipelineTaskInputResource{{ + Name: "some-workspace", Resource: "missing-resource", From: []string{"foo"}, + }}, + }}, + }}, + }, + }, failureExpected: true, }, { name: "from resource isn't output by task", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineDeclaredResource("great-resource", v1alpha1.PipelineResourceTypeGit), - tb.PipelineDeclaredResource("wonderful-resource", v1alpha1.PipelineResourceTypeImage), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskInputResource("some-workspace", "great-resource")), - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskInputResource("wow-image", "wonderful-resource", tb.From("bar"))), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Resources: []v1alpha1.PipelineDeclaredResource{{ + Name: "great-resource", + Type: v1alpha1.PipelineResourceTypeGit, + }, { + Name: "wonderful-resource", + Type: v1alpha1.PipelineResourceTypeImage, + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "some-workspace", Resource: "great-resource", + }}, + }, + }, { + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + Resources: &v1alpha1.PipelineTaskResources{ + Inputs: []v1alpha1.PipelineTaskInputResource{{ + Name: "wow-image", Resource: "wonderful-resource", From: []string{"bar"}, + }}, + }, + }}, + }, + }, failureExpected: true, }, { name: "not defined parameter variable", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskParam("a-param", "$(params.does-not-exist)")))), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: 
&v1alpha1.TaskRef{Name: "foo-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("$(params.does-not-exist)"), + }}, + }}, + }, + }, failureExpected: true, }, { name: "not defined parameter variable with defined", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("foo", v1alpha1.ParamTypeString, tb.ParamSpecDefault("something")), - tb.PipelineTask("foo", "foo-task", - tb.PipelineTaskParam("a-param", "$(params.foo) and $(params.does-not-exist)")))), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "foo", + Type: v1alpha1.ParamTypeString, + Default: v1beta1.NewArrayOrString("something"), + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("$(params.foo) and $(params.does-not-exist)"), + }}, + }}, + }, + }, failureExpected: true, }, { name: "invalid parameter type", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", "invalidtype", tb.ParamSpecDefault("some", "default")), - tb.PipelineParamSpec("foo-is-baz", v1alpha1.ParamTypeArray), - tb.PipelineTask("bar", "bar-task"), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamType("invalidtype"), + Default: v1beta1.NewArrayOrString("some", "default"), + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + }}, + }, + }, failureExpected: true, }, { name: "array parameter mismatching default type", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeArray, tb.ParamSpecDefault("astring")), - tb.PipelineTask("bar", "bar-task"), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: 
"pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamTypeArray, + Default: v1beta1.NewArrayOrString("astring"), + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + }}, + }, + }, failureExpected: true, }, { name: "string parameter mismatching default type", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeString, tb.ParamSpecDefault("anarray", "elements")), - tb.PipelineTask("bar", "bar-task"), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamTypeString, + Default: v1beta1.NewArrayOrString("an", "array"), + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + }}, + }, + }, failureExpected: true, }, { name: "array parameter used as string", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeArray, tb.ParamSpecDefault("anarray", "elements")), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskParam("a-param", "$(params.baz)")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamTypeArray, + Default: v1beta1.NewArrayOrString("an", "array"), + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("$(params.baz)"), + }}, + }}, + }, + }, failureExpected: true, }, { name: "star array parameter used as string", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeArray, tb.ParamSpecDefault("anarray", "elements")), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskParam("a-param", 
"$(params.baz[*])")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "baz", + Type: v1alpha1.ParamTypeArray, + Default: v1beta1.NewArrayOrString("an", "array"), + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("$(params.baz[*])"), + }}, + }}, + }, + }, failureExpected: true, }, { name: "array parameter string template not isolated", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeArray, tb.ParamSpecDefault("anarray", "elements")), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskParam("a-param", "first", "value: $(params.baz)", "last")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "a-param", + Type: v1alpha1.ParamTypeArray, + Default: v1beta1.NewArrayOrString("an", "array"), + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("first", "value: $(params.baz)", "last"), + }}, + }}, + }, + }, failureExpected: true, }, { name: "star array parameter string template not isolated", - p: tb.Pipeline("pipeline", tb.PipelineSpec( - tb.PipelineParamSpec("baz", v1alpha1.ParamTypeArray, tb.ParamSpecDefault("anarray", "elements")), - tb.PipelineTask("bar", "bar-task", - tb.PipelineTaskParam("a-param", "first", "value: $(params.baz[*])", "last")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Params: []v1alpha1.ParamSpec{{ + Name: "a-param", + Type: v1alpha1.ParamTypeArray, + Default: v1beta1.NewArrayOrString("an", "array"), + }}, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + 
TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + Params: []v1alpha1.Param{{ + Name: "a-param", + Value: *v1beta1.NewArrayOrString("first", "value: $(params.baz[*])", "last"), + }}, + }}, + }, + }, failureExpected: true, }, { - name: "invalid dependency graph between the tasks", - p: tb.Pipeline("foo", tb.PipelineSpec( - tb.PipelineTask("foo", "foo", tb.RunAfter("bar")), - tb.PipelineTask("bar", "bar", tb.RunAfter("foo")), - )), + name: "circular dependency graph between the tasks", + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + RunAfter: []string{"bar"}, + }, { + Name: "bar", + TaskRef: &v1alpha1.TaskRef{Name: "bar-task"}, + RunAfter: []string{"foo"}, + }}, + }, + }, failureExpected: true, }, { name: "unused pipeline spec workspaces do not cause an error", - p: tb.Pipeline("name", tb.PipelineSpec( - tb.PipelineWorkspaceDeclaration("foo"), - tb.PipelineWorkspaceDeclaration("bar"), - tb.PipelineTask("foo", "foo"), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Workspaces: []v1alpha1.PipelineWorkspaceDeclaration{ + {Name: "foo"}, + {Name: "bar"}, + }, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + }}, + }, + }, failureExpected: false, }, { name: "workspace bindings relying on a non-existent pipeline workspace cause an error", - p: tb.Pipeline("name", tb.PipelineSpec( - tb.PipelineWorkspaceDeclaration("foo"), - tb.PipelineTask("taskname", "taskref", - tb.PipelineTaskWorkspaceBinding("taskWorkspaceName", "pipelineWorkspaceName", "")), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Workspaces: []v1alpha1.PipelineWorkspaceDeclaration{ + {Name: "foo"}, + }, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: 
&v1alpha1.TaskRef{Name: "foo-task"}, + Workspaces: []v1alpha1.WorkspacePipelineTaskBinding{{ + Name: "taskWorkspaceName", Workspace: "pipelineWorkspaceName", + }}, + }}, + }, + }, failureExpected: true, }, { name: "multiple workspaces sharing the same name are not allowed", - p: tb.Pipeline("name", tb.PipelineSpec( - tb.PipelineWorkspaceDeclaration("foo"), - tb.PipelineWorkspaceDeclaration("foo"), - )), + p: &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, + Spec: v1alpha1.PipelineSpec{ + Workspaces: []v1alpha1.PipelineWorkspaceDeclaration{ + {Name: "foo"}, + {Name: "foo"}, + }, + Tasks: []v1alpha1.PipelineTask{{ + Name: "foo", + TaskRef: &v1alpha1.TaskRef{Name: "foo-task"}, + }}, + }, + }, failureExpected: true, - }} - for _, tt := range tests { + }} { t.Run(tt.name, func(t *testing.T) { err := tt.p.Validate(context.Background()) if (!tt.failureExpected) && (err != nil) { diff --git a/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults_test.go b/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults_test.go index 718d24eb0fe..3f90c0d5b27 100644 --- a/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults_test.go +++ b/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults_test.go @@ -46,7 +46,8 @@ func TestPipelineRunSpec_SetDefaults(t *testing.T) { desc: "timeout is nil", prs: &v1alpha1.PipelineRunSpec{}, want: &v1alpha1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { @@ -55,14 +56,16 @@ func TestPipelineRunSpec_SetDefaults(t *testing.T) { Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, want: &v1alpha1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, }, { desc: "pod template is nil", prs: 
&v1alpha1.PipelineRunSpec{}, want: &v1alpha1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { @@ -75,7 +78,8 @@ func TestPipelineRunSpec_SetDefaults(t *testing.T) { }, }, want: &v1alpha1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, PodTemplate: &v1alpha1.PodTemplate{ NodeSelector: map[string]string{ "label": "value", @@ -108,7 +112,8 @@ func TestPipelineRunDefaulting(t *testing.T) { in: &v1alpha1.PipelineRun{}, want: &v1alpha1.PipelineRun{ Spec: v1alpha1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, }, { @@ -120,8 +125,9 @@ func TestPipelineRunDefaulting(t *testing.T) { }, want: &v1alpha1.PipelineRun{ Spec: v1alpha1.PipelineRunSpec{ - PipelineRef: &v1alpha1.PipelineRef{Name: "foo"}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + PipelineRef: &v1alpha1.PipelineRef{Name: "foo"}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, wc: contexts.WithUpgradeViaDefaulting, @@ -134,8 +140,9 @@ func TestPipelineRunDefaulting(t *testing.T) { }, want: &v1alpha1.PipelineRun{ Spec: v1alpha1.PipelineRunSpec{ - PipelineRef: &v1alpha1.PipelineRef{Name: "foo"}, - Timeout: &metav1.Duration{Duration: 5 * time.Minute}, + PipelineRef: &v1alpha1.PipelineRef{Name: "foo"}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 5 * 
time.Minute}, }, }, wc: func(ctx context.Context) context.Context { diff --git a/pkg/apis/pipeline/v1alpha1/taskrun_defaults_test.go b/pkg/apis/pipeline/v1alpha1/taskrun_defaults_test.go index 7964845c1f0..d6f2b32a6af 100644 --- a/pkg/apis/pipeline/v1alpha1/taskrun_defaults_test.go +++ b/pkg/apis/pipeline/v1alpha1/taskrun_defaults_test.go @@ -43,8 +43,9 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, want: &v1alpha1.TaskRunSpec{ - TaskRef: nil, - Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, + TaskRef: nil, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, }, { desc: "taskref kind is empty", @@ -53,8 +54,9 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, want: &v1alpha1.TaskRunSpec{ - TaskRef: &v1alpha1.TaskRef{Kind: v1alpha1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, + TaskRef: &v1alpha1.TaskRef{Kind: v1alpha1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, }, { desc: "timeout is nil", @@ -62,14 +64,16 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { TaskRef: &v1alpha1.TaskRef{Kind: v1alpha1.ClusterTaskKind}, }, want: &v1alpha1.TaskRunSpec{ - TaskRef: &v1alpha1.TaskRef{Kind: v1alpha1.ClusterTaskKind}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + TaskRef: &v1alpha1.TaskRef{Kind: v1alpha1.ClusterTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { desc: "pod template is nil", trs: &v1alpha1.TaskRunSpec{}, want: &v1alpha1.TaskRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: 
config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { desc: "pod template is not nil", @@ -81,7 +85,8 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { }, }, want: &v1alpha1.TaskRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, PodTemplate: &v1alpha1.PodTemplate{ NodeSelector: map[string]string{ "label": "value", @@ -108,7 +113,8 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { }}, }, }, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }} for _, tc := range cases { @@ -137,7 +143,8 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "tekton-pipelines"}, }, Spec: v1alpha1.TaskRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, }, { @@ -152,8 +159,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "tekton-pipelines"}, }, Spec: v1alpha1.TaskRunSpec{ - TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, }, { @@ -168,8 +176,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": 
"tekton-pipelines"}, }, Spec: v1alpha1.TaskRunSpec{ - TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, wc: contexts.WithUpgradeViaDefaulting, @@ -185,8 +194,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "tekton-pipelines"}, }, Spec: v1alpha1.TaskRunSpec{ - TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: 5 * time.Minute}, + TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 5 * time.Minute}, }, }, wc: func(ctx context.Context) context.Context { @@ -243,8 +253,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "something-else"}, }, Spec: v1alpha1.TaskRunSpec{ - TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: 5 * time.Minute}, + TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 5 * time.Minute}, }, }, wc: func(ctx context.Context) context.Context { @@ -275,8 +286,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "user-specified"}, }, Spec: v1alpha1.TaskRunSpec{ - TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: 5 * time.Minute}, + TaskRef: &v1alpha1.TaskRef{Name: "foo", Kind: v1alpha1.NamespacedTaskKind}, + ServiceAccountName: 
config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 5 * time.Minute}, }, }, wc: func(ctx context.Context) context.Context { diff --git a/pkg/apis/pipeline/v1beta1/cluster_task_validation.go b/pkg/apis/pipeline/v1beta1/cluster_task_validation.go index b803b0bce91..cd602e91ac2 100644 --- a/pkg/apis/pipeline/v1beta1/cluster_task_validation.go +++ b/pkg/apis/pipeline/v1beta1/cluster_task_validation.go @@ -26,8 +26,6 @@ import ( var _ apis.Validatable = (*ClusterTask)(nil) func (t *ClusterTask) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(t.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - return t.Spec.Validate(ctx) + errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") + return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } diff --git a/pkg/apis/pipeline/v1beta1/param_types.go b/pkg/apis/pipeline/v1beta1/param_types.go index 09c18d954d9..7724e167619 100644 --- a/pkg/apis/pipeline/v1beta1/param_types.go +++ b/pkg/apis/pipeline/v1beta1/param_types.go @@ -145,39 +145,25 @@ func NewArrayOrString(value string, values ...string) *ArrayOrString { } } -func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix string, paramNames sets.String, arrayParamNames sets.String) *apis.FieldError { +func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { for _, param := range params { if param.Value.Type == ParamTypeString { - if err := validateStringVariableInTaskParameters(fmt.Sprintf("[%s]", param.Name), param.Value.StringVal, prefix, paramNames, arrayParamNames); err != nil { - return err - } + errs = errs.Also(validateStringVariableInTaskParameters(param.Value.StringVal, prefix, paramNames, arrayParamNames).ViaFieldKey("params", param.Name)) } else { - for _, arrayElement := range param.Value.ArrayVal { - if err := 
validateArrayVariableInTaskParameters(fmt.Sprintf("[%s]", param.Name), arrayElement, prefix, paramNames, arrayParamNames); err != nil { - return err - } + for idx, arrayElement := range param.Value.ArrayVal { + errs = errs.Also(validateArrayVariableInTaskParameters(arrayElement, prefix, paramNames, arrayParamNames).ViaFieldIndex("value", idx).ViaFieldKey("params", param.Name)) } } } - return nil + return errs } -func validateStringVariableInTaskParameters(name, value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { - if err := substitution.ValidateVariable(name, value, prefix, "task parameter", "pipelinespec.params", stringVars); err != nil { - return err - } - if err := substitution.ValidateVariableProhibited(name, value, prefix, "task parameter", "pipelinespec.params", arrayVars); err != nil { - return err - } - return nil +func validateStringVariableInTaskParameters(value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { + errs := substitution.ValidateVariableP(value, prefix, stringVars) + return errs.Also(substitution.ValidateVariableProhibitedP(value, prefix, arrayVars)) } -func validateArrayVariableInTaskParameters(name, value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { - if err := substitution.ValidateVariable(name, value, prefix, "task parameter", "pipelinespec.params", stringVars); err != nil { - return err - } - if err := substitution.ValidateVariableIsolated(name, value, prefix, "task parameter", "pipelinespec.params", arrayVars); err != nil { - return err - } - return nil +func validateArrayVariableInTaskParameters(value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { + errs := substitution.ValidateVariableP(value, prefix, stringVars) + return errs.Also(substitution.ValidateVariableIsolatedP(value, prefix, arrayVars)) } diff --git a/pkg/apis/pipeline/v1beta1/pipeline_validation.go 
b/pkg/apis/pipeline/v1beta1/pipeline_validation.go index fac8294c2c5..9244db87088 100644 --- a/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -36,187 +36,38 @@ var _ apis.Validatable = (*Pipeline)(nil) // Validate checks that the Pipeline structure is valid but does not validate // that any references resources exist, that is done at run time. func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(p.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - return p.Spec.Validate(ctx) -} - -// validateDeclaredResources ensures that the specified resources have unique names and -// validates that all the resources referenced by pipeline tasks are declared in the pipeline -func validateDeclaredResources(resources []PipelineDeclaredResource, tasks []PipelineTask, finalTasks []PipelineTask) error { - encountered := sets.NewString() - for _, r := range resources { - if encountered.Has(r.Name) { - return fmt.Errorf("resource with name %q appears more than once", r.Name) - } - encountered.Insert(r.Name) - } - required := []string{} - for _, t := range tasks { - if t.Resources != nil { - for _, input := range t.Resources.Inputs { - required = append(required, input.Resource) - } - for _, output := range t.Resources.Outputs { - required = append(required, output.Resource) - } - } - - for _, condition := range t.Conditions { - for _, cr := range condition.Resources { - required = append(required, cr.Resource) - } - } - } - for _, t := range finalTasks { - if t.Resources != nil { - for _, input := range t.Resources.Inputs { - required = append(required, input.Resource) - } - for _, output := range t.Resources.Outputs { - required = append(required, output.Resource) - } - } - } - - provided := make([]string, 0, len(resources)) - for _, resource := range resources { - provided = append(provided, resource.Name) - } - missing := list.DiffLeft(required, 
provided) - if len(missing) > 0 { - return fmt.Errorf("pipeline declared resources didn't match usage in Tasks: Didn't provide required values: %s", missing) - } - return nil -} - -func isOutput(outputs []PipelineTaskOutputResource, resource string) bool { - for _, output := range outputs { - if output.Resource == resource { - return true - } - } - return false -} - -// validateFrom ensures that the `from` values make sense: that they rely on values from Tasks -// that ran previously, and that the PipelineResource is actually an output of the Task it should come from. -func validateFrom(tasks []PipelineTask) *apis.FieldError { - taskOutputs := map[string][]PipelineTaskOutputResource{} - for _, task := range tasks { - var to []PipelineTaskOutputResource - if task.Resources != nil { - to = make([]PipelineTaskOutputResource, len(task.Resources.Outputs)) - copy(to, task.Resources.Outputs) - } - taskOutputs[task.Name] = to - } - for _, t := range tasks { - inputResources := []PipelineTaskInputResource{} - if t.Resources != nil { - inputResources = append(inputResources, t.Resources.Inputs...) - } - - for _, c := range t.Conditions { - inputResources = append(inputResources, c.Resources...) - } - - for _, rd := range inputResources { - for _, pt := range rd.From { - outputs, found := taskOutputs[pt] - if !found { - return apis.ErrInvalidValue(fmt.Sprintf("expected resource %s to be from task %s, but task %s doesn't exist", rd.Resource, pt, pt), - "spec.tasks.resources.inputs.from") - } - if !isOutput(outputs, rd.Resource) { - return apis.ErrInvalidValue(fmt.Sprintf("the resource %s from %s must be an output but is an input", rd.Resource, pt), - "spec.tasks.resources.inputs.from") - } - } - } - } - return nil -} - -// validateGraph ensures the Pipeline's dependency Graph (DAG) make sense: that there is no dependency -// cycle or that they rely on values from Tasks that ran previously, and that the PipelineResource -// is actually an output of the Task it should come from. 
-func validateGraph(tasks []PipelineTask) error { - if _, err := dag.Build(PipelineTaskList(tasks)); err != nil { - return err - } - return nil + errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata") + return errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } // Validate checks that taskNames in the Pipeline are valid and that the graph // of Tasks expressed in the Pipeline makes sense. -func (ps *PipelineSpec) Validate(ctx context.Context) *apis.FieldError { +func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { if equality.Semantic.DeepEqual(ps, &PipelineSpec{}) { - return apis.ErrGeneric("expected at least one, got none", "spec.description", "spec.params", "spec.resources", "spec.tasks", "spec.workspaces") + errs = errs.Also(apis.ErrGeneric("expected at least one, got none", "description", "params", "resources", "tasks", "workspaces")) } - // PipelineTask must have a valid unique label and at least one of taskRef or taskSpec should be specified - if err := validatePipelineTasks(ctx, ps.Tasks, ps.Finally); err != nil { - return err - } - + errs = errs.Also(validatePipelineTasks(ctx, ps.Tasks, ps.Finally)) // All declared resources should be used, and the Pipeline shouldn't try to use any resources // that aren't declared - if err := validateDeclaredResources(ps.Resources, ps.Tasks, ps.Finally); err != nil { - return apis.ErrInvalidValue(err.Error(), "spec.resources") - } - + errs = errs.Also(validateDeclaredResources(ps.Resources, ps.Tasks, ps.Finally)) // The from values should make sense - if err := validateFrom(ps.Tasks); err != nil { - return err - } - + errs = errs.Also(validateFrom(ps.Tasks)) // Validate the pipeline task graph - if err := validateGraph(ps.Tasks); err != nil { - return apis.ErrInvalidValue(err.Error(), "spec.tasks") - } - - if err := validateParamResults(ps.Tasks); err != nil { - return apis.ErrInvalidValue(err.Error(), "spec.tasks.params.value") - } - + errs = 
errs.Also(validateGraph(ps.Tasks)) + errs = errs.Also(validateParamResults(ps.Tasks)) // The parameter variables should be valid - if err := validatePipelineParameterVariables(ps.Tasks, ps.Params); err != nil { - return err - } - - if err := validatePipelineParameterVariables(ps.Finally, ps.Params); err != nil { - return err - } - - if err := validatePipelineContextVariables(ps.Tasks); err != nil { - return err - } - + errs = errs.Also(validatePipelineParameterVariables(ps.Tasks, ps.Params).ViaField("tasks")) + errs = errs.Also(validatePipelineParameterVariables(ps.Finally, ps.Params).ViaField("finally")) + errs = errs.Also(validatePipelineContextVariables(ps.Tasks)) // Validate the pipeline's workspaces. - if err := validatePipelineWorkspaces(ps.Workspaces, ps.Tasks, ps.Finally); err != nil { - return err - } - + errs = errs.Also(validatePipelineWorkspaces(ps.Workspaces, ps.Tasks, ps.Finally)) // Validate the pipeline's results - if err := validatePipelineResults(ps.Results); err != nil { - return apis.ErrInvalidValue(err.Error(), "spec.tasks.params.value") - } - - if err := validateTasksAndFinallySection(ps); err != nil { - return err - } - - if err := validateFinalTasks(ps.Finally); err != nil { - return err - } - - if err := validateWhenExpressions(ps.Tasks); err != nil { - return err - } - - return nil + errs = errs.Also(validatePipelineResults(ps.Results)) + errs = errs.Also(validateTasksAndFinallySection(ps)) + errs = errs.Also(validateFinalTasks(ps.Finally)) + errs = errs.Also(validateWhenExpressions(ps.Tasks)) + return errs } // validatePipelineTasks ensures that pipeline tasks has unique label, pipeline tasks has specified one of @@ -224,72 +75,68 @@ func (ps *PipelineSpec) Validate(ctx context.Context) *apis.FieldError { func validatePipelineTasks(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) *apis.FieldError { // Names cannot be duplicated taskNames := sets.NewString() - var err *apis.FieldError + var errs *apis.FieldError for i, 
t := range tasks { - if err = validatePipelineTaskName(ctx, "spec.tasks", i, t, taskNames); err != nil { - return err - } + errs = errs.Also(validatePipelineTask(ctx, t, taskNames).ViaFieldIndex("tasks", i)) } for i, t := range finalTasks { - if err = validatePipelineTaskName(ctx, "spec.finally", i, t, taskNames); err != nil { - return err - } + errs = errs.Also(validatePipelineTask(ctx, t, taskNames).ViaFieldIndex("finally", i)) } - return nil + return errs } -func validatePipelineTaskName(ctx context.Context, prefix string, i int, t PipelineTask, taskNames sets.String) *apis.FieldError { - if errs := validation.IsDNS1123Label(t.Name); len(errs) > 0 { +func validatePipelineTaskName(name string) *apis.FieldError { + if err := validation.IsDNS1123Label(name); len(err) > 0 { return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", t.Name), - Paths: []string{fmt.Sprintf(prefix+"[%d].name", i)}, + Message: fmt.Sprintf("invalid value %q", name), + Paths: []string{"name"}, Details: "Pipeline Task name must be a valid DNS Label." 
+ "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", } } + return nil +} + +func validatePipelineTask(ctx context.Context, t PipelineTask, taskNames sets.String) *apis.FieldError { + errs := validatePipelineTaskName(t.Name) // can't have both taskRef and taskSpec at the same time if (t.TaskRef != nil && t.TaskRef.Name != "") && t.TaskSpec != nil { - return apis.ErrMultipleOneOf(fmt.Sprintf(prefix+"[%d].taskRef", i), fmt.Sprintf(prefix+"[%d].taskSpec", i)) + errs = errs.Also(apis.ErrMultipleOneOf("taskRef", "taskSpec")) } // Check that one of TaskRef and TaskSpec is present if (t.TaskRef == nil || (t.TaskRef != nil && t.TaskRef.Name == "")) && t.TaskSpec == nil { - return apis.ErrMissingOneOf(fmt.Sprintf(prefix+"[%d].taskRef", i), fmt.Sprintf(prefix+"[%d].taskSpec", i)) + errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec")) } // Validate TaskSpec if it's present if t.TaskSpec != nil { - if err := t.TaskSpec.Validate(ctx); err != nil { - return err - } + errs = errs.Also(t.TaskSpec.Validate(ctx).ViaField("taskSpec")) } if t.TaskRef != nil && t.TaskRef.Name != "" { - // Task names are appended to the container name, which must exist and - // must be a valid k8s name - if errSlice := validation.IsQualifiedName(t.Name); len(errSlice) != 0 { - return apis.ErrInvalidValue(strings.Join(errSlice, ","), fmt.Sprintf(prefix+"[%d].name", i)) - } // TaskRef name must be a valid k8s name if errSlice := validation.IsQualifiedName(t.TaskRef.Name); len(errSlice) != 0 { - return apis.ErrInvalidValue(strings.Join(errSlice, ","), fmt.Sprintf(prefix+"[%d].taskRef.name", i)) + errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) } if _, ok := taskNames[t.Name]; ok { - return apis.ErrMultipleOneOf(fmt.Sprintf(prefix+"[%d].name", i)) + errs = errs.Also(apis.ErrMultipleOneOf("name")) } taskNames[t.Name] = struct{}{} } - return nil + return errs } // validatePipelineWorkspaces validates the specified 
workspaces, ensuring having unique name without any empty string, // and validates that all the referenced workspaces (by pipeline tasks) are specified in the pipeline -func validatePipelineWorkspaces(wss []PipelineWorkspaceDeclaration, pts []PipelineTask, finalTasks []PipelineTask) *apis.FieldError { +func validatePipelineWorkspaces(wss []PipelineWorkspaceDeclaration, pts []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) { // Workspace names must be non-empty and unique. wsTable := sets.NewString() for i, ws := range wss { if ws.Name == "" { - return apis.ErrInvalidValue(fmt.Sprintf("workspace %d has empty name", i), "spec.workspaces") + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("workspace %d has empty name", i), + "").ViaFieldIndex("workspaces", i)) } if wsTable.Has(ws.Name) { - return apis.ErrInvalidValue(fmt.Sprintf("workspace with name %q appears more than once", ws.Name), "spec.workspaces") + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("workspace with name %q appears more than once", ws.Name), + "").ViaFieldIndex("workspaces", i)) } wsTable.Insert(ws.Name) } @@ -299,30 +146,30 @@ func validatePipelineWorkspaces(wss []PipelineWorkspaceDeclaration, pts []Pipeli for i, pt := range pts { for j, ws := range pt.Workspaces { if !wsTable.Has(ws.Workspace) { - return apis.ErrInvalidValue( + errs = errs.Also(apis.ErrInvalidValue( fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), - fmt.Sprintf("spec.tasks[%d].workspaces[%d]", i, j), - ) + "", + ).ViaFieldIndex("workspaces", j).ViaFieldIndex("tasks", i)) } } } for i, t := range finalTasks { for j, ws := range t.Workspaces { if !wsTable.Has(ws.Workspace) { - return apis.ErrInvalidValue( + errs = errs.Also(apis.ErrInvalidValue( fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", t.Name, ws.Workspace), - fmt.Sprintf("spec.finally[%d].workspaces[%d]", i, j), - ) + "", + 
).ViaFieldIndex("workspaces", j).ViaFieldIndex("finally", i)) } } } - return nil + return errs } // validatePipelineParameterVariables validates parameters with those specified by each pipeline task, // (1) it validates the type of parameter is either string or array (2) parameter default value matches // with the type of that param (3) ensures that the referenced param variable is defined is part of the param declarations -func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec) *apis.FieldError { +func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { parameterNames := sets.NewString() arrayParameterNames := sets.NewString() @@ -335,23 +182,17 @@ func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec } } if !validType { - return apis.ErrInvalidValue(string(p.Type), fmt.Sprintf("spec.params.%s.type", p.Name)) + errs = errs.Also(apis.ErrInvalidValue(string(p.Type), "type").ViaFieldKey("params", p.Name)) } // If a default value is provided, ensure its type matches param's declared type. if (p.Default != nil) && (p.Default.Type != p.Type) { - return &apis.FieldError{ - Message: fmt.Sprintf( - "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), - Paths: []string{ - fmt.Sprintf("spec.params.%s.type", p.Name), - fmt.Sprintf("spec.params.%s.default.type", p.Name), - }, - } + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), + "type", "default.type").ViaFieldKey("params", p.Name)) } if parameterNames.Has(p.Name) { - return apis.ErrGeneric("parameter appears more than once", fmt.Sprintf("spec.params.%s", p.Name)) + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) } // Add parameter name to parameterNames, and to arrayParameterNames if type is array. 
parameterNames.Insert(p.Name) @@ -360,19 +201,15 @@ func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec } } - return validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames) + return errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames)) } -func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String) *apis.FieldError { - for _, task := range tasks { - if err := validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames); err != nil { - return err - } - if err := task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames); err != nil { - return err - } +func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { + for idx, task := range tasks { + errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames).ViaIndex(idx)) + errs = errs.Also(task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames).ViaIndex(idx)) } - return nil + return errs } func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError { @@ -391,24 +228,20 @@ func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError { paramValues = append(paramValues, param.Value.ArrayVal...) 
} } - if err := validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipelineRun", pipelineRunContextNames); err != nil { - return err - } - return validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipeline", pipelineContextNames) + errs := validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipelineRun", pipelineRunContextNames) + return errs.Also(validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipeline", pipelineContextNames)) } -func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) *apis.FieldError { +func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) (errs *apis.FieldError) { for _, paramValue := range paramValues { - if err := substitution.ValidateVariable(fmt.Sprintf("param[%s]", paramValue), paramValue, prefix, "params", "pipelinespec.params", contextNames); err != nil { - return err - } + errs = errs.Also(substitution.ValidateVariableP(paramValue, prefix, contextNames).ViaField("value")) } - return nil + return errs } // validateParamResults ensures that task result variables are properly configured -func validateParamResults(tasks []PipelineTask) error { - for _, task := range tasks { +func validateParamResults(tasks []PipelineTask) (errs *apis.FieldError) { + for idx, task := range tasks { for _, param := range task.Params { expressions, ok := GetVarSubstitutionExpressionsForParam(param) if ok { @@ -416,13 +249,14 @@ func validateParamResults(tasks []PipelineTask) error { expressions = filter(expressions, looksLikeResultRef) resultRefs := NewResultRefs(expressions) if len(expressions) != len(resultRefs) { - return fmt.Errorf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs) + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", 
expressions, resultRefs), + "value").ViaFieldKey("params", param.Name).ViaFieldIndex("tasks", idx)) } } } } } - return nil + return errs } func filter(arr []string, cond func(string) bool) []string { @@ -436,47 +270,49 @@ func filter(arr []string, cond func(string) bool) []string { } // validatePipelineResults ensure that pipeline result variables are properly configured -func validatePipelineResults(results []PipelineResult) error { - for _, result := range results { +func validatePipelineResults(results []PipelineResult) (errs *apis.FieldError) { + for idx, result := range results { expressions, ok := GetVarSubstitutionExpressionsForPipelineResult(result) if ok { if LooksLikeContainsResultRefs(expressions) { expressions = filter(expressions, looksLikeResultRef) resultRefs := NewResultRefs(expressions) if len(expressions) != len(resultRefs) { - return fmt.Errorf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs) + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs), + "value").ViaFieldIndex("results", idx)) } } } } - return nil + + return errs } func validateTasksAndFinallySection(ps *PipelineSpec) *apis.FieldError { if len(ps.Finally) != 0 && len(ps.Tasks) == 0 { - return apis.ErrInvalidValue(fmt.Sprintf("spec.tasks is empty but spec.finally has %d tasks", len(ps.Finally)), "spec.finally") + return apis.ErrInvalidValue(fmt.Sprintf("spec.tasks is empty but spec.finally has %d tasks", len(ps.Finally)), "finally") } return nil } func validateFinalTasks(finalTasks []PipelineTask) *apis.FieldError { - for _, f := range finalTasks { + for idx, f := range finalTasks { if len(f.RunAfter) != 0 { - return apis.ErrInvalidValue(fmt.Sprintf("no runAfter allowed under spec.finally, final task %s has runAfter specified", f.Name), "spec.finally") + return apis.ErrInvalidValue(fmt.Sprintf("no runAfter allowed under 
spec.finally, final task %s has runAfter specified", f.Name), "").ViaFieldIndex("finally", idx) } if len(f.Conditions) != 0 { - return apis.ErrInvalidValue(fmt.Sprintf("no conditions allowed under spec.finally, final task %s has conditions specified", f.Name), "spec.finally") + return apis.ErrInvalidValue(fmt.Sprintf("no conditions allowed under spec.finally, final task %s has conditions specified", f.Name), "").ViaFieldIndex("finally", idx) } if len(f.WhenExpressions) != 0 { - return apis.ErrInvalidValue(fmt.Sprintf("no when expressions allowed under spec.finally, final task %s has when expressions specified", f.Name), "spec.finally") + return apis.ErrInvalidValue(fmt.Sprintf("no when expressions allowed under spec.finally, final task %s has when expressions specified", f.Name), "").ViaFieldIndex("finally", idx) } } - if err := validateTaskResultReferenceNotUsed(finalTasks); err != nil { + if err := validateTaskResultReferenceNotUsed(finalTasks).ViaField("finally"); err != nil { return err } - if err := validateTasksInputFrom(finalTasks); err != nil { + if err := validateTasksInputFrom(finalTasks).ViaField("finally"); err != nil { return err } @@ -484,13 +320,13 @@ func validateFinalTasks(finalTasks []PipelineTask) *apis.FieldError { } func validateTaskResultReferenceNotUsed(tasks []PipelineTask) *apis.FieldError { - for _, t := range tasks { + for idx, t := range tasks { for _, p := range t.Params { expressions, ok := GetVarSubstitutionExpressionsForParam(p) if ok { if LooksLikeContainsResultRefs(expressions) { return apis.ErrInvalidValue(fmt.Sprintf("no task result allowed under params,"+ - "final task param %s has set task result as its value", p.Name), "spec.finally.task.params") + "final task param %s has set task result as its value", p.Name), "params").ViaIndex(idx) } } } @@ -498,38 +334,140 @@ func validateTaskResultReferenceNotUsed(tasks []PipelineTask) *apis.FieldError { return nil } -func validateTasksInputFrom(tasks []PipelineTask) *apis.FieldError { - 
for _, t := range tasks { +func validateTasksInputFrom(tasks []PipelineTask) (errs *apis.FieldError) { + for idx, t := range tasks { inputResources := []PipelineTaskInputResource{} if t.Resources != nil { inputResources = append(inputResources, t.Resources.Inputs...) } - for _, rd := range inputResources { + for i, rd := range inputResources { if len(rd.From) != 0 { - return apis.ErrDisallowedFields(fmt.Sprintf("no from allowed under inputs,"+ - " final task %s has from specified", rd.Name), "spec.finally.task.resources.inputs") + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("no from allowed under inputs,"+ + " final task %s has from specified", rd.Name), "").ViaFieldIndex("inputs", i).ViaField("resources").ViaIndex(idx)) } } } - return nil + return errs } -func validateWhenExpressions(tasks []PipelineTask) *apis.FieldError { +func validateWhenExpressions(tasks []PipelineTask) (errs *apis.FieldError) { for i, t := range tasks { - if err := validateOneOfWhenExpressionsOrConditions(i, t); err != nil { - return err + errs = errs.Also(validateOneOfWhenExpressionsOrConditions(t).ViaFieldIndex("tasks", i)) + errs = errs.Also(t.WhenExpressions.validate().ViaFieldIndex("tasks", i)) + } + return errs +} + +func validateOneOfWhenExpressionsOrConditions(t PipelineTask) *apis.FieldError { + if t.WhenExpressions != nil && t.Conditions != nil { + return apis.ErrMultipleOneOf("when", "conditions") + } + return nil +} + +// validateDeclaredResources ensures that the specified resources have unique names and +// validates that all the resources referenced by pipeline tasks are declared in the pipeline +func validateDeclaredResources(resources []PipelineDeclaredResource, tasks []PipelineTask, finalTasks []PipelineTask) *apis.FieldError { + encountered := sets.NewString() + for _, r := range resources { + if encountered.Has(r.Name) { + return apis.ErrInvalidValue(fmt.Sprintf("resource with name %q appears more than once", r.Name), "resources") + } + encountered.Insert(r.Name) + } + 
required := []string{} + for _, t := range tasks { + if t.Resources != nil { + for _, input := range t.Resources.Inputs { + required = append(required, input.Resource) + } + for _, output := range t.Resources.Outputs { + required = append(required, output.Resource) + } } - if err := t.WhenExpressions.validate(); err != nil { - return err + + for _, condition := range t.Conditions { + for _, cr := range condition.Resources { + required = append(required, cr.Resource) + } } } + for _, t := range finalTasks { + if t.Resources != nil { + for _, input := range t.Resources.Inputs { + required = append(required, input.Resource) + } + for _, output := range t.Resources.Outputs { + required = append(required, output.Resource) + } + } + } + + provided := make([]string, 0, len(resources)) + for _, resource := range resources { + provided = append(provided, resource.Name) + } + missing := list.DiffLeft(required, provided) + if len(missing) > 0 { + return apis.ErrInvalidValue(fmt.Sprintf("pipeline declared resources didn't match usage in Tasks: Didn't provide required values: %s", missing), "resources") + } return nil } -func validateOneOfWhenExpressionsOrConditions(i int, t PipelineTask) *apis.FieldError { - prefix := "spec.tasks" - if t.WhenExpressions != nil && t.Conditions != nil { - return apis.ErrMultipleOneOf(fmt.Sprintf(fmt.Sprintf(prefix+"[%d].when", i), fmt.Sprintf(prefix+"[%d].conditions", i))) +func isOutput(outputs []PipelineTaskOutputResource, resource string) bool { + for _, output := range outputs { + if output.Resource == resource { + return true + } + } + return false +} + +// validateFrom ensures that the `from` values make sense: that they rely on values from Tasks +// that ran previously, and that the PipelineResource is actually an output of the Task it should come from. 
+func validateFrom(tasks []PipelineTask) (errs *apis.FieldError) { + taskOutputs := map[string][]PipelineTaskOutputResource{} + for _, task := range tasks { + var to []PipelineTaskOutputResource + if task.Resources != nil { + to = make([]PipelineTaskOutputResource, len(task.Resources.Outputs)) + copy(to, task.Resources.Outputs) + } + taskOutputs[task.Name] = to + } + for i, t := range tasks { + inputResources := []PipelineTaskInputResource{} + if t.Resources != nil { + inputResources = append(inputResources, t.Resources.Inputs...) + } + + for _, c := range t.Conditions { + inputResources = append(inputResources, c.Resources...) + } + + for j, rd := range inputResources { + for _, pt := range rd.From { + outputs, found := taskOutputs[pt] + if !found { + return apis.ErrInvalidValue(fmt.Sprintf("expected resource %s to be from task %s, but task %s doesn't exist", rd.Resource, pt, pt), + "from").ViaFieldIndex("inputs", j).ViaField("resources").ViaFieldIndex("tasks", i) + } + if !isOutput(outputs, rd.Resource) { + return apis.ErrInvalidValue(fmt.Sprintf("the resource %s from %s must be an output but is an input", rd.Resource, pt), + "from").ViaFieldIndex("inputs", j).ViaField("resources").ViaFieldIndex("tasks", i) + } + } + } + } + return errs +} + +// validateGraph ensures the Pipeline's dependency Graph (DAG) make sense: that there is no dependency +// cycle or that they rely on values from Tasks that ran previously, and that the PipelineResource +// is actually an output of the Task it should come from. 
+func validateGraph(tasks []PipelineTask) *apis.FieldError { + if _, err := dag.Build(PipelineTaskList(tasks)); err != nil { + return apis.ErrInvalidValue(err.Error(), "tasks") } return nil } diff --git a/pkg/apis/pipeline/v1beta1/pipeline_validation_test.go b/pkg/apis/pipeline/v1beta1/pipeline_validation_test.go index 30c5c34d1eb..96eb336c205 100644 --- a/pkg/apis/pipeline/v1beta1/pipeline_validation_test.go +++ b/pkg/apis/pipeline/v1beta1/pipeline_validation_test.go @@ -21,9 +21,13 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/tektoncd/pipeline/test/diff" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/selection" + "knative.dev/pkg/apis" ) func TestPipeline_Validate_Success(t *testing.T) { @@ -110,8 +114,9 @@ func TestPipeline_Validate_Success(t *testing.T) { func TestPipeline_Validate_Failure(t *testing.T) { tests := []struct { - name string - p *Pipeline + name string + p *Pipeline + expectedError apis.FieldError }{{ name: "period in name", p: &Pipeline{ @@ -120,16 +125,31 @@ func TestPipeline_Validate_Failure(t *testing.T) { Tasks: []PipelineTask{{Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}}}, }, }, + expectedError: apis.FieldError{ + Message: `Invalid resource name: special character . 
must not be present`, + Paths: []string{"metadata.name"}, + }, }, { name: "pipeline name too long", p: &Pipeline{ ObjectMeta: metav1.ObjectMeta{Name: "asdf123456789012345678901234567890123456789012345678901234567890"}, + Spec: PipelineSpec{ + Tasks: []PipelineTask{{Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}}}, + }, + }, + expectedError: apis.FieldError{ + Message: `Invalid resource name: length must be no more than 63 characters`, + Paths: []string{"metadata.name"}, }, }, { name: "pipeline spec missing", p: &Pipeline{ ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, }, + expectedError: apis.FieldError{ + Message: `expected at least one, got none`, + Paths: []string{"spec.description", "spec.params", "spec.resources", "spec.tasks", "spec.workspaces"}, + }, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -137,14 +157,18 @@ func TestPipeline_Validate_Failure(t *testing.T) { if err == nil { t.Errorf("Pipeline.Validate() did not return error for invalid pipeline: %s", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("Pipeline.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } func TestPipelineSpec_Validate_Failure(t *testing.T) { tests := []struct { - name string - ps *PipelineSpec + name string + ps *PipelineSpec + expectedError apis.FieldError }{{ name: "invalid pipeline with one pipeline task having taskRef and taskSpec both", ps: &PipelineSpec{ @@ -158,6 +182,10 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { TaskSpec: &EmbeddedTask{TaskSpec: getTaskSpec()}, }}, }, + expectedError: apis.FieldError{ + Message: `expected exactly one, got both`, + Paths: []string{"tasks[1].taskRef", "tasks[1].taskSpec"}, + }, }, { name: "invalid pipeline with one pipeline task having both conditions and when expressions", ps: &PipelineSpec{ @@ -175,6 +203,10 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { }}, }}, }, + expectedError: 
apis.FieldError{ + Message: `expected exactly one, got both`, + Paths: []string{"tasks[0].conditions", "tasks[0].when"}, + }, }, { name: "invalid pipeline with one pipeline task having when expression with invalid operator (not In/NotIn)", ps: &PipelineSpec{ @@ -189,6 +221,10 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { }}, }}, }, + expectedError: apis.FieldError{ + Message: `invalid value: operator "exists" is not recognized. valid operators: in,notin`, + Paths: []string{"tasks[0].when[0]"}, + }, }, { name: "invalid pipeline with one pipeline task having when expression with invalid values (empty)", ps: &PipelineSpec{ @@ -203,6 +239,10 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { }}, }}, }, + expectedError: apis.FieldError{ + Message: `invalid value: expecting non-empty values field`, + Paths: []string{"tasks[0].when[0]"}, + }, }, { name: "invalid pipeline with one pipeline task having when expression with invalid operator (missing)", ps: &PipelineSpec{ @@ -216,6 +256,10 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { }}, }}, }, + expectedError: apis.FieldError{ + Message: `invalid value: operator "" is not recognized. 
valid operators: in,notin`, + Paths: []string{"tasks[0].when[0]"}, + }, }, { name: "invalid pipeline with one pipeline task having when expression with invalid values (missing)", ps: &PipelineSpec{ @@ -229,6 +273,10 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { }}, }}, }, + expectedError: apis.FieldError{ + Message: `invalid value: expecting non-empty values field`, + Paths: []string{"tasks[0].when[0]"}, + }, }, { name: "invalid pipeline with one pipeline task having when expression with misconfigured result reference", ps: &PipelineSpec{ @@ -246,6 +294,10 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { }}, }}, }, + expectedError: apis.FieldError{ + Message: `invalid value: expected all of the expressions [tasks.a-task.resultTypo.bResult] to be result expressions but only [] were`, + Paths: []string{"tasks[1].when[0]"}, + }, }, { name: "invalid pipeline with one pipeline task having blank when expression", ps: &PipelineSpec{ @@ -259,6 +311,10 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { WhenExpressions: []WhenExpression{{}}, }}, }, + expectedError: apis.FieldError{ + Message: `missing field(s)`, + Paths: []string{"tasks[1].when[0]"}, + }, }, { name: "invalid pipeline with pipeline task having reference to resources which does not exist", ps: &PipelineSpec{ @@ -300,9 +356,16 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { }}, }}, }, + expectedError: apis.FieldError{ + Message: `invalid value: pipeline declared resources didn't match usage in Tasks: Didn't provide required values: [missing-great-resource missing-wonderful-resource missing-great-resource]`, + Paths: []string{"resources"}, + }, }, { name: "invalid pipeline spec - from referring to a pipeline task which does not exist", ps: &PipelineSpec{ + Resources: []PipelineDeclaredResource{{ + Name: "great-resource", Type: PipelineResourceTypeGit, + }}, Tasks: []PipelineTask{{ Name: "baz", TaskRef: &TaskRef{Name: "baz-task"}, }, { @@ -315,15 +378,8 @@ func 
TestPipelineSpec_Validate_Failure(t *testing.T) { }, }}, }, - }, { - name: "invalid pipeline spec with DAG having cyclic dependency", - ps: &PipelineSpec{ - Tasks: []PipelineTask{{ - Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}, RunAfter: []string{"bar"}, - }, { - Name: "bar", TaskRef: &TaskRef{Name: "bar-task"}, RunAfter: []string{"foo"}, - }}, - }, + expectedError: *apis.ErrGeneric(`invalid value: couldn't add link between foo and bar: task foo depends on bar but bar wasn't present in Pipeline`, "tasks").Also( + apis.ErrInvalidValue("expected resource great-resource to be from task bar, but task bar doesn't exist", "tasks[1].resources.inputs[0].from")), }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -331,10 +387,28 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) { if err == nil { t.Errorf("PipelineSpec.Validate() did not return error for invalid pipelineSpec: %s", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } +func TestPipelineSpec_Validate_Failure_CycleDAG(t *testing.T) { + name := "invalid pipeline spec with DAG having cyclic dependency" + ps := &PipelineSpec{ + Tasks: []PipelineTask{{ + Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}, RunAfter: []string{"bar"}, + }, { + Name: "bar", TaskRef: &TaskRef{Name: "bar-task"}, RunAfter: []string{"foo"}, + }}, + } + err := ps.Validate(context.Background()) + if err == nil { + t.Errorf("PipelineSpec.Validate() did not return error for invalid pipelineSpec: %s", name) + } +} + func TestValidatePipelineTasks_Success(t *testing.T) { tests := []struct { name string @@ -364,13 +438,18 @@ func TestValidatePipelineTasks_Success(t *testing.T) { func TestValidatePipelineTasks_Failure(t *testing.T) { tests := []struct { - name string - tasks []PipelineTask + name string + tasks []PipelineTask + expectedError apis.FieldError }{{ name: 
"pipeline task missing taskref and taskspec", tasks: []PipelineTask{{ Name: "foo", }}, + expectedError: apis.FieldError{ + Message: `expected exactly one, got neither`, + Paths: []string{"tasks[0].taskRef", "tasks[0].taskSpec"}, + }, }, { name: "pipeline task with both taskref and taskspec", tasks: []PipelineTask{{ @@ -378,30 +457,64 @@ func TestValidatePipelineTasks_Failure(t *testing.T) { TaskRef: &TaskRef{Name: "foo-task"}, TaskSpec: &EmbeddedTask{TaskSpec: getTaskSpec()}, }}, + expectedError: apis.FieldError{ + Message: `expected exactly one, got both`, + Paths: []string{"tasks[0].taskRef", "tasks[0].taskSpec"}, + }, }, { name: "pipeline task with invalid taskspec", tasks: []PipelineTask{{ Name: "foo", TaskSpec: &EmbeddedTask{TaskSpec: &TaskSpec{}}, }}, + expectedError: apis.FieldError{ + Message: `missing field(s)`, + Paths: []string{"tasks[0].taskSpec.steps"}, + }, }, { name: "pipeline tasks invalid (duplicate tasks)", tasks: []PipelineTask{ {Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}}, {Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}}, }, + expectedError: apis.FieldError{ + Message: `expected exactly one, got both`, + Paths: []string{"tasks[1].name"}, + }, }, { name: "pipeline task with empty task name", tasks: []PipelineTask{{Name: "", TaskRef: &TaskRef{Name: "foo-task"}}}, + expectedError: apis.FieldError{ + Message: `invalid value ""`, + Paths: []string{"tasks[0].name"}, + Details: "Pipeline Task name must be a valid DNS Label." + + "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }, }, { name: "pipeline task with invalid task name", tasks: []PipelineTask{{Name: "_foo", TaskRef: &TaskRef{Name: "foo-task"}}}, + expectedError: apis.FieldError{ + Message: `invalid value "_foo"`, + Paths: []string{"tasks[0].name"}, + Details: "Pipeline Task name must be a valid DNS Label." 
+ + "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }, }, { name: "pipeline task with invalid task name (camel case)", tasks: []PipelineTask{{Name: "fooTask", TaskRef: &TaskRef{Name: "foo-task"}}}, + expectedError: apis.FieldError{ + Message: `invalid value "fooTask"`, + Paths: []string{"tasks[0].name"}, + Details: "Pipeline Task name must be a valid DNS Label." + + "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }, }, { name: "pipeline task with invalid taskref name", tasks: []PipelineTask{{Name: "foo", TaskRef: &TaskRef{Name: "_foo-task"}}}, + expectedError: apis.FieldError{ + Message: `invalid value: name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')`, + Paths: []string{"tasks[0].name"}, + }, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -409,6 +522,9 @@ func TestValidatePipelineTasks_Failure(t *testing.T) { if err == nil { t.Error("Pipeline.validatePipelineTasks() did not return error for invalid pipeline tasks:", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } @@ -445,8 +561,9 @@ func TestValidateFrom_Success(t *testing.T) { func TestValidateFrom_Failure(t *testing.T) { tests := []struct { - name string - tasks []PipelineTask + name string + tasks []PipelineTask + expectedError apis.FieldError }{{ name: "invalid pipeline task - from in a pipeline with single pipeline task", tasks: []PipelineTask{{ @@ -457,7 +574,10 @@ func TestValidateFrom_Failure(t *testing.T) { Name: "the-resource", Resource: "great-resource", From: []string{"bar"}, }}, }, - }, + }}, + expectedError: 
apis.FieldError{ + Message: `invalid value: expected resource great-resource to be from task bar, but task bar doesn't exist`, + Paths: []string{"tasks[0].resources.inputs[0].from"}, }, }, { name: "invalid pipeline task - from referencing pipeline task which does not exist", @@ -472,6 +592,10 @@ func TestValidateFrom_Failure(t *testing.T) { }}, }, }}, + expectedError: apis.FieldError{ + Message: `invalid value: expected resource great-resource to be from task bar, but task bar doesn't exist`, + Paths: []string{"tasks[1].resources.inputs[0].from"}, + }, }, { name: "invalid pipeline task - pipeline task condition resource does not exist", tasks: []PipelineTask{{ @@ -486,6 +610,10 @@ func TestValidateFrom_Failure(t *testing.T) { }}, }}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: the resource missing-resource from foo must be an output but is an input`, + Paths: []string{"tasks[1].resources.inputs[0].from"}, + }, }, { name: "invalid pipeline task - from resource referring to a pipeline task which has no output", tasks: []PipelineTask{{ @@ -505,6 +633,10 @@ func TestValidateFrom_Failure(t *testing.T) { }}, }, }}, + expectedError: apis.FieldError{ + Message: `invalid value: the resource wonderful-resource from bar must be an output but is an input`, + Paths: []string{"tasks[1].resources.inputs[0].from"}, + }, }, { name: "invalid pipeline task - from resource referring to input resource of the pipeline task instead of output", tasks: []PipelineTask{{ @@ -527,6 +659,10 @@ func TestValidateFrom_Failure(t *testing.T) { }}, }, }}, + expectedError: apis.FieldError{ + Message: `invalid value: the resource some-resource from bar must be an output but is an input`, + Paths: []string{"tasks[1].resources.inputs[0].from"}, + }, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -534,6 +670,9 @@ func TestValidateFrom_Failure(t *testing.T) { if err == nil { t.Error("Pipeline.validateFrom() did not return error for invalid pipeline task 
resources: ", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } @@ -633,9 +772,10 @@ func TestValidateDeclaredResources_Success(t *testing.T) { func TestValidateDeclaredResources_Failure(t *testing.T) { tests := []struct { - name string - resources []PipelineDeclaredResource - tasks []PipelineTask + name string + resources []PipelineDeclaredResource + tasks []PipelineTask + expectedError apis.FieldError }{{ name: "duplicate resource declaration - resource declarations must be unique", resources: []PipelineDeclaredResource{{ @@ -652,6 +792,10 @@ func TestValidateDeclaredResources_Failure(t *testing.T) { }}, }, }}, + expectedError: apis.FieldError{ + Message: `invalid value: resource with name "duplicate-resource" appears more than once`, + Paths: []string{"resources"}, + }, }, { name: "output resource is missing from resource declarations", resources: []PipelineDeclaredResource{{ @@ -669,6 +813,10 @@ func TestValidateDeclaredResources_Failure(t *testing.T) { }}, }, }}, + expectedError: apis.FieldError{ + Message: `invalid value: pipeline declared resources didn't match usage in Tasks: Didn't provide required values: [missing-resource]`, + Paths: []string{"resources"}, + }, }, { name: "input resource is missing from resource declarations", resources: []PipelineDeclaredResource{{ @@ -686,6 +834,10 @@ func TestValidateDeclaredResources_Failure(t *testing.T) { }}, }, }}, + expectedError: apis.FieldError{ + Message: `invalid value: pipeline declared resources didn't match usage in Tasks: Didn't provide required values: [missing-resource]`, + Paths: []string{"resources"}, + }, }, { name: "invalid condition only resource -" + " pipeline task condition referring to a resource which is missing from resource declarations", @@ -699,6 +851,10 @@ func TestValidateDeclaredResources_Failure(t *testing.T) { }}, }}, }}, + 
expectedError: apis.FieldError{ + Message: `invalid value: pipeline declared resources didn't match usage in Tasks: Didn't provide required values: [missing-resource]`, + Paths: []string{"resources"}, + }, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -706,6 +862,9 @@ func TestValidateDeclaredResources_Failure(t *testing.T) { if err == nil { t.Errorf("Pipeline.validateDeclaredResources() did not return error for invalid resource declarations: %s", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } @@ -723,12 +882,9 @@ func TestValidateGraph_Success(t *testing.T) { }, { Name: "foo-bar", TaskRef: &TaskRef{Name: "bar-task"}, RunAfter: []string{"foo1", "bar1"}, }} - t.Run(desc, func(t *testing.T) { - err := validateGraph(tasks) - if err != nil { - t.Errorf("Pipeline.validateGraph() returned error for valid DAG of pipeline tasks: %s: %v", desc, err) - } - }) + if err := validateGraph(tasks); err != nil { + t.Errorf("Pipeline.validateGraph() returned error for valid DAG of pipeline tasks: %s: %v", desc, err) + } } func TestValidateGraph_Failure(t *testing.T) { @@ -738,13 +894,10 @@ func TestValidateGraph_Failure(t *testing.T) { }, { Name: "bar", TaskRef: &TaskRef{Name: "bar-task"}, RunAfter: []string{"foo"}, }} - t.Run(desc, func(t *testing.T) { - err := validateGraph(tasks) - if err == nil { - t.Error("Pipeline.validateGraph() did not return error for invalid DAG of pipeline tasks:", desc) + if err := validateGraph(tasks); err == nil { + t.Error("Pipeline.validateGraph() did not return error for invalid DAG of pipeline tasks:", desc) - } - }) + } } func TestValidateParamResults_Success(t *testing.T) { @@ -766,12 +919,9 @@ func TestValidateParamResults_Success(t *testing.T) { Name: "a-param", Value: ArrayOrString{Type: ParamTypeString, StringVal: "$(params.foo) and 
$(tasks.a-task.results.output)"}, }}, }} - t.Run(desc, func(t *testing.T) { - err := validateParamResults(tasks) - if err != nil { - t.Errorf("Pipeline.validateParamResults() returned error for valid pipeline: %s: %v", desc, err) - } - }) + if err := validateParamResults(tasks); err != nil { + t.Errorf("Pipeline.validateParamResults() returned error for valid pipeline: %s: %v", desc, err) + } } func TestValidateParamResults_Failure(t *testing.T) { @@ -783,12 +933,17 @@ func TestValidateParamResults_Failure(t *testing.T) { Params: []Param{{ Name: "a-param", Value: ArrayOrString{Type: ParamTypeString, StringVal: "$(tasks.a-task.resultTypo.bResult)"}}}, }} - t.Run(desc, func(t *testing.T) { - err := validateParamResults(tasks) - if err == nil { - t.Errorf("Pipeline.validateParamResults() did not return error for invalid pipeline: %s", desc) - } - }) + expectedError := apis.FieldError{ + Message: `invalid value: expected all of the expressions [tasks.a-task.resultTypo.bResult] to be result expressions but only [] were`, + Paths: []string{"tasks[1].params[a-param].value"}, + } + err := validateParamResults(tasks) + if err == nil { + t.Errorf("Pipeline.validateParamResults() did not return error for invalid pipeline: %s", desc) + } + if d := cmp.Diff(expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("Pipeline.validateParamResults() errors diff %s", diff.PrintWantGot(d)) + } } func TestValidatePipelineResults_Success(t *testing.T) { @@ -798,12 +953,9 @@ func TestValidatePipelineResults_Success(t *testing.T) { Description: "this is my pipeline result", Value: "$(tasks.a-task.results.output)", }} - t.Run(desc, func(t *testing.T) { - err := validatePipelineResults(results) - if err != nil { - t.Errorf("Pipeline.validatePipelineResults() returned error for valid pipeline: %s: %v", desc, err) - } - }) + if err := validatePipelineResults(results); err != nil { + t.Errorf("Pipeline.validatePipelineResults() returned error for 
valid pipeline: %s: %v", desc, err) + } } func TestValidatePipelineResults_Failure(t *testing.T) { @@ -813,12 +965,17 @@ func TestValidatePipelineResults_Failure(t *testing.T) { Description: "this is my pipeline result", Value: "$(tasks.a-task.results.output.output)", }} - t.Run(desc, func(t *testing.T) { - err := validatePipelineResults(results) - if err == nil { - t.Errorf("Pipeline.validatePipelineResults() did not return for invalid pipeline: %s", desc) - } - }) + expectedError := apis.FieldError{ + Message: `invalid value: expected all of the expressions [tasks.a-task.results.output.output] to be result expressions but only [] were`, + Paths: []string{"results[0].value"}, + } + err := validatePipelineResults(results) + if err == nil { + t.Errorf("Pipeline.validatePipelineResults() did not return for invalid pipeline: %s", desc) + } + if d := cmp.Diff(expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("Pipeline.validateParamResults() errors diff %s", diff.PrintWantGot(d)) + } } func TestValidatePipelineParameterVariables_Success(t *testing.T) { @@ -913,9 +1070,10 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) { func TestValidatePipelineParameterVariables_Failure(t *testing.T) { tests := []struct { - name string - params []ParamSpec - tasks []PipelineTask + name string + params []ParamSpec + tasks []PipelineTask + expectedError apis.FieldError }{{ name: "invalid pipeline task with a parameter which is missing from the param declarations", tasks: []PipelineTask{{ @@ -925,6 +1083,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "a-param", Value: ArrayOrString{Type: ParamTypeString, StringVal: "$(params.does-not-exist)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.does-not-exist)"`, + Paths: []string{"[0].params[a-param]"}, + }, }, { name: "invalid string parameter variables in when expression, missing input param 
from the param declarations", tasks: []PipelineTask{{ @@ -936,6 +1098,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Values: []string{"foo"}, }}, }}, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.baz)"`, + Paths: []string{"[0].when[0].input"}, + }, }, { name: "invalid string parameter variables in when expression, missing values param from the param declarations", tasks: []PipelineTask{{ @@ -947,6 +1113,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Values: []string{"$(params.foo-is-baz)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.foo-is-baz)"`, + Paths: []string{"[0].when[0].values"}, + }, }, { name: "invalid string parameter variables in when expression, array reference in input", params: []ParamSpec{{ @@ -961,6 +1131,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Values: []string{"foo"}, }}, }}, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.foo)"`, + Paths: []string{"[0].when[0].input"}, + }, }, { name: "invalid string parameter variables in when expression, array reference in values", params: []ParamSpec{{ @@ -975,6 +1149,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Values: []string{"$(params.foo)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.foo)"`, + Paths: []string{"[0].when[0].values"}, + }, }, { name: "invalid pipeline task with a parameter combined with missing param from the param declarations", params: []ParamSpec{{ @@ -987,6 +1165,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "a-param", Value: ArrayOrString{Type: ParamTypeString, StringVal: "$(params.foo) and $(params.does-not-exist)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.foo) and $(params.does-not-exist)"`, + Paths: 
[]string{"[0].params[a-param]"}, + }, }, { name: "invalid pipeline task with two parameters and one of them missing from the param declarations", params: []ParamSpec{{ @@ -1001,6 +1183,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "b-param", Value: ArrayOrString{Type: ParamTypeString, StringVal: "$(params.does-not-exist)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.does-not-exist)"`, + Paths: []string{"[0].params[b-param]"}, + }, }, { name: "invalid parameter type", params: []ParamSpec{{ @@ -1010,6 +1196,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: invalidtype`, + Paths: []string{"params[foo].type"}, + }, }, { name: "array parameter mismatching default type", params: []ParamSpec{{ @@ -1019,6 +1209,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}, }}, + expectedError: apis.FieldError{ + Message: `"array" type does not match default value's type: "string"`, + Paths: []string{"params[foo].default.type", "params[foo].type"}, + }, }, { name: "string parameter mismatching default type", params: []ParamSpec{{ @@ -1028,6 +1222,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}, }}, + expectedError: apis.FieldError{ + Message: `"string" type does not match default value's type: "array"`, + Paths: []string{"params[foo].default.type", "params[foo].type"}, + }, }, { name: "array parameter used as string", params: []ParamSpec{{ @@ -1040,6 +1238,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "a-param", Value: ArrayOrString{Type: ParamTypeString, StringVal: "$(params.baz)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `"string" type does not match default value's type: 
"array"`, + Paths: []string{"params[baz].default.type", "params[baz].type"}, + }, }, { name: "star array parameter used as string", params: []ParamSpec{{ @@ -1052,6 +1254,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "a-param", Value: ArrayOrString{Type: ParamTypeString, StringVal: "$(params.baz[*])"}, }}, }}, + expectedError: apis.FieldError{ + Message: `"string" type does not match default value's type: "array"`, + Paths: []string{"params[baz].default.type", "params[baz].type"}, + }, }, { name: "array parameter string template not isolated", params: []ParamSpec{{ @@ -1064,6 +1270,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "a-param", Value: ArrayOrString{Type: ParamTypeArray, ArrayVal: []string{"value: $(params.baz)", "last"}}, }}, }}, + expectedError: apis.FieldError{ + Message: `"string" type does not match default value's type: "array"`, + Paths: []string{"params[baz].default.type", "params[baz].type"}, + }, }, { name: "star array parameter string template not isolated", params: []ParamSpec{{ @@ -1076,6 +1286,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "a-param", Value: ArrayOrString{Type: ParamTypeArray, ArrayVal: []string{"value: $(params.baz[*])", "last"}}, }}, }}, + expectedError: apis.FieldError{ + Message: `"string" type does not match default value's type: "array"`, + Paths: []string{"params[baz].default.type", "params[baz].type"}, + }, }, { name: "multiple string parameters with the same name", params: []ParamSpec{{ @@ -1087,6 +1301,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}, }}, + expectedError: apis.FieldError{ + Message: `parameter appears more than once`, + Paths: []string{"params[baz]"}, + }, }, { name: "multiple array parameters with the same name", params: []ParamSpec{{ @@ -1098,6 +1316,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { 
Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}, }}, + expectedError: apis.FieldError{ + Message: `parameter appears more than once`, + Paths: []string{"params[baz]"}, + }, }, { name: "multiple different type parameters with the same name", params: []ParamSpec{{ @@ -1109,6 +1331,10 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { Name: "foo", TaskRef: &TaskRef{Name: "foo-task"}, }}, + expectedError: apis.FieldError{ + Message: `parameter appears more than once`, + Paths: []string{"params[baz]"}, + }, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1116,6 +1342,9 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) { if err == nil { t.Errorf("Pipeline.validatePipelineParameterVariables() did not return error for invalid pipeline parameters: %s", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } @@ -1140,9 +1369,10 @@ func TestValidatePipelineWorkspaces_Success(t *testing.T) { func TestValidatePipelineWorkspaces_Failure(t *testing.T) { tests := []struct { - name string - workspaces []PipelineWorkspaceDeclaration - tasks []PipelineTask + name string + workspaces []PipelineWorkspaceDeclaration + tasks []PipelineTask + expectedError apis.FieldError }{{ name: "workspace bindings relying on a non-existent pipeline workspace cause an error", workspaces: []PipelineWorkspaceDeclaration{{ @@ -1155,6 +1385,10 @@ func TestValidatePipelineWorkspaces_Failure(t *testing.T) { Workspace: "pipelineWorkspaceName", }}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: pipeline task "foo" expects workspace with name "pipelineWorkspaceName" but none exists in pipeline spec`, + Paths: []string{"tasks[0].workspaces[0]"}, + }, }, { name: "multiple workspaces sharing the same name are not allowed", workspaces: []PipelineWorkspaceDeclaration{{ @@ -1165,6 
+1399,10 @@ func TestValidatePipelineWorkspaces_Failure(t *testing.T) { tasks: []PipelineTask{{ Name: "foo", TaskRef: &TaskRef{Name: "foo"}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: workspace with name "foo" appears more than once`, + Paths: []string{"workspaces[1]"}, + }, }, { name: "workspace name must not be empty", workspaces: []PipelineWorkspaceDeclaration{{ @@ -1173,6 +1411,10 @@ func TestValidatePipelineWorkspaces_Failure(t *testing.T) { tasks: []PipelineTask{{ Name: "foo", TaskRef: &TaskRef{Name: "foo"}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: workspace 0 has empty name`, + Paths: []string{"workspaces[0]"}, + }, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1180,6 +1422,9 @@ func TestValidatePipelineWorkspaces_Failure(t *testing.T) { if err == nil { t.Errorf("Pipeline.validatePipelineWorkspaces() did not return error for invalid pipeline workspaces: %s", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } @@ -1261,8 +1506,9 @@ func TestValidatePipelineWithFinalTasks_Success(t *testing.T) { func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { tests := []struct { - name string - p *Pipeline + name string + p *Pipeline + expectedError apis.FieldError }{{ name: "invalid pipeline without any non-final task (tasks set to nil) but at least one final task", p: &Pipeline{ @@ -1275,6 +1521,10 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { }}, }, }, + expectedError: apis.FieldError{ + Message: `invalid value: spec.tasks is empty but spec.finally has 1 tasks`, + Paths: []string{"spec.finally"}, + }, }, { name: "invalid pipeline without any non-final task (tasks set to empty list of pipeline task) but at least one final task", p: &Pipeline{ @@ -1287,6 +1537,13 @@ func TestValidatePipelineWithFinalTasks_Failure(t 
*testing.T) { }}, }, }, + expectedError: *apis.ErrMissingOneOf("spec.tasks[0].taskRef", "spec.tasks[0].taskSpec").Also( + &apis.FieldError{ + Message: `invalid value ""`, + Paths: []string{"spec.tasks[0].name"}, + Details: "Pipeline Task name must be a valid DNS Label." + + "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }), }, { name: "invalid pipeline with valid non-final tasks but empty finally section", p: &Pipeline{ @@ -1299,6 +1556,13 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { Finally: []PipelineTask{{}}, }, }, + expectedError: *apis.ErrMissingOneOf("spec.finally[0].taskRef", "spec.finally[0].taskSpec").Also( + &apis.FieldError{ + Message: `invalid value ""`, + Paths: []string{"spec.finally[0].name"}, + Details: "Pipeline Task name must be a valid DNS Label." + + "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }), }, { name: "invalid pipeline with duplicate final tasks", p: &Pipeline{ @@ -1317,6 +1581,10 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { }}, }, }, + expectedError: apis.FieldError{ + Message: `expected exactly one, got both`, + Paths: []string{"spec.finally[1].name"}, + }, }, { name: "invalid pipeline with same task name for final and non final task", p: &Pipeline{ @@ -1332,8 +1600,12 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { }}, }, }, + expectedError: apis.FieldError{ + Message: `expected exactly one, got both`, + Paths: []string{"spec.finally[0].name"}, + }, }, { - name: "final task missing tasfref and taskspec", + name: "final task missing taskref and taskspec", p: &Pipeline{ ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, Spec: PipelineSpec{ @@ -1346,6 +1618,10 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { }}, }, }, + expectedError: apis.FieldError{ + Message: `expected exactly one, got neither`, + Paths: 
[]string{"spec.finally[0].taskRef", "spec.finally[0].taskSpec"}, + }, }, { name: "final task with both tasfref and taskspec", p: &Pipeline{ @@ -1362,6 +1638,10 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { }}, }, }, + expectedError: apis.FieldError{ + Message: `expected exactly one, got both`, + Paths: []string{"spec.finally[0].taskRef", "spec.finally[0].taskSpec"}, + }, }, { name: "extra parameter called final-param provided to final task which is not specified in the Pipeline", p: &Pipeline{ @@ -1383,6 +1663,10 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { }}, }, }, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.foo) and $(params.does-not-exist)"`, + Paths: []string{"spec.finally[0].params[final-param]"}, + }, }, { name: "invalid pipeline with invalid final tasks with runAfter and conditions", p: &Pipeline{ @@ -1405,6 +1689,10 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { }}, }, }, + expectedError: apis.FieldError{ + Message: `invalid value: no runAfter allowed under spec.finally, final task final-task-1 has runAfter specified`, + Paths: []string{"spec.finally[0]"}, + }, }, { name: "invalid pipeline - workspace bindings in final task relying on a non-existent pipeline workspace", p: &Pipeline{ @@ -1425,14 +1713,19 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { }}, }, }, + expectedError: apis.FieldError{ + Message: `invalid value: pipeline task "final-task" expects workspace with name "pipeline-shared-workspace" but none exists in pipeline spec`, + Paths: []string{"spec.finally[0].workspaces[0]"}, + }, }, { name: "invalid pipeline with no tasks under tasks section and empty finally section", p: &Pipeline{ ObjectMeta: metav1.ObjectMeta{Name: "pipeline"}, Spec: PipelineSpec{ - Finally: []PipelineTask{{}}, + Finally: []PipelineTask{}, }, }, + expectedError: *apis.ErrGeneric("expected at least one, got none", "spec.description", "spec.params", 
"spec.resources", "spec.tasks", "spec.workspaces"), }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1440,6 +1733,9 @@ func TestValidatePipelineWithFinalTasks_Failure(t *testing.T) { if err == nil { t.Errorf("Pipeline.Validate() did not return error for invalid pipeline with finally: %s", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } @@ -1485,18 +1781,24 @@ func TestValidateTasksAndFinallySection_Failure(t *testing.T) { Name: "final-task", TaskRef: &TaskRef{Name: "foo"}, }}, } - t.Run(desc, func(t *testing.T) { - err := validateTasksAndFinallySection(ps) - if err == nil { - t.Errorf("Pipeline.ValidateTasksAndFinallySection() did not return error for invalid pipeline with finally: %s", desc) - } - }) + expectedError := apis.FieldError{ + Message: `invalid value: spec.tasks is empty but spec.finally has 1 tasks`, + Paths: []string{"finally"}, + } + err := validateTasksAndFinallySection(ps) + if err == nil { + t.Errorf("Pipeline.ValidateTasksAndFinallySection() did not return error for invalid pipeline with finally: %s", desc) + } + if d := cmp.Diff(expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("Pipeline.validateParamResults() errors diff %s", diff.PrintWantGot(d)) + } } func TestValidateFinalTasks_Failure(t *testing.T) { tests := []struct { - name string - finalTasks []PipelineTask + name string + finalTasks []PipelineTask + expectedError apis.FieldError }{{ name: "invalid pipeline with final task specifying runAfter", finalTasks: []PipelineTask{{ @@ -1504,6 +1806,10 @@ func TestValidateFinalTasks_Failure(t *testing.T) { TaskRef: &TaskRef{Name: "final-task"}, RunAfter: []string{"non-final-task"}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: no runAfter allowed under spec.finally, final task final-task has 
runAfter specified`, + Paths: []string{"finally[0]"}, + }, }, { name: "invalid pipeline with final task specifying conditions", finalTasks: []PipelineTask{{ @@ -1513,6 +1819,10 @@ func TestValidateFinalTasks_Failure(t *testing.T) { ConditionRef: "some-condition", }}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: no conditions allowed under spec.finally, final task final-task has conditions specified`, + Paths: []string{"finally[0]"}, + }, }, { name: "invalid pipeline with final task output resources referring to other task input", finalTasks: []PipelineTask{{ @@ -1524,6 +1834,10 @@ func TestValidateFinalTasks_Failure(t *testing.T) { }}, }, }}, + expectedError: apis.FieldError{ + Message: `no from allowed under inputs, final task final-input-2 has from specified`, + Paths: []string{"finally[0].resources.inputs[0]"}, + }, }, { name: "invalid pipeline with final tasks having reference to task results", finalTasks: []PipelineTask{{ @@ -1533,6 +1847,10 @@ func TestValidateFinalTasks_Failure(t *testing.T) { Name: "param1", Value: ArrayOrString{Type: ParamTypeString, StringVal: "$(tasks.a-task.results.output)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: no task result allowed under params,final task param param1 has set task result as its value`, + Paths: []string{"finally[0].params"}, + }, }, { name: "invalid pipeline with final task specifying when expressions", finalTasks: []PipelineTask{{ @@ -1544,6 +1862,10 @@ func TestValidateFinalTasks_Failure(t *testing.T) { Values: []string{"foo", "bar"}, }}, }}, + expectedError: apis.FieldError{ + Message: `invalid value: no when expressions allowed under spec.finally, final task final-task has when expressions specified`, + Paths: []string{"finally[0]"}, + }, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1551,6 +1873,9 @@ func TestValidateFinalTasks_Failure(t *testing.T) { if err == nil { t.Errorf("Pipeline.ValidateFinalTasks() did not return error for 
invalid pipeline: %s", tt.name) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } @@ -1616,8 +1941,9 @@ func TestContextValid(t *testing.T) { func TestContextInvalid(t *testing.T) { tests := []struct { - name string - tasks []PipelineTask + name string + tasks []PipelineTask + expectedError apis.FieldError }{{ name: "invalid string context variable for pipeline", tasks: []PipelineTask{{ @@ -1627,6 +1953,10 @@ func TestContextInvalid(t *testing.T) { Name: "a-param", Value: ArrayOrString{StringVal: "$(context.pipeline.missing)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(context.pipeline.missing)"`, + Paths: []string{"value"}, + }, }, { name: "invalid string context variable for pipelineRun", tasks: []PipelineTask{{ @@ -1636,6 +1966,10 @@ func TestContextInvalid(t *testing.T) { Name: "a-param", Value: ArrayOrString{StringVal: "$(context.pipelineRun.missing)"}, }}, }}, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(context.pipelineRun.missing)"`, + Paths: []string{"value"}, + }, }, { name: "invalid array context variables for pipeline and pipelineRun", tasks: []PipelineTask{{ @@ -1645,12 +1979,18 @@ func TestContextInvalid(t *testing.T) { Name: "a-param", Value: ArrayOrString{ArrayVal: []string{"$(context.pipeline.missing)", "and", "$(context.pipelineRun.missing)"}}, }}, }}, + expectedError: *apis.ErrGeneric(`non-existent variable in "$(context.pipeline.missing)"`, "value").Also( + apis.ErrGeneric(`non-existent variable in "$(context.pipelineRun.missing)"`, "value")), }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := validatePipelineContextVariables(tt.tasks); err == nil { + err := validatePipelineContextVariables(tt.tasks) + if err == nil { t.Errorf("Pipeline.validatePipelineContextVariables() did not return error for 
invalid pipeline parameters: %s, %s", tt.name, tt.tasks[0].Params) } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("PipelineSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } }) } } diff --git a/pkg/apis/pipeline/v1beta1/pipelinerun_defaults_test.go b/pkg/apis/pipeline/v1beta1/pipelinerun_defaults_test.go index 4761b1bd1ba..1b039a44670 100644 --- a/pkg/apis/pipeline/v1beta1/pipelinerun_defaults_test.go +++ b/pkg/apis/pipeline/v1beta1/pipelinerun_defaults_test.go @@ -41,7 +41,8 @@ func TestPipelineRunSpec_SetDefaults(t *testing.T) { desc: "timeout is nil", prs: &v1beta1.PipelineRunSpec{}, want: &v1beta1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { @@ -50,14 +51,16 @@ func TestPipelineRunSpec_SetDefaults(t *testing.T) { Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, want: &v1beta1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, }, { desc: "pod template is nil", prs: &v1beta1.PipelineRunSpec{}, want: &v1beta1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { @@ -70,7 +73,8 @@ func TestPipelineRunSpec_SetDefaults(t *testing.T) { }, }, want: &v1beta1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, PodTemplate: &v1beta1.PodTemplate{ 
NodeSelector: map[string]string{ "label": "value", @@ -103,7 +107,8 @@ func TestPipelineRunDefaulting(t *testing.T) { in: &v1beta1.PipelineRun{}, want: &v1beta1.PipelineRun{ Spec: v1beta1.PipelineRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, }, { @@ -115,8 +120,9 @@ func TestPipelineRunDefaulting(t *testing.T) { }, want: &v1beta1.PipelineRun{ Spec: v1beta1.PipelineRunSpec{ - PipelineRef: &v1beta1.PipelineRef{Name: "foo"}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + PipelineRef: &v1beta1.PipelineRef{Name: "foo"}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, wc: contexts.WithUpgradeViaDefaulting, @@ -139,7 +145,8 @@ func TestPipelineRunDefaulting(t *testing.T) { Type: "string", }}, }, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, wc: contexts.WithUpgradeViaDefaulting, @@ -152,8 +159,9 @@ func TestPipelineRunDefaulting(t *testing.T) { }, want: &v1beta1.PipelineRun{ Spec: v1beta1.PipelineRunSpec{ - PipelineRef: &v1beta1.PipelineRef{Name: "foo"}, - Timeout: &metav1.Duration{Duration: 5 * time.Minute}, + PipelineRef: &v1beta1.PipelineRef{Name: "foo"}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 5 * time.Minute}, }, }, wc: func(ctx context.Context) context.Context { diff --git a/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go index 21dafdb628b..907e2ab5acd 100644 --- a/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go +++ 
b/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go @@ -21,7 +21,6 @@ import ( "fmt" "github.com/tektoncd/pipeline/pkg/apis/validate" - "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" ) @@ -29,64 +28,50 @@ var _ apis.Validatable = (*PipelineRun)(nil) // Validate pipelinerun func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata"); err != nil { - return err - } - return pr.Spec.Validate(ctx) + errs := validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata") + return errs.Also(pr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } // Validate pipelinerun spec -func (ps *PipelineRunSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(ps, &PipelineRunSpec{}) { - return apis.ErrMissingField("spec") - } - +func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { // can't have both pipelineRef and pipelineSpec at the same time if (ps.PipelineRef != nil && ps.PipelineRef.Name != "") && ps.PipelineSpec != nil { - return apis.ErrDisallowedFields("spec.pipelineref", "spec.pipelinespec") + errs = errs.Also(apis.ErrDisallowedFields("pipelineref", "pipelinespec")) } // Check that one of PipelineRef and PipelineSpec is present if (ps.PipelineRef == nil || (ps.PipelineRef != nil && ps.PipelineRef.Name == "")) && ps.PipelineSpec == nil { - return apis.ErrMissingField("spec.pipelineref.name", "spec.pipelinespec") + errs = errs.Also(apis.ErrMissingField("pipelineref.name", "pipelinespec")) } // Validate PipelineSpec if it's present if ps.PipelineSpec != nil { - if err := ps.PipelineSpec.Validate(ctx); err != nil { - return err - } + errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelinespec")) } if ps.Timeout != nil { // timeout should be a valid duration of at least 0. 
if ps.Timeout.Duration < 0 { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ps.Timeout.Duration.String()), "spec.timeout") + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ps.Timeout.Duration.String()), "timeout")) } } if ps.Status != "" { if ps.Status != PipelineRunSpecStatusCancelled { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be %s", ps.Status, PipelineRunSpecStatusCancelled), "spec.status") + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be %s", ps.Status, PipelineRunSpecStatusCancelled), "status")) } } if ps.Workspaces != nil { wsNames := make(map[string]int) for idx, ws := range ps.Workspaces { - field := fmt.Sprintf("spec.workspaces[%d]", idx) - if err := ws.Validate(ctx).ViaField(field); err != nil { - return err - } + errs = errs.Also(ws.Validate(ctx).ViaFieldIndex("workspaces", idx)) if prevIdx, alreadyExists := wsNames[ws.Name]; alreadyExists { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace %q provided by pipelinerun more than once, at index %d and %d", ws.Name, prevIdx, idx), - Paths: []string{"spec.workspaces"}, - } + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace %q provided by pipelinerun more than once, at index %d and %d", ws.Name, prevIdx, idx), "name").ViaFieldIndex("workspaces", idx)) } wsNames[ws.Name] = idx } } - return nil + return errs } diff --git a/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go b/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go index 907bf4dae74..4c8846c934d 100644 --- a/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go +++ b/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go @@ -35,21 +35,17 @@ func TestPipelineRun_Invalidate(t *testing.T) { pr v1beta1.PipelineRun want *apis.FieldError }{ - { - name: "invalid pipelinerun", - pr: v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: "prmetaname", - }, - }, - want: apis.ErrMissingField("spec"), - }, { name: "invalid pipelinerun metadata", pr: 
v1beta1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun.name", }, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{ + Name: "prname", + }, + }, }, want: &apis.FieldError{ Message: "Invalid resource name: special character . must not be present", @@ -111,34 +107,70 @@ func TestPipelineRun_Validate(t *testing.T) { tests := []struct { name string pr v1beta1.PipelineRun - }{ - { - name: "normal case", - pr: v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pipelinelineName", - }, - Spec: v1beta1.PipelineRunSpec{ - PipelineRef: &v1beta1.PipelineRef{ - Name: "prname", - }, + }{{ + name: "normal case", + pr: v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pipelinelineName", + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{ + Name: "prname", }, }, - }, { - name: "no timeout", - pr: v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pipelinelineName", + }, + }, { + name: "no timeout", + pr: v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pipelinelineName", + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{ + Name: "prname", }, - Spec: v1beta1.PipelineRunSpec{ - PipelineRef: &v1beta1.PipelineRef{ - Name: "prname", - }, - Timeout: &metav1.Duration{Duration: 0}, + Timeout: &metav1.Duration{Duration: 0}, + }, + }, + }, { + name: "array param with pipelinespec and taskspec", + pr: v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pipelinelineName", + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineSpec: &v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "pipeline-words", + Type: v1beta1.ParamTypeArray, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "echoit", + Params: []v1beta1.Param{{ + Name: "task-words", + Value: v1beta1.ArrayOrString{ + ArrayVal: []string{"$(params.pipeline-words)"}, + }, + }}, + TaskSpec: &v1beta1.EmbeddedTask{TaskSpec: &v1beta1.TaskSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "task-words", + 
Type: v1beta1.ParamTypeArray, + }}, + Steps: []v1beta1.Step{{ + Container: corev1.Container{ + Name: "echo", + Image: "ubuntu", + Command: []string{"echo"}, + Args: []string{"$(params.task-words[*])"}, + }, + }}, + }}, + }}, }, }, }, - } + }} for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { @@ -155,15 +187,11 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) { spec v1beta1.PipelineRunSpec wantErr *apis.FieldError }{{ - name: "Empty pipelineSpec", - spec: v1beta1.PipelineRunSpec{}, - wantErr: apis.ErrMissingField("spec"), - }, { name: "pipelineRef without Pipeline Name", spec: v1beta1.PipelineRunSpec{ PipelineRef: &v1beta1.PipelineRef{}, }, - wantErr: apis.ErrMissingField("spec.pipelineref.name", "spec.pipelinespec"), + wantErr: apis.ErrMissingField("pipelineref.name", "pipelinespec"), }, { name: "pipelineRef and pipelineSpec together", spec: v1beta1.PipelineRunSpec{ @@ -178,7 +206,7 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) { }, }}}, }, - wantErr: apis.ErrDisallowedFields("spec.pipelinespec", "spec.pipelineref"), + wantErr: apis.ErrDisallowedFields("pipelinespec", "pipelineref"), }, { name: "workspaces may only appear once", spec: v1beta1.PipelineRunSpec{ @@ -195,7 +223,7 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) { }, wantErr: &apis.FieldError{ Message: `workspace "ws" provided by pipelinerun more than once, at index 0 and 1`, - Paths: []string{"spec.workspaces"}, + Paths: []string{"workspaces[1].name"}, }, }, { name: "workspaces must contain a valid volume config", @@ -210,11 +238,11 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) { wantErr: &apis.FieldError{ Message: "expected exactly one, got neither", Paths: []string{ - "spec.workspaces[0].configmap", - "spec.workspaces[0].emptydir", - "spec.workspaces[0].persistentvolumeclaim", - "spec.workspaces[0].secret", - "spec.workspaces[0].volumeclaimtemplate", + "workspaces[0].configmap", + "workspaces[0].emptydir", + "workspaces[0].persistentvolumeclaim", + 
"workspaces[0].secret", + "workspaces[0].volumeclaimtemplate", }, }, }} diff --git a/pkg/apis/pipeline/v1beta1/resource_types.go b/pkg/apis/pipeline/v1beta1/resource_types.go index 94225ebc66f..91192d3b4f0 100644 --- a/pkg/apis/pipeline/v1beta1/resource_types.go +++ b/pkg/apis/pipeline/v1beta1/resource_types.go @@ -123,10 +123,10 @@ type PipelineResourceResult struct { Key string `json:"key"` Value string `json:"value"` ResourceName string `json:"resourceName,omitempty"` - // This field should be deprecated and removed in the next API version. + // The field ResourceRef should be deprecated and removed in the next API version. // See https://github.com/tektoncd/pipeline/issues/2694 for more information. - ResourceRef PipelineResourceRef `json:"resourceRef,omitempty"` - ResultType ResultType `json:"type,omitempty"` + ResourceRef *PipelineResourceRef `json:"resourceRef,omitempty"` + ResultType ResultType `json:"type,omitempty"` } // ResultType used to find out whether a PipelineResourceResult is from a task result or not diff --git a/pkg/apis/pipeline/v1beta1/resource_types_validation.go b/pkg/apis/pipeline/v1beta1/resource_types_validation.go index a70dc0f094d..95cfa771101 100644 --- a/pkg/apis/pipeline/v1beta1/resource_types_validation.go +++ b/pkg/apis/pipeline/v1beta1/resource_types_validation.go @@ -25,29 +25,19 @@ import ( "knative.dev/pkg/apis" ) -func (tr *TaskResources) Validate(ctx context.Context) *apis.FieldError { - if tr == nil { - return nil - } - if err := validateTaskResources(tr.Inputs, "inputs"); err != nil { - return err - } - if err := validateTaskResources(tr.Outputs, "outputs"); err != nil { - return err +func (tr *TaskResources) Validate(ctx context.Context) (errs *apis.FieldError) { + if tr != nil { + errs = errs.Also(validateTaskResources(tr.Inputs).ViaField("inputs")) + errs = errs.Also(validateTaskResources(tr.Outputs).ViaField("outputs")) } - return nil + return errs } -func validateTaskResources(resources []TaskResource, name 
string) *apis.FieldError { - for _, resource := range resources { - if err := validateResourceType(resource, fmt.Sprintf("taskspec.resources.%s.%s.type", name, resource.Name)); err != nil { - return err - } +func validateTaskResources(resources []TaskResource) (errs *apis.FieldError) { + for idx, resource := range resources { + errs = errs.Also(validateResourceType(resource, fmt.Sprintf("%s.type", resource.Name))).ViaIndex(idx) } - if err := checkForDuplicates(resources, fmt.Sprintf("taskspec.resources.%s.name", name)); err != nil { - return err - } - return nil + return errs.Also(checkForDuplicates(resources, "name")) } func checkForDuplicates(resources []TaskResource, path string) *apis.FieldError { diff --git a/pkg/apis/pipeline/v1beta1/sidecar_replacements.go b/pkg/apis/pipeline/v1beta1/sidecar_replacements.go new file mode 100644 index 00000000000..e4951d57b16 --- /dev/null +++ b/pkg/apis/pipeline/v1beta1/sidecar_replacements.go @@ -0,0 +1,26 @@ +/* + Copyright 2020 The Tekton Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package v1beta1 + +import ( + "github.com/tektoncd/pipeline/pkg/substitution" +) + +func ApplySidecarReplacements(sidecar *Sidecar, stringReplacements map[string]string, arrayReplacements map[string][]string) { + sidecar.Script = substitution.ApplyReplacements(sidecar.Script, stringReplacements) + ApplyContainerReplacements(&sidecar.Container, stringReplacements, arrayReplacements) +} diff --git a/pkg/apis/pipeline/v1beta1/sidecar_replacements_test.go b/pkg/apis/pipeline/v1beta1/sidecar_replacements_test.go new file mode 100644 index 00000000000..bca75facff7 --- /dev/null +++ b/pkg/apis/pipeline/v1beta1/sidecar_replacements_test.go @@ -0,0 +1,131 @@ +/* + Copyright 2019 The Tekton Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package v1beta1_test + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + corev1 "k8s.io/api/core/v1" +) + +func TestApplySidecarReplacements(t *testing.T) { + replacements := map[string]string{ + "replace.me": "replaced!", + } + + arrayReplacements := map[string][]string{ + "array.replace.me": {"val1", "val2"}, + } + + s := v1beta1.Sidecar{ + Script: "$(replace.me)", + Container: corev1.Container{ + Name: "$(replace.me)", + Image: "$(replace.me)", + Command: []string{"$(array.replace.me)"}, + Args: []string{"$(array.replace.me)"}, + WorkingDir: "$(replace.me)", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "$(replace.me)", + }, + }, + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "$(replace.me)", + }, + }, + }}, + Env: []corev1.EnvVar{{ + Name: "not_me", + Value: "$(replace.me)", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "$(replace.me)", + }, + Key: "$(replace.me)", + }, + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "$(replace.me)", + }, + Key: "$(replace.me)", + }, + }, + }}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "$(replace.me)", + MountPath: "$(replace.me)", + SubPath: "$(replace.me)", + }}, + }, + } + + expected := v1beta1.Sidecar{ + Script: "replaced!", + Container: corev1.Container{ + Name: "replaced!", + Image: "replaced!", + Command: []string{"val1", "val2"}, + Args: []string{"val1", "val2"}, + WorkingDir: "replaced!", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "replaced!", + }, + }, + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + 
Name: "replaced!", + }, + }, + }}, + Env: []corev1.EnvVar{{ + Name: "not_me", + Value: "replaced!", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "replaced!", + }, + Key: "replaced!", + }, + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "replaced!", + }, + Key: "replaced!", + }, + }, + }}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "replaced!", + MountPath: "replaced!", + SubPath: "replaced!", + }}, + }, + } + v1beta1.ApplySidecarReplacements(&s, replacements, arrayReplacements) + if d := cmp.Diff(s, expected); d != "" { + t.Errorf("Container replacements failed: %s", d) + } +} diff --git a/pkg/apis/pipeline/v1beta1/task_types.go b/pkg/apis/pipeline/v1beta1/task_types.go index 34fe2c3bb87..405527f8b32 100644 --- a/pkg/apis/pipeline/v1beta1/task_types.go +++ b/pkg/apis/pipeline/v1beta1/task_types.go @@ -26,6 +26,8 @@ const ( TaskRunResultType ResultType = "TaskRunResult" // PipelineResourceResultType default pipeline result value PipelineResourceResultType ResultType = "PipelineResourceResult" + // InternalTektonResultType default internal tekton result value + InternalTektonResultType ResultType = "InternalTektonResult" // UnknownResultType default unknown result type value UnknownResultType ResultType = "" ) diff --git a/pkg/apis/pipeline/v1beta1/task_validation.go b/pkg/apis/pipeline/v1beta1/task_validation.go index 52ea0d65f78..982f33f08c7 100644 --- a/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/pkg/apis/pipeline/v1beta1/task_validation.go @@ -33,88 +33,52 @@ import ( var _ apis.Validatable = (*Task)(nil) func (t *Task) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(t.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - return t.Spec.Validate(ctx) + errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") + return 
errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } -func (ts *TaskSpec) Validate(ctx context.Context) *apis.FieldError { - +func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { if len(ts.Steps) == 0 { - return apis.ErrMissingField("steps") - } - if err := ValidateVolumes(ts.Volumes).ViaField("volumes"); err != nil { - return err - } - if err := validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate); err != nil { - return err + errs = errs.Also(apis.ErrMissingField("steps")) } + errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) + errs = errs.Also(ValidateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) mergedSteps, err := MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps) if err != nil { - return &apis.FieldError{ + errs = errs.Also(&apis.FieldError{ Message: fmt.Sprintf("error merging step template and steps: %s", err), Paths: []string{"stepTemplate"}, - } - } - - if err := validateSteps(mergedSteps).ViaField("steps"); err != nil { - return err - } - - // Validate Resources declaration - if err := ts.Resources.Validate(ctx); err != nil { - return err - } - - // Validate that the parameters type are correct - if err := ValidateParameterTypes(ts.Params); err != nil { - return err - } - - // Validate task step names - for _, step := range ts.Steps { - if errs := validation.IsDNS1123Label(step.Name); step.Name != "" && len(errs) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", step.Name), - Paths: []string{"taskspec.steps.name"}, - Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - } - } - } - - if err := ValidateParameterVariables(ts.Steps, ts.Params); err != nil { - return err - } - - if err := ValidateResourcesVariables(ts.Steps, ts.Resources); err != nil { - return err - } - - if err := validateResults(ts.Results); err != nil 
{ - return err + Details: err.Error(), + }) } - if err := validateTaskContextVariables(ts.Steps); err != nil { - return err - } - - return nil + errs = errs.Also(validateSteps(mergedSteps).ViaField("steps")) + errs = errs.Also(ts.Resources.Validate(ctx).ViaField("resources")) + errs = errs.Also(ValidateParameterTypes(ts.Params).ViaField("params")) + errs = errs.Also(ValidateParameterVariables(ts.Steps, ts.Params)) + errs = errs.Also(ValidateResourcesVariables(ts.Steps, ts.Resources)) + errs = errs.Also(validateTaskContextVariables(ts.Steps)) + errs = errs.Also(validateResults(ctx, ts.Results).ViaField("results")) + return errs } -func validateResults(results []TaskResult) *apis.FieldError { +func validateResults(ctx context.Context, results []TaskResult) (errs *apis.FieldError) { for index, result := range results { - if !resultNameFormatRegex.MatchString(result.Name) { - return apis.ErrInvalidKeyName(result.Name, fmt.Sprintf("results[%d].name", index), fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat)) - } + errs = errs.Also(result.Validate(ctx).ViaIndex(index)) } + return errs +} +func (tr TaskResult) Validate(_ context.Context) *apis.FieldError { + if !resultNameFormatRegex.MatchString(tr.Name) { + return apis.ErrInvalidKeyName(tr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat)) + } return nil } // a mount path which conflicts with any other declared workspaces, with the explicitly // declared volume mounts, or with the stepTemplate. The names must also be unique. 
-func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *corev1.Container) *apis.FieldError { +func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *corev1.Container) (errs *apis.FieldError) { mountPaths := sets.NewString() for _, step := range steps { for _, vm := range step.VolumeMounts { @@ -128,109 +92,113 @@ func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, } wsNames := sets.NewString() - for _, w := range workspaces { + for idx, w := range workspaces { // Workspace names must be unique if wsNames.Has(w.Name) { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace name %q must be unique", w.Name), - Paths: []string{"workspaces.name"}, - } + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace name %q must be unique", w.Name), "name").ViaIndex(idx)) + } else { + wsNames.Insert(w.Name) } - wsNames.Insert(w.Name) // Workspaces must not try to use mount paths that are already used mountPath := filepath.Clean(w.GetMountPath()) if _, ok := mountPaths[mountPath]; ok { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace mount path %q must be unique", mountPath), - Paths: []string{"workspaces.mountpath"}, - } + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace mount path %q must be unique", mountPath), "mountpath").ViaIndex(idx)) } mountPaths[mountPath] = struct{}{} } - return nil + return errs } -func ValidateVolumes(volumes []corev1.Volume) *apis.FieldError { +func ValidateVolumes(volumes []corev1.Volume) (errs *apis.FieldError) { // Task must not have duplicate volume names. 
vols := sets.NewString() - for _, v := range volumes { + for idx, v := range volumes { if vols.Has(v.Name) { - return &apis.FieldError{ - Message: fmt.Sprintf("multiple volumes with same name %q", v.Name), - Paths: []string{"name"}, - } + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("multiple volumes with same name %q", v.Name), "name").ViaIndex(idx)) + } else { + vols.Insert(v.Name) } - vols.Insert(v.Name) } - return nil + return errs } -func validateSteps(steps []Step) *apis.FieldError { +func validateSteps(steps []Step) (errs *apis.FieldError) { // Task must not have duplicate step names. names := sets.NewString() for idx, s := range steps { - if s.Image == "" { - return apis.ErrMissingField("Image") - } + errs = errs.Also(validateStep(s, names).ViaIndex(idx)) + } + return errs +} + +func validateStep(s Step, names sets.String) (errs *apis.FieldError) { + if s.Image == "" { + errs = errs.Also(apis.ErrMissingField("Image")) + } - if s.Script != "" { - if len(s.Command) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("step %d script cannot be used with command", idx), - Paths: []string{"script"}, - } - } + if s.Script != "" { + if len(s.Command) > 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("script cannot be used with command"), + Paths: []string{"script"}, + }) } + } - if s.Name != "" { - if names.Has(s.Name) { - return apis.ErrInvalidValue(s.Name, "name") - } - names.Insert(s.Name) + if s.Name != "" { + if names.Has(s.Name) { + errs = errs.Also(apis.ErrInvalidValue(s.Name, "name")) + } + if e := validation.IsDNS1123Label(s.Name); len(e) > 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("invalid value %q", s.Name), + Paths: []string{"name"}, + Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }) } + names.Insert(s.Name) + } - for _, vm := range s.VolumeMounts { - if strings.HasPrefix(vm.MountPath, "/tekton/") && - 
!strings.HasPrefix(vm.MountPath, "/tekton/home") { - return &apis.FieldError{ - Message: fmt.Sprintf("step %d volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", idx, vm.Name, vm.MountPath), - Paths: []string{"volumeMounts.mountPath"}, - } - } - if strings.HasPrefix(vm.Name, "tekton-internal-") { - return &apis.FieldError{ - Message: fmt.Sprintf(`step %d volumeMount name %q cannot start with "tekton-internal-"`, idx, vm.Name), - Paths: []string{"volumeMounts.name"}, - } - } + for j, vm := range s.VolumeMounts { + if strings.HasPrefix(vm.MountPath, "/tekton/") && + !strings.HasPrefix(vm.MountPath, "/tekton/home") { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", vm.Name, vm.MountPath), "mountPath").ViaFieldIndex("volumeMounts", j)) + } + if strings.HasPrefix(vm.Name, "tekton-internal-") { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf(`volumeMount name %q cannot start with "tekton-internal-"`, vm.Name), "name").ViaFieldIndex("volumeMounts", j)) } } - return nil + return errs } -func ValidateParameterTypes(params []ParamSpec) *apis.FieldError { +func ValidateParameterTypes(params []ParamSpec) (errs *apis.FieldError) { for _, p := range params { - // Ensure param has a valid type. - validType := false - for _, allowedType := range AllParamTypes { - if p.Type == allowedType { - validType = true - } - } - if !validType { - return apis.ErrInvalidValue(p.Type, fmt.Sprintf("taskspec.params.%s.type", p.Name)) + errs = errs.Also(p.ValidateType()) + } + return errs +} + +func (p ParamSpec) ValidateType() *apis.FieldError { + // Ensure param has a valid type. + validType := false + for _, allowedType := range AllParamTypes { + if p.Type == allowedType { + validType = true } + } + if !validType { + return apis.ErrInvalidValue(p.Type, fmt.Sprintf("%s.type", p.Name)) + } - // If a default value is provided, ensure its type matches param's declared type. 
- if (p.Default != nil) && (p.Default.Type != p.Type) { - return &apis.FieldError{ - Message: fmt.Sprintf( - "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), - Paths: []string{ - fmt.Sprintf("taskspec.params.%s.type", p.Name), - fmt.Sprintf("taskspec.params.%s.default.type", p.Name), - }, - } + // If a default value is provided, ensure its type matches param's declared type. + if (p.Default != nil) && (p.Default.Type != p.Type) { + return &apis.FieldError{ + Message: fmt.Sprintf( + "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), + Paths: []string{ + fmt.Sprintf("%s.type", p.Name), + fmt.Sprintf("%s.default.type", p.Name), + }, } } return nil @@ -247,10 +215,8 @@ func ValidateParameterVariables(steps []Step, params []ParamSpec) *apis.FieldErr } } - if err := validateVariables(steps, "params", parameterNames); err != nil { - return err - } - return validateArrayUsage(steps, "params", arrayParameterNames) + errs := validateVariables(steps, "params", parameterNames) + return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) } func validateTaskContextVariables(steps []Step) *apis.FieldError { @@ -262,10 +228,8 @@ func validateTaskContextVariables(steps []Step) *apis.FieldError { taskContextNames := sets.NewString().Insert( "name", ) - if err := validateVariables(steps, "context\\.taskRun", taskRunContextNames); err != nil { - return err - } - return validateVariables(steps, "context\\.task", taskContextNames) + errs := validateVariables(steps, "context\\.taskRun", taskRunContextNames) + return errs.Also(validateVariables(steps, "context\\.task", taskContextNames)) } func ValidateResourcesVariables(steps []Step, resources *TaskResources) *apis.FieldError { @@ -286,102 +250,73 @@ func ValidateResourcesVariables(steps []Step, resources *TaskResources) *apis.Fi return validateVariables(steps, "resources.(?:inputs|outputs)", resourceNames) } -func validateArrayUsage(steps []Step, prefix 
string, vars sets.String) *apis.FieldError { - for _, step := range steps { - if err := validateTaskNoArrayReferenced("name", step.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced("image", step.Image, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced("workingDir", step.WorkingDir, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced("script", step.Script, prefix, vars); err != nil { - return err - } - for i, cmd := range step.Command { - if err := validateTaskArraysIsolated(fmt.Sprintf("command[%d]", i), cmd, prefix, vars); err != nil { - return err - } - } - for i, arg := range step.Args { - if err := validateTaskArraysIsolated(fmt.Sprintf("arg[%d]", i), arg, prefix, vars); err != nil { - return err - } - } - for _, env := range step.Env { - if err := validateTaskNoArrayReferenced(fmt.Sprintf("env[%s]", env.Name), env.Value, prefix, vars); err != nil { - return err - } - } - for i, v := range step.VolumeMounts { - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].Name", i), v.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].MountPath", i), v.MountPath, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].SubPath", i), v.SubPath, prefix, vars); err != nil { - return err - } - } +func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepArrayUsage(step, prefix, vars)).ViaFieldIndex("steps", idx) } - return nil + return errs } -func validateVariables(steps []Step, prefix string, vars sets.String) *apis.FieldError { - for _, step := range steps { - if err := validateTaskVariable("name", step.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable("image", step.Image, prefix, 
vars); err != nil { - return err - } - if err := validateTaskVariable("workingDir", step.WorkingDir, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable("script", step.Script, prefix, vars); err != nil { - return err - } - for i, cmd := range step.Command { - if err := validateTaskVariable(fmt.Sprintf("command[%d]", i), cmd, prefix, vars); err != nil { - return err - } - } - for i, arg := range step.Args { - if err := validateTaskVariable(fmt.Sprintf("arg[%d]", i), arg, prefix, vars); err != nil { - return err - } - } - for _, env := range step.Env { - if err := validateTaskVariable(fmt.Sprintf("env[%s]", env.Name), env.Value, prefix, vars); err != nil { - return err - } - } - for i, v := range step.VolumeMounts { - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].Name", i), v.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].MountPath", i), v.MountPath, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].SubPath", i), v.SubPath, prefix, vars); err != nil { - return err - } - } +func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskNoArrayReferenced(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskNoArrayReferenced(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskNoArrayReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskNoArrayReferenced(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskArraysIsolated(cmd, prefix, vars).ViaFieldIndex("command", i)) } - return nil + for i, arg := range step.Args { + errs = errs.Also(validateTaskArraysIsolated(arg, prefix, vars).ViaFieldIndex("args", i)) + + } + for _, env := range step.Env { + errs = errs.Also(validateTaskNoArrayReferenced(env.Value, prefix, 
vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskNoArrayReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoArrayReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoArrayReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateVariables(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepVariables(step, prefix, vars).ViaFieldIndex("steps", idx)) + } + return errs +} + +func validateStepVariables(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskVariable(arg, prefix, vars).ViaFieldIndex("args", i)) + } + for _, env := range step.Env { + errs = errs.Also(validateTaskVariable(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskVariable(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskVariable(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskVariable(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) + } + return errs } -func validateTaskVariable(name, value, prefix 
string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariable(name, value, prefix, "step", "taskspec.steps", vars) +func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldError { + return substitution.ValidateVariableP(value, prefix, vars) } -func validateTaskNoArrayReferenced(name, value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableProhibited(name, value, prefix, "step", "taskspec.steps", arrayNames) +func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) *apis.FieldError { + return substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) } -func validateTaskArraysIsolated(name, value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolated(name, value, prefix, "step", "taskspec.steps", arrayNames) +func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *apis.FieldError { + return substitution.ValidateVariableIsolatedP(value, prefix, arrayNames) } diff --git a/pkg/apis/pipeline/v1beta1/task_validation_test.go b/pkg/apis/pipeline/v1beta1/task_validation_test.go index f567854fea9..6cbd1488f32 100644 --- a/pkg/apis/pipeline/v1beta1/task_validation_test.go +++ b/pkg/apis/pipeline/v1beta1/task_validation_test.go @@ -30,14 +30,14 @@ import ( var validResource = v1beta1.TaskResource{ ResourceDeclaration: v1beta1.ResourceDeclaration{ - Name: "source", + Name: "validsource", Type: "git", }, } var invalidResource = v1beta1.TaskResource{ ResourceDeclaration: v1beta1.ResourceDeclaration{ - Name: "source", + Name: "invalidsource", Type: "what", }, } @@ -380,7 +380,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `invalid value: what`, - Paths: []string{"taskspec.resources.inputs.source.type"}, + Paths: []string{"resources.inputs[0].invalidsource.type"}, }, }, { name: "one invalid input resource", @@ -392,7 +392,7 @@ func 
TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `invalid value: what`, - Paths: []string{"taskspec.resources.inputs.source.type"}, + Paths: []string{"resources.inputs[1].invalidsource.type"}, }, }, { name: "duplicated inputs resources", @@ -405,7 +405,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `expected exactly one, got both`, - Paths: []string{"taskspec.resources.inputs.name"}, + Paths: []string{"resources.inputs.name"}, }, }, { name: "invalid output resource", @@ -417,7 +417,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `invalid value: what`, - Paths: []string{"taskspec.resources.outputs.source.type"}, + Paths: []string{"resources.outputs[0].invalidsource.type"}, }, }, { name: "one invalid output resource", @@ -429,7 +429,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `invalid value: what`, - Paths: []string{"taskspec.resources.outputs.source.type"}, + Paths: []string{"resources.outputs[1].invalidsource.type"}, }, }, { name: "duplicated outputs resources", @@ -442,7 +442,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `expected exactly one, got both`, - Paths: []string{"taskspec.resources.outputs.name"}, + Paths: []string{"resources.outputs.name"}, }, }, { name: "invalid param type", @@ -462,7 +462,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `invalid value: invalidtype`, - Paths: []string{"taskspec.params.param-with-invalid-type.type"}, + Paths: []string{"params.param-with-invalid-type.type"}, }, }, { name: "param mismatching default/type 1", @@ -477,7 +477,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `"array" type does not match default value's type: "string"`, - Paths: []string{"taskspec.params.task.type", 
"taskspec.params.task.default.type"}, + Paths: []string{"params.task.type", "params.task.default.type"}, }, }, { name: "param mismatching default/type 2", @@ -492,7 +492,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `"string" type does not match default value's type: "array"`, - Paths: []string{"taskspec.params.task.type", "taskspec.params.task.default.type"}, + Paths: []string{"params.task.type", "params.task.default.type"}, }, }, { name: "invalid step", @@ -516,7 +516,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `invalid value "replaceImage"`, - Paths: []string{"taskspec.steps.name"}, + Paths: []string{"steps[0].name"}, Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", }, }, { @@ -529,8 +529,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `non-existent variable in "--flag=$(params.inexistent)" for step arg[0]`, - Paths: []string{"taskspec.steps.arg[0]"}, + Message: `non-existent variable in "--flag=$(params.inexistent)"`, + Paths: []string{"steps[0].args[0]"}, }, }, { name: "array used in unaccepted field", @@ -551,8 +551,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `variable type invalid in "$(params.baz)" for step image`, - Paths: []string{"taskspec.steps.image"}, + Message: `variable type invalid in "$(params.baz)"`, + Paths: []string{"steps[0].image"}, }, }, { name: "array star used in unaccepted field", @@ -573,8 +573,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `variable type invalid in "$(params.baz[*])" for step image`, - Paths: []string{"taskspec.steps.image"}, + Message: `variable type invalid in "$(params.baz[*])"`, + Paths: []string{"steps[0].image"}, }, }, { name: "array star 
used illegaly in script field", @@ -597,8 +597,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}, }, expectedError: apis.FieldError{ - Message: `variable type invalid in "$(params.baz[*])" for step script`, - Paths: []string{"taskspec.steps.script"}, + Message: `variable type invalid in "$(params.baz[*])"`, + Paths: []string{"steps[0].script"}, }, }, { name: "array not properly isolated", @@ -619,8 +619,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `variable is not properly isolated in "not isolated: $(params.baz)" for step arg[0]`, - Paths: []string{"taskspec.steps.arg[0]"}, + Message: `variable is not properly isolated in "not isolated: $(params.baz)"`, + Paths: []string{"steps[0].args[0]"}, }, }, { name: "array star not properly isolated", @@ -641,8 +641,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `variable is not properly isolated in "not isolated: $(params.baz[*])" for step arg[0]`, - Paths: []string{"taskspec.steps.arg[0]"}, + Message: `variable is not properly isolated in "not isolated: $(params.baz[*])"`, + Paths: []string{"steps[0].args[0]"}, }, }, { name: "inferred array not properly isolated", @@ -663,8 +663,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `variable is not properly isolated in "not isolated: $(params.baz)" for step arg[0]`, - Paths: []string{"taskspec.steps.arg[0]"}, + Message: `variable is not properly isolated in "not isolated: $(params.baz)"`, + Paths: []string{"steps[0].args[0]"}, }, }, { name: "inferred array star not properly isolated", @@ -685,8 +685,30 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `variable is not properly isolated in "not isolated: $(params.baz[*])" for step arg[0]`, - Paths: []string{"taskspec.steps.arg[0]"}, + Message: `variable is not properly isolated in "not isolated: 
$(params.baz[*])"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "Inexistent param variable in volumeMount with existing", + fields: fields{ + Params: []v1beta1.ParamSpec{ + { + Name: "foo", + Description: "param", + Default: v1beta1.NewArrayOrString("default"), + }, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "mystep", + Image: "myimage", + VolumeMounts: []corev1.VolumeMount{{ + Name: "$(params.inexistent)-foo", + }}, + }}}, + }, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.inexistent)-foo"`, + Paths: []string{"steps[0].volumeMount[0].name"}, }, }, { name: "Inexistent param variable with existing", @@ -703,8 +725,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `non-existent variable in "$(params.foo) && $(params.inexistent)" for step arg[0]`, - Paths: []string{"taskspec.steps.arg[0]"}, + Message: `non-existent variable in "$(params.foo) && $(params.inexistent)"`, + Paths: []string{"steps[0].args[0]"}, }, }, { name: "Multiple volumes with same name", @@ -718,7 +740,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: `multiple volumes with same name "workspace"`, - Paths: []string{"volumes.name"}, + Paths: []string{"volumes[1].name"}, }, }, { name: "step with script and command", @@ -732,8 +754,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}, }, expectedError: apis.FieldError{ - Message: "step 0 script cannot be used with command", - Paths: []string{"steps.script"}, + Message: "script cannot be used with command", + Paths: []string{"steps[0].script"}, }, }, { name: "step volume mounts under /tekton/", @@ -747,8 +769,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `step 0 volumeMount cannot be mounted under /tekton/ (volumeMount "foo" mounted at "/tekton/foo")`, - Paths: []string{"steps.volumeMounts.mountPath"}, + Message: `volumeMount 
cannot be mounted under /tekton/ (volumeMount "foo" mounted at "/tekton/foo")`, + Paths: []string{"steps[0].volumeMounts[0].mountPath"}, }, }, { name: "step volume mount name starts with tekton-internal-", @@ -762,22 +784,24 @@ func TestTaskSpecValidateError(t *testing.T) { }}}, }, expectedError: apis.FieldError{ - Message: `step 0 volumeMount name "tekton-internal-foo" cannot start with "tekton-internal-"`, - Paths: []string{"steps.volumeMounts.name"}, + Message: `volumeMount name "tekton-internal-foo" cannot start with "tekton-internal-"`, + Paths: []string{"steps[0].volumeMounts[0].name"}, }, }, { name: "declared workspaces names are not unique", fields: fields{ Steps: validSteps, Workspaces: []v1beta1.WorkspaceDeclaration{{ - Name: "same-workspace", + Name: "same-workspace", + MountPath: "/foo", }, { - Name: "same-workspace", + Name: "same-workspace", + MountPath: "/bar", }}, }, expectedError: apis.FieldError{ Message: "workspace name \"same-workspace\" must be unique", - Paths: []string{"workspaces.name"}, + Paths: []string{"workspaces[1].name"}, }, }, { name: "declared workspaces clash with each other", @@ -793,7 +817,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: "workspace mount path \"/foo\" must be unique", - Paths: []string{"workspaces.mountpath"}, + Paths: []string{"workspaces[1].mountpath"}, }, }, { name: "workspace mount path already in volumeMounts", @@ -815,7 +839,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: "workspace mount path \"/foo\" must be unique", - Paths: []string{"workspaces.mountpath"}, + Paths: []string{"workspaces[0].mountpath"}, }, }, { name: "workspace default mount path already in volumeMounts", @@ -836,7 +860,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: "workspace mount path \"/workspace/some-workspace\" must be unique", - Paths: []string{"workspaces.mountpath"}, + Paths: 
[]string{"workspaces[0].mountpath"}, }, }, { name: "workspace mount path already in stepTemplate", @@ -855,7 +879,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: "workspace mount path \"/foo\" must be unique", - Paths: []string{"workspaces.mountpath"}, + Paths: []string{"workspaces[0].mountpath"}, }, }, { name: "workspace default mount path already in stepTemplate", @@ -873,7 +897,7 @@ func TestTaskSpecValidateError(t *testing.T) { }, expectedError: apis.FieldError{ Message: "workspace mount path \"/workspace/some-workspace\" must be unique", - Paths: []string{"workspaces.mountpath"}, + Paths: []string{"workspaces[0].mountpath"}, }, }, { name: "result name not validate", @@ -890,7 +914,7 @@ func TestTaskSpecValidateError(t *testing.T) { Details: "Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$')", }, }, { - name: "context not validate", + name: "context not validate", fields: fields{ Steps: []v1beta1.Step{{ Container: corev1.Container{ @@ -903,8 +927,8 @@ func TestTaskSpecValidateError(t *testing.T) { }}, }, expectedError: apis.FieldError{ - Message: `non-existent variable in "\n\t\t\t\t#!/usr/bin/env bash\n\t\t\t\thello \"$(context.task.missing)\"" for step script`, - Paths: []string{"taskspec.steps.script"}, + Message: `non-existent variable in "\n\t\t\t\t#!/usr/bin/env bash\n\t\t\t\thello \"$(context.task.missing)\""`, + Paths: []string{"steps[0].script"}, }, }} for _, tt := range tests { @@ -924,7 +948,7 @@ func TestTaskSpecValidateError(t *testing.T) { if err == nil { t.Fatalf("Expected an error, got nothing for %v", ts) } - if d := cmp.Diff(tt.expectedError, *err, cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { 
t.Errorf("TaskSpec.Validate() errors diff %s", diff.PrintWantGot(d)) } }) diff --git a/pkg/apis/pipeline/v1beta1/taskrun_defaults_test.go b/pkg/apis/pipeline/v1beta1/taskrun_defaults_test.go index 020919620e4..9995dbbad3a 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_defaults_test.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_defaults_test.go @@ -47,8 +47,9 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, want: &v1beta1.TaskRunSpec{ - TaskRef: nil, - Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, + TaskRef: nil, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, }, { desc: "taskref kind is empty", @@ -57,8 +58,9 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, want: &v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{Kind: v1beta1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, + TaskRef: &v1beta1.TaskRef{Kind: v1beta1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 500 * time.Millisecond}, }, }, { desc: "timeout is nil", @@ -66,14 +68,16 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { TaskRef: &v1beta1.TaskRef{Kind: v1beta1.ClusterTaskKind}, }, want: &v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{Kind: v1beta1.ClusterTaskKind}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + TaskRef: &v1beta1.TaskRef{Kind: v1beta1.ClusterTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { desc: "pod template is nil", trs: &v1beta1.TaskRunSpec{}, want: &v1beta1.TaskRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + 
Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, { desc: "pod template is not nil", @@ -85,7 +89,8 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { }, }, want: &v1beta1.TaskRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, PodTemplate: &v1beta1.PodTemplate{ NodeSelector: map[string]string{ "label": "value", @@ -108,7 +113,8 @@ func TestTaskRunSpec_SetDefaults(t *testing.T) { Type: v1beta1.ParamTypeString, }}, }, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }} for _, tc := range cases { @@ -137,7 +143,8 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "tekton-pipelines"}, }, Spec: v1beta1.TaskRunSpec{ - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, }, { @@ -152,8 +159,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "tekton-pipelines"}, }, Spec: v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, }, { @@ -168,8 +176,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": 
"tekton-pipelines"}, }, Spec: v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, }, wc: contexts.WithUpgradeViaDefaulting, @@ -185,8 +194,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "tekton-pipelines"}, }, Spec: v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: 5 * time.Minute}, + ServiceAccountName: config.DefaultServiceAccountValue, + TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, + Timeout: &metav1.Duration{Duration: 5 * time.Minute}, }, }, wc: func(ctx context.Context) context.Context { @@ -243,8 +253,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "something-else"}, }, Spec: v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: 5 * time.Minute}, + TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + Timeout: &metav1.Duration{Duration: 5 * time.Minute}, }, }, wc: func(ctx context.Context) context.Context { @@ -275,8 +286,9 @@ func TestTaskRunDefaulting(t *testing.T) { Labels: map[string]string{"app.kubernetes.io/managed-by": "user-specified"}, }, Spec: v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, - Timeout: &metav1.Duration{Duration: 5 * time.Minute}, + TaskRef: &v1beta1.TaskRef{Name: "foo", Kind: v1beta1.NamespacedTaskKind}, + ServiceAccountName: config.DefaultServiceAccountValue, + 
Timeout: &metav1.Duration{Duration: 5 * time.Minute}, }, }, wc: func(ctx context.Context) context.Context { diff --git a/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/pkg/apis/pipeline/v1beta1/taskrun_validation.go index 667cbe13afe..0cd4e4fd9b5 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_validation.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_validation.go @@ -22,7 +22,6 @@ import ( "strings" "github.com/tektoncd/pipeline/pkg/apis/validate" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) @@ -31,89 +30,69 @@ var _ apis.Validatable = (*TaskRun)(nil) // Validate taskrun func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata"); err != nil { - return err - } - return tr.Spec.Validate(ctx) + errs := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata") + return errs.Also(tr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } // Validate taskrun spec -func (ts *TaskRunSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(ts, &TaskRunSpec{}) { - return apis.ErrMissingField("spec") - } - +func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { // can't have both taskRef and taskSpec at the same time if (ts.TaskRef != nil && ts.TaskRef.Name != "") && ts.TaskSpec != nil { - return apis.ErrDisallowedFields("spec.taskref", "spec.taskspec") + errs = errs.Also(apis.ErrDisallowedFields("taskref", "taskspec")) } // Check that one of TaskRef and TaskSpec is present if (ts.TaskRef == nil || (ts.TaskRef != nil && ts.TaskRef.Name == "")) && ts.TaskSpec == nil { - return apis.ErrMissingField("spec.taskref.name", "spec.taskspec") + errs = errs.Also(apis.ErrMissingField("taskref.name", "taskspec")) } // Validate TaskSpec if it's present if ts.TaskSpec != nil { - if err := ts.TaskSpec.Validate(ctx); err != nil { - return err - } + errs = 
errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskspec")) } - if err := validateParameters(ts.Params); err != nil { - return err - } - - if err := validateWorkspaceBindings(ctx, ts.Workspaces); err != nil { - return err - } - - // Validate Resources declaration - if err := ts.Resources.Validate(ctx); err != nil { - return err - } + errs = errs.Also(validateParameters(ts.Params).ViaField("params")) + errs = errs.Also(validateWorkspaceBindings(ctx, ts.Workspaces).ViaField("workspaces")) + errs = errs.Also(ts.Resources.Validate(ctx).ViaField("resources")) if ts.Status != "" { if ts.Status != TaskRunSpecStatusCancelled { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be %s", ts.Status, TaskRunSpecStatusCancelled), "spec.status") + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be %s", ts.Status, TaskRunSpecStatusCancelled), "status")) } } - if ts.Timeout != nil { // timeout should be a valid duration of at least 0. if ts.Timeout.Duration < 0 { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ts.Timeout.Duration.String()), "spec.timeout") + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ts.Timeout.Duration.String()), "timeout")) } } - return nil + return errs } // validateWorkspaceBindings makes sure the volumes provided for the Task's declared workspaces make sense. 
-func validateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) *apis.FieldError { +func validateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs *apis.FieldError) { seen := sets.NewString() - for _, w := range wb { + for idx, w := range wb { if seen.Has(w.Name) { - return apis.ErrMultipleOneOf("spec.workspaces.name") + errs = errs.Also(apis.ErrMultipleOneOf("name").ViaIndex(idx)) } seen.Insert(w.Name) - if err := w.Validate(ctx).ViaField("workspace"); err != nil { - return err - } + errs = errs.Also(w.Validate(ctx).ViaIndex(idx)) } - return nil + return errs } -func validateParameters(params []Param) *apis.FieldError { +func validateParameters(params []Param) (errs *apis.FieldError) { // Template must not duplicate parameter names. seen := sets.NewString() for _, p := range params { if seen.Has(strings.ToLower(p.Name)) { - return apis.ErrMultipleOneOf("spec.params.name") + errs = errs.Also(apis.ErrMultipleOneOf("name").ViaKey(p.Name)) } seen.Insert(p.Name) } - return nil + return errs } diff --git a/pkg/apis/pipeline/v1beta1/taskrun_validation_test.go b/pkg/apis/pipeline/v1beta1/taskrun_validation_test.go index 0344fc2eb3d..4d0761a3813 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_validation_test.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_validation_test.go @@ -38,13 +38,16 @@ func TestTaskRun_Invalidate(t *testing.T) { }{{ name: "invalid taskspec", task: &v1beta1.TaskRun{}, - want: apis.ErrMissingField("spec"), + want: apis.ErrMissingField("spec.taskref.name", "spec.taskspec"), }, { name: "invalid taskrun metadata", task: &v1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Name: "task.name", }, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "task"}, + }, }, want: &apis.FieldError{ Message: "Invalid resource name: special character . 
must not be present", @@ -92,7 +95,7 @@ func TestTaskRun_Workspaces_Invalid(t *testing.T) { }}, }, }, - wantErr: apis.ErrMissingField("workspace.persistentvolumeclaim.claimname"), + wantErr: apis.ErrMissingField("spec.workspaces[0].persistentvolumeclaim.claimname"), }, { name: "bind same workspace twice", tr: &v1beta1.TaskRun{ @@ -108,7 +111,7 @@ func TestTaskRun_Workspaces_Invalid(t *testing.T) { }}, }, }, - wantErr: apis.ErrMultipleOneOf("spec.workspaces.name"), + wantErr: apis.ErrMultipleOneOf("spec.workspaces[1].name"), }} for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { @@ -131,13 +134,13 @@ func TestTaskRunSpec_Invalidate(t *testing.T) { }{{ name: "invalid taskspec", spec: v1beta1.TaskRunSpec{}, - wantErr: apis.ErrMissingField("spec"), + wantErr: apis.ErrMissingField("taskref.name", "taskspec"), }, { name: "invalid taskref name", spec: v1beta1.TaskRunSpec{ TaskRef: &v1beta1.TaskRef{}, }, - wantErr: apis.ErrMissingField("spec.taskref.name, spec.taskspec"), + wantErr: apis.ErrMissingField("taskref.name, taskspec"), }, { name: "invalid taskref and taskspec together", spec: v1beta1.TaskRunSpec{ @@ -151,7 +154,7 @@ func TestTaskRunSpec_Invalidate(t *testing.T) { }}}, }, }, - wantErr: apis.ErrDisallowedFields("spec.taskspec", "spec.taskref"), + wantErr: apis.ErrDisallowedFields("taskspec", "taskref"), }, { name: "negative pipeline timeout", spec: v1beta1.TaskRunSpec{ @@ -160,7 +163,7 @@ func TestTaskRunSpec_Invalidate(t *testing.T) { }, Timeout: &metav1.Duration{Duration: -48 * time.Hour}, }, - wantErr: apis.ErrInvalidValue("-48h0m0s should be >= 0", "spec.timeout"), + wantErr: apis.ErrInvalidValue("-48h0m0s should be >= 0", "timeout"), }, { name: "wrong taskrun cancel", spec: v1beta1.TaskRunSpec{ @@ -169,7 +172,7 @@ func TestTaskRunSpec_Invalidate(t *testing.T) { }, Status: "TaskRunCancell", }, - wantErr: apis.ErrInvalidValue("TaskRunCancell should be TaskRunCancelled", "spec.status"), + wantErr: apis.ErrInvalidValue("TaskRunCancell should be 
TaskRunCancelled", "status"), }, { name: "invalid taskspec", spec: v1beta1.TaskRunSpec{ @@ -182,22 +185,22 @@ func TestTaskRunSpec_Invalidate(t *testing.T) { }, wantErr: &apis.FieldError{ Message: `invalid value "invalid-name-with-$weird-char/%"`, - Paths: []string{"taskspec.steps.name"}, + Paths: []string{"taskspec.steps[0].name"}, Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", }, }, { name: "invalid params", spec: v1beta1.TaskRunSpec{ Params: []v1beta1.Param{{ - Name: "name", + Name: "myname", Value: *v1beta1.NewArrayOrString("value"), }, { - Name: "name", + Name: "myname", Value: *v1beta1.NewArrayOrString("value"), }}, TaskRef: &v1beta1.TaskRef{Name: "mytask"}, }, - wantErr: apis.ErrMultipleOneOf("spec.params.name"), + wantErr: apis.ErrMultipleOneOf("params[myname].name"), }} for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { diff --git a/pkg/apis/pipeline/v1beta1/when_types.go b/pkg/apis/pipeline/v1beta1/when_types.go index a2cb701a9d6..00fa3b3c5fe 100644 --- a/pkg/apis/pipeline/v1beta1/when_types.go +++ b/pkg/apis/pipeline/v1beta1/when_types.go @@ -24,12 +24,12 @@ import ( // to determine whether the Task should be executed or skipped type WhenExpression struct { // Input is the string for guard checking which can be a static input or an output from a parent Task - Input string + Input string `json:"input"` // Operator that represents an Input's relationship to the values - Operator selection.Operator + Operator selection.Operator `json:"operator"` // Values is an array of strings, which is compared against the input, for guard checking // It must be non-empty - Values []string + Values []string `json:"values"` } func (we *WhenExpression) isInputInValues() bool { diff --git a/pkg/apis/pipeline/v1beta1/when_validation.go b/pkg/apis/pipeline/v1beta1/when_validation.go index ff7a49e37df..e9803017503 100644 --- 
a/pkg/apis/pipeline/v1beta1/when_validation.go +++ b/pkg/apis/pipeline/v1beta1/when_validation.go @@ -33,22 +33,15 @@ var validWhenOperators = []string{ } func (wes WhenExpressions) validate() *apis.FieldError { - if err := wes.validateWhenExpressionsFields(); err != nil { - return err - } - if err := wes.validateTaskResultsVariables(); err != nil { - return err - } - return nil + errs := wes.validateWhenExpressionsFields().ViaField("when") + return errs.Also(wes.validateTaskResultsVariables().ViaField("when")) } -func (wes WhenExpressions) validateWhenExpressionsFields() *apis.FieldError { - for _, we := range wes { - if err := we.validateWhenExpressionFields(); err != nil { - return err - } +func (wes WhenExpressions) validateWhenExpressionsFields() (errs *apis.FieldError) { + for idx, we := range wes { + errs = errs.Also(we.validateWhenExpressionFields().ViaIndex(idx)) } - return nil + return errs } func (we *WhenExpression) validateWhenExpressionFields() *apis.FieldError { @@ -57,16 +50,16 @@ func (we *WhenExpression) validateWhenExpressionFields() *apis.FieldError { } if !sets.NewString(validWhenOperators...).Has(string(we.Operator)) { message := fmt.Sprintf("operator %q is not recognized. 
valid operators: %s", we.Operator, strings.Join(validWhenOperators, ",")) - return apis.ErrInvalidValue(message, "spec.task.when") + return apis.ErrInvalidValue(message, apis.CurrentField) } if len(we.Values) == 0 { - return apis.ErrInvalidValue("expecting non-empty values field", "spec.task.when") + return apis.ErrInvalidValue("expecting non-empty values field", apis.CurrentField) } return nil } func (wes WhenExpressions) validateTaskResultsVariables() *apis.FieldError { - for _, we := range wes { + for idx, we := range wes { expressions, ok := we.GetVarSubstitutionExpressions() if ok { if LooksLikeContainsResultRefs(expressions) { @@ -74,7 +67,7 @@ func (wes WhenExpressions) validateTaskResultsVariables() *apis.FieldError { resultRefs := NewResultRefs(expressions) if len(expressions) != len(resultRefs) { message := fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs) - return apis.ErrInvalidValue(message, "spec.tasks.when") + return apis.ErrInvalidValue(message, apis.CurrentField).ViaIndex(idx) } } } @@ -82,25 +75,16 @@ func (wes WhenExpressions) validateTaskResultsVariables() *apis.FieldError { return nil } -func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String) *apis.FieldError { - for _, we := range wes { - if err := validateStringVariable(fmt.Sprintf("input[%s]", we.Input), we.Input, prefix, paramNames, arrayParamNames); err != nil { - return err - } +func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { + for idx, we := range wes { + errs = errs.Also(validateStringVariable(we.Input, prefix, paramNames, arrayParamNames).ViaField("input").ViaFieldIndex("when", idx)) for _, val := range we.Values { - if err := validateStringVariable(fmt.Sprintf("values[%s]", val), val, prefix, paramNames, arrayParamNames); err != nil { - 
return err - } + errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames).ViaField("values").ViaFieldIndex("when", idx)) } } - return nil + return errs } -func validateStringVariable(name, value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { - if err := substitution.ValidateVariable(name, value, prefix, "task when expression", "pipelinespec.when", stringVars); err != nil { - return err - } - if err := substitution.ValidateVariableProhibited(name, value, prefix, "task when expression", "pipelinespec.when", arrayVars); err != nil { - return err - } - return nil +func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { + errs := substitution.ValidateVariableP(value, prefix, stringVars) + return errs.Also(substitution.ValidateVariableProhibitedP(value, prefix, arrayVars)) } diff --git a/pkg/apis/pipeline/v1beta1/workspace_types.go b/pkg/apis/pipeline/v1beta1/workspace_types.go index dfa0efa1dcc..814e4f841e4 100644 --- a/pkg/apis/pipeline/v1beta1/workspace_types.go +++ b/pkg/apis/pipeline/v1beta1/workspace_types.go @@ -36,6 +36,9 @@ type WorkspaceDeclaration struct { // ReadOnly dictates whether a mounted volume is writable. By default this // field is false and so mounted volumes are writable. ReadOnly bool `json:"readOnly,omitempty"` + // Optional marks a Workspace as not being required in TaskRuns. By default + // this field is false and so declared workspaces are required. + Optional bool `json:"optional,omitempty"` } // GetMountPath returns the mountPath for w which is the MountPath if provided or the @@ -91,6 +94,9 @@ type PipelineWorkspaceDeclaration struct { // tasks are intended to have access to the data on the workspace. // +optional Description string `json:"description,omitempty"` + // Optional marks a Workspace as not being required in PipelineRuns. By default + // this field is false and so declared workspaces are required. 
+ Optional bool `json:"optional,omitempty"` } // WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be diff --git a/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go index 97cf2670876..f04e41d45ab 100644 --- a/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go @@ -459,7 +459,11 @@ func (in *PipelineResourceRef) DeepCopy() *PipelineResourceRef { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineResourceResult) DeepCopyInto(out *PipelineResourceResult) { *out = *in - out.ResourceRef = in.ResourceRef + if in.ResourceRef != nil { + in, out := &in.ResourceRef, &out.ResourceRef + *out = new(PipelineResourceRef) + **out = **in + } return } @@ -1640,7 +1644,9 @@ func (in *TaskRunStatusFields) DeepCopyInto(out *TaskRunStatusFields) { if in.ResourcesResult != nil { in, out := &in.ResourcesResult, &out.ResourcesResult *out = make([]PipelineResourceResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.TaskRunResults != nil { in, out := &in.TaskRunResults, &out.TaskRunResults diff --git a/pkg/apis/resource/v1alpha1/storage/artifact_bucket_test.go b/pkg/apis/resource/v1alpha1/storage/artifact_bucket_test.go index 8d4cf6ce850..8360f2df750 100644 --- a/pkg/apis/resource/v1alpha1/storage/artifact_bucket_test.go +++ b/pkg/apis/resource/v1alpha1/storage/artifact_bucket_test.go @@ -43,7 +43,7 @@ var ( SecretKey: "serviceaccount", }}, ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", } ) @@ -56,7 +56,7 @@ func TestBucketGetCopyFromContainerSpec(t *testing.T) { Command: []string{"mkdir", "-p", "/workspace/destination"}, }}, {Container: corev1.Container{ Name: "artifact-copy-from-workspace-mz4c7", - Image: "google/cloud-sdk", + Image: 
"gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "gs://fake-bucket/src-path/*", "/workspace/destination"}, Env: []corev1.EnvVar{{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: fmt.Sprintf("/var/bucketsecret/%s/serviceaccount", secretName)}}, @@ -73,7 +73,7 @@ func TestBucketGetCopyToContainerSpec(t *testing.T) { names.TestingSeed() want := []v1alpha1.Step{{Container: corev1.Container{ Name: "artifact-copy-to-workspace-9l9zj", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "src-path", "gs://fake-bucket/workspace/destination"}, Env: []corev1.EnvVar{{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: fmt.Sprintf("/var/bucketsecret/%s/serviceaccount", secretName)}}, diff --git a/pkg/apis/resource/v1alpha1/storage/build_gcs_test.go b/pkg/apis/resource/v1alpha1/storage/build_gcs_test.go index 242b9011b40..48ac329fcf3 100644 --- a/pkg/apis/resource/v1alpha1/storage/build_gcs_test.go +++ b/pkg/apis/resource/v1alpha1/storage/build_gcs_test.go @@ -37,7 +37,7 @@ var images = pipeline.Images{ CredsImage: "override-with-creds:latest", KubeconfigWriterImage: "override-with-kubeconfig-writer:latest", ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", BuildGCSFetcherImage: "gcr.io/cloud-builders/gcs-fetcher:latest", PRImage: "override-with-pr:latest", ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest", diff --git a/pkg/apis/resource/v1alpha1/storage/gcs_test.go b/pkg/apis/resource/v1alpha1/storage/gcs_test.go index 8843d61ee3b..82d1daac11d 100644 --- a/pkg/apis/resource/v1alpha1/storage/gcs_test.go +++ b/pkg/apis/resource/v1alpha1/storage/gcs_test.go @@ -98,7 +98,7 @@ func TestValidNewGCSResource(t *testing.T) { FieldName: "GOOGLE_APPLICATION_CREDENTIALS", }}, ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: 
"gcr.io/google.com/cloudsdktool/cloud-sdk", } gcsRes, err := storage.NewGCSResource("test-resource", images, pr) @@ -167,7 +167,7 @@ func TestGetInputSteps(t *testing.T) { SecretKey: "key.json", }}, ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, wantSteps: []v1beta1.Step{{Container: corev1.Container{ Name: "create-dir-gcs-valid-9l9zj", @@ -183,7 +183,7 @@ gsutil rsync -d -r gs://some-bucket /workspace `, Container: corev1.Container{ Name: "fetch-gcs-valid-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Env: []corev1.EnvVar{{ Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: "/var/secret/secretName/key.json", @@ -209,7 +209,7 @@ gsutil rsync -d -r gs://some-bucket /workspace FieldName: "GOOGLE_APPLICATION_CREDENTIALS", }}, ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, wantSteps: []v1beta1.Step{{Container: corev1.Container{ Name: "create-dir-gcs-valid-mssqb", @@ -225,7 +225,7 @@ gsutil cp gs://some-bucket /workspace `, Container: corev1.Container{ Name: "fetch-gcs-valid-78c5n", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Env: []corev1.EnvVar{{ Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: "/var/secret/secretName/key.json", @@ -269,11 +269,11 @@ func TestGetOutputTaskModifier(t *testing.T) { FieldName: "GOOGLE_APPLICATION_CREDENTIALS", SecretKey: "key.json", }}, - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, wantSteps: []v1beta1.Step{{Container: corev1.Container{ Name: "upload-gcs-valid-9l9zj", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"rsync", "-d", "-r", "/workspace/", "gs://some-bucket"}, Env: []corev1.EnvVar{{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: "/var/secret/secretName/key.json"}}, @@ -296,11 +296,11 
@@ func TestGetOutputTaskModifier(t *testing.T) { SecretName: "secretName", FieldName: "GOOGLE_APPLICATION_CREDENTIALS", }}, - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, wantSteps: []v1beta1.Step{{Container: corev1.Container{ Name: "upload-gcs-valid-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "/workspace/*", "gs://some-bucket"}, Env: []corev1.EnvVar{ @@ -317,11 +317,11 @@ func TestGetOutputTaskModifier(t *testing.T) { Name: "gcs-valid", Location: "gs://some-bucket", TypeDir: false, - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, wantSteps: []v1beta1.Step{{Container: corev1.Container{ Name: "upload-gcs-valid-mssqb", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "/workspace/*", "gs://some-bucket"}, }}}, diff --git a/pkg/artifacts/artifact_storage_test.go b/pkg/artifacts/artifact_storage_test.go index af691340252..dd82d792658 100644 --- a/pkg/artifacts/artifact_storage_test.go +++ b/pkg/artifacts/artifact_storage_test.go @@ -43,7 +43,7 @@ var ( CredsImage: "override-with-creds:latest", KubeconfigWriterImage: "override-with-kubeconfig-writer:latest", ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", BuildGCSFetcherImage: "gcr.io/cloud-builders/gcs-fetcher:latest", PRImage: "override-with-pr:latest", ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest", @@ -211,7 +211,7 @@ func TestInitializeArtifactStorage(t *testing.T) { SecretName: "secret1", }}, ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, }, { desc: "location empty", @@ -256,7 +256,7 @@ func TestInitializeArtifactStorage(t *testing.T) { expectedArtifactStorage: 
&storage.ArtifactBucket{ Location: "gs://fake-bucket", ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, }, { desc: "valid bucket with boto config", @@ -270,7 +270,7 @@ func TestInitializeArtifactStorage(t *testing.T) { expectedArtifactStorage: &storage.ArtifactBucket{ Location: "s3://fake-bucket", ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", Secrets: []resourcev1alpha1.SecretParam{{ FieldName: "BOTO_CONFIG", SecretKey: "sakey", @@ -524,7 +524,7 @@ func TestGetArtifactStorageWithConfig(t *testing.T) { SecretName: "secret1", }}, ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, }, { desc: "location empty", diff --git a/pkg/entrypoint/entrypointer.go b/pkg/entrypoint/entrypointer.go index 188344a8fe7..6c9ed4edbc4 100644 --- a/pkg/entrypoint/entrypointer.go +++ b/pkg/entrypoint/entrypointer.go @@ -102,8 +102,9 @@ func (e Entrypointer) Go() error { // *but* we write postfile to make next steps bail too. e.WritePostFile(e.PostFile, err) output = append(output, v1beta1.PipelineResourceResult{ - Key: "StartedAt", - Value: time.Now().Format(timeFormat), + Key: "StartedAt", + Value: time.Now().Format(timeFormat), + ResultType: v1beta1.InternalTektonResultType, }) return err @@ -114,8 +115,9 @@ func (e Entrypointer) Go() error { e.Args = append([]string{e.Entrypoint}, e.Args...) } output = append(output, v1beta1.PipelineResourceResult{ - Key: "StartedAt", - Value: time.Now().Format(timeFormat), + Key: "StartedAt", + Value: time.Now().Format(timeFormat), + ResultType: v1beta1.InternalTektonResultType, }) err := e.Runner.Run(e.Args...) 
diff --git a/pkg/git/git.go b/pkg/git/git.go index 364dcc9e601..04480d5b8b3 100644 --- a/pkg/git/git.go +++ b/pkg/git/git.go @@ -21,6 +21,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "strconv" "strings" @@ -33,6 +34,11 @@ const ( sshMissingKnownHostsSSHCommand = "ssh -o StrictHostKeyChecking=accept-new" ) +var ( + // sshURLRegexFormat matches the url of SSH git repository + sshURLRegexFormat = regexp.MustCompile(`(ssh://[\w\d\.]+|.+@?.+\..+:)(:[\d]+){0,1}/*(.*)`) +) + func run(logger *zap.SugaredLogger, dir string, args ...string) (string, error) { c := exec.Command("git", args...) var output bytes.Buffer @@ -69,6 +75,7 @@ func Fetch(logger *zap.SugaredLogger, spec FetchSpec) error { if err := ensureHomeEnv(logger); err != nil { return err } + validateGitAuth(logger, spec.URL) if spec.Path != "" { if _, err := run(logger, "", "init", spec.Path); err != nil { @@ -244,3 +251,25 @@ func userHasKnownHostsFile(logger *zap.SugaredLogger) (bool, error) { f.Close() return true, nil } + +func validateGitAuth(logger *zap.SugaredLogger, url string) { + homeenv := os.Getenv("HOME") + sshCred := true + if _, err := os.Stat(homeenv + "/.ssh"); os.IsNotExist(err) { + sshCred = false + } + urlSSHFormat := ValidateGitSSHURLFormat(url) + if sshCred && !urlSSHFormat { + logger.Warnf("SSH credentials have been provided but the URL(%q) is not a valid SSH URL. 
This warning can be safely ignored if the URL is for a public repo or you are using basic auth", url) + } else if !sshCred && urlSSHFormat { + logger.Warnf("URL(%q) appears to need SSH authentication but no SSH credentials have been provided", url) + } +} + +// ValidateGitSSHURLFormat validates the given URL format is SSH or not +func ValidateGitSSHURLFormat(url string) bool { + if sshURLRegexFormat.MatchString(url) { + return true + } + return false +} diff --git a/pkg/git/git_test.go b/pkg/git/git_test.go new file mode 100644 index 00000000000..f685eb79add --- /dev/null +++ b/pkg/git/git_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package git + +import "testing" + +func TestValidateGitSSHURLFormat(t *testing.T) { + tests := []struct { + url string + want bool + }{ + { + url: "git@github.com:user/project.git", + want: true, + }, + { + url: "git@127.0.0.1:user/project.git", + want: true, + }, + { + url: "http://github.com/user/project.git", + want: false, + }, + { + url: "https://github.com/user/project.git", + want: false, + }, + { + url: "http://127.0.0.1/user/project.git", + want: false, + }, + { + url: "https://127.0.0.1/user/project.git", + want: false, + }, + { + url: "http://host.xz/path/to/repo.git/", + want: false, + }, + { + url: "https://host.xz/path/to/repo.git/", + want: false, + }, + { + url: "ssh://user@host.xz:port/path/to/repo.git/", + want: true, + }, + { + url: "ssh://user@host.xz/path/to/repo.git/", + want: true, + }, + { + url: "ssh://host.xz:port/path/to/repo.git/", + want: true, + }, + { + url: "ssh://host.xz/path/to/repo.git/", + want: true, + }, + { + url: "git://host.xz/path/to/repo.git/", + want: false, + }, + { + url: "/path/to/repo.git/", + want: false, + }, + { + url: "file://~/path/to/repo.git/", + want: false, + }, + { + url: "user@host.xz:/path/to/repo.git/", + want: true, + }, + { + url: "host.xz:/path/to/repo.git/", + want: true, + }, + { + url: "user@host.xz:path/to/repo.git", + want: true, + }, + } + + for _, tt := range tests { + got := ValidateGitSSHURLFormat(tt.url) + if got != tt.want { + t.Errorf("Validate URL(%v)'s SSH format got %v, want %v", tt.url, got, tt.want) + } + } +} diff --git a/pkg/pod/creds_init.go b/pkg/pod/creds_init.go index a3056007bd9..15256247118 100644 --- a/pkg/pod/creds_init.go +++ b/pkg/pod/creds_init.go @@ -17,8 +17,10 @@ limitations under the License. 
package pod import ( + "context" "fmt" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/credentials" "github.com/tektoncd/pipeline/pkg/credentials/dockercreds" @@ -29,7 +31,10 @@ import ( "k8s.io/client-go/kubernetes" ) -const credsInitHomeMountPrefix = "tekton-creds-init-home" +const ( + credsInitHomeMountPrefix = "tekton-creds-init-home" + sshKnownHosts = "known_hosts" +) // credsInit reads secrets available to the given service account and // searches for annotations matching a specific format (documented in @@ -42,9 +47,11 @@ const credsInitHomeMountPrefix = "tekton-creds-init-home" // Any errors encountered during this process are returned to the // caller. If no matching annotated secrets are found, nil lists with a // nil error are returned. -func credsInit(serviceAccountName, namespace string, kubeclient kubernetes.Interface) ([]string, []corev1.Volume, []corev1.VolumeMount, error) { +func credsInit(ctx context.Context, serviceAccountName, namespace string, kubeclient kubernetes.Interface) ([]string, []corev1.Volume, []corev1.VolumeMount, error) { + // service account if not specified in pipeline/task spec, read it from the ConfigMap + // and defaults to `default` if its missing from the ConfigMap as well if serviceAccountName == "" { - serviceAccountName = "default" + serviceAccountName = config.DefaultServiceAccountValue } sa, err := kubeclient.CoreV1().ServiceAccounts(namespace).Get(serviceAccountName, metav1.GetOptions{}) @@ -63,6 +70,10 @@ func credsInit(serviceAccountName, namespace string, kubeclient kubernetes.Inter return nil, nil, nil, err } + if err := checkGitSSHSecret(ctx, secret); err != nil { + return nil, nil, nil, err + } + matched := false for _, b := range builders { if sa := b.MatchingAnnotations(secret); len(sa) > 0 { @@ -112,3 +123,17 @@ func getCredsInitVolume() (corev1.Volume, corev1.VolumeMount) { } return v, vm } + +// checkGitSSHSecret requires 
the `known_hosts` field to be included in the Git SSH Secret when the feature flag +// `require-git-ssh-secret-known-hosts` is true. +func checkGitSSHSecret(ctx context.Context, secret *corev1.Secret) error { + cfg := config.FromContextOrDefaults(ctx) + + if secret.Type == corev1.SecretTypeSSHAuth && cfg.FeatureFlags.RequireGitSSHSecretKnownHosts { + if _, ok := secret.Data[sshKnownHosts]; !ok { + return fmt.Errorf("TaskRun validation failed. Git SSH Secret must have \"known_hosts\" included " + + "when feature flag \"require-git-ssh-secret-known-hosts\" is set to true") + } + } + return nil +} diff --git a/pkg/pod/creds_init_test.go b/pkg/pod/creds_init_test.go index f22bc34edd7..38ff0390c94 100644 --- a/pkg/pod/creds_init_test.go +++ b/pkg/pod/creds_init_test.go @@ -17,20 +17,25 @@ limitations under the License. package pod import ( + "context" "testing" "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/system" "github.com/tektoncd/pipeline/test/diff" "github.com/tektoncd/pipeline/test/names" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" fakek8s "k8s.io/client-go/kubernetes/fake" + logtesting "knative.dev/pkg/logging/testing" ) const ( - serviceAccountName = "my-service-account" - namespace = "namespacey-mcnamespace" + serviceAccountName = "my-service-account" + namespace = "namespacey-mcnamespace" + featureFlagRequireKnownHosts = "require-git-ssh-secret-known-hosts" ) func TestCredsInit(t *testing.T) { @@ -153,7 +158,7 @@ func TestCredsInit(t *testing.T) { t.Run(c.desc, func(t *testing.T) { names.TestingSeed() kubeclient := fakek8s.NewSimpleClientset(c.objs...)
- args, volumes, volumeMounts, err := credsInit(serviceAccountName, namespace, kubeclient) + args, volumes, volumeMounts, err := credsInit(context.Background(), serviceAccountName, namespace, kubeclient) if err != nil { t.Fatalf("credsInit: %v", err) } @@ -169,3 +174,94 @@ func TestCredsInit(t *testing.T) { }) } } + +func TestCheckGitSSHSecret(t *testing.T) { + for _, tc := range []struct { + desc string + configMap *corev1.ConfigMap + secret *corev1.Secret + wantErrorMsg string + }{{ + desc: "require known_hosts but secret does not include known_hosts", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()}, + Data: map[string]string{ + featureFlagRequireKnownHosts: "true", + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-creds", + Namespace: namespace, + Annotations: map[string]string{ + "tekton.dev/git-0": "github.com", + }, + }, + Type: "kubernetes.io/ssh-auth", + Data: map[string][]byte{ + "ssh-privatekey": []byte("Hello World!"), + }, + }, + wantErrorMsg: "TaskRun validation failed. 
Git SSH Secret must have \"known_hosts\" included " + + "when feature flag \"require-git-ssh-secret-known-hosts\" is set to true", + }, { + desc: "require known_hosts and secret includes known_hosts", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()}, + Data: map[string]string{ + featureFlagRequireKnownHosts: "true", + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-creds", + Namespace: namespace, + Annotations: map[string]string{ + "tekton.dev/git-0": "github.com", + }, + }, + Type: "kubernetes.io/ssh-auth", + Data: map[string][]byte{ + "ssh-privatekey": []byte("Hello World!"), + "known_hosts": []byte("Hello World!"), + }, + }, + }, { + desc: "not require known_hosts", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()}, + Data: map[string]string{ + featureFlagRequireKnownHosts: "false", + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-creds", + Namespace: namespace, + Annotations: map[string]string{ + "tekton.dev/git-0": "github.com", + }, + }, + Type: "kubernetes.io/ssh-auth", + Data: map[string][]byte{ + "ssh-privatekey": []byte("Hello World!"), + }, + }, + }} { + t.Run(tc.desc, func(t *testing.T) { + store := config.NewStore(logtesting.TestLogger(t)) + store.OnConfigChanged(tc.configMap) + err := checkGitSSHSecret(store.ToContext(context.Background()), tc.secret) + + if wantError := tc.wantErrorMsg != ""; wantError { + if err == nil { + t.Errorf("expected error %q, got nil", tc.wantErrorMsg) + } else if diff := cmp.Diff(tc.wantErrorMsg, err.Error()); diff != "" { + t.Errorf("unexpected (-want, +got) = %v", diff) + } + } else if err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} diff --git a/pkg/pod/entrypoint.go b/pkg/pod/entrypoint.go index f4fbded8994..033ec03c6f7 100644 --- a/pkg/pod/entrypoint.go +++ 
b/pkg/pod/entrypoint.go @@ -88,9 +88,11 @@ var ( // TODO(#1605): Also use entrypoint injection to order sidecar start/stop. func orderContainers(entrypointImage string, extraEntrypointArgs []string, steps []corev1.Container, results []v1beta1.TaskResult) (corev1.Container, []corev1.Container, error) { initContainer := corev1.Container{ - Name: "place-tools", - Image: entrypointImage, - Command: []string{"cp", "/ko-app/entrypoint", entrypointBinary}, + Name: "place-tools", + Image: entrypointImage, + // Invoke the entrypoint binary in "cp mode" to copy itself + // into the correct location for later steps. + Command: []string{"/ko-app/entrypoint", "cp", "/ko-app/entrypoint", entrypointBinary}, VolumeMounts: []corev1.VolumeMount{toolsMount}, } @@ -207,7 +209,7 @@ func StopSidecars(nopImage string, kubeclient kubernetes.Interface, pod corev1.P } if updated { if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(newPod); err != nil { - return fmt.Errorf("error adding ready annotation to Pod %q: %w", pod.Name, err) + return fmt.Errorf("error stopping sidecars of Pod %q: %w", pod.Name, err) } } return nil diff --git a/pkg/pod/entrypoint_test.go b/pkg/pod/entrypoint_test.go index 47a0125de0a..2f560465913 100644 --- a/pkg/pod/entrypoint_test.go +++ b/pkg/pod/entrypoint_test.go @@ -98,7 +98,7 @@ func TestOrderContainers(t *testing.T) { wantInit := corev1.Container{ Name: "place-tools", Image: images.EntrypointImage, - Command: []string{"cp", "/ko-app/entrypoint", entrypointBinary}, + Command: []string{"/ko-app/entrypoint", "cp", "/ko-app/entrypoint", entrypointBinary}, VolumeMounts: []corev1.VolumeMount{toolsMount}, } if d := cmp.Diff(wantInit, gotInit); d != "" { diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go index 845b67c9bf1..bf22d200bfb 100644 --- a/pkg/pod/pod.go +++ b/pkg/pod/pod.go @@ -106,7 +106,7 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec // Create Volumes and VolumeMounts for any credentials found in annotated // 
Secrets, along with any arguments needed by Step entrypoints to process // those secrets. - credEntrypointArgs, credVolumes, credVolumeMounts, err := credsInit(taskRun.Spec.ServiceAccountName, taskRun.Namespace, b.KubeClient) + credEntrypointArgs, credVolumes, credVolumeMounts, err := credsInit(ctx, taskRun.Spec.ServiceAccountName, taskRun.Namespace, b.KubeClient) if err != nil { return nil, err } diff --git a/pkg/pod/pod_test.go b/pkg/pod/pod_test.go index 188bedcd86e..f6645a0cf7f 100644 --- a/pkg/pod/pod_test.go +++ b/pkg/pod/pod_test.go @@ -66,7 +66,7 @@ func TestPodBuild(t *testing.T) { placeToolsInit := corev1.Container{ Name: "place-tools", Image: images.EntrypointImage, - Command: []string{"cp", "/ko-app/entrypoint", "/tekton/tools/entrypoint"}, + Command: []string{"/ko-app/entrypoint", "cp", "/ko-app/entrypoint", "/tekton/tools/entrypoint"}, VolumeMounts: []corev1.VolumeMount{toolsMount}, } runtimeClassName := "gvisor" @@ -772,7 +772,7 @@ script-heredoc-randomly-generated-78c5n { Name: "place-tools", Image: images.EntrypointImage, - Command: []string{"cp", "/ko-app/entrypoint", "/tekton/tools/entrypoint"}, + Command: []string{"/ko-app/entrypoint", "cp", "/ko-app/entrypoint", "/tekton/tools/entrypoint"}, VolumeMounts: []corev1.VolumeMount{toolsMount}, }}, Containers: []corev1.Container{{ diff --git a/pkg/pod/status.go b/pkg/pod/status.go index d8dc3034800..1bdcee6034f 100644 --- a/pkg/pod/status.go +++ b/pkg/pod/status.go @@ -23,8 +23,8 @@ import ( "strings" "time" + "github.com/hashicorp/go-multierror" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "github.com/tektoncd/pipeline/pkg/names" "github.com/tektoncd/pipeline/pkg/termination" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -97,89 +97,176 @@ func SidecarsReady(podStatus corev1.PodStatus) bool { } // MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status. 
-func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod, taskSpec v1beta1.TaskSpec) v1beta1.TaskRunStatus { +func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod) (v1beta1.TaskRunStatus, error) { trs := &tr.Status if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { // If the taskRunStatus doesn't exist yet, it's because we just started running markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") } + sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers) + + complete := areStepsComplete(pod) || pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed + + if complete { + updateCompletedTaskRun(trs, pod) + } else { + updateIncompleteTaskRun(trs, pod) + } + trs.PodName = pod.Name trs.Steps = []v1beta1.StepState{} trs.Sidecars = []v1beta1.SidecarState{} + var stepStatuses []corev1.ContainerStatus + var sidecarStatuses []corev1.ContainerStatus for _, s := range pod.Status.ContainerStatuses { if IsContainerStep(s.Name) { - if s.State.Terminated != nil && len(s.State.Terminated.Message) != 0 { - message, time, err := removeStartInfoFromTerminationMessage(s) + stepStatuses = append(stepStatuses, s) + } else if isContainerSidecar(s.Name) { + sidecarStatuses = append(sidecarStatuses, s) + } + } + + var merr *multierror.Error + if err := setTaskRunStatusBasedOnStepStatus(logger, stepStatuses, &tr); err != nil { + merr = multierror.Append(merr, err) + } + + setTaskRunStatusBasedOnSidecarStatus(sidecarStatuses, trs) + + trs.TaskRunResults = removeDuplicateResults(trs.TaskRunResults) + + return *trs, merr.ErrorOrNil() +} + +func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun) *multierror.Error { + trs := &tr.Status + var merr *multierror.Error + + for _, s := 
range stepStatuses { + if s.State.Terminated != nil && len(s.State.Terminated.Message) != 0 { + msg := s.State.Terminated.Message + + results, err := termination.ParseMessage(logger, msg) + if err != nil { + logger.Errorf("termination message could not be parsed as JSON: %v", err) + merr = multierror.Append(merr, err) + } else { + time, err := extractStartedAtTimeFromResults(results) if err != nil { - logger.Errorf("error setting the start time of step %q in taskrun %q: %w", s.Name, tr.Name, err) + logger.Errorf("error setting the start time of step %q in taskrun %q: %v", s.Name, tr.Name, err) + merr = multierror.Append(merr, err) + } + taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results) + if tr.IsSuccessful() { + trs.TaskRunResults = append(trs.TaskRunResults, taskResults...) + trs.ResourcesResult = append(trs.ResourcesResult, pipelineResourceResults...) + } + msg, err = createMessageFromResults(filteredResults) + if err != nil { + logger.Errorf("%v", err) + merr = multierror.Append(merr, err) + } else { + s.State.Terminated.Message = msg } if time != nil { s.State.Terminated.StartedAt = *time - s.State.Terminated.Message = message } } - trs.Steps = append(trs.Steps, v1beta1.StepState{ - ContainerState: *s.State.DeepCopy(), - Name: trimStepPrefix(s.Name), - ContainerName: s.Name, - ImageID: s.ImageID, - }) - } else if isContainerSidecar(s.Name) { - trs.Sidecars = append(trs.Sidecars, v1beta1.SidecarState{ - ContainerState: *s.State.DeepCopy(), - Name: TrimSidecarPrefix(s.Name), - ContainerName: s.Name, - ImageID: s.ImageID, - }) } + trs.Steps = append(trs.Steps, v1beta1.StepState{ + ContainerState: *s.State.DeepCopy(), + Name: trimStepPrefix(s.Name), + ContainerName: s.Name, + ImageID: s.ImageID, + }) } - // Complete if we did not find a step that is not complete, or the pod is in a definitely complete phase - complete := areStepsComplete(pod) || pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed
+ return merr - if complete { - updateCompletedTaskRun(trs, pod) - } else { - updateIncompleteTaskRun(trs, pod) +} + +func setTaskRunStatusBasedOnSidecarStatus(sidecarStatuses []corev1.ContainerStatus, trs *v1beta1.TaskRunStatus) { + for _, s := range sidecarStatuses { + trs.Sidecars = append(trs.Sidecars, v1beta1.SidecarState{ + ContainerState: *s.State.DeepCopy(), + Name: TrimSidecarPrefix(s.Name), + ContainerName: s.Name, + ImageID: s.ImageID, + }) + } +} + +func createMessageFromResults(results []v1beta1.PipelineResourceResult) (string, error) { + if len(results) == 0 { + return "", nil + } + bytes, err := json.Marshal(results) + if err != nil { + return "", fmt.Errorf("error marshalling remaining results back into termination message: %w", err) } + return string(bytes), nil +} - // Sort step states according to the order specified in the TaskRun spec's steps. - trs.Steps = sortTaskRunStepOrder(trs.Steps, taskSpec.Steps) +func filterResultsAndResources(results []v1beta1.PipelineResourceResult) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) { + var taskResults []v1beta1.TaskRunResult + var pipelineResourceResults []v1beta1.PipelineResourceResult + var filteredResults []v1beta1.PipelineResourceResult + for _, r := range results { + switch r.ResultType { + case v1beta1.TaskRunResultType: + taskRunResult := v1beta1.TaskRunResult{ + Name: r.Key, + Value: r.Value, + } + taskResults = append(taskResults, taskRunResult) + filteredResults = append(filteredResults, r) + case v1beta1.InternalTektonResultType: + // Internal messages are ignored because they're not used as external result + continue + case v1beta1.PipelineResourceResultType: + fallthrough + default: + pipelineResourceResults = append(pipelineResourceResults, r) + filteredResults = append(filteredResults, r) + } + } - return *trs + return taskResults, pipelineResourceResults, filteredResults } -// removeStartInfoFromTerminationMessage searches for a result 
called "StartedAt" in the JSON-formatted -// termination message of a step and returns the values to use for sets State.Terminated if it's -// found. The "StartedAt" result is also removed from the list of results in the container status. -func removeStartInfoFromTerminationMessage(s corev1.ContainerStatus) (string, *metav1.Time, error) { - r, err := termination.ParseMessage(s.State.Terminated.Message) - if err != nil { - return "", nil, fmt.Errorf("termination message could not be parsed as JSON: %w", err) +func removeDuplicateResults(taskRunResult []v1beta1.TaskRunResult) []v1beta1.TaskRunResult { + if len(taskRunResult) == 0 { + return nil + } + + uniq := make([]v1beta1.TaskRunResult, 0) + latest := make(map[string]v1beta1.TaskRunResult, 0) + for _, res := range taskRunResult { + if _, seen := latest[res.Name]; !seen { + uniq = append(uniq, res) + } + latest[res.Name] = res } - for index, result := range r { + for i, res := range uniq { + uniq[i] = latest[res.Name] + } + return uniq +} + +func extractStartedAtTimeFromResults(results []v1beta1.PipelineResourceResult) (*metav1.Time, error) { + for _, result := range results { if result.Key == "StartedAt" { t, err := time.Parse(timeFormat, result.Value) if err != nil { - return "", nil, fmt.Errorf("could not parse time value %q in StartedAt field: %w", result.Value, err) + return nil, fmt.Errorf("could not parse time value %q in StartedAt field: %w", result.Value, err) } - message := "" startedAt := metav1.NewTime(t) - // remove the entry for the starting time - r = append(r[:index], r[index+1:]...) 
- if len(r) == 0 { - message = "" - } else if bytes, err := json.Marshal(r); err != nil { - return "", nil, fmt.Errorf("error marshalling remaining results back into termination message: %w", err) - } else { - message = string(bytes) - } - return message, &startedAt, nil + return &startedAt, nil } } - return "", nil, nil + return nil, nil } func updateCompletedTaskRun(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) { @@ -240,29 +327,7 @@ func areStepsComplete(pod *corev1.Pod) bool { return stepsComplete } -//SortContainerStatuses sort ContainerStatuses based on "FinishedAt" -func SortContainerStatuses(podInstance *corev1.Pod) { - sort.Slice(podInstance.Status.ContainerStatuses, func(i, j int) bool { - var ifinish, istart, jfinish, jstart time.Time - if term := podInstance.Status.ContainerStatuses[i].State.Terminated; term != nil { - ifinish = term.FinishedAt.Time - istart = term.StartedAt.Time - } - if term := podInstance.Status.ContainerStatuses[j].State.Terminated; term != nil { - jfinish = term.FinishedAt.Time - jstart = term.StartedAt.Time - } - - if ifinish.Equal(jfinish) { - return istart.Before(jstart) - } - return ifinish.Before(jfinish) - }) - -} - func getFailureMessage(pod *corev1.Pod) string { - SortContainerStatuses(pod) // First, try to surface an error about the actual build step that failed. for _, status := range pod.Status.ContainerStatuses { term := status.State.Terminated @@ -370,53 +435,49 @@ func markStatusSuccess(trs *v1beta1.TaskRunStatus) { }) } -// sortTaskRunStepOrder sorts the StepStates in the same order as the original +// sortPodContainerStatuses sorts the pod container statuses in the same order as the original // TaskSpec steps. 
-func sortTaskRunStepOrder(taskRunSteps []v1beta1.StepState, taskSpecSteps []v1beta1.Step) []v1beta1.StepState { - trt := &stepStateSorter{ - taskRunSteps: taskRunSteps, +func sortPodContainerStatuses(podContainerStatuses []corev1.ContainerStatus, podSpecContainers []corev1.Container) []corev1.ContainerStatus { + trt := &podContainerStatusSorter{ + podContainerStatuses: podContainerStatuses, } - trt.mapForSort = trt.constructTaskStepsSorter(taskSpecSteps) + trt.mapForSort = trt.constructPodContainerStatusesSorter(podSpecContainers) sort.Sort(trt) - return trt.taskRunSteps + return trt.podContainerStatuses } -// stepStateSorter implements a sorting mechanism to align the order of the steps in TaskRun +// podContainerStatusSorter implements a sorting mechanism to align the order of the pod container statuses in the pod // with the spec steps in Task. -type stepStateSorter struct { - taskRunSteps []v1beta1.StepState - mapForSort map[string]int +type podContainerStatusSorter struct { + podContainerStatuses []corev1.ContainerStatus + mapForSort map[string]int } -// constructTaskStepsSorter constructs a map matching the names of -// the steps to their indices for a task. -func (trt *stepStateSorter) constructTaskStepsSorter(taskSpecSteps []v1beta1.Step) map[string]int { +// constructPodContainerStatusesSorter constructs a map matching the names of +// the containers to their step indices for a task. 
+func (trt *podContainerStatusSorter) constructPodContainerStatusesSorter(podSpecContainers []corev1.Container) map[string]int { sorter := make(map[string]int) - for index, step := range taskSpecSteps { - stepName := step.Name - if stepName == "" { - stepName = names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("unnamed-%d", index)) - } - sorter[stepName] = index + for index, container := range podSpecContainers { + sorter[container.Name] = index } return sorter } -// changeIndex sorts the steps of the task run, based on the +// changeIndex sorts the containers of a pod, based on the // order of the steps in the task. Instead of changing the element with the one next to it, // we directly swap it with the desired index. -func (trt *stepStateSorter) changeIndex(index int) { +func (trt *podContainerStatusSorter) changeIndex(index int) { // Check if the current index is equal to the desired index. If they are equal, do not swap; if they // are not equal, swap index j with the desired index. - desiredIndex, exist := trt.mapForSort[trt.taskRunSteps[index].Name] + desiredIndex, exist := trt.mapForSort[trt.podContainerStatuses[index].Name] if exist && index != desiredIndex { - trt.taskRunSteps[desiredIndex], trt.taskRunSteps[index] = trt.taskRunSteps[index], trt.taskRunSteps[desiredIndex] + trt.podContainerStatuses[desiredIndex], trt.podContainerStatuses[index] = trt.podContainerStatuses[index], trt.podContainerStatuses[desiredIndex] } } -func (trt *stepStateSorter) Len() int { return len(trt.taskRunSteps) } +func (trt *podContainerStatusSorter) Len() int { return len(trt.podContainerStatuses) } -func (trt *stepStateSorter) Swap(i, j int) { +func (trt *podContainerStatusSorter) Swap(i, j int) { trt.changeIndex(j) // The index j is unable to reach the last index. // When i reaches the end of the array, we need to check whether the last one needs a swap. 
@@ -425,7 +486,7 @@ func (trt *stepStateSorter) Swap(i, j int) { } } -func (trt *stepStateSorter) Less(i, j int) bool { +func (trt *podContainerStatusSorter) Less(i, j int) bool { // Since the logic is complicated, we move it into the Swap function to decide whether // and how to change the index. We set it to true here in order to iterate all the // elements of the array in the Swap function. diff --git a/pkg/pod/status_test.go b/pkg/pod/status_test.go index 7473018c272..25ec0f8f64d 100644 --- a/pkg/pod/status_test.go +++ b/pkg/pod/status_test.go @@ -33,17 +33,24 @@ import ( var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { return true }) +var conditionRunning apis.Condition = apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: v1beta1.TaskRunReasonRunning.String(), + Message: "Not all Steps in the Task have finished executing", +} +var conditionSucceeded apis.Condition = apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + Reason: v1beta1.TaskRunReasonSuccessful.String(), + Message: "All Steps have completed executing", +} + func TestMakeTaskRunStatus(t *testing.T) { - conditionRunning := apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: v1beta1.TaskRunReasonRunning.String(), - Message: "Not all Steps in the Task have finished executing", - } for _, c := range []struct { desc string podStatus corev1.PodStatus - taskSpec v1beta1.TaskSpec + pod corev1.Pod want v1beta1.TaskRunStatus }{{ desc: "empty", @@ -605,87 +612,352 @@ func TestMakeTaskRunStatus(t *testing.T) { }, }, }, { - desc: "non-json-termination-message-with-steps-afterwards-shouldnt-panic", - taskSpec: v1beta1.TaskSpec{ - Steps: []v1beta1.Step{{Container: corev1.Container{ - Name: "non-json", - }}, {Container: corev1.Container{ - Name: "after-non-json", - }}, {Container: corev1.Container{ - Name: "this-step-might-panic", - }}, {Container: corev1.Container{ - Name: 
"foo", - }}}, + desc: "image resource updated", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-foo", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"digest","value":"sha256:12345","resourceRef":{"name":"source-image"}}]`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionSucceeded}, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"digest","value":"sha256:12345","resourceRef":{"name":"source-image"}}]`, + }}, + Name: "foo", + ContainerName: "step-foo", + }}, + Sidecars: []v1beta1.SidecarState{}, + ResourcesResult: []v1beta1.PipelineResourceResult{{ + Key: "digest", + Value: "sha256:12345", + ResourceRef: &v1beta1.PipelineResourceRef{Name: "source-image"}, + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, }, + }, { + desc: "test result with pipeline result", podStatus: corev1.PodStatus{ - Phase: corev1.PodFailed, + Phase: corev1.PodSucceeded, ContainerStatuses: []corev1.ContainerStatus{{ - Name: "step-this-step-might-panic", - ImageID: "image", + Name: "step-bar", State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{}, + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultName","value":"resultValue", "type": "TaskRunResult"}, {"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}}]`, + }, }, - }, { - Name: "step-foo", - ImageID: "image", + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionSucceeded}, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: 
corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}},{"key":"resultName","value":"resultValue","type":"TaskRunResult"}]`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + ResourcesResult: []v1beta1.PipelineResourceResult{{ + Key: "digest", + Value: "sha256:1234", + ResourceRef: &v1beta1.PipelineResourceRef{Name: "source-image"}, + }}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Value: "resultValue", + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with pipeline result - no result type", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-banana", State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{}, + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultName","value":"resultValue", "type": "TaskRunResult"}, {"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}}]`, + }, }, - }, { - Name: "step-non-json", - ImageID: "image", + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionSucceeded}, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}},{"key":"resultName","value":"resultValue","type":"TaskRunResult"}]`, + }}, + Name: "banana", + ContainerName: "step-banana", + }}, + Sidecars: []v1beta1.SidecarState{}, + ResourcesResult: []v1beta1.PipelineResourceResult{{ + Key: "digest", + Value: "sha256:1234", + ResourceRef: &v1beta1.PipelineResourceRef{Name: "source-image"}, + }}, + 
TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Value: "resultValue", + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "two test results", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-one", State: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 1, - Message: "this is a non-json termination message. dont panic!", + Message: `[{"key":"resultNameOne","value":"resultValueOne", "type": "TaskRunResult"}, {"key":"resultNameTwo","value":"resultValueTwo", "type": "TaskRunResult"}]`, }, }, }, { - Name: "step-after-non-json", - ImageID: "image", + Name: "step-two", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultNameOne","value":"resultValueThree","type":"TaskRunResult"},{"key":"resultNameTwo","value":"resultValueTwo","type":"TaskRunResult"}]`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionSucceeded}, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultNameOne","value":"resultValueOne","type":"TaskRunResult"},{"key":"resultNameTwo","value":"resultValueTwo","type":"TaskRunResult"}]`, + }}, + Name: "one", + ContainerName: "step-one", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultNameOne","value":"resultValueThree","type":"TaskRunResult"},{"key":"resultNameTwo","value":"resultValueTwo","type":"TaskRunResult"}]`, + }}, + Name: "two", + ContainerName: "step-two", + }}, + Sidecars: []v1beta1.SidecarState{}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultNameOne", + Value: 
"resultValueThree", + }, { + Name: "resultNameTwo", + Value: "resultValueTwo", + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "taskrun status set to failed if task fails", + podStatus: corev1.PodStatus{ + Phase: corev1.PodFailed, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-mango", State: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{}, }, }}, }, + want: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{Conditions: []apis.Condition{{ + Reason: "Failed", + Message: "build failed for unspecified reasons.", + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + }}}, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}}, + Name: "mango", + ContainerName: "step-mango", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "termination message not adhering to pipelineresourceresult format is filtered from taskrun termination message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-pineapple", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"invalid":"resultName","invalid":"resultValue"}]`, + }, + }, + }}, + }, want: v1beta1.TaskRunStatus{ Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionFalse, - Reason: v1beta1.TaskRunReasonFailed.String(), - Message: "\"step-non-json\" exited with code 1 (image: \"image\"); for logs run: kubectl -n foo logs pod -c step-non-json\n", + Conditions: []apis.Condition{conditionSucceeded}, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: 
[]v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}}, + Name: "pineapple", + ContainerName: "step-pineapple", }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "filter internaltektonresult", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-pear", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultNameOne","value":"","type":"PipelineResourceResult"}, {"key":"resultNameTwo","value":"","type":"InternalTektonResult"}, {"key":"resultNameThree","value":"","type":"TaskRunResult"}]`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionSucceeded}, }, TaskRunStatusFields: v1beta1.TaskRunStatusFields{ Steps: []v1beta1.StepState{{ ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 1, - Message: "this is a non-json termination message. 
dont panic!", + Message: `[{"key":"resultNameOne","value":"","type":"PipelineResourceResult"},{"key":"resultNameThree","value":"","type":"TaskRunResult"}]`, }}, - - Name: "non-json", - ContainerName: "step-non-json", - ImageID: "image", + Name: "pear", + ContainerName: "step-pear", + }}, + Sidecars: []v1beta1.SidecarState{}, + ResourcesResult: []v1beta1.PipelineResourceResult{{ + Key: "resultNameOne", + Value: "", + ResultType: "PipelineResourceResult", + }}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultNameThree", + Value: "", + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "correct TaskRun status step order regardless of pod container status order", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "step-first", + }, { + Name: "step-second", + }, { + Name: "step-third", + }, { + Name: "step-", + }, { + Name: "step-fourth", + }}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-second", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, { + Name: "step-fourth", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, { + Name: "step-", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, { + Name: "step-first", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, { + Name: "step-third", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }}, + }, + }, + want: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionSucceeded}, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: 
&corev1.ContainerStateTerminated{}, + }, + Name: "first", + ContainerName: "step-first", }, { ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{}}, - Name: "after-non-json", - ContainerName: "step-after-non-json", - ImageID: "image", + Name: "second", + ContainerName: "step-second", }, { ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{}}, - Name: "this-step-might-panic", - ContainerName: "step-this-step-might-panic", - ImageID: "image", + Name: "third", + ContainerName: "step-third", }, { ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{}}, - Name: "foo", - ContainerName: "step-foo", - ImageID: "image", + Name: "", + ContainerName: "step-", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}}, + Name: "fourth", + ContainerName: "step-fourth", }}, Sidecars: []v1beta1.SidecarState{}, // We don't actually care about the time, just that it's not nil @@ -695,14 +967,17 @@ func TestMakeTaskRunStatus(t *testing.T) { }} { t.Run(c.desc, func(t *testing.T) { now := metav1.Now() - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", - Namespace: "foo", - CreationTimestamp: now, - }, - Status: c.podStatus, + if cmp.Diff(c.pod, corev1.Pod{}) == "" { + c.pod = corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "foo", + CreationTimestamp: now, + }, + Status: c.podStatus, + } } + startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) tr := v1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ @@ -717,7 +992,10 @@ func TestMakeTaskRunStatus(t *testing.T) { } logger, _ := logging.NewLogger("", "status") - got := MakeTaskRunStatus(logger, tr, pod, c.taskSpec) + got, err := MakeTaskRunStatus(logger, tr, &c.pod) + if err != nil { + t.Errorf("MakeTaskRunResult: %s", err) + } // Common traits, set for test case brevity. 
c.want.PodName = "pod" @@ -739,6 +1017,125 @@ func TestMakeTaskRunStatus(t *testing.T) { } } +func TestMakeRunStatusJSONError(t *testing.T) { + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "foo", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "step-non-json", + }, { + Name: "step-after-non-json", + }, { + Name: "step-this-step-might-panic", + }, { + Name: "step-foo", + }}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-this-step-might-panic", + ImageID: "image", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, { + Name: "step-foo", + ImageID: "image", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, { + Name: "step-non-json", + ImageID: "image", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Message: "this is a non-json termination message. dont panic!", + }, + }, + }, { + Name: "step-after-non-json", + ImageID: "image", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }}, + }, + } + wantTr := v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: v1beta1.TaskRunReasonFailed.String(), + Message: "\"step-non-json\" exited with code 1 (image: \"image\"); for logs run: kubectl -n foo logs pod -c step-non-json\n", + }}, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + PodName: "pod", + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Message: "this is a non-json termination message. 
dont panic!", + }}, + Name: "non-json", + ContainerName: "step-non-json", + ImageID: "image", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}}, + Name: "after-non-json", + ContainerName: "step-after-non-json", + ImageID: "image", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}}, + Name: "this-step-might-panic", + ContainerName: "step-this-step-might-panic", + ImageID: "image", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}}, + Name: "foo", + ContainerName: "step-foo", + ImageID: "image", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + } + tr := v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task-run", + Namespace: "foo", + }, + } + + logger, _ := logging.NewLogger("", "status") + gotTr, err := MakeTaskRunStatus(logger, tr, pod) + if err == nil { + t.Error("Expected error, got nil") + } + + ensureTimeNotNil := cmp.Comparer(func(x, y *metav1.Time) bool { + if x == nil { + return y == nil + } + return y != nil + }) + if d := cmp.Diff(wantTr, gotTr, ignoreVolatileTime, ensureTimeNotNil); d != "" { + t.Errorf("Diff %s", diff.PrintWantGot(d)) + } + +} + func TestSidecarsReady(t *testing.T) { for _, c := range []struct { desc string @@ -829,101 +1226,6 @@ func TestSidecarsReady(t *testing.T) { } } -func TestSortTaskRunStepOrder(t *testing.T) { - steps := []v1beta1.Step{{Container: corev1.Container{ - Name: "hello", - }}, {Container: corev1.Container{ - Name: "exit", - }}, {Container: corev1.Container{ - Name: "world", - }}} - - stepStates := []v1beta1.StepState{{ - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - Reason: "Completed", - }, - }, - Name: "world", - }, { - ContainerState: corev1.ContainerState{ - Terminated: 
&corev1.ContainerStateTerminated{ - ExitCode: 1, - Reason: "Error", - }, - }, - Name: "exit", - }, { - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - Reason: "Completed", - }, - }, - Name: "hello", - }, { - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - Reason: "Completed", - }, - }, - Name: "nop", - }} - - gotStates := sortTaskRunStepOrder(stepStates, steps) - var gotNames []string - for _, g := range gotStates { - gotNames = append(gotNames, g.Name) - } - - want := []string{"hello", "exit", "world", "nop"} - if d := cmp.Diff(want, gotNames); d != "" { - t.Errorf("Unexpected step order %s", diff.PrintWantGot(d)) - } -} - -func TestSortContainerStatuses(t *testing.T) { - samplePod := corev1.Pod{ - Status: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{ - { - Name: "hello", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - FinishedAt: metav1.Time{Time: time.Now()}, - }, - }, - }, { - Name: "my", - State: corev1.ContainerState{ - // No Terminated status, terminated == 0 (and no panic) - }, - }, { - Name: "world", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - FinishedAt: metav1.Time{Time: time.Now().Add(time.Second * -5)}, - }, - }, - }, - }, - }, - } - SortContainerStatuses(&samplePod) - var gotNames []string - for _, status := range samplePod.Status.ContainerStatuses { - gotNames = append(gotNames, status.Name) - } - - want := []string{"my", "world", "hello"} - if d := cmp.Diff(want, gotNames); d != "" { - t.Errorf("Unexpected step order %s", diff.PrintWantGot(d)) - } - -} - func TestMarkStatusRunning(t *testing.T) { trs := v1beta1.TaskRunStatus{} markStatusRunning(&trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") diff --git a/pkg/reconciler/events/cloudevent/cloud_event_controller.go 
b/pkg/reconciler/events/cloudevent/cloud_event_controller.go index 21ae50e6b32..970c097bd23 100644 --- a/pkg/reconciler/events/cloudevent/cloud_event_controller.go +++ b/pkg/reconciler/events/cloudevent/cloud_event_controller.go @@ -140,6 +140,7 @@ func SendCloudEventWithRetries(ctx context.Context, object runtime.Object) error wasIn := make(chan error) go func() { wasIn <- nil + logger.Debugf("Sending cloudevent of type %q", event.Type()) if result := ceClient.Send(cloudevents.ContextWithRetriesExponentialBackoff(ctx, 10*time.Millisecond, 10), *event); !cloudevents.IsACK(result) { logger.Warnf("Failed to send cloudevent: %s", result.Error()) recorder := controller.GetEventRecorder(ctx) diff --git a/pkg/reconciler/pipelinerun/cancel.go b/pkg/reconciler/pipelinerun/cancel.go index 86bbb7f9fcb..81dcea11819 100644 --- a/pkg/reconciler/pipelinerun/cancel.go +++ b/pkg/reconciler/pipelinerun/cancel.go @@ -19,37 +19,44 @@ package pipelinerun import ( "encoding/json" "fmt" + "log" "strings" "time" - "go.uber.org/zap" - jsonpatch "gomodules.xyz/jsonpatch/v2" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + "go.uber.org/zap" + jsonpatch "gomodules.xyz/jsonpatch/v2" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "knative.dev/pkg/apis" ) -// cancelPipelineRun marks the PipelineRun as cancelled and any resolved TaskRun(s) too. 
-func cancelPipelineRun(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) error { - errs := []string{} +var cancelPatchBytes []byte - // Use Patch to update the TaskRuns since the TaskRun controller may be operating on the - // TaskRuns at the same time and trying to update the entire object may cause a race - b, err := getCancelPatch() +func init() { + var err error + cancelPatchBytes, err = json.Marshal([]jsonpatch.JsonPatchOperation{{ + Operation: "add", + Path: "/spec/status", + Value: v1beta1.TaskRunSpecStatusCancelled, + }}) if err != nil { - return fmt.Errorf("couldn't make patch to update TaskRun cancellation: %v", err) + log.Fatalf("failed to marshal cancel patch bytes: %v", err) } +} + +// cancelPipelineRun marks the PipelineRun as cancelled and any resolved TaskRun(s) too. +func cancelPipelineRun(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) error { + errs := []string{} // Loop over the TaskRuns in the PipelineRun status. // If a TaskRun is not in the status yet we should not cancel it anyways. 
for taskRunName := range pr.Status.TaskRuns { logger.Infof("cancelling TaskRun %s", taskRunName) - if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(taskRunName, types.JSONPatchType, b, ""); err != nil { + if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(taskRunName, types.JSONPatchType, cancelPatchBytes, ""); err != nil { errs = append(errs, fmt.Errorf("Failed to patch TaskRun `%s` with cancellation: %s", taskRunName, err).Error()) continue } @@ -77,16 +84,3 @@ func cancelPipelineRun(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clien } return nil } - -func getCancelPatch() ([]byte, error) { - patches := []jsonpatch.JsonPatchOperation{{ - Operation: "add", - Path: "/spec/status", - Value: v1beta1.TaskRunSpecStatusCancelled, - }} - patchBytes, err := json.Marshal(patches) - if err != nil { - return nil, fmt.Errorf("failed to marshal patch bytes in order to cancel: %v", err) - } - return patchBytes, nil -} diff --git a/pkg/reconciler/pipelinerun/cancel_test.go b/pkg/reconciler/pipelinerun/cancel_test.go index 916e005e84f..764adf72285 100644 --- a/pkg/reconciler/pipelinerun/cancel_test.go +++ b/pkg/reconciler/pipelinerun/cancel_test.go @@ -20,7 +20,6 @@ import ( "context" "testing" - tb "github.com/tektoncd/pipeline/internal/builder/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/test" @@ -43,30 +42,39 @@ func TestCancelPipelineRun(t *testing.T) { }, }, }, { - name: "1-taskrun", - pipelineRun: tb.PipelineRun("test-pipeline-run-cancelled", tb.PipelineRunNamespace("foo"), - tb.PipelineRunSpec("test-pipeline", - tb.PipelineRunCancelled, - ), - tb.PipelineRunStatus( - tb.PipelineRunTaskRunsStatus("t1", &v1beta1.PipelineRunTaskRunStatus{ - PipelineTaskName: "task-1", - })), - ), - taskRuns: []*v1beta1.TaskRun{tb.TaskRun("t1", tb.TaskRunNamespace("foo"))}, + name: "one-taskrun", + pipelineRun: &v1beta1.PipelineRun{ + 
ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-cancelled"}, + Spec: v1beta1.PipelineRunSpec{ + Status: v1beta1.PipelineRunSpecStatusCancelled, + }, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "t1": {PipelineTaskName: "task-1"}, + }, + }}, + }, + taskRuns: []*v1beta1.TaskRun{ + {ObjectMeta: metav1.ObjectMeta{Name: "t1"}}, + }, }, { name: "multiple-taskruns", - pipelineRun: tb.PipelineRun("test-pipeline-run-cancelled", tb.PipelineRunNamespace("foo"), - tb.PipelineRunSpec("test-pipeline", - tb.PipelineRunCancelled, - ), - tb.PipelineRunStatus( - tb.PipelineRunTaskRunsStatus( - "t1", &v1beta1.PipelineRunTaskRunStatus{PipelineTaskName: "task-1"}), - tb.PipelineRunTaskRunsStatus( - "t2", &v1beta1.PipelineRunTaskRunStatus{PipelineTaskName: "task-2"})), - ), - taskRuns: []*v1beta1.TaskRun{tb.TaskRun("t1", tb.TaskRunNamespace("foo")), tb.TaskRun("t2", tb.TaskRunNamespace("foo"))}, + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-cancelled"}, + Spec: v1beta1.PipelineRunSpec{ + Status: v1beta1.PipelineRunSpecStatusCancelled, + }, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "t1": {PipelineTaskName: "task-1"}, + "t2": {PipelineTaskName: "task-2"}, + }, + }}, + }, + taskRuns: []*v1beta1.TaskRun{ + {ObjectMeta: metav1.ObjectMeta{Name: "t1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "t2"}}, + }, }} for _, tc := range testCases { tc := tc @@ -79,8 +87,7 @@ func TestCancelPipelineRun(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() c, _ := test.SeedTestData(t, ctx, d) - err := cancelPipelineRun(logtesting.TestLogger(t), tc.pipelineRun, c.Pipeline) - if err != nil { + if err := cancelPipelineRun(logtesting.TestLogger(t), tc.pipelineRun, c.Pipeline); err != nil { t.Fatal(err) } // This PipelineRun 
should still be complete and false, and the status should reflect that @@ -88,7 +95,7 @@ func TestCancelPipelineRun(t *testing.T) { if cond.IsTrue() { t.Errorf("Expected PipelineRun status to be complete and false, but was %v", cond) } - l, err := c.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{}) + l, err := c.Pipeline.TektonV1beta1().TaskRuns("").List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go index c1f06d09447..5879cd7bac2 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -387,6 +387,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err // Apply parameter substitution from the PipelineRun pipelineSpec = resources.ApplyParameters(pipelineSpec, pr) pipelineSpec = resources.ApplyContexts(pipelineSpec, pipelineMeta.Name, pr) + pipelineSpec = resources.ApplyWorkspaces(pipelineSpec, pr) // pipelineRunState holds a list of pipeline tasks after resolving conditions and pipeline resources // pipelineRunState also holds a taskRun for each pipeline task after the taskRun is created @@ -427,7 +428,15 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err return controller.NewPermanentError(err) } - for _, rprt := range pipelineRunState { + // Build PipelineRunFacts with a list of resolved pipeline tasks, + // dag tasks graph and final tasks graph + pipelineRunFacts := &resources.PipelineRunFacts{ + State: pipelineRunState, + TasksGraph: d, + FinalTasksGraph: dfinally, + } + + for _, rprt := range pipelineRunFacts.State { err := taskrun.ValidateResolvedTaskResources(rprt.PipelineTask.Params, rprt.ResolvedTaskResources) if err != nil { logger.Errorf("Failed to validate pipelinerun %q with error %v", pr.Name, err) @@ -436,7 +445,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err } } - if 
pipelineRunState.IsBeforeFirstTaskRun() { + if pipelineRunFacts.State.IsBeforeFirstTaskRun() { if pr.HasVolumeClaimTemplate() { // create workspace PVC from template if err = c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(pr.Spec.Workspaces, pr.GetOwnerReference(), pr.Namespace); err != nil { @@ -466,11 +475,11 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err return controller.NewPermanentError(err) } - if err := c.runNextSchedulableTask(ctx, pr, d, dfinally, pipelineRunState, as); err != nil { + if err := c.runNextSchedulableTask(ctx, pr, pipelineRunFacts, as); err != nil { return err } - after := pipelineRunState.GetPipelineConditionStatus(pr, logger, d, dfinally) + after := pipelineRunFacts.GetPipelineConditionStatus(pr, logger) switch after.Status { case corev1.ConditionTrue: pr.Status.MarkSucceeded(after.Reason, after.Message) @@ -481,8 +490,8 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err } // Read the condition the way it was set by the Mark* helpers after = pr.Status.GetCondition(apis.ConditionSucceeded) - pr.Status.TaskRuns = pipelineRunState.GetTaskRunsStatus(pr) - pr.Status.SkippedTasks = pipelineRunState.GetSkippedTasks(pr, d) + pr.Status.TaskRuns = pipelineRunFacts.State.GetTaskRunsStatus(pr) + pr.Status.SkippedTasks = pipelineRunFacts.GetSkippedTasks() logger.Infof("PipelineRun %s status is being set to %s", pr.Name, after) return nil } @@ -490,28 +499,19 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err // runNextSchedulableTask gets the next schedulable Tasks from the dag based on the current // pipeline run state, and starts them // after all DAG tasks are done, it's responsible for scheduling final tasks and start executing them -func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1beta1.PipelineRun, d *dag.Graph, dfinally *dag.Graph, pipelineRunState resources.PipelineRunState, as artifacts.ArtifactStorageInterface) 
error { +func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1beta1.PipelineRun, pipelineRunFacts *resources.PipelineRunFacts, as artifacts.ArtifactStorageInterface) error { logger := logging.FromContext(ctx) recorder := controller.GetEventRecorder(ctx) - var nextRprts []*resources.ResolvedPipelineRunTask - - // when pipeline run is stopping, do not schedule any new task and only - // wait for all running tasks to complete and report their status - if !pipelineRunState.IsStopping(d) { - // candidateTasks is initialized to DAG root nodes to start pipeline execution - // candidateTasks is derived based on successfully finished tasks and/or skipped tasks - candidateTasks, err := dag.GetSchedulable(d, pipelineRunState.SuccessfulOrSkippedDAGTasks(d)...) - if err != nil { - logger.Errorf("Error getting potential next tasks for valid pipelinerun %s: %v", pr.Name, err) - return controller.NewPermanentError(err) - } - // nextRprts holds a list of pipeline tasks which should be executed next - nextRprts = pipelineRunState.GetNextTasks(candidateTasks) + // nextRprts holds a list of pipeline tasks which should be executed next + nextRprts, err := pipelineRunFacts.DAGExecutionQueue() + if err != nil { + logger.Errorf("Error getting potential next tasks for valid pipelinerun %s: %v", pr.Name, err) + return controller.NewPermanentError(err) } - resolvedResultRefs, err := resources.ResolveResultRefs(pipelineRunState, nextRprts) + resolvedResultRefs, err := resources.ResolveResultRefs(pipelineRunFacts.State, nextRprts) if err != nil { logger.Infof("Failed to resolve all task params for %q with error %v", pr.Name, err) pr.Status.MarkFailed(ReasonFailedValidation, err.Error()) @@ -521,10 +521,10 @@ func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1beta1.Pip resources.ApplyTaskResults(nextRprts, resolvedResultRefs) // GetFinalTasks only returns tasks when a DAG is complete - nextRprts = append(nextRprts, pipelineRunState.GetFinalTasks(d, 
dfinally)...) + nextRprts = append(nextRprts, pipelineRunFacts.GetFinalTasks()...) for _, rprt := range nextRprts { - if rprt == nil || rprt.Skip(pipelineRunState, d) { + if rprt == nil || rprt.Skip(pipelineRunFacts) { continue } if rprt.ResolvedConditionChecks == nil || rprt.ResolvedConditionChecks.IsSuccess() { diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index ea4b205148d..ff49b4761be 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -69,7 +69,7 @@ var ( CredsImage: "override-with-creds:latest", KubeconfigWriterImage: "override-with-kubeconfig-writer:latest", ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", BuildGCSFetcherImage: "gcr.io/cloud-builders/gcs-fetcher:latest", PRImage: "override-with-pr:latest", ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest", @@ -152,6 +152,7 @@ func getPipelineRunController(t *testing.T, d test.Data) (test.Assets, func()) { Clients: c, Informers: informers, Recorder: controller.GetEventRecorder(ctx).(*record.FakeRecorder), + Ctx: ctx, }, cancel } @@ -462,7 +463,8 @@ func TestReconcile_PipelineSpecTaskSpec(t *testing.T) { tb.TaskRunLabel("tekton.dev/pipeline", "test-pipeline"), tb.TaskRunLabel("tekton.dev/pipelineRun", "test-pipeline-run-success"), tb.TaskRunLabel(pipeline.GroupName+pipeline.PipelineTaskLabelKey, "unit-test-task-spec"), - tb.TaskRunSpec(tb.TaskRunTaskSpec(tb.Step("myimage", tb.StepName("mystep")))), + tb.TaskRunSpec(tb.TaskRunTaskSpec(tb.Step("myimage", tb.StepName("mystep"))), + tb.TaskRunServiceAccountName(config.DefaultServiceAccountValue)), ) // ignore IgnoreUnexported ignore both after and before steps fields @@ -746,7 +748,7 @@ func TestReconcile_InvalidPipelineRunNames(t *testing.T) { defer cancel() c := testAssets.Controller - err := c.Reconciler.Reconcile(context.Background(), 
tc.pipelineRun) + err := c.Reconciler.Reconcile(testAssets.Ctx, tc.pipelineRun) // No reason to keep reconciling something that doesnt or can't exist if err != nil { t.Errorf("Did not expect to see error when reconciling invalid PipelineRun but saw %q", err) @@ -1243,7 +1245,7 @@ func TestReconcileCancelledFailsTaskRunCancellation(t *testing.T) { return true, nil, fmt.Errorf("i'm sorry Dave, i'm afraid i can't do that") }) - err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-fails-to-cancel") + err := c.Reconciler.Reconcile(testAssets.Ctx, "foo/test-pipeline-fails-to-cancel") if err == nil { t.Errorf("Expected to see error returned from reconcile after failing to cancel TaskRun but saw none!") } @@ -3941,7 +3943,7 @@ func (prt PipelineRunTest) reconcileRun(namespace, pipelineRunName string, wantE c := prt.TestAssets.Controller clients := prt.TestAssets.Clients - reconcileError := c.Reconciler.Reconcile(context.Background(), namespace+"/"+pipelineRunName) + reconcileError := c.Reconciler.Reconcile(prt.TestAssets.Ctx, namespace+"/"+pipelineRunName) if permanentError { // When a PipelineRun is invalid and can't run, we expect a permanent error that will // tell the Reconciler to not keep trying to reconcile. 
@@ -3981,5 +3983,7 @@ func getTaskRunWithTaskSpec(tr, pr, p, t string, labels, annotations map[string] tb.TaskRunLabel(pipeline.GroupName+pipeline.PipelineTaskLabelKey, t), tb.TaskRunLabels(labels), tb.TaskRunAnnotations(annotations), - tb.TaskRunSpec(tb.TaskRunTaskSpec(tb.Step("myimage", tb.StepName("mystep"))))) + tb.TaskRunSpec(tb.TaskRunTaskSpec(tb.Step("myimage", tb.StepName("mystep"))), + tb.TaskRunServiceAccountName(config.DefaultServiceAccountValue), + )) } diff --git a/pkg/reconciler/pipelinerun/resources/apply.go b/pkg/reconciler/pipelinerun/resources/apply.go index d216b09e364..3e026f0a986 100644 --- a/pkg/reconciler/pipelinerun/resources/apply.go +++ b/pkg/reconciler/pipelinerun/resources/apply.go @@ -84,6 +84,20 @@ func ApplyTaskResults(targets PipelineRunState, resolvedResultRefs ResolvedResul } } +func ApplyWorkspaces(p *v1beta1.PipelineSpec, pr *v1beta1.PipelineRun) *v1beta1.PipelineSpec { + p = p.DeepCopy() + replacements := map[string]string{} + for _, declaredWorkspace := range p.Workspaces { + key := fmt.Sprintf("workspaces.%s.bound", declaredWorkspace.Name) + replacements[key] = "false" + } + for _, boundWorkspace := range pr.Spec.Workspaces { + key := fmt.Sprintf("workspaces.%s.bound", boundWorkspace.Name) + replacements[key] = "true" + } + return ApplyReplacements(p, replacements, map[string][]string{}) +} + // ApplyReplacements replaces placeholders for declared parameters with the specified replacements. 
func ApplyReplacements(p *v1beta1.PipelineSpec, replacements map[string]string, arrayReplacements map[string][]string) *v1beta1.PipelineSpec { p = p.DeepCopy() diff --git a/pkg/reconciler/pipelinerun/resources/apply_test.go b/pkg/reconciler/pipelinerun/resources/apply_test.go index 7f0ac734209..283f0ab6bb0 100644 --- a/pkg/reconciler/pipelinerun/resources/apply_test.go +++ b/pkg/reconciler/pipelinerun/resources/apply_test.go @@ -21,7 +21,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - tb "github.com/tektoncd/pipeline/internal/builder/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" @@ -31,183 +30,232 @@ import ( ) func TestApplyParameters(t *testing.T) { - tests := []struct { + for _, tt := range []struct { name string - original *v1beta1.Pipeline - run *v1beta1.PipelineRun - expected *v1beta1.Pipeline + original v1beta1.PipelineSpec + params []v1beta1.Param + expected v1beta1.PipelineSpec }{{ name: "single parameter", - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(params.first-param)"), - tb.PipelineTaskParam("first-task-second-param", "$(params.second-param)"), - tb.PipelineTaskParam("first-task-third-param", "static value"), - ))), - run: tb.PipelineRun("test-pipeline-run", - tb.PipelineRunSpec("test-pipeline", - tb.PipelineRunParam("second-param", "second-value"))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - 
tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "default-value"), - tb.PipelineTaskParam("first-task-second-param", "second-value"), - tb.PipelineTaskParam("first-task-third-param", "static value"), - ))), + original: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "first-task-first-param", Value: *v1beta1.NewArrayOrString("$(params.first-param)")}, + {Name: "first-task-second-param", Value: *v1beta1.NewArrayOrString("$(params.second-param)")}, + {Name: "first-task-third-param", Value: *v1beta1.NewArrayOrString("static value")}, + }, + }}, + }, + params: []v1beta1.Param{{Name: "second-param", Value: *v1beta1.NewArrayOrString("second-value")}}, + expected: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "first-task-first-param", Value: *v1beta1.NewArrayOrString("default-value")}, + {Name: "first-task-second-param", Value: *v1beta1.NewArrayOrString("second-value")}, + {Name: "first-task-third-param", Value: *v1beta1.NewArrayOrString("static value")}, + }, + }}, + }, }, { name: "single parameter with when expression", - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskWhenExpression("$(params.first-param)", selection.In, []string{"$(params.second-param)"})), - tb.FinalPipelineTask("final-task-1", 
"first-task"))), - run: tb.PipelineRun("test-pipeline-run", - tb.PipelineRunSpec("test-pipeline", - tb.PipelineRunParam("second-param", "second-value"))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskWhenExpression("default-value", selection.In, []string{"second-value"})), - tb.FinalPipelineTask("final-task-1", "first-task"))), + original: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Tasks: []v1beta1.PipelineTask{{ + WhenExpressions: []v1beta1.WhenExpression{{ + Input: "$(params.first-param)", + Operator: selection.In, + Values: []string{"$(params.second-param)"}, + }}, + }}, + }, + params: []v1beta1.Param{{Name: "second-param", Value: *v1beta1.NewArrayOrString("second-value")}}, + expected: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Tasks: []v1beta1.PipelineTask{{ + WhenExpressions: []v1beta1.WhenExpression{{ + Input: "default-value", + Operator: selection.In, + Values: []string{"second-value"}, + }}, + }}, + }, }, { name: "pipeline parameter nested inside task parameter", - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", 
"$(input.workspace.$(params.first-param))"), - tb.PipelineTaskParam("first-task-second-param", "$(input.workspace.$(params.second-param))"), - ))), - run: tb.PipelineRun("test-pipeline-run", - tb.PipelineRunSpec("test-pipeline")), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(input.workspace.default-value)"), - tb.PipelineTaskParam("first-task-second-param", "$(input.workspace.default-value)"), - ))), + original: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + }, + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "first-task-first-param", Value: *v1beta1.NewArrayOrString("$(input.workspace.$(params.first-param))")}, + {Name: "first-task-second-param", Value: *v1beta1.NewArrayOrString("$(input.workspace.$(params.second-param))")}, + }, + }}, + }, + params: nil, // no parameter values. 
+ expected: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + }, + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "first-task-first-param", Value: *v1beta1.NewArrayOrString("$(input.workspace.default-value)")}, + {Name: "first-task-second-param", Value: *v1beta1.NewArrayOrString("$(input.workspace.default-value)")}, + }, + }}, + }, }, { name: "parameters in task condition", - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskCondition("task-condition", - tb.PipelineTaskConditionParam("cond-first-param", "$(params.first-param)"), - tb.PipelineTaskConditionParam("cond-second-param", "$(params.second-param)"), - ), - ))), - run: tb.PipelineRun("test-pipeline-run", - tb.PipelineRunSpec("test-pipeline", - tb.PipelineRunParam("second-param", "second-value"))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskCondition("task-condition", - tb.PipelineTaskConditionParam("cond-first-param", "default-value"), - tb.PipelineTaskConditionParam("cond-second-param", "second-value"), - ), - ))), + original: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Tasks: []v1beta1.PipelineTask{{ + 
Conditions: []v1beta1.PipelineTaskCondition{{ + Params: []v1beta1.Param{ + {Name: "cond-first-param", Value: *v1beta1.NewArrayOrString("$(params.first-param)")}, + {Name: "cond-second-param", Value: *v1beta1.NewArrayOrString("$(params.second-param)")}, + }, + }}, + }}, + }, + params: []v1beta1.Param{{Name: "second-param", Value: *v1beta1.NewArrayOrString("second-value")}}, + expected: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Tasks: []v1beta1.PipelineTask{{ + Conditions: []v1beta1.PipelineTaskCondition{{ + Params: []v1beta1.Param{ + {Name: "cond-first-param", Value: *v1beta1.NewArrayOrString("default-value")}, + {Name: "cond-second-param", Value: *v1beta1.NewArrayOrString("second-value")}, + }, + }}, + }}, + }, }, { name: "array parameter", - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeArray, tb.ParamSpecDefault( - "default", "array", "value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeArray), - tb.PipelineParamSpec("fourth-param", v1beta1.ParamTypeArray), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "firstelement", "$(params.first-param)"), - tb.PipelineTaskParam("first-task-second-param", "first", "$(params.second-param)"), - tb.PipelineTaskParam("first-task-third-param", "static value"), - tb.PipelineTaskParam("first-task-fourth-param", "first", "$(params.fourth-param)"), - ))), - run: tb.PipelineRun("test-pipeline-run", - tb.PipelineRunSpec("test-pipeline", - tb.PipelineRunParam("second-param", "second-value", "array"), - tb.PipelineRunParam("fourth-param", "fourth-value", "array"))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeArray, tb.ParamSpecDefault( - "default", "array", 
"value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeArray), - tb.PipelineParamSpec("fourth-param", v1beta1.ParamTypeArray), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "firstelement", "default", "array", "value"), - tb.PipelineTaskParam("first-task-second-param", "first", "second-value", "array"), - tb.PipelineTaskParam("first-task-third-param", "static value"), - tb.PipelineTaskParam("first-task-fourth-param", "first", "fourth-value", "array"), - ))), + original: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeArray, Default: v1beta1.NewArrayOrString("default", "array", "value")}, + {Name: "second-param", Type: v1beta1.ParamTypeArray}, + }, + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "first-task-first-param", Value: *v1beta1.NewArrayOrString("firstelement", "$(params.first-param)")}, + {Name: "first-task-second-param", Value: *v1beta1.NewArrayOrString("firstelement", "$(params.second-param)")}, + }, + }}, + }, + params: []v1beta1.Param{ + {Name: "second-param", Value: *v1beta1.NewArrayOrString("second-value", "array")}, + }, + expected: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeArray, Default: v1beta1.NewArrayOrString("default", "array", "value")}, + {Name: "second-param", Type: v1beta1.ParamTypeArray}, + }, + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "first-task-first-param", Value: *v1beta1.NewArrayOrString("firstelement", "default", "array", "value")}, + {Name: "first-task-second-param", Value: *v1beta1.NewArrayOrString("firstelement", "second-value", "array")}, + }, + }}, + }, }, { name: "parameter evaluation with final tasks", - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", 
v1beta1.ParamTypeString), - tb.FinalPipelineTask("final-task-1", "final-task", - tb.PipelineTaskParam("final-task-first-param", "$(params.first-param)"), - tb.PipelineTaskParam("final-task-second-param", "$(params.second-param)"), - ))), - run: tb.PipelineRun("test-pipeline-run", - tb.PipelineRunSpec("test-pipeline", - tb.PipelineRunParam("second-param", "second-value"))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - tb.FinalPipelineTask("final-task-1", "final-task", - tb.PipelineTaskParam("final-task-first-param", "default-value"), - tb.PipelineTaskParam("final-task-second-param", "second-value"), - ))), + original: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Finally: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "final-task-first-param", Value: *v1beta1.NewArrayOrString("$(params.first-param)")}, + {Name: "final-task-second-param", Value: *v1beta1.NewArrayOrString("$(params.second-param)")}, + }, + }}, + }, + params: []v1beta1.Param{{Name: "second-param", Value: *v1beta1.NewArrayOrString("second-value")}}, + expected: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Finally: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "final-task-first-param", Value: *v1beta1.NewArrayOrString("default-value")}, + {Name: "final-task-second-param", Value: *v1beta1.NewArrayOrString("second-value")}, + }, + }}, + }, }, { name: "parameter evaluation with both tasks and final tasks", - original: tb.Pipeline("test-pipeline", 
- tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(params.first-param)"), - ), - tb.PipelineTask("first-task-2", "first-task", - tb.PipelineTaskWhenExpression("$(params.first-param)", selection.In, []string{"$(params.second-param)"})), - tb.FinalPipelineTask("final-task-1", "final-task", - tb.PipelineTaskParam("final-task-second-param", "$(params.second-param)"), - ))), - run: tb.PipelineRun("test-pipeline-run", - tb.PipelineRunSpec("test-pipeline", - tb.PipelineRunParam("second-param", "second-value"))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineParamSpec("first-param", v1beta1.ParamTypeString, tb.ParamSpecDefault("default-value")), - tb.PipelineParamSpec("second-param", v1beta1.ParamTypeString), - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "default-value"), - ), - tb.PipelineTask("first-task-2", "first-task", - tb.PipelineTaskWhenExpression("default-value", selection.In, []string{"second-value"})), - tb.FinalPipelineTask("final-task-1", "final-task", - tb.PipelineTaskParam("final-task-second-param", "second-value"), - ))), - }} - for _, tt := range tests { + original: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "final-task-first-param", Value: *v1beta1.NewArrayOrString("$(params.first-param)")}, + {Name: "final-task-second-param", Value: *v1beta1.NewArrayOrString("$(params.second-param)")}, + }, + }}, + Finally: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "final-task-first-param", Value: 
*v1beta1.NewArrayOrString("$(params.first-param)")}, + {Name: "final-task-second-param", Value: *v1beta1.NewArrayOrString("$(params.second-param)")}, + }, + }}, + }, + params: []v1beta1.Param{{Name: "second-param", Value: *v1beta1.NewArrayOrString("second-value")}}, + expected: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{ + {Name: "first-param", Type: v1beta1.ParamTypeString, Default: v1beta1.NewArrayOrString("default-value")}, + {Name: "second-param", Type: v1beta1.ParamTypeString}, + }, + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "final-task-first-param", Value: *v1beta1.NewArrayOrString("default-value")}, + {Name: "final-task-second-param", Value: *v1beta1.NewArrayOrString("second-value")}, + }, + }}, + Finally: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{ + {Name: "final-task-first-param", Value: *v1beta1.NewArrayOrString("default-value")}, + {Name: "final-task-second-param", Value: *v1beta1.NewArrayOrString("second-value")}, + }, + }}, + }, + }} { t.Run(tt.name, func(t *testing.T) { - got := ApplyParameters(&tt.original.Spec, tt.run) - if d := cmp.Diff(&tt.expected.Spec, got); d != "" { + t.Parallel() + run := &v1beta1.PipelineRun{ + Spec: v1beta1.PipelineRunSpec{ + Params: tt.params, + }, + } + got := ApplyParameters(&tt.original, run) + if d := cmp.Diff(&tt.expected, got); d != "" { t.Errorf("ApplyParameters() got diff %s", diff.PrintWantGot(d)) } }) @@ -442,109 +490,122 @@ func TestContext(t *testing.T) { for _, tc := range []struct { description string pr *v1beta1.PipelineRun - original *v1beta1.Pipeline - expected *v1beta1.Pipeline + original v1beta1.Param + expected v1beta1.Param }{{ - description: "context pipeline name replacement without pipelineRun in spec", - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(context.pipeline.name)-1"), - ))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - 
tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "test-pipeline-1"), - ))), - pr: &v1beta1.PipelineRun{}, + description: "context.pipeline.name defined", + pr: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "name"}, + }, + original: v1beta1.Param{Value: *v1beta1.NewArrayOrString("$(context.pipeline.name)-1")}, + expected: v1beta1.Param{Value: *v1beta1.NewArrayOrString("test-pipeline-1")}, }, { - description: "context pipeline name replacement with pipelineRun in spec", - pr: tb.PipelineRun("pipelineRunName"), - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(context.pipeline.name)-1"), - ))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "test-pipeline-1"), - ))), + description: "context.pipelineRun.name defined", + pr: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "name"}, + }, + original: v1beta1.Param{Value: *v1beta1.NewArrayOrString("$(context.pipelineRun.name)-1")}, + expected: v1beta1.Param{Value: *v1beta1.NewArrayOrString("name-1")}, }, { - description: "context pipelineRunName replacement with defined pipelineRun in spec", - pr: tb.PipelineRun("pipelineRunName"), - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(context.pipelineRun.name)-1"), - ))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "pipelineRunName-1"), - ))), + description: "context.pipelineRun.name undefined", + pr: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: ""}, + }, + original: v1beta1.Param{Value: *v1beta1.NewArrayOrString("$(context.pipelineRun.name)-1")}, + 
expected: v1beta1.Param{Value: *v1beta1.NewArrayOrString("-1")}, }, { - description: "context pipelineRunNameNamespace replacement with defined pipelineRunNamepsace in spec", - pr: tb.PipelineRun("pipelineRunName", tb.PipelineRunNamespace("prns")), - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(context.pipelineRun.namespace)-1"), - ))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "prns-1"), - ))), + description: "context.pipelineRun.namespace defined", + pr: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: "namespace"}, + }, + original: v1beta1.Param{Value: *v1beta1.NewArrayOrString("$(context.pipelineRun.namespace)-1")}, + expected: v1beta1.Param{Value: *v1beta1.NewArrayOrString("namespace-1")}, }, { - description: "context pipelineRunName replacement with no defined pipeline in spec", - pr: &v1beta1.PipelineRun{}, - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(context.pipelineRun.name)-1"), - ))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "-1"), - ))), + description: "context.pipelineRun.namespace undefined", + pr: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: ""}, + }, + original: v1beta1.Param{Value: *v1beta1.NewArrayOrString("$(context.pipelineRun.namespace)-1")}, + expected: v1beta1.Param{Value: *v1beta1.NewArrayOrString("-1")}, }, { - description: "context pipelineRunNamespace replacement with no defined pipelineRunNamespace in spec", - pr: tb.PipelineRun("pipelineRunName"), - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", 
"first-task", - tb.PipelineTaskParam("first-task-first-param", "$(context.pipelineRun.namespace)-1"), - ))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "-1"), - ))), + description: "context.pipelineRun.uid defined", + pr: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{UID: "UID"}, + }, + original: v1beta1.Param{Value: *v1beta1.NewArrayOrString("$(context.pipelineRun.uid)-1")}, + expected: v1beta1.Param{Value: *v1beta1.NewArrayOrString("UID-1")}, }, { - description: "context pipeline name replacement with pipelinerun uid", + description: "context.pipelineRun.uid undefined", pr: &v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{ - UID: "UID-1", - }, + ObjectMeta: metav1.ObjectMeta{UID: ""}, }, - original: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "$(context.pipelineRun.uid)"), - ))), - expected: tb.Pipeline("test-pipeline", - tb.PipelineSpec( - tb.PipelineTask("first-task-1", "first-task", - tb.PipelineTaskParam("first-task-first-param", "UID-1"), - ))), + original: v1beta1.Param{Value: *v1beta1.NewArrayOrString("$(context.pipelineRun.uid)-1")}, + expected: v1beta1.Param{Value: *v1beta1.NewArrayOrString("-1")}, }} { t.Run(tc.description, func(t *testing.T) { - got := ApplyContexts(&tc.original.Spec, tc.original.Name, tc.pr) - if d := cmp.Diff(tc.expected.Spec, *got); d != "" { + orig := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline"}, + Spec: v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{tc.original}, + }}, + }, + } + got := ApplyContexts(&orig.Spec, orig.Name, tc.pr) + if d := cmp.Diff(tc.expected, got.Tasks[0].Params[0]); d != "" { t.Errorf(diff.PrintWantGot(d)) } }) } } + +func TestApplyWorkspaces(t *testing.T) { + for _, tc := range []struct { + description string + declarations 
[]v1beta1.PipelineWorkspaceDeclaration + bindings []v1beta1.WorkspaceBinding + variableUsage string + expectedReplacement string + }{{ + description: "workspace declared and bound", + declarations: []v1beta1.PipelineWorkspaceDeclaration{{ + Name: "foo", + }}, + bindings: []v1beta1.WorkspaceBinding{{ + Name: "foo", + }}, + variableUsage: "$(workspaces.foo.bound)", + expectedReplacement: "true", + }, { + description: "workspace declared not bound", + declarations: []v1beta1.PipelineWorkspaceDeclaration{{ + Name: "foo", + Optional: true, + }}, + bindings: []v1beta1.WorkspaceBinding{}, + variableUsage: "$(workspaces.foo.bound)", + expectedReplacement: "false", + }} { + t.Run(tc.description, func(t *testing.T) { + p1 := v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Params: []v1beta1.Param{{Value: *v1beta1.NewArrayOrString(tc.variableUsage)}}, + }}, + Workspaces: tc.declarations, + } + pr := &v1beta1.PipelineRun{ + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{ + Name: "test-pipeline", + }, + Workspaces: tc.bindings, + }, + } + p2 := ApplyWorkspaces(&p1, pr) + str := p2.Tasks[0].Params[0].Value.StringVal + if str != tc.expectedReplacement { + t.Errorf("expected %q, received %q", tc.expectedReplacement, str) + } + }) + } +} diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go index 51c8a58f3b0..514dc3ea578 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go @@ -30,7 +30,6 @@ import ( "github.com/tektoncd/pipeline/pkg/contexts" "github.com/tektoncd/pipeline/pkg/list" "github.com/tektoncd/pipeline/pkg/names" - "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" ) @@ -134,17 +133,15 @@ func (t ResolvedPipelineRunTask) IsStarted() bool { return true } -func (t *ResolvedPipelineRunTask) 
checkParentsDone(state PipelineRunState, d *dag.Graph) bool { - stateMap := state.ToMap() +func (t *ResolvedPipelineRunTask) checkParentsDone(facts *PipelineRunFacts) bool { + stateMap := facts.State.ToMap() // check if parent tasks are done executing, // if any of the parents is not yet scheduled or still running, // wait for it to complete before evaluating when expressions - node := d.Nodes[t.PipelineTask.Name] - if isTaskInGraph(t.PipelineTask.Name, d) { - for _, p := range node.Prev { - if !stateMap[p.Task.HashKey()].IsDone() { - return false - } + node := facts.TasksGraph.Nodes[t.PipelineTask.Name] + for _, p := range node.Prev { + if !stateMap[p.Task.HashKey()].IsDone() { + return false } } return true @@ -156,7 +153,12 @@ func (t *ResolvedPipelineRunTask) checkParentsDone(state PipelineRunState, d *da // (3) its parent task was skipped // (4) Pipeline is in stopping state (one of the PipelineTasks failed) // Note that this means Skip returns false if a conditionCheck is in progress -func (t *ResolvedPipelineRunTask) Skip(state PipelineRunState, d *dag.Graph) bool { +func (t *ResolvedPipelineRunTask) Skip(facts *PipelineRunFacts) bool { + // finally tasks are never skipped. 
If this is a final task, return false + if facts.isFinalTask(t.PipelineTask.Name) { + return false + } + // it already has TaskRun associated with it - PipelineTask not skipped if t.IsStarted() { return false @@ -170,7 +172,7 @@ func (t *ResolvedPipelineRunTask) Skip(state PipelineRunState, d *dag.Graph) boo } // Check if the when expressions are false, based on the input's relationship to the values - if t.checkParentsDone(state, d) { + if t.checkParentsDone(facts) { if len(t.PipelineTask.WhenExpressions) > 0 { if !t.PipelineTask.WhenExpressions.HaveVariables() { if !t.PipelineTask.WhenExpressions.AllowsExecution() { @@ -181,19 +183,17 @@ func (t *ResolvedPipelineRunTask) Skip(state PipelineRunState, d *dag.Graph) boo } // Skip the PipelineTask if pipeline is in stopping state - if isTaskInGraph(t.PipelineTask.Name, d) && state.IsStopping(d) { + if facts.IsStopping() { return true } - stateMap := state.ToMap() + stateMap := facts.State.ToMap() // Recursively look at parent tasks to see if they have been skipped, // if any of the parents have been skipped, skip as well - node := d.Nodes[t.PipelineTask.Name] - if isTaskInGraph(t.PipelineTask.Name, d) { - for _, p := range node.Prev { - if stateMap[p.Task.HashKey()].Skip(state, d) { - return true - } + node := facts.TasksGraph.Nodes[t.PipelineTask.Name] + for _, p := range node.Prev { + if stateMap[p.Task.HashKey()].Skip(facts) { + return true } } return false @@ -255,8 +255,11 @@ func ValidateWorkspaceBindings(p *v1beta1.PipelineSpec, pr *v1beta1.PipelineRun) } for _, ws := range p.Workspaces { + if ws.Optional { + continue + } if _, ok := pipelineRunWorkspaces[ws.Name]; !ok { - return fmt.Errorf("pipeline expects workspace with name %q be provided by pipelinerun", ws.Name) + return fmt.Errorf("pipeline requires workspace with name %q be provided by pipelinerun", ws.Name) } } return nil diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go 
b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go index b7542f432e0..1ca72cbf7e8 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go @@ -835,7 +835,7 @@ func TestIsSkipped(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - dag, err := dagFromState(tc.state) + d, err := dagFromState(tc.state) if err != nil { t.Fatalf("Could not get a dag from the TC state %#v: %v", tc.state, err) } @@ -844,7 +844,12 @@ func TestIsSkipped(t *testing.T) { if rprt == nil { t.Fatalf("Could not get task %s from the state: %v", tc.taskName, tc.state) } - isSkipped := rprt.Skip(tc.state, dag) + facts := PipelineRunFacts{ + State: tc.state, + TasksGraph: d, + FinalTasksGraph: &dag.Graph{}, + } + isSkipped := rprt.Skip(&facts) if d := cmp.Diff(isSkipped, tc.expected); d != "" { t.Errorf("Didn't get expected isSkipped %s", diff.PrintWantGot(d)) } @@ -1711,15 +1716,73 @@ func TestGetResourcesFromBindings_Extra(t *testing.T) { } } -func TestValidateWorkspaceBindings(t *testing.T) { - p := tb.Pipeline("pipelines", tb.PipelineSpec( - tb.PipelineWorkspaceDeclaration("foo"), - )) - pr := tb.PipelineRun("pipelinerun", tb.PipelineRunSpec("pipeline", - tb.PipelineRunWorkspaceBindingEmptyDir("bar"), - )) - if err := ValidateWorkspaceBindings(&p.Spec, pr); err == nil { - t.Fatalf("Expected error indicating `foo` workspace was not provided but got no error") +func TestValidateWorkspaceBindingsWithValidWorkspaces(t *testing.T) { + for _, tc := range []struct { + name string + spec *v1beta1.PipelineSpec + run *v1beta1.PipelineRun + err string + }{{ + name: "include required workspace", + spec: &v1beta1.PipelineSpec{ + Workspaces: []v1beta1.PipelineWorkspaceDeclaration{{ + Name: "foo", + }}, + }, + run: &v1beta1.PipelineRun{ + Spec: v1beta1.PipelineRunSpec{ + Workspaces: []v1beta1.WorkspaceBinding{{ + Name: "foo", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + 
}}, + }, + }, + }, { + name: "omit optional workspace", + spec: &v1beta1.PipelineSpec{ + Workspaces: []v1beta1.PipelineWorkspaceDeclaration{{ + Name: "foo", + Optional: true, + }}, + }, + run: &v1beta1.PipelineRun{ + Spec: v1beta1.PipelineRunSpec{ + Workspaces: []v1beta1.WorkspaceBinding{}, + }, + }, + }} { + t.Run(tc.name, func(t *testing.T) { + if err := ValidateWorkspaceBindings(tc.spec, tc.run); err != nil { + t.Errorf("Unexpected error: %v", err) + } + }) + } +} + +func TestValidateWorkspaceBindingsWithInvalidWorkspaces(t *testing.T) { + for _, tc := range []struct { + name string + spec *v1beta1.PipelineSpec + run *v1beta1.PipelineRun + err string + }{{ + name: "missing required workspace", + spec: &v1beta1.PipelineSpec{ + Workspaces: []v1beta1.PipelineWorkspaceDeclaration{{ + Name: "foo", + }}, + }, + run: &v1beta1.PipelineRun{ + Spec: v1beta1.PipelineRunSpec{ + Workspaces: []v1beta1.WorkspaceBinding{}, + }, + }, + }} { + t.Run(tc.name, func(t *testing.T) { + if err := ValidateWorkspaceBindings(tc.spec, tc.run); err == nil { + t.Fatalf("Expected error indicating `foo` workspace was not provided but got no error") + } + }) } } diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go b/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go index fc18949076b..b2e4a09627c 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go @@ -32,6 +32,12 @@ import ( // state of the PipelineRun. 
type PipelineRunState []*ResolvedPipelineRunTask +type PipelineRunFacts struct { + State PipelineRunState + TasksGraph *dag.Graph + FinalTasksGraph *dag.Graph +} + // ToMap returns a map that maps pipeline task name to the resolved pipeline run task func (state PipelineRunState) ToMap() map[string]*ResolvedPipelineRunTask { m := make(map[string]*ResolvedPipelineRunTask) @@ -41,17 +47,6 @@ func (state PipelineRunState) ToMap() map[string]*ResolvedPipelineRunTask { return m } -// IsDone returns true when all pipeline tasks have respective taskRun created and -// that taskRun has either succeeded or failed after all possible retry attempts -func (state PipelineRunState) IsDone() bool { - for _, t := range state { - if !t.IsDone() { - return false - } - } - return true -} - // IsBeforeFirstTaskRun returns true if the PipelineRun has not yet started its first TaskRun func (state PipelineRunState) IsBeforeFirstTaskRun() bool { for _, t := range state { @@ -62,22 +57,6 @@ func (state PipelineRunState) IsBeforeFirstTaskRun() bool { return true } -// IsStopping returns true if the PipelineRun won't be scheduling any new Task because -// at least one task already failed or was cancelled in the specified dag -func (state PipelineRunState) IsStopping(d *dag.Graph) bool { - for _, t := range state { - if isTaskInGraph(t.PipelineTask.Name, d) { - if t.IsCancelled() { - return true - } - if t.IsFailure() { - return true - } - } - } - return false -} - // GetNextTasks returns a list of tasks which should be executed next i.e. 
// a list of tasks from candidateTasks which aren't yet indicated in state to be running and // a list of cancelled/failed tasks from candidateTasks which haven't exhausted their retries @@ -101,13 +80,29 @@ func (state PipelineRunState) GetNextTasks(candidateTasks sets.String) []*Resolv return tasks } -// SuccessfulOrSkippedDAGTasks returns a list of the names of all of the PipelineTasks in state +// IsStopping returns true if the PipelineRun won't be scheduling any new Task because +// at least one task already failed or was cancelled in the specified dag +func (facts *PipelineRunFacts) IsStopping() bool { + for _, t := range facts.State { + if facts.isDAGTask(t.PipelineTask.Name) { + if t.IsCancelled() { + return true + } + if t.IsFailure() { + return true + } + } + } + return false +} + +// SuccessfulOrSkippedTasks returns a list of the names of all of the PipelineTasks in state // which have successfully completed or skipped -func (state PipelineRunState) SuccessfulOrSkippedDAGTasks(d *dag.Graph) []string { +func (facts *PipelineRunFacts) SuccessfulOrSkippedDAGTasks() []string { tasks := []string{} - for _, t := range state { - if isTaskInGraph(t.PipelineTask.Name, d) { - if t.IsSuccessful() || t.Skip(state, d) { + for _, t := range facts.State { + if facts.isDAGTask(t.PipelineTask.Name) { + if t.IsSuccessful() || t.Skip(facts) { tasks = append(tasks, t.PipelineTask.Name) } } @@ -117,14 +112,14 @@ func (state PipelineRunState) SuccessfulOrSkippedDAGTasks(d *dag.Graph) []string // checkTasksDone returns true if all tasks from the specified graph are finished executing // a task is considered done if it has failed/succeeded/skipped -func (state PipelineRunState) checkTasksDone(d *dag.Graph) bool { - for _, t := range state { +func (facts *PipelineRunFacts) checkTasksDone(d *dag.Graph) bool { + for _, t := range facts.State { if isTaskInGraph(t.PipelineTask.Name, d) { if t.TaskRun == nil { // this task might have skipped if taskRun is nil // continue and ignore 
if this task was skipped // skipped task is considered part of done - if t.Skip(state, d) { + if t.Skip(facts) { continue } return false @@ -137,29 +132,53 @@ func (state PipelineRunState) checkTasksDone(d *dag.Graph) bool { return true } +func (facts *PipelineRunFacts) CheckDAGTasksDone() bool { + return facts.checkTasksDone(facts.TasksGraph) +} + +func (facts *PipelineRunFacts) CheckFinalTasksDone() bool { + return facts.checkTasksDone(facts.FinalTasksGraph) +} + +func (facts *PipelineRunFacts) DAGExecutionQueue() (PipelineRunState, error) { + tasks := PipelineRunState{} + // when pipeline run is stopping, do not schedule any new task and only + // wait for all running tasks to complete and report their status + if !facts.IsStopping() { + // candidateTasks is initialized to DAG root nodes to start pipeline execution + // candidateTasks is derived based on successfully finished tasks and/or skipped tasks + candidateTasks, err := dag.GetSchedulable(facts.TasksGraph, facts.SuccessfulOrSkippedDAGTasks()...) 
+ if err != nil { + return tasks, err + } + tasks = facts.State.GetNextTasks(candidateTasks) + } + return tasks, nil +} + // GetFinalTasks returns a list of final tasks without any taskRun associated with it // GetFinalTasks returns final tasks only when all DAG tasks have finished executing successfully or skipped or // any one DAG task resulted in failure -func (state PipelineRunState) GetFinalTasks(d *dag.Graph, dfinally *dag.Graph) []*ResolvedPipelineRunTask { - tasks := []*ResolvedPipelineRunTask{} +func (facts *PipelineRunFacts) GetFinalTasks() PipelineRunState { + tasks := PipelineRunState{} finalCandidates := sets.NewString() // check either pipeline has finished executing all DAG pipelineTasks // or any one of the DAG pipelineTask has failed - if state.checkTasksDone(d) { + if facts.CheckDAGTasksDone() { // return list of tasks with all final tasks - for _, t := range state { - if isTaskInGraph(t.PipelineTask.Name, dfinally) && !t.IsSuccessful() { + for _, t := range facts.State { + if facts.isFinalTask(t.PipelineTask.Name) && !t.IsSuccessful() { finalCandidates.Insert(t.PipelineTask.Name) } } - tasks = state.GetNextTasks(finalCandidates) + tasks = facts.State.GetNextTasks(finalCandidates) } return tasks } // GetPipelineConditionStatus will return the Condition that the PipelineRun prName should be // updated with, based on the status of the TaskRuns in state. -func (state PipelineRunState) GetPipelineConditionStatus(pr *v1beta1.PipelineRun, logger *zap.SugaredLogger, dag *dag.Graph, dfinally *dag.Graph) *apis.Condition { +func (facts *PipelineRunFacts) GetPipelineConditionStatus(pr *v1beta1.PipelineRun, logger *zap.SugaredLogger) *apis.Condition { // We have 4 different states here: // 1. Timed out -> Failed // 2. 
All tasks are done and at least one has failed or has been cancelled -> Failed @@ -193,12 +212,12 @@ func (state PipelineRunState) GetPipelineConditionStatus(pr *v1beta1.PipelineRun // - Some successful, some skipped: ReasonCompleted // - Some cancelled, none failed: ReasonCancelled // - At least one failed: ReasonFailed - for _, rprt := range state { + for _, rprt := range facts.State { allTasks = append(allTasks, rprt.PipelineTask.Name) switch { case rprt.IsSuccessful(): withStatusTasks = append(withStatusTasks, rprt.PipelineTask.Name) - case rprt.Skip(state, dag): + case rprt.Skip(facts): withStatusTasks = append(withStatusTasks, rprt.PipelineTask.Name) skipTasks = append(skipTasks, v1beta1.SkippedTask{Name: rprt.PipelineTask.Name}) // At least one is skipped and no failure yet, mark as completed @@ -237,7 +256,7 @@ func (state PipelineRunState) GetPipelineConditionStatus(pr *v1beta1.PipelineRun // transition pipeline into stopping state when one of the tasks(dag/final) cancelled or one of the dag tasks failed // for a pipeline with final tasks, single dag task failure does not transition to interim stopping state // pipeline stays in running state until all final tasks are done before transitioning to failed state - if cancelledTasks > 0 || (failedTasks > 0 && state.checkTasksDone(dfinally)) { + if cancelledTasks > 0 || (failedTasks > 0 && facts.CheckFinalTasksDone()) { reason = v1beta1.PipelineRunReasonStopping.String() } else { reason = v1beta1.PipelineRunReasonRunning.String() @@ -251,10 +270,10 @@ func (state PipelineRunState) GetPipelineConditionStatus(pr *v1beta1.PipelineRun } } -func (state PipelineRunState) GetSkippedTasks(pr *v1beta1.PipelineRun, d *dag.Graph) []v1beta1.SkippedTask { +func (facts *PipelineRunFacts) GetSkippedTasks() []v1beta1.SkippedTask { skipped := []v1beta1.SkippedTask{} - for _, rprt := range state { - if rprt.Skip(state, d) { + for _, rprt := range facts.State { + if rprt.Skip(facts) { skipped = append(skipped, 
v1beta1.SkippedTask{Name: rprt.PipelineTask.Name}) } } @@ -310,6 +329,20 @@ func (state PipelineRunState) GetTaskRunsStatus(pr *v1beta1.PipelineRun) map[str return status } +func (facts *PipelineRunFacts) isDAGTask(pipelineTaskName string) bool { + if _, ok := facts.TasksGraph.Nodes[pipelineTaskName]; ok { + return true + } + return false +} + +func (facts *PipelineRunFacts) isFinalTask(pipelineTaskName string) bool { + if _, ok := facts.FinalTasksGraph.Nodes[pipelineTaskName]; ok { + return true + } + return false +} + // Check if a PipelineTask belongs to the specified Graph func isTaskInGraph(pipelineTaskName string, d *dag.Graph) bool { if _, ok := d.Nodes[pipelineTaskName]; ok { diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go index 2a0e15f5d63..6c911a1b2e3 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go @@ -34,8 +34,7 @@ import ( duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" ) -func TestIsDone(t *testing.T) { - +func TestPipelineRunFacts_CheckDAGTasksDoneDone(t *testing.T) { var taskCancelledByStatusState = PipelineRunState{{ PipelineTask: &pts[4], // 2 retries needed TaskRunName: "pipelinerun-mytask1", @@ -90,15 +89,6 @@ func TestIsDone(t *testing.T) { }, }} - var noPipelineTaskState = PipelineRunState{{ - PipelineTask: nil, - TaskRunName: "pipelinerun-mytask1", - TaskRun: withRetries(makeFailed(trs[0])), - ResolvedTaskResources: &resources.ResolvedTaskResources{ - TaskSpec: &task.Spec, - }, - }} - var noTaskRunState = PipelineRunState{{ PipelineTask: &pts[4], // 2 retries needed TaskRunName: "pipelinerun-mytask1", @@ -143,11 +133,6 @@ func TestIsDone(t *testing.T) { state: taskExpectedState, expected: false, ptExpected: []bool{false}, - }, { - name: "no-pipelineTask", - state: noPipelineTaskState, - expected: false, - ptExpected: []bool{false}, }, { name: "No-taskrun", 
state: noTaskRunState, @@ -157,15 +142,24 @@ func TestIsDone(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { + d, err := DagFromState(tc.state) + if err != nil { + t.Fatalf("Unexpected error while buildig DAG for state %v: %v", tc.state, err) + } + facts := PipelineRunFacts{ + State: tc.state, + TasksGraph: d, + FinalTasksGraph: &dag.Graph{}, + } - isDone := tc.state.IsDone() + isDone := facts.checkTasksDone(d) if d := cmp.Diff(isDone, tc.expected); d != "" { t.Errorf("Didn't get expected IsDone %s", diff.PrintWantGot(d)) } for i, pt := range tc.state { isDone = pt.IsDone() if d := cmp.Diff(isDone, tc.ptExpected[i]); d != "" { - t.Errorf("Didn't get expected (ResolvedPipelineRunTask) IsDone %s", diff.PrintWantGot(d)) + t.Errorf("Didn't get expected (ResolvedPipelineRunTask) checkTasksDone %s", diff.PrintWantGot(d)) } } @@ -463,11 +457,16 @@ func TestPipelineRunState_SuccessfulOrSkippedDAGTasks(t *testing.T) { }} for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - dag, err := dagFromState(tc.state) + d, err := dagFromState(tc.state) if err != nil { t.Fatalf("Unexpected error while buildig DAG for state %v: %v", tc.state, err) } - names := tc.state.SuccessfulOrSkippedDAGTasks(dag) + facts := PipelineRunFacts{ + State: tc.state, + TasksGraph: d, + FinalTasksGraph: &dag.Graph{}, + } + names := facts.SuccessfulOrSkippedDAGTasks() if d := cmp.Diff(names, tc.expectedNames); d != "" { t.Errorf("Expected to get completed names %v but got something different %s", tc.expectedNames, diff.PrintWantGot(d)) } @@ -482,7 +481,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state PipelineRunState DAGTasks []v1beta1.PipelineTask finalTasks []v1beta1.PipelineTask - expectedFinalTasks []*ResolvedPipelineRunTask + expectedFinalTasks PipelineRunState }{{ // tasks: [ mytask1, mytask2] // none finally @@ -492,7 +491,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: oneStartedState, DAGTasks: 
[]v1beta1.PipelineTask{pts[0], pts[1]}, finalTasks: []v1beta1.PipelineTask{}, - expectedFinalTasks: []*ResolvedPipelineRunTask{}, + expectedFinalTasks: PipelineRunState{}, }, { // tasks: [ mytask1] // finally: [mytask2] @@ -501,7 +500,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: noneStartedState, DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{}, + expectedFinalTasks: PipelineRunState{}, }, { // tasks: [ mytask1] // finally: [mytask2] @@ -510,7 +509,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: oneStartedState, DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{}, + expectedFinalTasks: PipelineRunState{}, }, { // tasks: [ mytask1] // finally: [mytask2] @@ -519,7 +518,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: oneFinishedState, DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{oneFinishedState[1]}, + expectedFinalTasks: PipelineRunState{oneFinishedState[1]}, }, { // tasks: [ mytask1] // finally: [mytask2] @@ -528,7 +527,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: oneFailedState, DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{oneFinishedState[1]}, + expectedFinalTasks: PipelineRunState{oneFinishedState[1]}, }, { // tasks: [ mytask6 with condition] // finally: [mytask2] @@ -537,7 +536,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(conditionCheckStartedState, noneStartedState[0]), DAGTasks: []v1beta1.PipelineTask{pts[5]}, finalTasks: []v1beta1.PipelineTask{pts[0]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{}, + expectedFinalTasks: PipelineRunState{}, }, { // tasks: [ mytask6 with condition] // finally: 
[mytask2] @@ -546,7 +545,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(conditionCheckSuccessNoTaskStartedState, noneStartedState[0]), DAGTasks: []v1beta1.PipelineTask{pts[5]}, finalTasks: []v1beta1.PipelineTask{pts[0]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{}, + expectedFinalTasks: PipelineRunState{}, }, { // tasks: [ mytask6 with condition] // finally: [mytask2] @@ -555,7 +554,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(conditionCheckFailedWithNoOtherTasksState, noneStartedState[0]), DAGTasks: []v1beta1.PipelineTask{pts[5]}, finalTasks: []v1beta1.PipelineTask{pts[0]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{noneStartedState[0]}, + expectedFinalTasks: PipelineRunState{noneStartedState[0]}, }, { // tasks: [ mytask1, mytask6 with condition] // finally: [mytask2] @@ -564,7 +563,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(conditionCheckFailedWithOthersPassedState, noneStartedState[1]), DAGTasks: []v1beta1.PipelineTask{pts[5], pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{noneStartedState[1]}, + expectedFinalTasks: PipelineRunState{noneStartedState[1]}, }, { // tasks: [ mytask1, mytask6 with condition] // finally: [mytask2] @@ -573,7 +572,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(conditionCheckFailedWithOthersFailedState, noneStartedState[1]), DAGTasks: []v1beta1.PipelineTask{pts[5], pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{noneStartedState[1]}, + expectedFinalTasks: PipelineRunState{noneStartedState[1]}, }, { // tasks: [ mytask6 with condition, mytask7 runAfter mytask6] // finally: [mytask2] @@ -582,7 +581,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(taskWithParentSkippedState, noneStartedState[1]), DAGTasks: []v1beta1.PipelineTask{pts[5], pts[6]}, finalTasks: 
[]v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{noneStartedState[1]}, + expectedFinalTasks: PipelineRunState{noneStartedState[1]}, }, { // tasks: [ mytask1, mytask6 with condition, mytask8 runAfter mytask6] // finally: [mytask2] @@ -591,7 +590,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(taskWithMultipleParentsSkippedState, noneStartedState[1]), DAGTasks: []v1beta1.PipelineTask{pts[0], pts[5], pts[7]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{noneStartedState[1]}, + expectedFinalTasks: PipelineRunState{noneStartedState[1]}, }, { // tasks: [ mytask1, mytask6 with condition, mytask8 runAfter mytask6, mytask9 runAfter mytask1 and mytask6] // finally: [mytask2] @@ -601,7 +600,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(taskWithGrandParentSkippedState, noneStartedState[1]), DAGTasks: []v1beta1.PipelineTask{pts[0], pts[5], pts[7], pts[8]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{noneStartedState[1]}, + expectedFinalTasks: PipelineRunState{noneStartedState[1]}, }, { //tasks: [ mytask1, mytask6 with condition, mytask8 runAfter mytask6, mytask9 runAfter mytask1 and mytask6] //finally: [mytask2] @@ -611,7 +610,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(taskWithGrandParentsOneFailedState, noneStartedState[1]), DAGTasks: []v1beta1.PipelineTask{pts[0], pts[5], pts[7], pts[8]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{noneStartedState[1]}, + expectedFinalTasks: PipelineRunState{noneStartedState[1]}, }, { //tasks: [ mytask1, mytask6 with condition, mytask8 runAfter mytask6, mytask9 runAfter mytask1 and mytask6] //finally: [mytask2] @@ -620,7 +619,7 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { state: append(taskWithGrandParentsOneNotRunState, noneStartedState[1]), DAGTasks: 
[]v1beta1.PipelineTask{pts[0], pts[5], pts[7], pts[8]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, - expectedFinalTasks: []*ResolvedPipelineRunTask{}, + expectedFinalTasks: PipelineRunState{}, }} for _, tc := range tcs { dagGraph, err := dag.Build(v1beta1.PipelineTaskList(tc.DAGTasks)) @@ -632,7 +631,12 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { t.Fatalf("Unexpected error while buildig DAG for final pipelineTasks %v: %v", tc.finalTasks, err) } t.Run(tc.name, func(t *testing.T) { - next := tc.state.GetFinalTasks(dagGraph, finalGraph) + facts := PipelineRunFacts{ + State: tc.state, + TasksGraph: dagGraph, + FinalTasksGraph: finalGraph, + } + next := facts.GetFinalTasks() if d := cmp.Diff(tc.expectedFinalTasks, next); d != "" { t.Errorf("Didn't get expected final Tasks for %s (%s): %s", tc.name, tc.desc, diff.PrintWantGot(d)) } @@ -882,7 +886,12 @@ func TestGetPipelineConditionStatus(t *testing.T) { if err != nil { t.Fatalf("Unexpected error while buildig DAG for state %v: %v", tc.state, err) } - c := tc.state.GetPipelineConditionStatus(pr, zap.NewNop().Sugar(), d, &dag.Graph{}) + facts := PipelineRunFacts{ + State: tc.state, + TasksGraph: d, + FinalTasksGraph: &dag.Graph{}, + } + c := facts.GetPipelineConditionStatus(pr, zap.NewNop().Sugar()) wantCondition := &apis.Condition{ Type: apis.ConditionSucceeded, Status: tc.expectedStatus, @@ -993,7 +1002,12 @@ func TestGetPipelineConditionStatus_WithFinalTasks(t *testing.T) { if err != nil { t.Fatalf("Unexpected error while buildig graph for final tasks %v: %v", tc.finalTasks, err) } - c := tc.state.GetPipelineConditionStatus(pr, zap.NewNop().Sugar(), d, df) + facts := PipelineRunFacts{ + State: tc.state, + TasksGraph: d, + FinalTasksGraph: df, + } + c := facts.GetPipelineConditionStatus(pr, zap.NewNop().Sugar()) wantCondition := &apis.Condition{ Type: apis.ConditionSucceeded, Status: tc.expectedStatus, @@ -1025,7 +1039,12 @@ func TestGetPipelineConditionStatus_PipelineTimeouts(t *testing.T) { }, }, } - c 
:= oneFinishedState.GetPipelineConditionStatus(pr, zap.NewNop().Sugar(), d, &dag.Graph{}) + facts := PipelineRunFacts{ + State: oneFinishedState, + TasksGraph: d, + FinalTasksGraph: &dag.Graph{}, + } + c := facts.GetPipelineConditionStatus(pr, zap.NewNop().Sugar()) if c.Status != corev1.ConditionFalse && c.Reason != v1beta1.PipelineRunReasonTimedOut.String() { t.Fatalf("Expected to get status %s but got %s for state %v", corev1.ConditionFalse, c.Status, oneFinishedState) } diff --git a/pkg/reconciler/taskrun/resources/apply.go b/pkg/reconciler/taskrun/resources/apply.go index 1cd29944a09..52da59addb5 100644 --- a/pkg/reconciler/taskrun/resources/apply.go +++ b/pkg/reconciler/taskrun/resources/apply.go @@ -20,7 +20,8 @@ import ( "fmt" "path/filepath" - "github.com/tektoncd/pipeline/pkg/workspace" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" @@ -108,24 +109,36 @@ func ApplyContexts(spec *v1beta1.TaskSpec, rtr *ResolvedTaskResources, tr *v1bet return ApplyReplacements(spec, replacements, map[string][]string{}) } -// ApplyWorkspaces applies the substitution from paths that the workspaces in w are mounted to, the -// volumes that wb are realized with in the task spec ts and the PersistentVolumeClaim names for the +// ApplyWorkspaces applies the substitution from paths that the workspaces in declarations mounted to, the +// volumes that bindings are realized with in the task spec and the PersistentVolumeClaim names for the // workspaces. 
-func ApplyWorkspaces(spec *v1beta1.TaskSpec, w []v1beta1.WorkspaceDeclaration, wb []v1beta1.WorkspaceBinding) *v1beta1.TaskSpec { +func ApplyWorkspaces(spec *v1beta1.TaskSpec, declarations []v1beta1.WorkspaceDeclaration, bindings []v1beta1.WorkspaceBinding, vols map[string]corev1.Volume) *v1beta1.TaskSpec { stringReplacements := map[string]string{} - for _, ww := range w { - stringReplacements[fmt.Sprintf("workspaces.%s.path", ww.Name)] = ww.GetMountPath() + bindNames := sets.NewString() + for _, binding := range bindings { + bindNames.Insert(binding.Name) } - v := workspace.GetVolumes(wb) - for name, vv := range v { - stringReplacements[fmt.Sprintf("workspaces.%s.volume", name)] = vv.Name + + for _, declaration := range declarations { + prefix := fmt.Sprintf("workspaces.%s.", declaration.Name) + if declaration.Optional && !bindNames.Has(declaration.Name) { + stringReplacements[prefix+"bound"] = "false" + stringReplacements[prefix+"path"] = "" + } else { + stringReplacements[prefix+"bound"] = "true" + stringReplacements[prefix+"path"] = declaration.GetMountPath() + } + } + + for name, vol := range vols { + stringReplacements[fmt.Sprintf("workspaces.%s.volume", name)] = vol.Name } - for _, w := range wb { - if w.PersistentVolumeClaim != nil { - stringReplacements[fmt.Sprintf("workspaces.%s.claim", w.Name)] = w.PersistentVolumeClaim.ClaimName + for _, binding := range bindings { + if binding.PersistentVolumeClaim != nil { + stringReplacements[fmt.Sprintf("workspaces.%s.claim", binding.Name)] = binding.PersistentVolumeClaim.ClaimName } else { - stringReplacements[fmt.Sprintf("workspaces.%s.claim", w.Name)] = "" + stringReplacements[fmt.Sprintf("workspaces.%s.claim", binding.Name)] = "" } } return ApplyReplacements(spec, stringReplacements, map[string][]string{}) @@ -214,7 +227,7 @@ func ApplyReplacements(spec *v1beta1.TaskSpec, stringReplacements map[string]str // Apply variable substitution to the sidecar definitions sidecars := spec.Sidecars for i := range sidecars 
{ - v1beta1.ApplyContainerReplacements(&sidecars[i].Container, stringReplacements, arrayReplacements) + v1beta1.ApplySidecarReplacements(&sidecars[i], stringReplacements, arrayReplacements) } return spec diff --git a/pkg/reconciler/taskrun/resources/apply_test.go b/pkg/reconciler/taskrun/resources/apply_test.go index b6f096ea265..5de99c1e989 100644 --- a/pkg/reconciler/taskrun/resources/apply_test.go +++ b/pkg/reconciler/taskrun/resources/apply_test.go @@ -25,6 +25,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/resource" resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" + "github.com/tektoncd/pipeline/pkg/workspace" "github.com/tektoncd/pipeline/test/diff" "github.com/tektoncd/pipeline/test/names" corev1 "k8s.io/api/core/v1" @@ -39,7 +40,7 @@ var ( CredsImage: "override-with-creds:latest", KubeconfigWriterImage: "override-with-kubeconfig-writer-image:latest", ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", BuildGCSFetcherImage: "gcr.io/cloud-tbs/gcs-fetcher:latest", PRImage: "override-with-pr:latest", ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest", @@ -708,49 +709,92 @@ func TestApplyWorkspaces(t *testing.T) { }, }}, } - want := applyMutation(ts, func(spec *v1beta1.TaskSpec) { - spec.StepTemplate.Env[0].Value = "ws-9l9zj" - spec.StepTemplate.Env[1].Value = "foo" - spec.StepTemplate.Env[2].Value = "" + for _, tc := range []struct { + name string + spec *v1beta1.TaskSpec + decls []v1beta1.WorkspaceDeclaration + binds []v1beta1.WorkspaceBinding + want *v1beta1.TaskSpec + }{{ + name: "workspace-variable-replacement", + spec: ts.DeepCopy(), + decls: []v1beta1.WorkspaceDeclaration{{ + Name: "myws", + }, { + Name: "otherws", + MountPath: "/foo", + }}, + binds: []v1beta1.WorkspaceBinding{{ + Name: "myws", + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + 
ClaimName: "foo", + }, + }, { + Name: "otherws", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + want: applyMutation(ts, func(spec *v1beta1.TaskSpec) { + spec.StepTemplate.Env[0].Value = "ws-9l9zj" + spec.StepTemplate.Env[1].Value = "foo" + spec.StepTemplate.Env[2].Value = "" - spec.Steps[0].Name = "ws-9l9zj" - spec.Steps[0].Image = "ws-mz4c7" - spec.Steps[0].WorkingDir = "ws-mz4c7" - spec.Steps[0].Args = []string{"/workspace/myws"} + spec.Steps[0].Name = "ws-9l9zj" + spec.Steps[0].Image = "ws-mz4c7" + spec.Steps[0].WorkingDir = "ws-mz4c7" + spec.Steps[0].Args = []string{"/workspace/myws"} - spec.Steps[1].VolumeMounts[0].Name = "ws-9l9zj" - spec.Steps[1].VolumeMounts[0].MountPath = "path/to//foo" - spec.Steps[1].VolumeMounts[0].SubPath = "ws-9l9zj" + spec.Steps[1].VolumeMounts[0].Name = "ws-9l9zj" + spec.Steps[1].VolumeMounts[0].MountPath = "path/to//foo" + spec.Steps[1].VolumeMounts[0].SubPath = "ws-9l9zj" - spec.Steps[2].Env[0].Value = "ws-9l9zj" - spec.Steps[2].Env[1].ValueFrom.SecretKeyRef.LocalObjectReference.Name = "ws-9l9zj" - spec.Steps[2].Env[1].ValueFrom.SecretKeyRef.Key = "ws-9l9zj" - spec.Steps[2].EnvFrom[0].Prefix = "ws-9l9zj" - spec.Steps[2].EnvFrom[0].ConfigMapRef.LocalObjectReference.Name = "ws-9l9zj" + spec.Steps[2].Env[0].Value = "ws-9l9zj" + spec.Steps[2].Env[1].ValueFrom.SecretKeyRef.LocalObjectReference.Name = "ws-9l9zj" + spec.Steps[2].Env[1].ValueFrom.SecretKeyRef.Key = "ws-9l9zj" + spec.Steps[2].EnvFrom[0].Prefix = "ws-9l9zj" + spec.Steps[2].EnvFrom[0].ConfigMapRef.LocalObjectReference.Name = "ws-9l9zj" - spec.Volumes[0].Name = "ws-9l9zj" - spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name = "ws-9l9zj" - spec.Volumes[1].VolumeSource.Secret.SecretName = "ws-9l9zj" - spec.Volumes[2].VolumeSource.PersistentVolumeClaim.ClaimName = "ws-9l9zj" - }) - w := []v1beta1.WorkspaceDeclaration{{ - Name: "myws", + spec.Volumes[0].Name = "ws-9l9zj" + spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name = "ws-9l9zj" + 
spec.Volumes[1].VolumeSource.Secret.SecretName = "ws-9l9zj" + spec.Volumes[2].VolumeSource.PersistentVolumeClaim.ClaimName = "ws-9l9zj" + }), }, { - Name: "otherws", - MountPath: "/foo", - }} - wb := []v1beta1.WorkspaceBinding{{ - Name: "myws", - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "foo", - }, + name: "optional-workspace-provided-variable-replacement", + spec: &v1beta1.TaskSpec{Steps: []v1beta1.Step{{ + Script: `test "$(workspaces.ows.bound)" = "true" && echo "$(workspaces.ows.path)"`, + }}}, + decls: []v1beta1.WorkspaceDeclaration{{ + Name: "ows", + Optional: true, + }}, + binds: []v1beta1.WorkspaceBinding{{ + Name: "ows", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + want: &v1beta1.TaskSpec{Steps: []v1beta1.Step{{ + Script: `test "true" = "true" && echo "/workspace/ows"`, + }}}, }, { - Name: "otherws", - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }} - got := resources.ApplyWorkspaces(ts, w, wb) - if d := cmp.Diff(want, got); d != "" { - t.Errorf("TestApplyWorkspaces() got diff %s", diff.PrintWantGot(d)) + name: "optional-workspace-omitted-variable-replacement", + spec: &v1beta1.TaskSpec{Steps: []v1beta1.Step{{ + Script: `test "$(workspaces.ows.bound)" = "true" && echo "$(workspaces.ows.path)"`, + }}}, + decls: []v1beta1.WorkspaceDeclaration{{ + Name: "ows", + Optional: true, + }}, + binds: []v1beta1.WorkspaceBinding{}, // intentionally omitted ows binding + want: &v1beta1.TaskSpec{Steps: []v1beta1.Step{{ + Script: `test "false" = "true" && echo ""`, + }}}, + }} { + t.Run(tc.name, func(t *testing.T) { + vols := workspace.CreateVolumes(tc.binds) + got := resources.ApplyWorkspaces(tc.spec, tc.decls, tc.binds, vols) + if d := cmp.Diff(tc.want, got); d != "" { + t.Errorf("TestApplyWorkspaces() got diff %s", diff.PrintWantGot(d)) + } + }) } } diff --git a/pkg/reconciler/taskrun/resources/input_resource_test.go b/pkg/reconciler/taskrun/resources/input_resource_test.go index 668fa6311a8..fe6c6646df8 100644 --- 
a/pkg/reconciler/taskrun/resources/input_resource_test.go +++ b/pkg/reconciler/taskrun/resources/input_resource_test.go @@ -41,7 +41,7 @@ var ( CredsImage: "override-with-creds:latest", KubeconfigWriterImage: "override-with-kubeconfig-writer:latest", ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", BuildGCSFetcherImage: "gcr.io/cloud-builders/gcs-fetcher:latest", PRImage: "override-with-pr:latest", ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest", @@ -683,7 +683,7 @@ gsutil cp gs://fake-bucket/rules.zip /workspace/gcs-dir `, Container: corev1.Container{ Name: "fetch-storage1-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, }}, Resources: &v1beta1.TaskResources{ @@ -1075,7 +1075,7 @@ gsutil cp gs://fake-bucket/rules.zip /workspace/gcs-input-resource `, Container: corev1.Container{ Name: "fetch-gcs-input-resource-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", }, }}, Resources: &v1beta1.TaskResources{ @@ -1145,7 +1145,7 @@ gsutil rsync -d -r gs://fake-bucket/rules.zip /workspace/gcs-input-resource `, Container: corev1.Container{ Name: "fetch-storage-gcs-keys-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", VolumeMounts: []corev1.VolumeMount{ {Name: "volume-storage-gcs-keys-secret-name", MountPath: "/var/secret/secret-name"}, }, @@ -1300,7 +1300,7 @@ func TestAddStepsToTaskWithBucketFromConfigMap(t *testing.T) { Command: []string{"mkdir", "-p", "/workspace/gitspace"}, }}, {Container: corev1.Container{ Name: "artifact-copy-from-gitspace-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "gs://fake-bucket/prev-task-path/*", "/workspace/gitspace"}, Env: gcsEnv, @@ -1344,7 +1344,7 @@ func TestAddStepsToTaskWithBucketFromConfigMap(t *testing.T) { Command: 
[]string{"mkdir", "-p", "/workspace/gcs-dir"}, }}, {Container: corev1.Container{ Name: "artifact-copy-from-workspace-78c5n", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "gs://fake-bucket/prev-task-path/*", "/workspace/gcs-dir"}, Env: gcsEnv, @@ -1396,7 +1396,7 @@ func TestAddStepsToTaskWithBucketFromConfigMap(t *testing.T) { Command: []string{"mkdir", "-p", "/workspace/gcs-dir"}, }}, {Container: corev1.Container{ Name: "artifact-copy-from-workspace-l22wn", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "gs://fake-bucket/prev-task-path/*", "/workspace/gcs-dir"}, Env: gcsEnv, @@ -1407,7 +1407,7 @@ func TestAddStepsToTaskWithBucketFromConfigMap(t *testing.T) { Command: []string{"mkdir", "-p", "/workspace/gcs-dir"}, }}, {Container: corev1.Container{ Name: "artifact-copy-from-workspace2-j2tds", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "gs://fake-bucket/prev-task-path2/*", "/workspace/gcs-dir"}, Env: gcsEnv, diff --git a/pkg/reconciler/taskrun/resources/output_resource_test.go b/pkg/reconciler/taskrun/resources/output_resource_test.go index 40bb83f6fa2..f9ac07c487e 100644 --- a/pkg/reconciler/taskrun/resources/output_resource_test.go +++ b/pkg/reconciler/taskrun/resources/output_resource_test.go @@ -486,7 +486,7 @@ func TestValidOutputResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "upload-source-gcs-78c5n", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", VolumeMounts: []corev1.VolumeMount{{ Name: "volume-source-gcs-sname", MountPath: "/var/secret/sname", @@ -576,7 +576,7 @@ func TestValidOutputResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "upload-source-gcs-78c5n", - Image: "google/cloud-sdk", + Image: 
"gcr.io/google.com/cloudsdktool/cloud-sdk", VolumeMounts: []corev1.VolumeMount{{ Name: "volume-source-gcs-sname", MountPath: "/var/secret/sname", }}, @@ -647,7 +647,7 @@ func TestValidOutputResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "upload-source-gcs-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", VolumeMounts: []corev1.VolumeMount{{ Name: "volume-source-gcs-sname", MountPath: "/var/secret/sname", }}, @@ -708,7 +708,7 @@ func TestValidOutputResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "upload-source-gcs-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", VolumeMounts: []corev1.VolumeMount{{ Name: "volume-source-gcs-sname", MountPath: "/var/secret/sname", }}, @@ -1000,7 +1000,7 @@ func TestValidOutputResourcesWithBucketStorage(t *testing.T) { Command: []string{"mkdir", "-p", "/workspace/output/source-workspace"}, }}, {Container: corev1.Container{ Name: "artifact-copy-to-source-git-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "/workspace/output/source-workspace", "gs://fake-bucket/pipeline-task-name"}, }}}, @@ -1051,7 +1051,7 @@ func TestValidOutputResourcesWithBucketStorage(t *testing.T) { Command: []string{"mkdir", "-p", "/workspace/output/source-workspace"}, }}, {Container: corev1.Container{ Name: "artifact-copy-to-source-git-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "/workspace/output/source-workspace", "gs://fake-bucket/pipeline-task-name"}, }}}, @@ -1448,7 +1448,7 @@ func TestInputOutputBucketResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "artifact-copy-from-source-workspace-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{ 
"cp", @@ -1467,7 +1467,7 @@ func TestInputOutputBucketResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "upload-source-gcs-bucket-78c5n", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", VolumeMounts: nil, Command: []string{"gsutil"}, Args: []string{"rsync", "-d", "-r", "/workspace/output/source-workspace", "gs://some-bucket"}, @@ -1562,7 +1562,7 @@ func TestInputOutputBucketResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "artifact-copy-from-source-workspace-78c5n", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{ "cp", @@ -1588,7 +1588,7 @@ func TestInputOutputBucketResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "artifact-copy-from-source-workspace-2-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"cp", "-P", "-r", "gs://fake-bucket/pipeline-task-path-2/*", "/workspace/faraway-disk-2"}, Env: []corev1.EnvVar{ @@ -1600,7 +1600,7 @@ func TestInputOutputBucketResources(t *testing.T) { VolumeMounts: []corev1.VolumeMount{{Name: "volume-bucket-sname", MountPath: "/var/bucketsecret/sname"}}, }}, {Container: corev1.Container{ Name: "upload-source-gcs-bucket-3-j2tds", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{"rsync", "-d", "-r", "/workspace/output/source-workspace-3", "gs://some-bucket-3"}, }}, @@ -1677,7 +1677,7 @@ func TestInputOutputBucketResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "upload-source-gcs-bucket-mz4c7", - Image: "google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", Command: []string{"gsutil"}, Args: []string{ "rsync", @@ -1689,7 +1689,7 @@ func TestInputOutputBucketResources(t *testing.T) { }}, {Container: corev1.Container{ Name: "upload-source-gcs-bucket-2-78c5n", - Image: 
"google/cloud-sdk", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk", VolumeMounts: nil, Command: []string{"gsutil"}, Args: []string{"rsync", "-d", "-r", "/workspace/output/source-workspace-2", "gs://some-bucket-2"}, diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index 4edda8897ee..3f7d3193de4 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -41,7 +41,6 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent" "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" - "github.com/tektoncd/pipeline/pkg/termination" "github.com/tektoncd/pipeline/pkg/timeout" "github.com/tektoncd/pipeline/pkg/workspace" corev1 "k8s.io/api/core/v1" @@ -123,6 +122,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg c.timeoutHandler.Release(tr.GetNamespacedName()) pod, err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{}) if err == nil { + logger.Debugf("Stopping sidecars for TaskRun %q of Pod %q", tr.Name, tr.Status.PodName) err = podconvert.StopSidecars(c.Images.NopImage, c.KubeClientSet, *pod) if err == nil { // Check if any SidecarStatuses are still shown as Running after stopping @@ -189,8 +189,9 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg // Reconcile this copy of the task run and then write back any status // updates regardless of whether the reconciliation errored out. 
if err = c.reconcile(ctx, tr, taskSpec, rtr); err != nil { - logger.Errorf("Reconcile error: %v", err.Error()) + logger.Errorf("Reconcile: %v", err.Error()) } + // Emit events (only when ConditionSucceeded was changed) return c.finishReconcileUpdateEmitEvents(ctx, tr, before, err) } @@ -387,6 +388,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, } go c.timeoutHandler.Wait(tr.GetNamespacedName(), *tr.Status.StartTime, *tr.Spec.Timeout) } + if err := c.tracker.Track(tr.GetBuildPodRef(), tr); err != nil { logger.Errorf("Failed to create tracker for build pod %q for taskrun %q: %v", tr.Name, tr.Name, err) return err @@ -403,9 +405,8 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, } // Convert the Pod's status to the equivalent TaskRun Status. - tr.Status = podconvert.MakeTaskRunStatus(logger, *tr, pod, *taskSpec) - - if err := updateTaskRunResourceResult(tr, *pod); err != nil { + tr.Status, err = podconvert.MakeTaskRunStatus(logger, *tr, pod) + if err != nil { return err } @@ -423,14 +424,16 @@ func (c *Reconciler) updateTaskRunWithDefaultWorkspaces(ctx context.Context, tr } workspaceBindings := map[string]v1beta1.WorkspaceBinding{} for _, tsWorkspace := range taskSpec.Workspaces { - workspaceBindings[tsWorkspace.Name] = v1beta1.WorkspaceBinding{ - Name: tsWorkspace.Name, - SubPath: defaultWS.SubPath, - VolumeClaimTemplate: defaultWS.VolumeClaimTemplate, - PersistentVolumeClaim: defaultWS.PersistentVolumeClaim, - EmptyDir: defaultWS.EmptyDir, - ConfigMap: defaultWS.ConfigMap, - Secret: defaultWS.Secret, + if !tsWorkspace.Optional { + workspaceBindings[tsWorkspace.Name] = v1beta1.WorkspaceBinding{ + Name: tsWorkspace.Name, + SubPath: defaultWS.SubPath, + VolumeClaimTemplate: defaultWS.VolumeClaimTemplate, + PersistentVolumeClaim: defaultWS.PersistentVolumeClaim, + EmptyDir: defaultWS.EmptyDir, + ConfigMap: defaultWS.ConfigMap, + Secret: defaultWS.Secret, + } } } @@ -464,34 +467,35 @@ func (c *Reconciler) 
updateLabelsAndAnnotations(tr *v1beta1.TaskRun) (*v1beta1.T } func (c *Reconciler) handlePodCreationError(ctx context.Context, tr *v1beta1.TaskRun, err error) error { - var msg string - if isExceededResourceQuotaError(err) { + switch { + case isExceededResourceQuotaError(err): backoff, currentlyBackingOff := c.timeoutHandler.GetBackoff(tr.GetNamespacedName(), *tr.Status.StartTime, *tr.Spec.Timeout) if !currentlyBackingOff { go c.timeoutHandler.SetTimer(tr.GetNamespacedName(), time.Until(backoff.NextAttempt)) } - msg = fmt.Sprintf("TaskRun Pod exceeded available resources, reattempted %d times", backoff.NumAttempts) + msg := fmt.Sprintf("TaskRun Pod exceeded available resources, reattempted %d times", backoff.NumAttempts) tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown, Reason: podconvert.ReasonExceededResourceQuota, Message: fmt.Sprintf("%s: %v", msg, err), }) - // return a transient error, so that the key is requeued - return err - } - // The pod creation failed, not because of quota issues. The most likely - // reason is that something is wrong with the spec of the Task, that we could - // not check with validation before - i.e. pod template fields - msg = fmt.Sprintf("failed to create task run pod %q: %v. Maybe ", tr.Name, err) - if tr.Spec.TaskRef != nil { - msg += fmt.Sprintf("missing or invalid Task %s/%s", tr.Namespace, tr.Spec.TaskRef.Name) - } else { - msg += "invalid TaskSpec" + case isTaskRunValidationFailed(err): + tr.Status.MarkResourceFailed(podconvert.ReasonFailedValidation, err) + default: + // The pod creation failed with unknown reason. The most likely + // reason is that something is wrong with the spec of the Task, that we could + // not check with validation before - i.e. pod template fields + msg := fmt.Sprintf("failed to create task run pod %q: %v. 
Maybe ", tr.Name, err) + if tr.Spec.TaskRef != nil { + msg += fmt.Sprintf("missing or invalid Task %s/%s", tr.Namespace, tr.Spec.TaskRef.Name) + } else { + msg += "invalid TaskSpec" + } + err = controller.NewPermanentError(errors.New(msg)) + tr.Status.MarkResourceFailed(podconvert.ReasonCouldntGetTask, err) } - newErr := controller.NewPermanentError(errors.New(msg)) - tr.Status.MarkResourceFailed(podconvert.ReasonCouldntGetTask, newErr) - return newErr + return err } // failTaskRun stops a TaskRun with the provided Reason @@ -501,7 +505,7 @@ func (c *Reconciler) handlePodCreationError(ctx context.Context, tr *v1beta1.Tas func (c *Reconciler) failTaskRun(ctx context.Context, tr *v1beta1.TaskRun, reason v1beta1.TaskRunReason, message string) error { logger := logging.FromContext(ctx) - logger.Warn("stopping task run %q because of %q", tr.Name, reason) + logger.Warnf("stopping task run %q because of %q", tr.Name, reason) tr.Status.MarkResourceFailed(reason, errors.New(message)) completionTime := metav1.Time{Time: time.Now()} @@ -599,13 +603,16 @@ func (c *Reconciler) createPod(ctx context.Context, tr *v1beta1.TaskRun, rtr *re ts = resources.ApplyResources(ts, inputResources, "inputs") ts = resources.ApplyResources(ts, outputResources, "outputs") + // Get the randomized volume names assigned to workspace bindings + workspaceVolumes := workspace.CreateVolumes(tr.Spec.Workspaces) + // Apply workspace resource substitution - ts = resources.ApplyWorkspaces(ts, ts.Workspaces, tr.Spec.Workspaces) + ts = resources.ApplyWorkspaces(ts, ts.Workspaces, tr.Spec.Workspaces, workspaceVolumes) // Apply task result substitution ts = resources.ApplyTaskResults(ts) - ts, err = workspace.Apply(*ts, tr.Spec.Workspaces) + ts, err = workspace.Apply(*ts, tr.Spec.Workspaces, workspaceVolumes) if err != nil { logger.Errorf("Failed to create a pod for taskrun: %s due to workspace error %v", tr.Name, err) return nil, err @@ -628,71 +635,25 @@ func (c *Reconciler) createPod(ctx context.Context, 
tr *v1beta1.TaskRun, rtr *re return nil, fmt.Errorf("translating TaskSpec to Pod: %w", err) } - return c.KubeClientSet.CoreV1().Pods(tr.Namespace).Create(pod) -} - -type DeletePod func(podName string, options *metav1.DeleteOptions) error - -func updateTaskRunResourceResult(taskRun *v1beta1.TaskRun, pod corev1.Pod) error { - podconvert.SortContainerStatuses(&pod) - - if taskRun.IsSuccessful() { - for idx, cs := range pod.Status.ContainerStatuses { - if cs.State.Terminated != nil { - msg := cs.State.Terminated.Message - r, err := termination.ParseMessage(msg) - if err != nil { - return fmt.Errorf("parsing message for container status %d: %v", idx, err) - } - taskResults, pipelineResourceResults := getResults(r) - taskRun.Status.TaskRunResults = append(taskRun.Status.TaskRunResults, taskResults...) - taskRun.Status.ResourcesResult = append(taskRun.Status.ResourcesResult, pipelineResourceResults...) - } + pod, err = c.KubeClientSet.CoreV1().Pods(tr.Namespace).Create(pod) + if err == nil && willOverwritePodSetAffinity(tr) { + if recorder := controller.GetEventRecorder(ctx); recorder != nil { + recorder.Eventf(tr, corev1.EventTypeWarning, "PodAffinityOverwrite", "Pod template affinity is overwritten by affinity assistant for pod %q", pod.Name) } - taskRun.Status.TaskRunResults = removeDuplicateResults(taskRun.Status.TaskRunResults) } - return nil + return pod, err } -func getResults(results []v1beta1.PipelineResourceResult) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult) { - var taskResults []v1beta1.TaskRunResult - var pipelineResourceResults []v1beta1.PipelineResourceResult - for _, r := range results { - switch r.ResultType { - case v1beta1.TaskRunResultType: - taskRunResult := v1beta1.TaskRunResult{ - Name: r.Key, - Value: r.Value, - } - taskResults = append(taskResults, taskRunResult) - case v1beta1.PipelineResourceResultType: - fallthrough - default: - pipelineResourceResults = append(pipelineResourceResults, r) - } - } - return taskResults, 
pipelineResourceResults -} - -func removeDuplicateResults(taskRunResult []v1beta1.TaskRunResult) []v1beta1.TaskRunResult { - uniq := make([]v1beta1.TaskRunResult, 0) - latest := make(map[string]v1beta1.TaskRunResult, 0) - for _, res := range taskRunResult { - if _, seen := latest[res.Name]; !seen { - uniq = append(uniq, res) - } - latest[res.Name] = res - } - for i, res := range uniq { - uniq[i] = latest[res.Name] - } - return uniq -} +type DeletePod func(podName string, options *metav1.DeleteOptions) error func isExceededResourceQuotaError(err error) bool { return err != nil && k8serrors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") } +func isTaskRunValidationFailed(err error) bool { + return err != nil && strings.Contains(err.Error(), "TaskRun validation failed") +} + // resourceImplBinding maps pipeline resource names to the actual resource type implementations func resourceImplBinding(resources map[string]*resourcev1alpha1.PipelineResource, images pipeline.Images) (map[string]v1beta1.PipelineResourceInterface, error) { p := make(map[string]v1beta1.PipelineResourceInterface) @@ -781,3 +742,13 @@ func storeTaskSpec(ctx context.Context, tr *v1beta1.TaskRun, ts *v1beta1.TaskSpe } return nil } + +// willOverwritePodSetAffinity returns a bool indicating whether the +// affinity for pods will be overwritten with affinity assistant. 
+func willOverwritePodSetAffinity(taskRun *v1beta1.TaskRun) bool { + var podTemplate v1beta1.PodTemplate + if taskRun.Spec.PodTemplate != nil { + podTemplate = *taskRun.Spec.PodTemplate + } + return taskRun.Annotations[workspace.AnnotationAffinityAssistantName] != "" && podTemplate.Affinity != nil +} diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index c14510d168d..3975e2b4bb5 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -53,7 +53,6 @@ import ( ktesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" "knative.dev/pkg/apis" - duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" "knative.dev/pkg/configmap" "knative.dev/pkg/controller" "knative.dev/pkg/logging" @@ -78,7 +77,7 @@ var ( CredsImage: "override-with-creds:latest", KubeconfigWriterImage: "override-with-kubeconfig-writer:latest", ShellImage: "busybox", - GsutilImage: "google/cloud-sdk", + GsutilImage: "gcr.io/google.com/cloudsdktool/cloud-sdk", BuildGCSFetcherImage: "gcr.io/cloud-builders/gcs-fetcher:latest", PRImage: "override-with-pr:latest", ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest", @@ -221,7 +220,7 @@ var ( getPlaceToolsInitContainer = func(ops ...tb.ContainerOp) tb.PodSpecOp { actualOps := []tb.ContainerOp{ - tb.Command("cp", "/ko-app/entrypoint", entrypointLocation), + tb.Command("/ko-app/entrypoint", "cp", "/ko-app/entrypoint", entrypointLocation), tb.VolumeMount("tekton-internal-tools", "/tekton/tools"), tb.Args(), } @@ -301,6 +300,7 @@ func getTaskRunController(t *testing.T, d test.Data) (test.Assets, func()) { Clients: c, Informers: informers, Recorder: controller.GetEventRecorder(ctx).(*record.FakeRecorder), + Ctx: ctx, }, cancel } @@ -484,7 +484,7 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) { c := testAssets.Controller clients := testAssets.Clients - if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil { + if 
err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)); err != nil { t.Errorf("expected no error. Got error %v", err) } if len(clients.Kube.Actions()) == 0 { @@ -566,6 +566,7 @@ func TestReconcile_FeatureFlags(t *testing.T) { tb.PodOwnerReference("TaskRun", "test-taskrun-run-home-env", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-9l9zj", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, @@ -610,6 +611,7 @@ func TestReconcile_FeatureFlags(t *testing.T) { tb.PodOwnerReference("TaskRun", "test-taskrun-run-working-dir", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-9l9zj", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, @@ -667,7 +669,7 @@ func TestReconcile_FeatureFlags(t *testing.T) { }); err != nil { t.Fatal(err) } - if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)); err != nil { t.Errorf("expected no error. Got error %v", err) } if len(clients.Kube.Actions()) == 0 { @@ -752,7 +754,7 @@ func TestReconcile_CloudEvents(t *testing.T) { t.Fatal(err) } - if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Errorf("expected no error. 
Got error %v", err) } if len(clients.Kube.Actions()) == 0 { @@ -934,6 +936,7 @@ func TestReconcile(t *testing.T) { tb.PodOwnerReference("TaskRun", "test-taskrun-run-success", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-9l9zj", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, @@ -1029,6 +1032,7 @@ func TestReconcile(t *testing.T) { tb.PodOwnerReference("TaskRun", "test-taskrun-substitution", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes( workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-78c5n", @@ -1162,6 +1166,7 @@ func TestReconcile(t *testing.T) { tb.PodOwnerReference("TaskRun", "test-taskrun-with-taskspec", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-mz4c7", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, @@ -1235,6 +1240,7 @@ func TestReconcile(t *testing.T) { tb.PodOwnerReference("TaskRun", "test-taskrun-with-cluster-task", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-9l9zj", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, @@ -1281,6 +1287,7 @@ func TestReconcile(t *testing.T) { 
tb.PodOwnerReference("TaskRun", "test-taskrun-with-resource-spec", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-mz4c7", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, @@ -1354,6 +1361,7 @@ func TestReconcile(t *testing.T) { tb.PodOwnerReference("TaskRun", "test-taskrun-with-pod", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-9l9zj", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, @@ -1399,6 +1407,7 @@ func TestReconcile(t *testing.T) { tb.PodOwnerReference("TaskRun", "test-taskrun-with-credentials-variable", tb.OwnerReferenceAPIVersion(currentAPIVersion)), tb.PodSpec( + tb.PodServiceAccountName(config.DefaultServiceAccountValue), tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-9l9zj", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, @@ -1450,7 +1459,7 @@ func TestReconcile(t *testing.T) { t.Fatal(err) } - if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)); err != nil { t.Errorf("expected no error. 
Got error %v", err) } if len(clients.Kube.Actions()) == 0 { @@ -1533,88 +1542,6 @@ func TestReconcile_SetsStartTime(t *testing.T) { } } -func TestReconcile_SortTaskRunStatusSteps(t *testing.T) { - taskRun := tb.TaskRun("test-taskrun", tb.TaskRunNamespace("foo"), tb.TaskRunSpec( - tb.TaskRunTaskRef(taskMultipleSteps.Name)), - tb.TaskRunStatus( - tb.PodName("the-pod"), - ), - ) - - // The order of the container statuses has been shuffled, not aligning with the order of the - // spec steps of the Task any more. After Reconcile is called, we should see the order of status - // steps in TaksRun has been converted to the same one as in spec steps of the Task. - d := test.Data{ - TaskRuns: []*v1beta1.TaskRun{taskRun}, - Tasks: []*v1beta1.Task{taskMultipleSteps}, - Pods: []*corev1.Pod{{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: "the-pod", - }, - Status: corev1.PodStatus{ - Phase: corev1.PodSucceeded, - ContainerStatuses: []corev1.ContainerStatus{{ - Name: "step-nop", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, { - Name: "step-x-step", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, { - Name: "step-v-step", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, { - Name: "step-z-step", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }}, - }, - }}, - } - testAssets, cancel := getTaskRunController(t, d) - defer cancel() - if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { - t.Errorf("expected no error reconciling valid TaskRun but got %v", err) - } - - newTr, err := testAssets.Clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Expected TaskRun %s to exist but instead got error when 
getting it: %v", taskRun.Name, err) - } - verifyTaskRunStatusStep(t, newTr) -} - -func verifyTaskRunStatusStep(t *testing.T, taskRun *v1beta1.TaskRun) { - actualStepOrder := []string{} - for _, state := range taskRun.Status.Steps { - actualStepOrder = append(actualStepOrder, state.Name) - } - expectedStepOrder := []string{} - for _, state := range taskMultipleSteps.Spec.Steps { - expectedStepOrder = append(expectedStepOrder, state.Name) - } - // Add a nop in the end. This may be removed in future. - expectedStepOrder = append(expectedStepOrder, "nop") - if d := cmp.Diff(expectedStepOrder, actualStepOrder); d != "" { - t.Errorf("The status steps in TaksRun doesn't match the spec steps in Task %s", diff.PrintWantGot(d)) - } -} - func TestReconcile_DoesntChangeStartTime(t *testing.T) { startTime := time.Date(2000, 1, 1, 1, 1, 1, 1, time.UTC) taskRun := tb.TaskRun("test-taskrun", tb.TaskRunNamespace("foo"), tb.TaskRunSpec( @@ -1690,7 +1617,7 @@ func TestReconcileInvalidTaskRuns(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients - reconcileErr := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)) + reconcileErr := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)) // When a TaskRun is invalid and can't run, we return a permanent error because // a regular error will tell the Reconciler to keep trying to reconcile; instead we want to stop @@ -1750,7 +1677,7 @@ func TestReconcilePodFetchError(t *testing.T) { return true, nil, errors.New("induce failure fetching pods") }) - if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err == nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err == nil { t.Fatal("expected error when reconciling a Task for which we couldn't get the corresponding Pod but got nil") } } @@ -1808,7 +1735,7 @@ func TestReconcilePodUpdateStatus(t *testing.T) { c := testAssets.Controller clients := testAssets.Clients - if err := 
c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when Reconcile() : %v", err) } newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) @@ -1844,7 +1771,7 @@ func TestReconcilePodUpdateStatus(t *testing.T) { // lister cache is update to reflect the result of the previous Reconcile. testAssets.Informers.TaskRun.Informer().GetIndexer().Add(newTr) - if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when Reconcile(): %v", err) } @@ -1894,7 +1821,7 @@ func TestReconcileOnCompletedTaskRun(t *testing.T) { c := testAssets.Controller clients := testAssets.Clients - if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) } newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) @@ -1926,7 +1853,7 @@ func TestReconcileOnCancelledTaskRun(t *testing.T) { c := testAssets.Controller clients := testAssets.Clients - if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) } newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) @@ -2041,7 +1968,7 @@ func TestReconcileTimeouts(t *testing.T) { c := testAssets.Controller clients := testAssets.Clients - if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil 
{ + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)); err != nil { t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) } newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{}) @@ -2110,6 +2037,12 @@ func TestHandlePodCreationError(t *testing.T) { expectedType: apis.ConditionSucceeded, expectedStatus: corev1.ConditionUnknown, expectedReason: podconvert.ReasonExceededResourceQuota, + }, { + description: "taskrun validation failed", + err: errors.New("TaskRun validation failed"), + expectedType: apis.ConditionSucceeded, + expectedStatus: corev1.ConditionFalse, + expectedReason: podconvert.ReasonFailedValidation, }, { description: "errors other than exceeded quota fail the taskrun", err: errors.New("this is a fatal error"), @@ -2298,7 +2231,7 @@ func TestReconcileCloudEvents(t *testing.T) { t.Fatal(err) } - if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)); err != nil { t.Errorf("expected no error. 
Got error %v", err) } @@ -2315,277 +2248,6 @@ func TestReconcileCloudEvents(t *testing.T) { } } -func TestUpdateTaskRunResourceResult(t *testing.T) { - for _, c := range []struct { - desc string - pod corev1.Pod - taskRunStatus *v1beta1.TaskRunStatus - want []resourcev1alpha1.PipelineResourceResult - }{{ - desc: "image resource updated", - pod: corev1.Pod{ - Status: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{{ - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Message: `[{"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}}]`, - }, - }, - }}, - }, - }, - want: []resourcev1alpha1.PipelineResourceResult{{ - Key: "digest", - Value: "sha256:1234", - ResourceRef: resourcev1alpha1.PipelineResourceRef{Name: "source-image"}, - }}, - }} { - t.Run(c.desc, func(t *testing.T) { - names.TestingSeed() - tr := &v1beta1.TaskRun{} - tr.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - }) - if err := updateTaskRunResourceResult(tr, c.pod); err != nil { - t.Errorf("updateTaskRunResourceResult: %s", err) - } - if d := cmp.Diff(c.want, tr.Status.ResourcesResult); d != "" { - t.Errorf("updateTaskRunResourceResult %s", diff.PrintWantGot(d)) - } - }) - } -} - -func TestUpdateTaskRunResult(t *testing.T) { - for _, c := range []struct { - desc string - pod corev1.Pod - taskRunStatus *v1beta1.TaskRunStatus - wantResults []v1beta1.TaskRunResult - want []resourcev1alpha1.PipelineResourceResult - }{{ - desc: "test result with pipeline result", - pod: corev1.Pod{ - Status: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{{ - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Message: `[{"key":"resultName","value":"resultValue", "type": "TaskRunResult"}, {"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}, "type": "PipelineResourceResult"}]`, - }, - }, - }}, - }, - }, - wantResults: 
[]v1beta1.TaskRunResult{{ - Name: "resultName", - Value: "resultValue", - }}, - want: []resourcev1alpha1.PipelineResourceResult{{ - Key: "digest", - Value: "sha256:1234", - ResourceRef: resourcev1alpha1.PipelineResourceRef{Name: "source-image"}, - ResultType: "PipelineResourceResult", - }}, - }} { - t.Run(c.desc, func(t *testing.T) { - names.TestingSeed() - tr := &v1beta1.TaskRun{} - tr.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - }) - if err := updateTaskRunResourceResult(tr, c.pod); err != nil { - t.Errorf("updateTaskRunResourceResult: %s", err) - } - if d := cmp.Diff(c.wantResults, tr.Status.TaskRunResults); d != "" { - t.Errorf("updateTaskRunResourceResult TaskRunResults %s", diff.PrintWantGot(d)) - } - if d := cmp.Diff(c.want, tr.Status.ResourcesResult); d != "" { - t.Errorf("updateTaskRunResourceResult ResourcesResult %s", diff.PrintWantGot(d)) - } - }) - } -} - -func TestUpdateTaskRunResult2(t *testing.T) { - for _, c := range []struct { - desc string - pod corev1.Pod - taskRunStatus *v1beta1.TaskRunStatus - wantResults []v1beta1.TaskRunResult - want []resourcev1alpha1.PipelineResourceResult - }{{ - desc: "test result with pipeline result - no result type", - pod: corev1.Pod{ - Status: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{{ - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Message: `[{"key":"resultName","value":"resultValue", "type": "TaskRunResult"}, {"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}}]`, - }, - }, - }}, - }, - }, - wantResults: []v1beta1.TaskRunResult{{ - Name: "resultName", - Value: "resultValue", - }}, - want: []resourcev1alpha1.PipelineResourceResult{{ - Key: "digest", - Value: "sha256:1234", - ResourceRef: resourcev1alpha1.PipelineResourceRef{Name: "source-image"}, - }}, - }} { - t.Run(c.desc, func(t *testing.T) { - names.TestingSeed() - tr := &v1beta1.TaskRun{} - 
tr.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - }) - if err := updateTaskRunResourceResult(tr, c.pod); err != nil { - t.Errorf("updateTaskRunResourceResult: %s", err) - } - if d := cmp.Diff(c.wantResults, tr.Status.TaskRunResults); d != "" { - t.Errorf("updateTaskRunResourceResult %s", diff.PrintWantGot(d)) - } - if d := cmp.Diff(c.want, tr.Status.ResourcesResult); d != "" { - t.Errorf("updateTaskRunResourceResult %s", diff.PrintWantGot(d)) - } - }) - } -} - -func TestUpdateTaskRunResultTwoResults(t *testing.T) { - for _, c := range []struct { - desc string - pod corev1.Pod - taskRunStatus *v1beta1.TaskRunStatus - want []v1beta1.TaskRunResult - }{{ - desc: "two test results", - pod: corev1.Pod{ - Status: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{{ - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Message: `[{"key":"resultNameOne","value":"resultValueOne", "type": "TaskRunResult"},{"key":"resultNameTwo","value":"resultValueTwo", "type": "TaskRunResult"}]`, - }, - }, - }, { - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Message: `[{"key":"resultNameOne","value":"resultValueThree", "type": "TaskRunResult"},{"key":"resultNameTwo","value":"resultValueTwo", "type": "TaskRunResult"}]`, - }, - }, - }}, - }, - }, - want: []v1beta1.TaskRunResult{{ - Name: "resultNameOne", - Value: "resultValueThree", - }, { - Name: "resultNameTwo", - Value: "resultValueTwo", - }}, - }} { - t.Run(c.desc, func(t *testing.T) { - names.TestingSeed() - tr := &v1beta1.TaskRun{} - tr.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - }) - if err := updateTaskRunResourceResult(tr, c.pod); err != nil { - t.Errorf("updateTaskRunResourceResult: %s", err) - } - if d := cmp.Diff(c.want, tr.Status.TaskRunResults); d != "" { - t.Errorf("updateTaskRunResourceResult %s", diff.PrintWantGot(d)) - } - }) - } -} - 
-func TestUpdateTaskRunResultWhenTaskFailed(t *testing.T) { - for _, c := range []struct { - desc string - podStatus corev1.PodStatus - taskRunStatus *v1beta1.TaskRunStatus - wantResults []v1beta1.TaskRunResult - want []resourcev1alpha1.PipelineResourceResult - }{{ - desc: "update task results when task fails", - podStatus: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{{ - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Message: `[{"key":"resultName","value":"resultValue", "type": "TaskRunResult"}, {"name":"source-image","digest":"sha256:1234"}]`, - }, - }, - }}, - }, - taskRunStatus: &v1beta1.TaskRunStatus{ - Status: duckv1beta1.Status{Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionFalse, - }}}, - }, - wantResults: nil, - want: nil, - }} { - t.Run(c.desc, func(t *testing.T) { - names.TestingSeed() - if d := cmp.Diff(c.want, c.taskRunStatus.ResourcesResult); d != "" { - t.Errorf("updateTaskRunResourceResult resources %s", diff.PrintWantGot(d)) - } - if d := cmp.Diff(c.wantResults, c.taskRunStatus.TaskRunResults); d != "" { - t.Errorf("updateTaskRunResourceResult results %s", diff.PrintWantGot(d)) - } - }) - } -} - -func TestUpdateTaskRunResourceResult_Errors(t *testing.T) { - for _, c := range []struct { - desc string - pod corev1.Pod - taskRunStatus *v1beta1.TaskRunStatus - want []resourcev1alpha1.PipelineResourceResult - }{{ - desc: "image resource exporter with malformed json output", - pod: corev1.Pod{ - Status: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{{ - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Message: `MALFORMED JSON{"digest":"sha256:1234"}`, - }, - }, - }}, - }, - }, - taskRunStatus: &v1beta1.TaskRunStatus{ - Status: duckv1beta1.Status{Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - }}}, - }, - want: nil, - }} { - t.Run(c.desc, func(t *testing.T) { - 
names.TestingSeed() - if err := updateTaskRunResourceResult(&v1beta1.TaskRun{Status: *c.taskRunStatus}, c.pod); err == nil { - t.Error("Expected error, got nil") - } - if d := cmp.Diff(c.want, c.taskRunStatus.ResourcesResult); d != "" { - t.Errorf("updateTaskRunResourceResult %s", diff.PrintWantGot(d)) - } - }) - } -} - func TestReconcile_Single_SidecarState(t *testing.T) { runningState := corev1.ContainerStateRunning{StartedAt: metav1.Time{Time: time.Now()}} taskRun := tb.TaskRun("test-taskrun-sidecars", @@ -2848,6 +2510,96 @@ func TestReconcileInvalidDefaultWorkspace(t *testing.T) { } } +// TestReconcileValidDefaultWorkspaceOmittedOptionalWorkspace tests a reconcile +// of a TaskRun that has omitted a Workspace that the Task has marked as optional +// with a Default TaskRun workspace defined. The default workspace should not be +// injected in place of the omitted optional workspace. +func TestReconcileValidDefaultWorkspaceOmittedOptionalWorkspace(t *testing.T) { + optionalWorkspaceMountPath := "/foo/bar/baz" + taskWithOptionalWorkspace := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-task-with-optional-workspace", + Namespace: "default", + }, + Spec: v1beta1.TaskSpec{ + Workspaces: []v1beta1.WorkspaceDeclaration{{ + Name: "optional-ws", + MountPath: optionalWorkspaceMountPath, + Optional: true, + }}, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "simple-step", + Image: "foo", + Command: []string{"/mycmd"}, + }}}, + }, + } + taskRunOmittingWorkspace := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-taskrun", + Namespace: "default", + }, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: "test-task-with-optional-workspace", + }, + }, + } + + d := test.Data{ + Tasks: []*v1beta1.Task{taskWithOptionalWorkspace}, + TaskRuns: []*v1beta1.TaskRun{taskRunOmittingWorkspace}, + } + + d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetDefaultsConfigName(), 
Namespace: system.GetNamespace()}, + Data: map[string]string{ + "default-task-run-workspace-binding": "emptyDir: {}", + }, + }) + names.TestingSeed() + testAssets, cancel := getTaskRunController(t, d) + defer cancel() + clients := testAssets.Clients + + t.Logf("Creating SA %s in %s", "default", "foo") + if _, err := clients.Kube.CoreV1().ServiceAccounts("default").Create(&corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + }, + }); err != nil { + t.Fatal(err) + } + + if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRunOmittingWorkspace)); err != nil { + t.Errorf("Unexpected reconcile error for TaskRun %q: %v", taskRunOmittingWorkspace.Name, err) + } + + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRunOmittingWorkspace.Namespace).Get(taskRunOmittingWorkspace.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting TaskRun %q: %v", taskRunOmittingWorkspace.Name, err) + } + + pod, err := clients.Kube.CoreV1().Pods(taskRunOmittingWorkspace.Namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting Pod for TaskRun %q: %v", taskRunOmittingWorkspace.Name, err) + } + for _, c := range pod.Spec.Containers { + for _, vm := range c.VolumeMounts { + if vm.MountPath == optionalWorkspaceMountPath { + t.Errorf("Workspace with VolumeMount at %s should not have been found for Optional Workspace but was injected by Default TaskRun Workspace", optionalWorkspaceMountPath) + } + } + } + + for _, c := range tr.Status.Conditions { + if c.Type == apis.ConditionSucceeded && c.Status == corev1.ConditionFalse { + t.Errorf("Unexpected unsuccessful condition for TaskRun %q:\n%#v", taskRunOmittingWorkspace.Name, tr.Status.Conditions) + } + } +} + func TestReconcileTaskResourceResolutionAndValidation(t *testing.T) { for _, tt := range []struct { desc string @@ -2911,7 +2663,7 @@ func TestReconcileTaskResourceResolutionAndValidation(t 
*testing.T) { clients := testAssets.Clients c := testAssets.Controller - reconcileErr := c.Reconciler.Reconcile(context.Background(), getRunName(tt.d.TaskRuns[0])) + reconcileErr := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tt.d.TaskRuns[0])) // When a TaskRun is invalid and can't run, we return a permanent error because // a regular error will tell the Reconciler to keep trying to reconcile; instead we want to stop @@ -3364,3 +3116,68 @@ func Test_storeTaskSpec(t *testing.T) { t.Fatalf(diff.PrintWantGot(d)) } } + +func TestWillOverwritePodAffinity(t *testing.T) { + affinity := &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + Namespaces: []string{"tekton-pipelines"}, + }, + }, + }, + } + affinityAssistantName := "pipeline.tekton.dev/affinity-assistant" + + tcs := []struct { + name string + hasTemplateAffinity bool + annotations map[string]string + expected bool + }{ + { + name: "no settings", + expected: false, + }, + { + name: "no PodTemplate affinity set", + annotations: map[string]string{ + affinityAssistantName: "affinity-assistant", + }, + expected: false, + }, + { + name: "affinity assistant not set", + hasTemplateAffinity: true, + expected: false, + }, + { + name: "PodTemplate affinity will be overwritten with affinity assistant", + hasTemplateAffinity: true, + annotations: map[string]string{ + affinityAssistantName: "affinity-assistant", + }, + expected: true, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + tr := &v1beta1.TaskRun{ + Spec: v1beta1.TaskRunSpec{ + PodTemplate: &v1beta1.PodTemplate{}, + }, + ObjectMeta: metav1.ObjectMeta{ + Annotations: tc.annotations, + }, + } + if tc.hasTemplateAffinity { + tr.Spec.PodTemplate.Affinity = affinity + } + + if got := willOverwritePodSetAffinity(tr); got != tc.expected { + t.Errorf("expected: %t got: %t", tc.expected, got) + } + }) + } +} diff --git a/pkg/substitution/substitution.go 
b/pkg/substitution/substitution.go index 7f63d9162df..feddb442a49 100644 --- a/pkg/substitution/substitution.go +++ b/pkg/substitution/substitution.go @@ -44,6 +44,22 @@ func ValidateVariable(name, value, prefix, locationName, path string, vars sets. return nil } +func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError { + if vs, present := extractVariablesFromString(value, prefix); present { + for _, v := range vs { + v = strings.TrimSuffix(v, "[*]") + if !vars.Has(v) { + return &apis.FieldError{ + Message: fmt.Sprintf("non-existent variable in %q", value), + // Empty path is required to make the `ViaField`, … work + Paths: []string{""}, + } + } + } + } + return nil +} + // Verifies that variables matching the relevant string expressions do not reference any of the names present in vars. func ValidateVariableProhibited(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { if vs, present := extractVariablesFromString(value, prefix); present { @@ -60,6 +76,22 @@ func ValidateVariableProhibited(name, value, prefix, locationName, path string, return nil } +func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { + if vs, present := extractVariablesFromString(value, prefix); present { + for _, v := range vs { + v = strings.TrimSuffix(v, "[*]") + if vars.Has(v) { + return &apis.FieldError{ + Message: fmt.Sprintf("variable type invalid in %q", value), + // Empty path is required to make the `ViaField`, … work + Paths: []string{""}, + } + } + } + } + return nil +} + // Verifies that variables matching the relevant string expressions are completely isolated if present. 
func ValidateVariableIsolated(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { if vs, present := extractVariablesFromString(value, prefix); present { @@ -79,6 +111,25 @@ func ValidateVariableIsolated(name, value, prefix, locationName, path string, va return nil } +func ValidateVariableIsolatedP(value, prefix string, vars sets.String) *apis.FieldError { + if vs, present := extractVariablesFromString(value, prefix); present { + firstMatch, _ := extractExpressionFromString(value, prefix) + for _, v := range vs { + v = strings.TrimSuffix(v, "[*]") + if vars.Has(v) { + if len(value) != len(firstMatch) { + return &apis.FieldError{ + Message: fmt.Sprintf("variable is not properly isolated in %q", value), + // Empty path is required to make the `ViaField`, … work + Paths: []string{""}, + } + } + } + } + } + return nil +} + // Extract a the first full string expressions found (e.g "$(input.params.foo)"). Return // "" and false if nothing is found. func extractExpressionFromString(s, prefix string) (string, bool) { diff --git a/pkg/termination/parse.go b/pkg/termination/parse.go index 8b637df9cd8..51d9057a068 100644 --- a/pkg/termination/parse.go +++ b/pkg/termination/parse.go @@ -21,21 +21,32 @@ import ( "sort" v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "go.uber.org/zap" ) // ParseMessage parses a termination message as results. // // If more than one item has the same key, only the latest is returned. Items // are sorted by their key. 
-func ParseMessage(msg string) ([]v1beta1.PipelineResourceResult, error) { +func ParseMessage(logger *zap.SugaredLogger, msg string) ([]v1beta1.PipelineResourceResult, error) { if msg == "" { return nil, nil } + var r []v1beta1.PipelineResourceResult if err := json.Unmarshal([]byte(msg), &r); err != nil { return nil, fmt.Errorf("parsing message json: %v", err) } + for i, rr := range r { + if rr == (v1beta1.PipelineResourceResult{}) { + //Erase incorrect result + r[i] = r[len(r)-1] + r = r[:len(r)-1] + logger.Errorf("termination message contains non taskrun or pipelineresource result keys") + } + } + // Remove duplicates (last one wins) and sort by key. m := map[string]v1beta1.PipelineResourceResult{} for _, rr := range r { diff --git a/pkg/termination/parse_test.go b/pkg/termination/parse_test.go index a128178fb95..dae81ac510c 100644 --- a/pkg/termination/parse_test.go +++ b/pkg/termination/parse_test.go @@ -16,11 +16,13 @@ limitations under the License. package termination import ( + "strings" "testing" "github.com/google/go-cmp/cmp" v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/test/diff" + "knative.dev/pkg/logging" ) func TestParseMessage(t *testing.T) { @@ -69,7 +71,8 @@ func TestParseMessage(t *testing.T) { }}, }} { t.Run(c.desc, func(t *testing.T) { - got, err := ParseMessage(c.msg) + logger, _ := logging.NewLogger("", "status") + got, err := ParseMessage(logger, c.msg) if err != nil { t.Fatalf("ParseMessage: %v", err) } @@ -80,8 +83,23 @@ func TestParseMessage(t *testing.T) { } } -func TestParseMessage_Invalid(t *testing.T) { - if _, err := ParseMessage("INVALID NOT JSON"); err == nil { - t.Error("Expected error parsing invalid JSON, got nil") +func TestParseMessageInvalidMessage(t *testing.T) { + for _, c := range []struct { + desc, msg, wantError string + }{{ + desc: "invalid JSON", + msg: "invalid JSON", + wantError: "parsing message json", + }} { + t.Run(c.desc, func(t *testing.T) { + logger, _ := 
logging.NewLogger("", "status") + _, err := ParseMessage(logger, c.msg) + if err == nil { + t.Errorf("Expected error parsing incorrect termination message, got nil") + } + if !strings.HasPrefix(err.Error(), c.wantError) { + t.Errorf("Expected different error: %s", c.wantError) + } + }) } } diff --git a/pkg/termination/write_test.go b/pkg/termination/write_test.go index 77fbcb737ef..7d5ca1ad76d 100644 --- a/pkg/termination/write_test.go +++ b/pkg/termination/write_test.go @@ -62,7 +62,7 @@ func TestExistingFile(t *testing.T) { if fileContents, err := ioutil.ReadFile(tmpFile.Name()); err != nil { logger.Fatalf("Unexpected error reading %v: %v", tmpFile.Name(), err) } else { - want := `[{"key":"key1","value":"hello","resourceRef":{}},{"key":"key2","value":"world","resourceRef":{}}]` + want := `[{"key":"key1","value":"hello"},{"key":"key2","value":"world"}]` if d := cmp.Diff(want, string(fileContents)); d != "" { t.Fatalf("Diff %s", diff.PrintWantGot(d)) } diff --git a/pkg/workspace/apply.go b/pkg/workspace/apply.go index ca5579d143c..002bf26e819 100644 --- a/pkg/workspace/apply.go +++ b/pkg/workspace/apply.go @@ -39,10 +39,11 @@ func (nvm nameVolumeMap) setVolumeSource(workspaceName string, volumeName string } } -// GetVolumes will return a dictionary where the keys are the names of the workspaces bound in -// wb and the value is the Volume to use. If the same Volume is bound twice, the resulting volumes -// will both have the same name to prevent the same Volume from being attached to a pod twice. -func GetVolumes(wb []v1beta1.WorkspaceBinding) map[string]corev1.Volume { +// CreateVolumes will return a dictionary where the keys are the names of the workspaces bound in +// wb and the value is a newly-created Volume to use. If the same Volume is bound twice, the +// resulting volumes will both have the same name to prevent the same Volume from being attached +// to a pod twice. The names of the returned volumes will be a short random string starting "ws-". 
+func CreateVolumes(wb []v1beta1.WorkspaceBinding) map[string]corev1.Volume { pvcs := map[string]corev1.Volume{} v := make(nameVolumeMap) for _, w := range wb { @@ -84,13 +85,12 @@ func getDeclaredWorkspace(name string, w []v1beta1.WorkspaceDeclaration) (*v1bet // Apply will update the StepTemplate and Volumes declaration in ts so that the workspaces // specified through wb combined with the declared workspaces in ts will be available for // all containers in the resulting pod. -func Apply(ts v1beta1.TaskSpec, wb []v1beta1.WorkspaceBinding) (*v1beta1.TaskSpec, error) { +func Apply(ts v1beta1.TaskSpec, wb []v1beta1.WorkspaceBinding, v map[string]corev1.Volume) (*v1beta1.TaskSpec, error) { // If there are no bound workspaces, we don't need to do anything if len(wb) == 0 { return &ts, nil } - v := GetVolumes(wb) addedVolumes := sets.NewString() // Initialize StepTemplate if it hasn't been already diff --git a/pkg/workspace/apply_test.go b/pkg/workspace/apply_test.go index 3f6e34733e2..19291753551 100644 --- a/pkg/workspace/apply_test.go +++ b/pkg/workspace/apply_test.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -func TestGetVolumes(t *testing.T) { +func TestCreateVolumes(t *testing.T) { names.TestingSeed() for _, tc := range []struct { name string @@ -185,7 +185,7 @@ func TestGetVolumes(t *testing.T) { }, }} { t.Run(tc.name, func(t *testing.T) { - v := workspace.GetVolumes(tc.workspaces) + v := workspace.CreateVolumes(tc.workspaces) if d := cmp.Diff(tc.expectedVolumes, v); d != "" { t.Errorf("Didn't get expected volumes from bindings %s", diff.PrintWantGot(d)) } @@ -511,7 +511,8 @@ func TestApply(t *testing.T) { }, }} { t.Run(tc.name, func(t *testing.T) { - ts, err := workspace.Apply(tc.ts, tc.workspaces) + vols := workspace.CreateVolumes(tc.workspaces) + ts, err := workspace.Apply(tc.ts, tc.workspaces, vols) if err != nil { t.Fatalf("Did not expect error but got %v", err) } diff --git a/pkg/workspace/validate.go b/pkg/workspace/validate.go index 
18ba1535451..570934aad25 100644 --- a/pkg/workspace/validate.go +++ b/pkg/workspace/validate.go @@ -21,31 +21,43 @@ import ( "fmt" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "github.com/tektoncd/pipeline/pkg/list" + "k8s.io/apimachinery/pkg/util/sets" ) -// ValidateBindings will return an error if the bound workspaces in wb don't satisfy the declared -// workspaces in w. -func ValidateBindings(w []v1beta1.WorkspaceDeclaration, wb []v1beta1.WorkspaceBinding) error { +// ValidateBindings will return an error if the bound workspaces in binds don't satisfy the declared +// workspaces in decls. +func ValidateBindings(decls []v1beta1.WorkspaceDeclaration, binds []v1beta1.WorkspaceBinding) error { // This will also be validated at webhook time but in case the webhook isn't invoked for some // reason we'll invoke the same validation here. - for _, b := range wb { + for _, b := range binds { if err := b.Validate(context.Background()); err != nil { return fmt.Errorf("binding %q is invalid: %v", b.Name, err) } } - declNames := make([]string, len(w)) - for i := range w { - declNames[i] = w[i].Name + declNames := sets.NewString() + bindNames := sets.NewString() + for _, decl := range decls { + declNames.Insert(decl.Name) } - bindNames := make([]string, len(wb)) - for i := range wb { - bindNames[i] = wb[i].Name + for _, bind := range binds { + bindNames.Insert(bind.Name) } - if err := list.IsSame(declNames, bindNames); err != nil { - return fmt.Errorf("bound workspaces did not match declared workspaces: %v", err) + + for _, decl := range decls { + if decl.Optional { + continue + } + if !bindNames.Has(decl.Name) { + return fmt.Errorf("declared workspace %q is required but has not been bound", decl.Name) + } } + for _, bind := range binds { + if !declNames.Has(bind.Name) { + return fmt.Errorf("workspace binding %q does not match any declared workspace", bind.Name) + } + } + return nil } diff --git a/pkg/workspace/validate_test.go b/pkg/workspace/validate_test.go 
index 7d115a93136..31f0e534fcc 100644 --- a/pkg/workspace/validate_test.go +++ b/pkg/workspace/validate_test.go @@ -58,6 +58,23 @@ func TestValidateBindingsValid(t *testing.T) { Name: "beth", EmptyDir: &corev1.EmptyDirVolumeSource{}, }}, + }, { + name: "Included optional workspace", + declarations: []v1alpha1.WorkspaceDeclaration{{ + Name: "beth", + Optional: true, + }}, + bindings: []v1alpha1.WorkspaceBinding{{ + Name: "beth", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + }, { + name: "Omitted optional workspace", + declarations: []v1alpha1.WorkspaceDeclaration{{ + Name: "beth", + Optional: true, + }}, + bindings: []v1alpha1.WorkspaceBinding{}, }} { t.Run(tc.name, func(t *testing.T) { if err := ValidateBindings(tc.declarations, tc.bindings); err != nil { diff --git a/tekton/ko/Dockerfile b/tekton/ko/Dockerfile index 7df82733aa0..9cecc787144 100644 --- a/tekton/ko/Dockerfile +++ b/tekton/ko/Dockerfile @@ -1,4 +1,4 @@ -FROM google/cloud-sdk:latest +FROM gcr.io/google.com/cloudsdktool/cloud-sdk:latest # Install golang RUN curl https://dl.google.com/go/go1.14.linux-amd64.tar.gz > go1.14.tar.gz diff --git a/tekton/publish.yaml b/tekton/publish.yaml index 4ed3d948535..2549b9dcadc 100644 --- a/tekton/publish.yaml +++ b/tekton/publish.yaml @@ -1,4 +1,4 @@ -apiVersion: tekton.dev/v1alpha1 +apiVersion: tekton.dev/v1beta1 kind: Task metadata: name: publish-tekton-pipelines @@ -56,7 +56,7 @@ spec: - /kaniko/executor args: - --dockerfile=/workspace/go/src/github.com/tektoncd/pipeline/images/Dockerfile - - --destination=$(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtBaseImage.url) + - --destination=$(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtBaseImage.url) - --context=/workspace/go/src/github.com/tektoncd/pipeline volumeMounts: @@ -76,8 +76,8 @@ spec: # This matches the value configured in .ko.yaml defaultBaseImage: gcr.io/distroless/static:nonroot baseImageOverrides: - 
$(inputs.params.pathToProject)/$(outputs.resources.builtCredsInitImage.url): $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/build-base:latest - $(inputs.params.pathToProject)/$(outputs.resources.builtGitInitImage.url): $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/build-base:latest + $(params.pathToProject)/$(resources.outputs.builtCredsInitImage.url): $(params.imageRegistry)/$(params.pathToProject)/build-base:latest + $(params.pathToProject)/$(resources.outputs.builtGitInitImage.url): $(params.imageRegistry)/$(params.pathToProject)/build-base:latest # These match values configured in .ko.yaml $(inputs.params.pathToProject)/$(outputs.resources.builtEntrypointImage.url): gcr.io/distroless/base:debug-nonroot @@ -100,13 +100,13 @@ spec: command: ["mkdir"] args: - "-p" - - "/workspace/output/bucket/previous/$(inputs.params.versionTag)/" + - "/workspace/output/bucket/previous/$(params.versionTag)/" - name: run-ko image: gcr.io/tekton-releases/dogfooding/ko-gcloud:latest env: - name: KO_DOCKER_REPO - value: $(inputs.params.imageRegistry) + value: $(params.imageRegistry) - name: GOPATH value: /workspace/go - name: GO111MODULE @@ -147,10 +147,10 @@ spec: # Rewrite "devel" to inputs.params.versionTag sed -i -e 's/\(pipeline.tekton.dev\/release\): "devel"/\1: "$(inputs.params.versionTag)"/g' -e 's/\(app.kubernetes.io\/version\): "devel"/\1: "$(inputs.params.versionTag)"/g' -e 's/\(version\): "devel"/\1: "$(inputs.params.versionTag)"/g' -e 's/\("-version"\), "devel"/\1, "$(inputs.params.versionTag)"/g' /workspace/go/src/github.com/tektoncd/pipeline/config/*.yaml - OUTPUT_BUCKET_RELEASE_DIR="/workspace/output/bucket/previous/$(inputs.params.versionTag)" + OUTPUT_BUCKET_RELEASE_DIR="/workspace/output/bucket/previous/$(params.versionTag)" # Publish images and create release.yaml - ko resolve --preserve-import-paths -t $(inputs.params.versionTag) -f /workspace/go/src/github.com/tektoncd/pipeline/config/ > $OUTPUT_BUCKET_RELEASE_DIR/release.yaml + ko 
resolve --preserve-import-paths -t $(params.versionTag) -f /workspace/go/src/github.com/tektoncd/pipeline/config/ > $OUTPUT_BUCKET_RELEASE_DIR/release.yaml # Publish images and create release.notags.yaml # This is useful if your container runtime doesn't support the `image-reference:tag@digest` notation # This is currently the case for `cri-o` (and most likely others) @@ -166,36 +166,36 @@ spec: #!/bin/sh set -ex - if [[ "$(inputs.params.releaseAsLatest)" == "true" ]] + if [[ "$(params.releaseAsLatest)" == "true" ]] then mkdir -p "/workspace/output/bucket/latest/" - OUTPUT_BUCKET_RELEASE_DIR="/workspace/output/bucket/previous/$(inputs.params.versionTag)" + OUTPUT_BUCKET_RELEASE_DIR="/workspace/output/bucket/previous/$(params.versionTag)" OUTPUT_BUCKET_LATEST_DIR="/workspace/output/bucket/latest" cp "$OUTPUT_BUCKET_RELEASE_DIR/release.yaml" "$OUTPUT_BUCKET_LATEST_DIR/release.yaml" cp "$OUTPUT_BUCKET_RELEASE_DIR/release.notags.yaml" "$OUTPUT_BUCKET_LATEST_DIR/release.notags.yaml" fi - name: tag-images - image: google/cloud-sdk + image: gcr.io/google.com/cloudsdktool/cloud-sdk script: | #!/usr/bin/env bash set -ex REGIONS=(us eu asia) IMAGES=( - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtEntrypointImage.url):$(inputs.params.versionTag) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtNopImage.url):$(inputs.params.versionTag) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtKubeconfigWriterImage.url):$(inputs.params.versionTag) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtCredsInitImage.url):$(inputs.params.versionTag) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtGitInitImage.url):$(inputs.params.versionTag) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtControllerImage.url):$(inputs.params.versionTag) - 
$(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtWebhookImage.url):$(inputs.params.versionTag) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtDigestExporterImage.url):$(inputs.params.versionTag) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtPullRequestInitImage.url):$(inputs.params.versionTag) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtGcsFetcherImage.url):$(inputs.params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtEntrypointImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtNopImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtKubeconfigWriterImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtCredsInitImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtGitInitImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtControllerImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtWebhookImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtDigestExporterImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtPullRequestInitImage.url):$(params.versionTag) + $(params.imageRegistry)/$(params.pathToProject)/$(resources.outputs.builtGcsFetcherImage.url):$(params.versionTag) ) # Parse the built images from the release.yaml generated by ko - BUILT_IMAGES=( $(/workspace/go/src/github.com/tektoncd/pipeline/tekton/koparse/koparse.py --path /workspace/output/bucket/previous/$(inputs.params.versionTag)/release.yaml --base 
$(inputs.params.imageRegistry)/$(inputs.params.pathToProject) --images ${IMAGES[@]}) ) + BUILT_IMAGES=( $(/workspace/go/src/github.com/tektoncd/pipeline/tekton/koparse/koparse.py --path /workspace/output/bucket/previous/$(params.versionTag)/release.yaml --base $(params.imageRegistry)/$(params.pathToProject) --images ${IMAGES[@]}) ) # Auth with account credentials gcloud auth activate-service-account --key-file=/secret/release.json @@ -207,21 +207,21 @@ spec: IMAGE_WITHOUT_SHA_AND_TAG=${IMAGE_WITHOUT_SHA%%:*} IMAGE_WITH_SHA=${IMAGE_WITHOUT_SHA_AND_TAG}@${IMAGE##*@} - if [[ "$(inputs.params.releaseAsLatest)" == "true" ]] + if [[ "$(params.releaseAsLatest)" == "true" ]] then gcloud -q container images add-tag ${IMAGE_WITH_SHA} ${IMAGE_WITHOUT_SHA_AND_TAG}:latest fi for REGION in "${REGIONS[@]}" do - if [[ "$(inputs.params.releaseAsLatest)" == "true" ]] + if [[ "$(params.releaseAsLatest)" == "true" ]] then - for TAG in "latest" $(inputs.params.versionTag) + for TAG in "latest" $(params.versionTag) do gcloud -q container images add-tag ${IMAGE_WITH_SHA} ${REGION}.${IMAGE_WITHOUT_SHA_AND_TAG}:$TAG done else - TAG="$(inputs.params.versionTag)" + TAG="$(params.versionTag)" gcloud -q container images add-tag ${IMAGE_WITH_SHA} ${REGION}.${IMAGE_WITHOUT_SHA_AND_TAG}:$TAG fi done diff --git a/tekton/release-pipeline-nightly.yaml b/tekton/release-pipeline-nightly.yaml index 46189ae0f37..7cd4b90eb54 100644 --- a/tekton/release-pipeline-nightly.yaml +++ b/tekton/release-pipeline-nightly.yaml @@ -1,4 +1,4 @@ -apiVersion: tekton.dev/v1alpha1 +apiVersion: tekton.dev/v1beta1 kind: Pipeline metadata: name: pipeline-release-nightly diff --git a/tekton/release-pipeline.yaml b/tekton/release-pipeline.yaml index a9b0eecad30..6061ade72dd 100644 --- a/tekton/release-pipeline.yaml +++ b/tekton/release-pipeline.yaml @@ -1,5 +1,5 @@ --- -apiVersion: tekton.dev/v1alpha1 +apiVersion: tekton.dev/v1beta1 kind: Pipeline metadata: name: pipeline-release diff --git a/test/artifact_bucket_test.go 
b/test/artifact_bucket_test.go index 09e6716b591..e9b271cc02d 100644 --- a/test/artifact_bucket_test.go +++ b/test/artifact_bucket_test.go @@ -73,7 +73,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { Spec: v1beta1.TaskSpec{ Steps: []v1beta1.Step{{Container: corev1.Container{ Name: "step1", - Image: "google/cloud-sdk:alpine", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk:alpine", Command: []string{"/bin/bash"}, Args: []string{"-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil mb gs://%s", bucketName)}, VolumeMounts: []corev1.VolumeMount{{ @@ -304,7 +304,7 @@ func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, buck Spec: v1beta1.TaskSpec{ Steps: []v1beta1.Step{{Container: corev1.Container{ Name: "step1", - Image: "google/cloud-sdk:alpine", + Image: "gcr.io/google.com/cloudsdktool/cloud-sdk:alpine", Command: []string{"/bin/bash"}, Args: []string{"-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil rm -r gs://%s", bucketName)}, VolumeMounts: []corev1.VolumeMount{{ diff --git a/test/cancel_test.go b/test/cancel_test.go index e894b16da71..9fb7e762caf 100644 --- a/test/cancel_test.go +++ b/test/cancel_test.go @@ -47,7 +47,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { pipelineRunName := "cancel-me" pipelineRun := &v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName}, + ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName, Namespace: namespace}, Spec: v1beta1.PipelineRunSpec{ PipelineSpec: &v1beta1.PipelineSpec{ Tasks: []v1beta1.PipelineTask{{ @@ -82,10 +82,8 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { } var wg sync.WaitGroup - var trName []string t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to be running", pipelineRunName, namespace) for _, taskrunItem := range taskrunList.Items { - trName = append(trName, taskrunItem.Name) wg.Add(1) go 
func(name string) { defer wg.Done() @@ -133,6 +131,15 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { } wg.Wait() + var trName []string + taskrunList, err = c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRunName, err) + } + for _, taskrunItem := range taskrunList.Items { + trName = append(trName, taskrunItem.Name) + } + matchKinds := map[string][]string{"PipelineRun": {pipelineRunName}, "TaskRun": trName} // Expected failure events: 1 for the pipelinerun cancel, 1 for each TaskRun expectedNumberOfEvents := 1 + len(trName) diff --git a/test/controller.go b/test/controller.go index 939b79b628f..2c4fdf12191 100644 --- a/test/controller.go +++ b/test/controller.go @@ -102,6 +102,7 @@ type Assets struct { Clients Clients Informers Informers Recorder *record.FakeRecorder + Ctx context.Context } func AddToInformer(t *testing.T, store cache.Store) func(ktesting.Action) (bool, runtime.Object, error) { diff --git a/test/examples_test.go b/test/examples_test.go index f5ecdc7931c..d66a5512793 100644 --- a/test/examples_test.go +++ b/test/examples_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "bytes" "errors" "io/ioutil" "os" @@ -33,18 +34,16 @@ import ( knativetest "knative.dev/pkg/test" ) -var ( - pipelineRunTimeout = 10 * time.Minute -) +const pipelineRunTimeout = 10 * time.Minute -const ( - DEFAULT_KO_DOCKER_REPO = `gcr.io\/christiewilson-catfactory` - DEFAULT_NAMESPACE = `namespace: default` +var ( + defaultKoDockerRepoRE = regexp.MustCompile("gcr.io/christiewilson-catfactory") + defaultNamespaceRE = regexp.MustCompile("namespace: default") ) -// GetCreatedTektonCrd parses output of an external ko invocation provided as +// getCreatedTektonCRD parses output of an external ko invocation provided as // input, as is the kind of Tekton CRD to search for (ie. 
taskrun) -func GetCreatedTektonCrd(input []byte, kind string) (string, error) { +func getCreatedTektonCRD(input []byte, kind string) (string, error) { re := regexp.MustCompile(kind + `.tekton.dev\/(.+) created`) submatch := re.FindSubmatch(input) if submatch == nil || len(submatch) < 2 { @@ -54,60 +53,52 @@ func GetCreatedTektonCrd(input []byte, kind string) (string, error) { } func waitValidatePipelineRunDone(t *testing.T, c *clients, pipelineRunName string) { - err := WaitForPipelineRunState(c, pipelineRunName, pipelineRunTimeout, Succeed(pipelineRunName), pipelineRunName) - - if err != nil { + if err := WaitForPipelineRunState(c, pipelineRunName, pipelineRunTimeout, Succeed(pipelineRunName), pipelineRunName); err != nil { t.Fatalf("Failed waiting for pipeline run done: %v", err) } - return } func waitValidateTaskRunDone(t *testing.T, c *clients, taskRunName string) { // Per test basis - err := WaitForTaskRunState(c, taskRunName, Succeed(taskRunName), taskRunName) - - if err != nil { + if err := WaitForTaskRunState(c, taskRunName, Succeed(taskRunName), taskRunName); err != nil { t.Fatalf("Failed waiting for task run done: %v", err) } - return } -// SubstituteEnv substitutes docker repos and bucket paths from the system -// environment for input to allow tests on local clusters. It also updates the -// namespace for ServiceAccounts so that they work under test -func SubstituteEnv(input []byte, namespace string) ([]byte, error) { +// substituteEnv substitutes docker repos and bucket paths from the system +// environment for input to allow tests on local clusters. It also unsets the +// namespace for ServiceAccounts so that they work under test. +func substituteEnv(input []byte, namespace string) ([]byte, error) { + // Replace the placeholder image repo with the value of the + // KO_DOCKER_REPO env var. 
val, ok := os.LookupEnv("KO_DOCKER_REPO") - var output []byte - if ok { - re := regexp.MustCompile(DEFAULT_KO_DOCKER_REPO) - output = re.ReplaceAll(input, []byte(val)) - } else { + if !ok { return nil, errors.New("KO_DOCKER_REPO is not set") } + output := defaultKoDockerRepoRE.ReplaceAll(input, []byte(val)) + + // Strip any "namespace: default"s, all resources will be created in + // the test namespace using `ko create -n` + output = defaultNamespaceRE.ReplaceAll(output, []byte("namespace: "+namespace)) - re := regexp.MustCompile(DEFAULT_NAMESPACE) - output = re.ReplaceAll(output, []byte(strings.ReplaceAll(DEFAULT_NAMESPACE, "default", namespace))) return output, nil } -// KoCreate wraps the ko binary and invokes `ko create` for input within +// koCreate wraps the ko binary and invokes `ko create` for input within // namespace -func KoCreate(input []byte, namespace string) ([]byte, error) { +func koCreate(input []byte, namespace string) ([]byte, error) { cmd := exec.Command("ko", "create", "-n", namespace, "-f", "-") - cmd.Stdin = strings.NewReader(string(input)) - - out, err := cmd.CombinedOutput() - return out, err + cmd.Stdin = bytes.NewReader(input) + return cmd.CombinedOutput() } -// DeleteClusterTask removes a single clustertask by name using provided -// clientset. Test state is used for logging. DeleteClusterTask does not wait +// deleteClusterTask removes a single clustertask by name using provided +// clientset. Test state is used for logging. 
deleteClusterTask does not wait // for the clustertask to be deleted, so it is still possible to have name // conflicts during test -func DeleteClusterTask(t *testing.T, c *clients, name string) { +func deleteClusterTask(t *testing.T, c *clients, name string) { t.Logf("Deleting clustertask %s", name) - err := c.ClusterTaskClient.Delete(name, &metav1.DeleteOptions{}) - if err != nil { + if err := c.ClusterTaskClient.Delete(name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete clustertask: %v", err) } } @@ -126,23 +117,22 @@ func exampleTest(path string, waitValidateFunc waitFunc, kind string) func(t *te defer tearDown(t, c, namespace) inputExample, err := ioutil.ReadFile(path) - if err != nil { t.Fatalf("Error reading file: %v", err) } - subbedInput, err := SubstituteEnv(inputExample, namespace) + subbedInput, err := substituteEnv(inputExample, namespace) if err != nil { t.Skipf("Couldn't substitute environment: %v", err) } - out, err := KoCreate(subbedInput, namespace) + out, err := koCreate(subbedInput, namespace) if err != nil { t.Fatalf("%s Output: %s", err, out) } - // Parse from KoCreate for now - name, err := GetCreatedTektonCrd(out, kind) + // Parse from koCreate for now + name, err := getCreatedTektonCRD(out, kind) if name == "" { // Nothing to check from ko create, this is not a taskrun or pipeline // run. 
Some examples in the directory do not directly output a TaskRun @@ -154,10 +144,10 @@ func exampleTest(path string, waitValidateFunc waitFunc, kind string) func(t *te // NOTE: If an example creates more than one clustertask, they will not all // be cleaned up - clustertask, err := GetCreatedTektonCrd(out, "clustertask") + clustertask, err := getCreatedTektonCRD(out, "clustertask") if clustertask != "" { - knativetest.CleanupOnInterrupt(func() { DeleteClusterTask(t, c, clustertask) }, t.Logf) - defer DeleteClusterTask(t, c, clustertask) + knativetest.CleanupOnInterrupt(func() { deleteClusterTask(t, c, clustertask) }, t.Logf) + defer deleteClusterTask(t, c, clustertask) } else if err != nil { t.Fatalf("Failed to get created clustertask: %v", err) } diff --git a/test/git_checkout_test.go b/test/git_checkout_test.go index f57c4f238b8..ac160446039 100644 --- a/test/git_checkout_test.go +++ b/test/git_checkout_test.go @@ -38,6 +38,8 @@ const ( // is either fetched or pulled successfully under different resource // parameters. 
func TestGitPipelineRun(t *testing.T) { + skipIfExcluded(t) + for _, tc := range []struct { name string repo string diff --git a/test/multiarch_utils.go b/test/multiarch_utils.go index c063a23e2df..67eeab25b64 100644 --- a/test/multiarch_utils.go +++ b/test/multiarch_utils.go @@ -106,7 +106,6 @@ func initExcludedTests() sets.String { "TestTaskRunPipelineRunCancel", "TestEntrypointRunningStepsInOrder", "TestGitPipelineRun", - "TestGitPipelineRunFail", "TestHelmDeployPipelineRun", "TestKanikoTaskRun", "TestPipelineRun", diff --git a/test/v1alpha1/artifact_bucket_test.go b/test/v1alpha1/artifact_bucket_test.go index 28d4ec62266..30a8661dbf5 100644 --- a/test/v1alpha1/artifact_bucket_test.go +++ b/test/v1alpha1/artifact_bucket_test.go @@ -72,7 +72,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { SecretName: bucketSecretName, }, })), - tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), + tb.Step("gcr.io/google.com/cloudsdktool/cloud-sdk:alpine", tb.StepName("step1"), tb.StepCommand("/bin/bash"), tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil mb gs://%s", bucketName)), tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), @@ -238,7 +238,7 @@ func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, buck SecretName: bucketSecretName, }, })), - tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), + tb.Step("gcr.io/google.com/cloudsdktool/cloud-sdk:alpine", tb.StepName("step1"), tb.StepCommand("/bin/bash"), tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil rm -r gs://%s", bucketName)), tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), diff --git a/test/v1alpha1/cancel_test.go b/test/v1alpha1/cancel_test.go index bccd3ebbc65..467c716a94f 100644 --- a/test/v1alpha1/cancel_test.go +++ 
b/test/v1alpha1/cancel_test.go @@ -48,7 +48,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { pipelineRunName := "cancel-me" pipelineRun := &v1alpha1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName}, + ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName, Namespace: namespace}, Spec: v1alpha1.PipelineRunSpec{ PipelineSpec: &v1alpha1.PipelineSpec{ Tasks: []v1alpha1.PipelineTask{{ @@ -83,10 +83,8 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { } var wg sync.WaitGroup - var trName []string t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to be running", pipelineRunName, namespace) for _, taskrunItem := range taskrunList.Items { - trName = append(trName, taskrunItem.Name) wg.Add(1) go func(name string) { defer wg.Done() @@ -134,6 +132,14 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { } wg.Wait() + var trName []string + taskrunList, err = c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRunName, err) + } + for _, taskrunItem := range taskrunList.Items { + trName = append(trName, taskrunItem.Name) + } matchKinds := map[string][]string{"PipelineRun": {pipelineRunName}, "TaskRun": trName} // Expected failure events: 1 for the pipelinerun cancel, 1 for each TaskRun expectedNumberOfEvents := 1 + len(trName) diff --git a/test/v1alpha1/workspace_test.go b/test/v1alpha1/workspace_test.go index a12b12ae8fb..11498effd7f 100644 --- a/test/v1alpha1/workspace_test.go +++ b/test/v1alpha1/workspace_test.go @@ -161,7 +161,7 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { t.Fatalf("Failed to create PipelineRun: %s", err) } - if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline expects workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { + if err := WaitForPipelineRunState(c, 
pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline requires workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { t.Fatalf("Failed to wait for PipelineRun %q to finish: %s", pipelineRunName, err) } diff --git a/test/workspace_test.go b/test/workspace_test.go index 08f4e9c914f..9efd83df65c 100644 --- a/test/workspace_test.go +++ b/test/workspace_test.go @@ -231,7 +231,89 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { t.Fatalf("Failed to create PipelineRun: %s", err) } - if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline expects workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { + if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline requires workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { t.Fatalf("Failed to wait for PipelineRun %q to finish: %s", pipelineRunName, err) } } + +func TestWorkspaceVolumeNameMatchesVolumeVariableReplacement(t *testing.T) { + c, namespace := setup(t) + + taskName := "foo-task" + taskRunName := "foo-taskrun" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "foo", + Image: "alpine", + Command: []string{"echo"}, + Args: []string{"$(workspaces.test.volume)"}, + }}}, + Workspaces: []v1beta1.WorkspaceDeclaration{{ + Name: "test", + Description: "test workspace", + MountPath: "/workspace/test/file", + ReadOnly: true, + }}, + }, + } + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + taskRun := &v1beta1.TaskRun{ + ObjectMeta: 
metav1.ObjectMeta{Name: taskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: taskName}, + ServiceAccountName: "default", + Workspaces: []v1beta1.WorkspaceBinding{{ + Name: "test", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + }, + } + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to finish", namespace) + if err := WaitForTaskRunState(c, taskRunName, TaskRunSucceed(taskRunName), "success"); err != nil { + t.Errorf("Error waiting for TaskRun to finish with error: %s", err) + } + + tr, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + if tr.Status.PodName == "" { + t.Fatal("Error getting a PodName (empty)") + } + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + + if err != nil { + t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) + } + + workspaceVariableValue := "" + for _, container := range p.Spec.Containers { + if container.Name == "step-foo" { + argsLen := len(container.Args) + workspaceVariableValue = container.Args[argsLen-1] + break + } + } + + volumeNames := []string{} + for _, volume := range p.Spec.Volumes { + if volume.Name == workspaceVariableValue { + return + } + volumeNames = append(volumeNames, volume.Name) + } + t.Fatalf("Workspace volume variable %q does not match any volume name in Pod volumes list %#v", workspaceVariableValue, volumeNames) +}