diff --git a/Concepts.md b/Concepts.md
index 19672df75f1..7324609e111 100644
--- a/Concepts.md
+++ b/Concepts.md
@@ -1,7 +1,7 @@
 # Pipeline CRDs
 Pipeline CRDs is an open source implementation to configure and run CI/CD style pipelines for your Kubernetes application.
-Pipeline CRDs creates [Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) as building blocks to declare pipelines.
+Pipeline CRDs creates [Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) as building blocks to declare pipelines. A custom resource is an extension of the Kubernetes API which can create a custom [Kubernetes Object](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#understanding-kubernetes-objects).
 
 Once a custom resource is installed, users can create and access its objects with kubectl, just as they do for built-in resources like pods, deployments etc.
@@ -20,7 +20,9 @@ A task will run inside a container on your cluster.
 A Task declares,
 1. Outputs the task will produce.
 1. Sequence of steps to execute.
-   Each step defines an container image. This image is of type [Builder Image](https://github.com/knative/docs/blob/master/build/builder-contract.md). A Builder Image is an image whose entrypoint is a tool that performs some action and exits with a zero status on success. These entrypoints are often command-line tools, for example, git, docker, mvn, and so on.
+   Each step defines a container image. This image is of type [Builder Image](https://github.com/knative/docs/blob/master/build/builder-contract.md). A Builder Image is an image whose `command` performs some action and exits with a zero status on success.
+
+   NOTE: Currently, entrypoint overrides are used to get the logs out of a Builder Image. This means that each step in `steps:` must specify a `command:` for its container.
 
 Here is an example of a simple Task definition which echoes "hello world". The `hello-world` task does not define any inputs or outputs.
@@ -37,8 +39,9 @@ spec:
   steps:
   - name: echo
     image: busybox
-    args:
+    command:
     - echo
+    args:
     - "hello world!"
```

Examples of `Task` definitions with inputs and outputs are [here](./examples)
diff --git a/config/200-clusterrole.yaml b/config/200-clusterrole.yaml
index f7a22759d9c..e3b6dcae0ea 100644
--- a/config/200-clusterrole.yaml
+++ b/config/200-clusterrole.yaml
@@ -4,7 +4,7 @@ metadata:
   name: knative-build-pipeline-admin
 rules:
 - apiGroups: [""]
-  resources: ["pods", "namespaces", "secrets", "events", "serviceaccounts", "configmaps"]
+  resources: ["pods", "namespaces", "secrets", "events", "serviceaccounts", "configmaps", "persistentvolumeclaims"]
   verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
 - apiGroups: ["extensions"]
   resources: ["deployments"]
@@ -20,4 +20,4 @@ rules:
   verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
 - apiGroups: ["build.knative.dev"]
   resources: ["builds", "buildtemplates", "clusterbuildtemplates"]
-  verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
\ No newline at end of file
+  verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
diff --git a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go
index 83f04a6ae7a..7b20bb01e47 100644
--- a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go
+++ b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go
@@ -93,23 +93,23 @@ func TestReconcile(t *testing.T) {
 		Tasks:          ts,
 		PipelineParams: pp,
 	}
-	c, _, client := test.GetPipelineRunController(d)
+	c, _, clients := test.GetPipelineRunController(d)
 	err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-success")
 	if err != nil {
 		t.Errorf("Did not expect to see error when reconciling valid Pipeline but saw %s", err)
 	}
-	if len(client.Actions()) == 0 {
+	if len(clients.Pipeline.Actions()) == 0 {
 		t.Fatalf("Expected client to have been used to create a TaskRun but it wasn't")
 	}
 
 	// Check that the PipelineRun was reconciled correctly
-	reconciledRun, err := client.Pipeline().PipelineRuns("foo").Get("test-pipeline-run-success", metav1.GetOptions{})
+	reconciledRun, err := clients.Pipeline.Pipeline().PipelineRuns("foo").Get("test-pipeline-run-success", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Somehow had error getting reconciled run out of fake client: %s", err)
 	}
 
 	// Check that the expected TaskRun was created
-	actual := client.Actions()[0].(ktesting.CreateAction).GetObject()
+	actual := clients.Pipeline.Actions()[0].(ktesting.CreateAction).GetObject()
 	trueB := true
 	expectedTaskRun := &v1alpha1.TaskRun{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/reconciler/v1alpha1/taskrun/resources/entrypoint.go b/pkg/reconciler/v1alpha1/taskrun/resources/entrypoint.go
new file mode 100644
index 00000000000..acdbf9222c4
--- /dev/null
+++ b/pkg/reconciler/v1alpha1/taskrun/resources/entrypoint.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+	"encoding/json"
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+const (
+	// MountName is the name of the pvc being mounted (which
+	// will contain the entrypoint binary and eventually the logs)
+	MountName = "tools"
+
+	mountPoint                 = "/tools"
+	entrypointBin              = mountPoint + "/entrypoint"
+	entrypointJSONConfigEnvVar = "ENTRYPOINT_OPTIONS"
+
+	// EntrypointImage is the image containing the entrypoint binary that
+	// is copied into the tools volume and used to wrap each step's command
+	EntrypointImage = "gcr.io/k8s-prow/entrypoint@sha256:7c7cd8906ce4982ffee326218e9fc75da2d4896d53cabc9833b9cc8d2d6b2b8f"
+)
+
+var toolsMount = corev1.VolumeMount{
+	Name:      MountName,
+	MountPath: mountPoint,
+}
+
+// GetCopyStep will return a Build Step (Container) that will
+// copy the entrypoint binary from the entrypoint image into the
+// volume mounted at mountPoint, so that it can be mounted by
+// subsequent steps and used to capture logs.
+func GetCopyStep() corev1.Container {
+	return corev1.Container{
+		Name:         "place-tools",
+		Image:        EntrypointImage,
+		Command:      []string{"/bin/cp"},
+		Args:         []string{"/entrypoint", entrypointBin},
+		VolumeMounts: []corev1.VolumeMount{toolsMount},
+	}
+}
+
+type entrypointArgs struct {
+	Args       []string `json:"args"`
+	ProcessLog string   `json:"process_log"`
+	MarkerFile string   `json:"marker_file"`
+}
+
+func getEnvVar(cmd, args []string) (string, error) {
+	entrypointArgs := entrypointArgs{
+		Args:       append(cmd, args...),
+		ProcessLog: "/tools/process-log.txt",
+		MarkerFile: "/tools/marker-file.txt",
+	}
+	j, err := json.Marshal(entrypointArgs)
+	if err != nil {
+		return "", fmt.Errorf("couldn't marshal arguments %q for entrypoint env var: %s", entrypointArgs, err)
+	}
+	return string(j), nil
+}
+
+// TODO: add more test cases, e.g. with existing env vars and volume mounts
+
+// AddEntrypoint will modify each of the steps/containers such that
+// the binary being run is no longer the one specified by the Command
+// and the Args, but is instead the entrypoint binary, which will
+// itself invoke the Command and Args, but also capture logs.
+// TODO: This will not work when a step uses an image that has its
+// own entrypoint, i.e. currently `Command` is a required field. In later
+// iterations we can update the controller to inspect the image's
+// `Entrypoint` and use that if required.
+func AddEntrypoint(steps []corev1.Container) error {
+	for i := range steps {
+		step := &steps[i]
+		e, err := getEnvVar(step.Command, step.Args)
+		if err != nil {
+			return fmt.Errorf("couldn't get env var for entrypoint: %s", err)
+		}
+		step.Command = []string{entrypointBin}
+		step.Args = []string{}
+
+		step.Env = append(step.Env, corev1.EnvVar{
+			Name:  entrypointJSONConfigEnvVar,
+			Value: e,
+		})
+		step.VolumeMounts = append(step.VolumeMounts, toolsMount)
+	}
+	return nil
+}
diff --git a/pkg/reconciler/v1alpha1/taskrun/taskrun.go b/pkg/reconciler/v1alpha1/taskrun/taskrun.go
index 46a09030378..bfadcbbfbf4 100644
--- a/pkg/reconciler/v1alpha1/taskrun/taskrun.go
+++ b/pkg/reconciler/v1alpha1/taskrun/taskrun.go
@@ -33,6 +33,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/tools/cache"
@@ -55,6 +56,8 @@ const (
 	// taskRunControllerName defines name for TaskRun Controller
 	taskRunControllerName = "TaskRun"
 	taskRunNameLabelKey   = "taskrun.knative.dev/taskName"
+
+	pvcSizeBytes = 5 * 1024 * 1024 * 1024 // 5 GiB
 )
 
 var (
@@ -102,13 +105,6 @@ func NewController(
 		UpdateFunc: controller.PassNew(impl.Enqueue),
 	})
 
-	// TODO(aaron-prindle) what to do if a task is deleted?
-	// taskInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-	// 	AddFunc:    impl.Enqueue,
-	// 	UpdateFunc: controller.PassNew(impl.Enqueue),
-	// 	DeleteFunc: impl.Enqueue,
-	// })
-
 	c.tracker = tracker.New(impl.EnqueueKey, opt.GetTrackerLease())
 	buildInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: c.tracker.OnChanged,
@@ -166,8 +162,20 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error
 	// get build the same as the taskrun, this is the value we use for 1:1 mapping and retrieval
 	build, err := c.BuildClientSet.BuildV1alpha1().Builds(tr.Namespace).Get(tr.Name, metav1.GetOptions{})
 	if errors.IsNotFound(err) {
+		pvc, err := c.KubeClientSet.CoreV1().PersistentVolumeClaims(tr.Namespace).Get(tr.Name, metav1.GetOptions{})
+		if errors.IsNotFound(err) {
+			// Create a persistent volume claim to hold Build logs
+			pvc, err = c.createPVC(tr)
+			if err != nil {
+				return fmt.Errorf("failed to create persistent volume claim %s for task %q: %v", tr.Name, tr.Name, err)
+			}
+		} else if err != nil {
+			c.Logger.Errorf("Failed to reconcile taskrun: %q, failed to get pvc %q; %v", tr.Name, tr.Name, err)
+			return err
+		}
+
 		// Build is not present, create build
-		build, err = c.createBuild(tr)
+		build, err = c.createBuild(tr, pvc.Name)
 		if err != nil {
 			// This Run has failed, so we need to mark it as failed and stop reconciling it
 			tr.Status.SetCondition(&duckv1alpha1.Condition{
@@ -224,8 +232,40 @@ func (c *Reconciler) updateStatus(taskrun *v1alpha1.TaskRun) (*v1alpha1.TaskRun,
 	return newtaskrun, nil
 }
 
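To make the override mechanics concrete, here is a small self-contained sketch (illustrative only, not part of this diff) that mirrors the `entrypointArgs` struct and `getEnvVar` above, and prints the `ENTRYPOINT_OPTIONS` value that `AddEntrypoint` would attach to a step declared with `Command: []string{"helm"}` and `Args: []string{"init", "--wait"}`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Mirrors the entrypointArgs struct defined in entrypoint.go above.
type entrypointArgs struct {
	Args       []string `json:"args"`
	ProcessLog string   `json:"process_log"`
	MarkerFile string   `json:"marker_file"`
}

func main() {
	// A step originally declared as Command: ["helm"], Args: ["init", "--wait"].
	cmd, args := []string{"helm"}, []string{"init", "--wait"}
	e := entrypointArgs{
		Args:       append(cmd, args...),
		ProcessLog: "/tools/process-log.txt",
		MarkerFile: "/tools/marker-file.txt",
	}
	j, err := json.Marshal(e)
	if err != nil {
		log.Fatalf("couldn't marshal entrypoint args: %v", err)
	}
	// Prints:
	// {"args":["helm","init","--wait"],"process_log":"/tools/process-log.txt","marker_file":"/tools/marker-file.txt"}
	fmt.Println(string(j))
}
```

The rewritten step then runs the entrypoint binary instead of `helm` directly; the binary invokes the original command and, judging by the `process_log` path, leaves its output on the shared `/tools` volume where later steps (and the validation pod in `test/crd.go`) can read it.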
-// createBuild creates a build from the task, using the task's buildspec.
-func (c *Reconciler) createBuild(tr *v1alpha1.TaskRun) (*buildv1alpha1.Build, error) {
+// createPVC creates a persistent volume claim for tr which
+// will be used to gather logs using the entrypoint wrapper
+func (c *Reconciler) createPVC(tr *v1alpha1.TaskRun) (*corev1.PersistentVolumeClaim, error) {
+	v, err := c.KubeClientSet.CoreV1().PersistentVolumeClaims(tr.Namespace).Create(
+		&corev1.PersistentVolumeClaim{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: tr.Namespace,
+				// This pvc is specific to this TaskRun, so we'll use the same name
+				Name: tr.Name,
+				OwnerReferences: []metav1.OwnerReference{
+					*metav1.NewControllerRef(tr, groupVersionKind),
+				},
+			},
+			Spec: corev1.PersistentVolumeClaimSpec{
+				AccessModes: []corev1.PersistentVolumeAccessMode{
+					corev1.ReadWriteOnce,
+				},
+				Resources: corev1.ResourceRequirements{
+					Requests: map[corev1.ResourceName]resource.Quantity{
+						corev1.ResourceStorage: *resource.NewQuantity(pvcSizeBytes, resource.BinarySI),
+					},
+				},
+			},
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to claim Persistent Volume %q due to error: %s", tr.Name, err)
+	}
+	return v, nil
+}
+
+// createBuild creates a build from the task, using the task's buildspec
+// with pvcName as a volumeMount
+func (c *Reconciler) createBuild(tr *v1alpha1.TaskRun, pvcName string) (*buildv1alpha1.Build, error) {
 	// Get related task for taskrun
 	t, err := c.taskLister.Tasks(tr.Namespace).Get(tr.Spec.TaskRef.Name)
 	if err != nil {
@@ -237,6 +277,28 @@ func (c *Reconciler) createBuild(tr *v1alpha1.TaskRun) (*buildv1alpha1.Build, er
 		return nil, fmt.Errorf("task %s has nil BuildSpec", t.Name)
 	}
 
+	bSpec := t.Spec.BuildSpec.DeepCopy()
+	bSpec.Volumes = append(bSpec.Volumes, corev1.Volume{
+		Name: resources.MountName,
+		VolumeSource: corev1.VolumeSource{
+			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+				ClaimName: pvcName,
+			},
+		},
+	})
+
+	// Override the entrypoint so that we can use our custom
+	// entrypoint which copies logs
+	err = resources.AddEntrypoint(bSpec.Steps)
+	if err != nil {
+		return nil, fmt.Errorf("failed to add entrypoint to steps of Build: %s", err)
+	}
+
+	// Add the step which will copy the entrypoint into the volume
+	// we are going to be using, so that all of the steps will have
+	// access to it.
+	bSpec.Steps = append([]corev1.Container{resources.GetCopyStep()}, bSpec.Steps...)
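A side note on the storage request in `createPVC`: `resource.NewQuantity(pvcSizeBytes, resource.BinarySI)` canonicalizes the raw byte count into a binary-suffixed quantity. A quick sketch to confirm what the PVC actually requests (this assumes the `k8s.io/apimachinery` dependency already vendored by this repo; it is not part of the change):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Same expression the reconciler uses when requesting storage for
	// the log PVC: 5 * 1024^3 bytes with binary (Ki/Mi/Gi) suffix formatting.
	q := resource.NewQuantity(5*1024*1024*1024, resource.BinarySI)
	fmt.Println(q.String()) // "5Gi"
}
```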
+
+	b := &buildv1alpha1.Build{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: tr.Name,
@@ -247,7 +309,7 @@ func (c *Reconciler) createBuild(tr *v1alpha1.TaskRun) (*buildv1alpha1.Build, er
 			// Attach new label and pass taskrun labels to build
 			Labels: makeLabels(tr),
 		},
-		Spec: *t.Spec.BuildSpec,
+		Spec: *bSpec,
 	}
 	// Pass service account name from taskrun to build
 	// if task specifies service account name override with taskrun SA
diff --git a/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go b/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go
index 2f66f3fa211..4d263988f2b 100644
--- a/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go
+++ b/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go
@@ -24,15 +24,69 @@ import (
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1"
 	"github.com/knative/build-pipeline/pkg/reconciler/v1alpha1/taskrun"
+	"github.com/knative/build-pipeline/pkg/reconciler/v1alpha1/taskrun/resources"
 	"github.com/knative/build-pipeline/test"
 	buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
 	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	ktesting "k8s.io/client-go/testing"
+	"k8s.io/client-go/tools/cache"
 )
 
+var (
+	groupVersionKind = schema.GroupVersionKind{
+		Group:   v1alpha1.SchemeGroupVersion.Group,
+		Version: v1alpha1.SchemeGroupVersion.Version,
+		Kind:    "TaskRun",
+	}
+)
+
+const (
+	entrypointLocation = "/tools/entrypoint"
+	toolsMountName     = "tools"
+	pvcSizeBytes       = 5 * 1024 * 1024 * 1024 // 5 GiB
+)
+
+var toolsMount = corev1.VolumeMount{
+	Name:      toolsMountName,
+	MountPath: "/tools",
+}
+
+var entrypointCopyStep = corev1.Container{
+	Name:         "place-tools",
+	Image:        resources.EntrypointImage,
+	Command:      []string{"/bin/cp"},
+	Args:         []string{"/entrypoint", entrypointLocation},
+	VolumeMounts: []corev1.VolumeMount{toolsMount},
+}
+
+func getExpectedPVC(tr *v1alpha1.TaskRun) *corev1.PersistentVolumeClaim {
+	return &corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: tr.Namespace,
+			// This pvc is specific to this TaskRun, so we'll use the same name
+			Name: tr.Name,
+			OwnerReferences: []metav1.OwnerReference{
+				*metav1.NewControllerRef(tr, groupVersionKind),
+			},
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			AccessModes: []corev1.PersistentVolumeAccessMode{
+				corev1.ReadWriteOnce,
+			},
+			Resources: corev1.ResourceRequirements{
+				Requests: map[corev1.ResourceName]resource.Quantity{
+					corev1.ResourceStorage: *resource.NewQuantity(pvcSizeBytes, resource.BinarySI),
+				},
+			},
+		},
+	}
+}
+
 var simpleTask = &v1alpha1.Task{
 	ObjectMeta: metav1.ObjectMeta{
 		Name: "test-task",
@@ -42,8 +96,27 @@ var simpleTask = &v1alpha1.Task{
 		BuildSpec: &buildv1alpha1.BuildSpec{
 			Steps: []corev1.Container{
 				{
-					Name:  "simple-step",
-					Image: "foo",
+					Name:    "simple-step",
+					Image:   "foo",
+					Command: []string{"/mycmd"},
 				},
 			},
 		},
 	},
 }
+
+var saTask = &v1alpha1.Task{
+	ObjectMeta: metav1.ObjectMeta{
+		Name:      "test-with-sa",
+		Namespace: "foo",
+	},
+	Spec: v1alpha1.TaskSpec{
+		BuildSpec: &buildv1alpha1.BuildSpec{
+			Steps: []corev1.Container{
+				{
+					Name:    "sa-step",
+					Image:   "foo",
+					Command: []string{"/mycmd"},
+				},
+			},
+		},
+	},
+}
@@ -59,9 +132,10 @@ var templatedTask = &v1alpha1.Task{
 		BuildSpec: &buildv1alpha1.BuildSpec{
 			Steps: []corev1.Container{
 				{
-					Name:  "mycontainer",
-					Image: "myimage",
-					Args:  []string{"--my-arg=${inputs.params.myarg}"},
+					Name:    "mycontainer",
+					Image:   "myimage",
+					Command: []string{"/mycmd"},
+					Args:    []string{"--my-arg=${inputs.params.myarg}"},
 				},
 				{
 					Name: "myothercontainer",
@@ -89,6 +163,17 @@ var gitResource = &v1alpha1.PipelineResource{
 	},
 }
 
+func getToolsVolume(claimName string) corev1.Volume {
+	return corev1.Volume{
+		Name: toolsMountName,
+		VolumeSource: corev1.VolumeSource{
+			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+				ClaimName: claimName,
+			},
+		},
+	}
+}
+
 func getRunName(tr *v1alpha1.TaskRun) string {
 	return strings.Join([]string{tr.Namespace, tr.Name}, "/")
 }
@@ -115,7 +200,7 @@ func TestReconcile(t *testing.T) {
 		Spec: v1alpha1.TaskRunSpec{
 			ServiceAccount: "test-sa",
 			TaskRef: v1alpha1.TaskRef{
-				Name:       "test-task",
+				Name:       "test-with-sa",
 				APIVersion: "a1",
 			},
 		},
@@ -153,7 +238,7 @@ func TestReconcile(t *testing.T) {
 
 	d := test.Data{
 		TaskRuns:          taskruns,
-		Tasks:             []*v1alpha1.Task{simpleTask, templatedTask},
+		Tasks:             []*v1alpha1.Task{simpleTask, saTask, templatedTask},
 		PipelineResources: []*v1alpha1.PipelineResource{gitResource},
 	}
 	testcases := []struct {
@@ -166,11 +251,24 @@ func TestReconcile(t *testing.T) {
 		taskRun: taskruns[0],
 		wantedBuildSpec: buildv1alpha1.BuildSpec{
 			Steps: []corev1.Container{
+				entrypointCopyStep,
 				{
-					Name:  "simple-step",
-					Image: "foo",
+					Name:    "simple-step",
+					Image:   "foo",
+					Command: []string{entrypointLocation},
+					Args:    []string{},
+					Env: []corev1.EnvVar{
+						{
+							Name:  "ENTRYPOINT_OPTIONS",
+							Value: `{"args":["/mycmd"],"process_log":"/tools/process-log.txt","marker_file":"/tools/marker-file.txt"}`,
+						},
+					},
+					VolumeMounts: []corev1.VolumeMount{toolsMount},
 				},
 			},
+			Volumes: []corev1.Volume{
+				getToolsVolume("test-taskrun-run-success"),
+			},
 		},
 	},
 	{
@@ -179,11 +277,24 @@ func TestReconcile(t *testing.T) {
 		wantedBuildSpec: buildv1alpha1.BuildSpec{
 			ServiceAccountName: "test-sa",
 			Steps: []corev1.Container{
+				entrypointCopyStep,
 				{
-					Name:  "simple-step",
-					Image: "foo",
+					Name:    "sa-step",
+					Image:   "foo",
+					Command: []string{entrypointLocation},
+					Args:    []string{},
+					Env: []corev1.EnvVar{
+						{
+							Name:  "ENTRYPOINT_OPTIONS",
+							Value: `{"args":["/mycmd"],"process_log":"/tools/process-log.txt","marker_file":"/tools/marker-file.txt"}`,
+						},
+					},
+					VolumeMounts: []corev1.VolumeMount{toolsMount},
 				},
 			},
+			Volumes: []corev1.Volume{
+				getToolsVolume("test-taskrun-with-sa-run-success"),
+			},
 		},
 	},
 	{
@@ -191,17 +302,37 @@ func TestReconcile(t *testing.T) {
 		taskRun: taskruns[2],
 		wantedBuildSpec: buildv1alpha1.BuildSpec{
 			Steps: []corev1.Container{
+				entrypointCopyStep,
 				{
-					Name:  "mycontainer",
-					Image: "myimage",
-					Args:  []string{"--my-arg=foo"},
+					Name:    "mycontainer",
+					Image:   "myimage",
+					Command: []string{entrypointLocation},
+					Args:    []string{},
+					Env: []corev1.EnvVar{
+						{
+							Name:  "ENTRYPOINT_OPTIONS",
+							Value: `{"args":["/mycmd","--my-arg=foo"],"process_log":"/tools/process-log.txt","marker_file":"/tools/marker-file.txt"}`,
+						},
+					},
+					VolumeMounts: []corev1.VolumeMount{toolsMount},
 				},
 				{
-					Name:  "myothercontainer",
-					Image: "myotherimage",
-					Args:  []string{"--my-other-arg=https://foo.git"},
+					Name:    "myothercontainer",
+					Image:   "myotherimage",
+					Command: []string{entrypointLocation},
+					Args:    []string{},
+					Env: []corev1.EnvVar{
+						{
+							Name:  "ENTRYPOINT_OPTIONS",
+							Value: `{"args":["--my-other-arg=https://foo.git"],"process_log":"/tools/process-log.txt","marker_file":"/tools/marker-file.txt"}`,
+						},
+					},
+					VolumeMounts: []corev1.VolumeMount{toolsMount},
 				},
 			},
+			Volumes: []corev1.Volume{
+				getToolsVolume("test-taskrun-templating"),
+			},
 		},
 	},
 	}
@@ -232,6 +363,41 @@ func TestReconcile(t *testing.T) {
 			if condition != nil && condition.Reason != taskrun.ReasonRunning {
 				t.Errorf("Expected reason %q but was %s", taskrun.ReasonRunning, condition.Reason)
 			}
+
+			namespace, name, err := cache.SplitMetaNamespaceKey(tc.taskRun.Name)
+			if err != nil {
+				t.Errorf("Invalid resource key: %v", err)
+			}
+			// Check that the kube client was used (the PVC is created through it)
+			if len(clients.Kube.Actions()) == 0 {
+				t.Fatalf("Expected actions to be logged in the kubeclient, got none")
+			}
+			// Check that the expected PVC was created
+			pvc, err := clients.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(name, metav1.GetOptions{})
+			if err != nil {
+				t.Errorf("Failed to fetch PVC: %v", err)
+			}
+
+			// get related TaskRun to populate expected PVC
+			tr, err := clients.Pipeline.PipelineV1alpha1().TaskRuns(namespace).Get(name, metav1.GetOptions{})
+			if err != nil {
+				t.Errorf("Failed to fetch TaskRun: %v", err)
+			}
+			expectedVolume := getExpectedPVC(tr)
+			if d := cmp.Diff(pvc.Name, expectedVolume.Name); d != "" {
+				t.Errorf("pvc doesn't match, diff: %s", d)
+			}
+			if d := cmp.Diff(pvc.OwnerReferences, expectedVolume.OwnerReferences); d != "" {
+				t.Errorf("pvc doesn't match, diff: %s", d)
+			}
+			if d := cmp.Diff(pvc.Spec.AccessModes, expectedVolume.Spec.AccessModes); d != "" {
+				t.Errorf("pvc doesn't match, diff: %s", d)
+			}
+			if pvc.Spec.Resources.Requests["storage"] != expectedVolume.Spec.Resources.Requests["storage"] {
+				t.Errorf("pvc doesn't match, got: %v, expected: %v",
+					pvc.Spec.Resources.Requests["storage"],
+					expectedVolume.Spec.Resources.Requests["storage"])
+			}
 		})
 	}
 }
diff --git a/test/controller.go b/test/controller.go
index e221ded6e29..8b5825e169f 100644
--- a/test/controller.go
+++ b/test/controller.go
@@ -57,6 +57,7 @@ type Data struct {
 type Clients struct {
 	Pipeline *fakepipelineclientset.Clientset
 	Build    *fakebuildclientset.Clientset
+	Kube     *fakekubeclientset.Clientset
 }
 
 // Informers holds references to informers which are useful for reconciler tests.
@@ -95,6 +96,7 @@ func seedTestData(d Data) (Clients, Informers) {
 	c := Clients{
 		Pipeline: fakepipelineclientset.NewSimpleClientset(objs...),
 		Build:    fakebuildclientset.NewSimpleClientset(buildObjs...),
+		Kube:     fakekubeclientset.NewSimpleClientset(),
 	}
 	sharedInformer := informers.NewSharedInformerFactory(c.Pipeline, 0)
 	buildInformerFactory := buildinformers.NewSharedInformerFactory(c.Build, 0)
@@ -141,7 +143,7 @@ func GetTaskRunController(d Data) (*controller.Impl, *observer.ObservedLogs, Cli
 	return taskrun.NewController(
 		reconciler.Options{
 			Logger:            zap.New(observer).Sugar(),
-			KubeClientSet:     fakekubeclientset.NewSimpleClientset(),
+			KubeClientSet:     c.Kube,
 			PipelineClientSet: c.Pipeline,
 			BuildClientSet:    c.Build,
 		},
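Since `Clients` now bundles the Pipeline, Build, and Kube fakes behind one return value, a reconciler test can drive the controller and then assert against all three fakes without plumbing extra clientsets through. A hypothetical sketch (the test name and seeded fixtures are placeholders that follow the `taskrun_test.go` patterns above; it is not part of this diff):

```go
func TestReconcileCreatesPVCAndBuild(t *testing.T) {
	d := test.Data{
		TaskRuns: taskruns,
		Tasks:    []*v1alpha1.Task{simpleTask},
	}
	c, _, clients := test.GetTaskRunController(d)
	if err := c.Reconciler.Reconcile(context.Background(), "foo/test-taskrun-run-success"); err != nil {
		t.Fatalf("unexpected error reconciling: %v", err)
	}
	// The log PVC is created through the fake Kube clientset...
	if _, err := clients.Kube.CoreV1().PersistentVolumeClaims("foo").Get("test-taskrun-run-success", metav1.GetOptions{}); err != nil {
		t.Errorf("expected PVC to have been created: %v", err)
	}
	// ...and the Build through the fake Build clientset.
	if _, err := clients.Build.BuildV1alpha1().Builds("foo").Get("test-taskrun-run-success", metav1.GetOptions{}); err != nil {
		t.Errorf("expected Build to have been created: %v", err)
	}
}
```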
@@ -154,13 +156,13 @@ func GetTaskRunController(d Data) (*controller.Impl, *observer.ObservedLogs, Cli
 
 // GetPipelineRunController returns an instance of the PipelineRun controller/reconciler that has been seeded with
 // d, where d represents the state of the system (existing resources) needed for the test.
-func GetPipelineRunController(d Data) (*controller.Impl, *observer.ObservedLogs, *fakepipelineclientset.Clientset) {
+func GetPipelineRunController(d Data) (*controller.Impl, *observer.ObservedLogs, Clients) {
 	c, i := seedTestData(d)
 	observer, logs := observer.New(zap.InfoLevel)
 	return pipelinerun.NewController(
 		reconciler.Options{
 			Logger:            zap.New(observer).Sugar(),
-			KubeClientSet:     fakekubeclientset.NewSimpleClientset(),
+			KubeClientSet:     c.Kube,
 			PipelineClientSet: c.Pipeline,
 		},
 		i.PipelineRun,
@@ -168,5 +170,5 @@ func GetPipelineRunController(d Data) (*controller.Impl, *observer.ObservedLogs,
 		i.Task,
 		i.TaskRun,
 		i.PipelineParams,
-	), logs, c.Pipeline
+	), logs, c
 }
diff --git a/test/crd.go b/test/crd.go
index d7752140b6c..79ddf39091b 100644
--- a/test/crd.go
+++ b/test/crd.go
@@ -28,14 +28,12 @@ import (
 	buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
 	"github.com/knative/pkg/test/logging"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1"
 )
 
 const (
-	hwVolumeName        = "scratch"
 	hwTaskName          = "helloworld"
 	hwTaskRunName       = "helloworld-run"
 	hwValidationPodName = "helloworld-validation-busybox"
@@ -47,8 +45,8 @@ const (
 	hwSecret = "helloworld-secret"
 	hwSA     = "helloworld-sa"
 
-	logPath = "/workspace"
-	logFile = "out.txt"
+	logPath = "/logs"
+	logFile = "process-log.txt"
 
 	hwContainerName = "helloworld-busybox"
 	taskOutput      = "do you want to build a snowman"
@@ -66,9 +64,10 @@ func getHelloWorldValidationPod(namespace, volumeClaimName string) *corev1.Pod {
 			corev1.Container{
 				Name:  hwValidationPodName,
 				Image: "busybox",
-				Args: []string{
-					"cat", fmt.Sprintf("%s/%s", logPath, logFile),
+				Command: []string{
+					"cat",
 				},
+				Args: []string{fmt.Sprintf("%s/%s", logPath, logFile)},
 				VolumeMounts: []corev1.VolumeMount{
 					corev1.VolumeMount{
 						Name: "scratch",
@@ -91,25 +90,6 @@ func getHelloWorldValidationPod(namespace, volumeClaimName string) *corev1.Pod {
 	}
 }
 
-func getHelloWorldVolumeClaim(namespace string) *corev1.PersistentVolumeClaim {
-	return &corev1.PersistentVolumeClaim{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      hwVolumeName,
-		},
-		Spec: corev1.PersistentVolumeClaimSpec{
-			AccessModes: []corev1.PersistentVolumeAccessMode{
-				corev1.ReadWriteOnce,
-			},
-			Resources: corev1.ResourceRequirements{
-				Requests: map[corev1.ResourceName]resource.Quantity{
-					corev1.ResourceStorage: *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI),
-				},
-			},
-		},
-	}
-}
-
 func getHelloWorldTask(namespace string, args []string) *v1alpha1.Task {
 	return &v1alpha1.Task{
 		ObjectMeta: metav1.ObjectMeta{
@@ -120,9 +100,9 @@ func getHelloWorldTask(namespace string, args []string) *v1alpha1.Task {
 		BuildSpec: &buildv1alpha1.BuildSpec{
 			Steps: []corev1.Container{
 				corev1.Container{
-					Name:  hwContainerName,
-					Image: "busybox",
-					Args:  args,
+					Name:    hwContainerName,
+					Image:   "busybox",
+					Command: args,
 				},
 			},
 		},
@@ -130,27 +110,6 @@
 	}
 }
 
-func getHelloWorldTaskWithVolume(namespace string, args []string) *v1alpha1.Task {
-	t := getHelloWorldTask(namespace, args)
-	t.Spec.BuildSpec.Steps[0].VolumeMounts = []corev1.VolumeMount{
-		corev1.VolumeMount{
-			Name:      "scratch",
-			MountPath: logPath,
-		},
-	}
-	t.Spec.BuildSpec.Volumes = []corev1.Volume{
-		corev1.Volume{
-			Name: "scratch",
-			VolumeSource: corev1.VolumeSource{
-				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
-					ClaimName: hwVolumeName,
-				},
-			},
-		},
-	}
-	return t
-}
-
 func getHelloWorldTaskRun(namespace string) *v1alpha1.TaskRun {
 	return &v1alpha1.TaskRun{
 		ObjectMeta: metav1.ObjectMeta{
@@ -248,11 +207,12 @@ func getBuildOutputFromVolume(logger *logging.BaseLogger, c *clients, namespace,
 	// Create Validation Pod
 	pods := c.KubeClient.Kube.CoreV1().Pods(namespace)
-	if _, err := pods.Create(getHelloWorldValidationPod(namespace, hwVolumeName)); err != nil {
-		return "", fmt.Errorf("failed to create Volume `%s`: %s", hwVolumeName, err)
+	// Volume created for Task should have the same name as the Task
+	if _, err := pods.Create(getHelloWorldValidationPod(namespace, hwTaskRunName)); err != nil {
+		return "", fmt.Errorf("failed to create Validation pod to mount volume `%s`: %s", hwTaskRunName, err)
 	}
 
-	logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", hwVolumeName)
+	logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", hwTaskRunName)
 	if err := WaitForPodState(c, hwValidationPodName, namespace, func(p *corev1.Pod) (bool, error) {
 		// the "Running" status is used as "Succeeded" caused issues as the pod succeeds and restarts quickly
 		// there might be a race condition here and possibly a better way of handling this, perhaps using a Job or different state validation
diff --git a/test/helm_task_test.go b/test/helm_task_test.go
index fb6d5b8260e..b5b1cc7fbb9 100644
--- a/test/helm_task_test.go
+++ b/test/helm_task_test.go
@@ -29,6 +29,7 @@ import (
 	"github.com/knative/pkg/test/logging"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1beta1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 )
@@ -192,8 +193,9 @@ func getCreateImageTask(namespace string, t *testing.T) *v1alpha1.Task {
 		},
 		BuildSpec: &buildv1alpha1.BuildSpec{
 			Steps: []corev1.Container{{
-				Name:  "kaniko",
-				Image: "gcr.io/kaniko-project/executor",
+				Name:    "kaniko",
+				Image:   "gcr.io/kaniko-project/executor",
+				Command: []string{"/kaniko/executor"},
 				Args: []string{"--dockerfile=/workspace/test/gohelloworld/Dockerfile",
 					fmt.Sprintf("--destination=%s", imageName),
 				},
@@ -227,12 +229,14 @@ func getHelmDeployTask(namespace string) *v1alpha1.Task {
 		},
 		BuildSpec: &buildv1alpha1.BuildSpec{
 			Steps: []corev1.Container{{
-				Name:  "helm-init",
-				Image: "alpine/helm",
-				Args:  []string{"init", "--wait"},
+				Name:    "helm-init",
+				Image:   "alpine/helm",
+				Command: []string{"helm"},
+				Args:    []string{"init", "--wait"},
 			}, {
-				Name:  "helm-deploy",
-				Image: "alpine/helm",
+				Name:    "helm-deploy",
+				Image:   "alpine/helm",
+				Command: []string{"helm"},
 				Args: []string{"install",
 					"--debug",
 					"--name=${inputs.params.chartname}",
@@ -323,7 +327,9 @@ func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string, logg
 	logger.Infof("Creating tiller service account")
 	if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts("kube-system").Create(tillerServiceAccount); err != nil {
-		t.Fatalf("Failed to create default Service account for Helm %s", err)
+		if !errors.IsAlreadyExists(err) {
+			t.Fatalf("Failed to create default Service account for Helm: %s", err)
+		}
 	}
 
 	clusterRoleBindings[0] = &rbacv1.ClusterRoleBinding{
@@ -480,9 +486,10 @@ func removeHelmFromCluster(c *clients, t *testing.T, namespace string, logger *l
 		Spec: v1alpha1.TaskSpec{
 			BuildSpec: &buildv1alpha1.BuildSpec{
 				Steps: []corev1.Container{{
-					Name:  "helm-reset",
-					Image: "alpine/helm",
-					Args:  []string{"reset", "--force"},
+					Name:    "helm-reset",
+					Image:   "alpine/helm",
+					Command: []string{"helm"},
+					Args:    []string{"reset", "--force"},
 				},
 			},
 		},
diff --git a/test/kaniko_task_test.go b/test/kaniko_task_test.go
index 5f7b7edd8b6..9323d5cfa31 100644
--- a/test/kaniko_task_test.go
+++ b/test/kaniko_task_test.go
@@ -18,13 +18,14 @@ package test
 import (
 	"fmt"
 	"io/ioutil"
-	"k8s.io/client-go/kubernetes"
 	"os"
 	"regexp"
 	"strings"
 	"testing"
 	"time"
 
+	"k8s.io/client-go/kubernetes"
+
 	"github.com/google/go-containerregistry/pkg/authn"
 	"github.com/google/go-containerregistry/pkg/name"
 	"github.com/google/go-containerregistry/pkg/v1/remote"
@@ -119,8 +120,9 @@ func getTask(repo, namespace string, withSecretConfig bool) *v1alpha1.Task {
 	}
 
 	step := corev1.Container{
-		Name:  "kaniko",
-		Image: "gcr.io/kaniko-project/executor",
+		Name:    "kaniko",
+		Image:   "gcr.io/kaniko-project/executor",
+		Command: []string{"/kaniko/executor"},
 		Args: []string{"--dockerfile=/workspace/Dockerfile",
 			fmt.Sprintf("--destination=%s", repo),
 		},
diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go
index 3d2e0379bc7..f137fff0aad 100644
--- a/test/pipelinerun_test.go
+++ b/test/pipelinerun_test.go
@@ -125,7 +125,8 @@ func TestPipelineRun_WithServiceAccount(t *testing.T) {
 				Name:  "config-docker",
 				Image: "gcr.io/cloud-builders/docker",
 				// Private docker image for Build CRD testing
-				Args: []string{"pull", "gcr.io/build-crd-testing/secret-sauce"},
+				Command: []string{"docker"},
+				Args:    []string{"pull", "gcr.io/build-crd-testing/secret-sauce"},
 				VolumeMounts: []corev1.VolumeMount{{
 					Name:      "docker-socket",
 					MountPath: "/var/run/docker.sock",
diff --git a/test/taskrun_test.go b/test/taskrun_test.go
index 2e738183151..d7b178c08b4 100644
--- a/test/taskrun_test.go
+++ b/test/taskrun_test.go
@@ -37,13 +37,8 @@ func TestTaskRun(t *testing.T) {
 	knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger)
 	defer tearDown(logger, c.KubeClient, namespace)
 
-	logger.Infof("Creating volume %s to collect log output", hwVolumeName)
-	if _, err := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Create(getHelloWorldVolumeClaim(namespace)); err != nil {
-		t.Fatalf("Failed to create Volume `%s`: %s", hwTaskName, err)
-	}
-
 	logger.Infof("Creating Task and TaskRun in namespace %s", namespace)
-	if _, err := c.TaskClient.Create(getHelloWorldTaskWithVolume(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s > %s/%s", taskOutput, logPath, logFile)})); err != nil {
+	if _, err := c.TaskClient.Create(getHelloWorldTask(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s", taskOutput)})); err != nil {
 		t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err)
 	}
 	if _, err := c.TaskRunClient.Create(getHelloWorldTaskRun(namespace)); err != nil {
@@ -65,10 +60,11 @@ func TestTaskRun(t *testing.T) {
 		t.Errorf("Error waiting for TaskRun %s to finish: %s", hwTaskRunName, err)
 	}
 
-	logger.Infof("Verifying TaskRun %s output in volume %s", hwTaskRunName, hwVolumeName)
+	// The volume created with the results will have the same name as the TaskRun
+	logger.Infof("Verifying TaskRun %s output in volume %s", hwTaskRunName, hwTaskRunName)
 	output, err := getBuildOutputFromVolume(logger, c, namespace, taskOutput)
 	if err != nil {
-		t.Fatalf("Unable to get build output from volume %s: %s", hwVolumeName, err)
+		t.Fatalf("Unable to get build output from volume %s: %s", hwTaskRunName, err)
 	}
 	if !strings.Contains(output, taskOutput) {
 		t.Fatalf("Expected output %s from pod %s but got %s", buildOutput, hwValidationPodName, output)