From a2e8ec519befbc60912ad54ec489390c03675769 Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Wed, 17 Oct 2018 14:16:14 -0400 Subject: [PATCH 1/8] Add integration test to run a helm deploy pipeline - #63 - create a pipeline of two tasks, - first task builds and pushes an image - second task helm deploys that image - verify the created service is reachable --- test/crd_checks.go | 2 +- test/helm_task_test.go | 310 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 311 insertions(+), 1 deletion(-) create mode 100644 test/helm_task_test.go diff --git a/test/crd_checks.go b/test/crd_checks.go index 6e8f271d728..5dc6784abad 100644 --- a/test/crd_checks.go +++ b/test/crd_checks.go @@ -30,7 +30,7 @@ import ( const ( interval = 1 * time.Second - timeout = 2 * time.Minute + timeout = 5 * time.Minute ) // WaitForTaskRunState polls the status of the TaskRun called name from client every diff --git a/test/helm_task_test.go b/test/helm_task_test.go new file mode 100644 index 00000000000..b0c4178b03c --- /dev/null +++ b/test/helm_task_test.go @@ -0,0 +1,310 @@ +// +build e2e + +/* +Copyright 2018 Knative Authors LLC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "net/http" + "os" + "testing" + + buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + knativetest "github.com/knative/pkg/test" + "github.com/knative/pkg/test/logging" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" +) + +const ( + sourceResourceName = "go-helloworld-git" + sourceImageName = "go-helloworld-image" + createImageTaskName = "create-image-task" + helmDeployTaskName = "helm-deploy-task" + helmDeployPipelineName = "helm-deploy-pipeline" + helmDeployPipelineRunName = "helm-deploy-pipeline-run" + helmDeployServiceName = "gohelloworld-chart" +) + +var imageName string + +// TestHelmDeployPipelineRun is an integration test that will verify that a pipeline builds an image +// and then uses helm to deploy it +func TestHelmDeployPipelineRun(t *testing.T) { + logger := logging.GetContextLogger(t.Name()) + c, namespace := setup(t, logger) + setupClusterBindingForHelm(c, t, namespace) + + knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger) + defer tearDown(logger, c.KubeClient, namespace) + + logger.Infof("Creating Git PipelineResource %s", sourceResourceName) + if _, err := c.PipelineResourceClient.Create(getGoHelloworldGitResource(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", sourceResourceName, err) + } + + logger.Infof("Creating Task %s", createImageTaskName) + if _, err := c.TaskClient.Create(getCreateImageTask(namespace, t)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", createImageTaskName, err) + } + + logger.Infof("Creating Task %s", helmDeployTaskName) + if _, err := c.TaskClient.Create(getHelmDeployTask(namespace)); err != nil {
+ t.Fatalf("Failed to create Task `%s`: %s", helmDeployTaskName, err) + } + + logger.Infof("Creating Pipeline %s", helmDeployPipelineName) + if _, err := c.PipelineClient.Create(getelmDeployPipeline(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", helmDeployPipelineName, err) + } + + logger.Infof("Creating PipelineRun %s", helmDeployPipelineRunName) + if _, err := c.PipelineRunClient.Create(getelmDeployPipelineRun(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", helmDeployPipelineRunName, err) + } + + // Verify status of PipelineRun (wait for it) + if err := WaitForPipelineRunState(c, helmDeployPipelineRunName, func(pr *v1alpha1.PipelineRun) (bool, error) { + c := pr.Status.GetCondition(duckv1alpha1.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue { + return true, nil + } else if c.Status == corev1.ConditionFalse { + return true, fmt.Errorf("pipeline run %s failed!", helmDeployPipelineRunName) + } + } + return false, nil + }, "PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err) + } + + k8sService, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(helmDeployServiceName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error getting service at %s %s", helmDeployServiceName, err) + } + var serviceIp string + ingress := k8sService.Status.LoadBalancer.Ingress + if len(ingress) > 0 { + serviceIp = ingress[0].IP + } + + resp, err := http.Get(fmt.Sprintf("http://%s:8080", serviceIp)) + if err != nil { + t.Errorf("Error reaching service at http://%s:8080 %s", serviceIp, err) + } + if resp != nil && resp.StatusCode != http.StatusOK { + t.Errorf("Error from service at http://%s:8080 %s", serviceIp, err) + } +} + +func getGoHelloworldGitResource(namespace string) *v1alpha1.PipelineResource { + return &v1alpha1.PipelineResource{ + ObjectMeta: metav1.ObjectMeta{ + Name: sourceResourceName, + Namespace: namespace, + }, + Spec: v1alpha1.PipelineResourceSpec{ + Type: v1alpha1.PipelineResourceTypeGit, + Params: []v1alpha1.Param{ + v1alpha1.Param{ + Name: "Url", + Value: "https://github.com/pivotal-nader-ziada/gohelloworld", + }, + }, + }, + } +} + +func getCreateImageTask(namespace string, t *testing.T) *v1alpha1.Task { + // according to knative/test-infra readme (https://github.com/knative/test-infra/blob/13055d769cc5e1756e605fcb3bcc1c25376699f1/scripts/README.md) + // the KO_DOCKER_REPO will be set according to the project where the cluster is created + // it is used here to dynamically get the docker registry to push the image to + dockerRepo := os.Getenv("KO_DOCKER_REPO") + if dockerRepo == "" { + t.Fatalf("KO_DOCKER_REPO env variable is required") + } + + imageName = fmt.Sprintf("%s/%s", dockerRepo, sourceImageName) + + return &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: createImageTaskName, + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{ + v1alpha1.TaskResource{ + Name: sourceResourceName, + Type: v1alpha1.PipelineResourceTypeGit, + }, + }, + }, + BuildSpec: &buildv1alpha1.BuildSpec{ + Steps: []corev1.Container{{ + Name: "kaniko", + Image: "gcr.io/kaniko-project/executor", + Args: []string{"--dockerfile=/workspace/Dockerfile", + fmt.Sprintf("--destination=%s", imageName), + }, + }}, + }, + }, + } +} + +func getHelmDeployTask(namespace string) *v1alpha1.Task { + return &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name:
helmDeployTaskName, + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{ + v1alpha1.TaskResource{ + Name: sourceResourceName, + Type: v1alpha1.PipelineResourceTypeGit, + }, + }, + Params: []v1alpha1.TaskParam{{ + Name: "pathToHelmCharts", + }, { + Name: "image", + }, { + Name: "chartname", + }}, + }, + BuildSpec: &buildv1alpha1.BuildSpec{ + Steps: []corev1.Container{{ + Name: "helm-init", + Image: "alpine/helm", + Args: []string{"init"}, + }, + { + Name: "helm-cleanup", //for local clusters, clean up from previous runs + Image: "alpine/helm", + Command: []string{"/bin/sh", + "-c", + "helm ls --short --all | xargs -n1 helm del --purge", + }, + }, + { + Name: "helm-deploy", + Image: "alpine/helm", + Args: []string{"install", + "--debug", + "--name=${inputs.params.chartname}", + "${inputs.params.pathToHelmCharts}", + "--set", + "image.repository=${inputs.params.image}", + }, + }}, + }, + }, + } +} + +func getelmDeployPipeline(namespace string) *v1alpha1.Pipeline { + return &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: helmDeployPipelineName, + }, + Spec: v1alpha1.PipelineSpec{ + Tasks: []v1alpha1.PipelineTask{ + v1alpha1.PipelineTask{ + Name: "push-image", + TaskRef: v1alpha1.TaskRef{ + Name: createImageTaskName, + }, + InputSourceBindings: []v1alpha1.SourceBinding{{ + Name: "some-name", + Key: sourceResourceName, + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: sourceResourceName, + }, + }}, + }, + v1alpha1.PipelineTask{ + Name: "helm-deploy", + TaskRef: v1alpha1.TaskRef{ + Name: helmDeployTaskName, + }, + InputSourceBindings: []v1alpha1.SourceBinding{{ + Name: "some-other-name", + Key: sourceResourceName, + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: sourceResourceName, + }, + }}, + Params: []v1alpha1.Param{{ + Name: "pathToHelmCharts", + Value: "/workspace/gohelloworld-chart", + }, { + Name: "chartname", + Value: "gohelloworld", + }, { + Name: "image", + Value: imageName, + }}, + }, + }, + }, + } +} + +func getelmDeployPipelineRun(namespace string) *v1alpha1.PipelineRun { + return &v1alpha1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: helmDeployPipelineRunName, + }, + Spec: v1alpha1.PipelineRunSpec{ + PipelineRef: v1alpha1.PipelineRef{ + Name: helmDeployPipelineName, + }, + PipelineTriggerRef: v1alpha1.PipelineTriggerRef{ + Type: v1alpha1.PipelineTriggerTypeManual, + }, + }, + } +} + +func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { + defaultClusterRoleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: AppendRandomString("default-tiller"), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "default", + Namespace: namespace, + }}, + } + + if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(defaultClusterRoleBinding); err != nil { + t.Fatalf("Failed to create default Service account for Helm in namespace: %s - %s", namespace, err) + } +} From c6807abec90217cc930fc280eea6a3e45313f1b2 Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Wed, 17 Oct 2018 15:32:32 -0400 Subject: [PATCH 2/8] changes to helm task test to improve reliability and fix typos --- test/helm_task_test.go | 58 ++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/test/helm_task_test.go b/test/helm_task_test.go index 
b0c4178b03c..e729fe4c4a4 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -70,12 +70,12 @@ func TestHelmDeployPipelineRun(t *testing.T) { } logger.Infof("Creating Pipeline %s", helmDeployPipelineName) - if _, err := c.PipelineClient.Create(getelmDeployPipeline(namespace)); err != nil { + if _, err := c.PipelineClient.Create(getHelmDeployPipeline(namespace)); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", helmDeployPipelineName, err) } logger.Infof("Creating PipelineRun %s", helmDeployPipelineRunName) - if _, err := c.PipelineRunClient.Create(getelmDeployPipelineRun(namespace)); err != nil { + if _, err := c.PipelineRunClient.Create(getHelmDeployPipelineRun(namespace)); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", helmDeployPipelineRunName, err) } @@ -94,14 +94,17 @@ func TestHelmDeployPipelineRun(t *testing.T) { t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err) } + var serviceIp string k8sService, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(helmDeployServiceName, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting service at %s %s", helmDeployServiceName, err) } - var serviceIp string - ingress := k8sService.Status.LoadBalancer.Ingress - if len(ingress) > 0 { - serviceIp = ingress[0].IP + if k8sService != nil { + ingress := k8sService.Status.LoadBalancer.Ingress + if len(ingress) > 0 { + serviceIp = ingress[0].IP + t.Logf("Service IP is %s", serviceIp) + } } resp, err := http.Get(fmt.Sprintf("http://%s:8080", serviceIp)) @@ -140,7 +143,8 @@ func getCreateImageTask(namespace string, t *testing.T) *v1alpha1.Task { t.Fatalf("KO_DOCKER_REPO env variable is required") } - imageName = fmt.Sprintf("%s/%s", dockerRepo, sourceImageName) + imageName = fmt.Sprintf("%s/%s", dockerRepo, AppendRandomString(sourceImageName)) + t.Log("Image to be published: %s", imageName) return &v1alpha1.Task{ ObjectMeta: metav1.ObjectMeta{ @@ -196,32 +200,30 @@ func getHelmDeployTask(namespace string) *v1alpha1.Task { Name: "helm-init", Image: "alpine/helm", Args: []string{"init"}, - }, - { - Name: "helm-cleanup", //for local clusters, clean up from previous runs - Image: "alpine/helm", - Command: []string{"/bin/sh", - "-c", - "helm ls --short --all | xargs -n1 helm del --purge", - }, + }, { + Name: "helm-cleanup", //for local clusters, clean up from previous runs + Image: "alpine/helm", + Command: []string{"/bin/sh", + "-c", + "helm ls --short --all | xargs -n1 helm del --purge", }, - { - Name: "helm-deploy", - Image: "alpine/helm", - Args: []string{"install", - "--debug", - "--name=${inputs.params.chartname}", - "${inputs.params.pathToHelmCharts}", - "--set", - "image.repository=${inputs.params.image}", - }, - }}, + }, { + Name: "helm-deploy", + Image: "alpine/helm", + Args: []string{"install", + "--debug", + "--name=${inputs.params.chartname}", + "${inputs.params.pathToHelmCharts}", + "--set", + "image.repository=${inputs.params.image}", + }, + }}, }, }, } } -func getelmDeployPipeline(namespace string) *v1alpha1.Pipeline { +func getHelmDeployPipeline(namespace string) *v1alpha1.Pipeline { return &v1alpha1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -270,7 +272,7 @@ func getelmDeployPipeline(namespace string) *v1alpha1.Pipeline { } } -func getelmDeployPipelineRun(namespace string) *v1alpha1.PipelineRun { +func getHelmDeployPipelineRun(namespace string) *v1alpha1.PipelineRun { return &v1alpha1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, From
505b6f1b98b4768b9b736bc56186161bfc32a5a5 Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Thu, 18 Oct 2018 10:12:54 -0400 Subject: [PATCH 3/8] Add passedConstraint checking when creating taskrun - Before a pipeline run creates a taskrun for the next task, it needs to check the passed constraints to respect dependencies between tasks - add implementation of canTaskRun and unit tests - add a task to delete tiller at the end of the helm task test --- .../resources/passedconstraint_test.go | 174 ++++++++++++++++++ .../pipelinerun/resources/pipelinestate.go | 28 ++- test/crd_checks.go | 18 ++ test/helm_task_test.go | 133 +++++++++++-- 4 files changed, 334 insertions(+), 19 deletions(-) create mode 100644 pkg/reconciler/v1alpha1/pipelinerun/resources/passedconstraint_test.go diff --git a/pkg/reconciler/v1alpha1/pipelinerun/resources/passedconstraint_test.go b/pkg/reconciler/v1alpha1/pipelinerun/resources/passedconstraint_test.go new file mode 100644 index 00000000000..cd6ba52fb36 --- /dev/null +++ b/pkg/reconciler/v1alpha1/pipelinerun/resources/passedconstraint_test.go @@ -0,0 +1,174 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var mytask1 = &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "mytask1", + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{ + v1alpha1.TaskResource{ + Name: "myresource1", + Type: v1alpha1.PipelineResourceTypeGit, + }, + }, + }, + }, +} + +var mytask2 = &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "mytask2", + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{ + v1alpha1.TaskResource{ + Name: "myresource1", + Type: v1alpha1.PipelineResourceTypeGit, + }, + }, + }, + }, +} + +var mypipelinetasks = []v1alpha1.PipelineTask{{ + Name: "mypipelinetask1", + TaskRef: v1alpha1.TaskRef{Name: "mytask1"}, + InputSourceBindings: []v1alpha1.SourceBinding{{ + Name: "some-name-1", + Key: "myresource1", + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "myresource1", + }, + }}, +}, { + Name: "mypipelinetask2", + TaskRef: v1alpha1.TaskRef{Name: "mytask2"}, + InputSourceBindings: []v1alpha1.SourceBinding{{ + Name: "some-name-2", + Key: "myresource1", + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "myresource1", + }, + PassedConstraints: []string{"mytask1"}, + }}, +}} + +var mytaskruns = []v1alpha1.TaskRun{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "pipelinerun-mytask1", + }, + Spec: v1alpha1.TaskRunSpec{}, +}, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "pipelinerun-mytask2", + }, + Spec: v1alpha1.TaskRunSpec{}, +}} + +func TestCanTaskRun(t *testing.T) { + tcs := []struct { + name string + state []*PipelineRunTaskRun + canSecondTaskRun bool + }{ +
{ + name: "first-task-not-started", + state: []*PipelineRunTaskRun{{ + Task: mytask1, + PipelineTask: &mypipelinetasks[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: nil, + }, { + Task: mytask2, + PipelineTask: &mypipelinetasks[1], + TaskRunName: "pipelinerun-mytask2", + TaskRun: nil, + }}, + canSecondTaskRun: false, + }, + { + name: "first-task-running", + state: []*PipelineRunTaskRun{{ + Task: mytask1, + PipelineTask: &mypipelinetasks[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeStarted(mytaskruns[0]), + }, { + Task: mytask2, + PipelineTask: &mypipelinetasks[1], + TaskRunName: "pipelinerun-mytask2", + TaskRun: nil, + }}, + canSecondTaskRun: false, + }, + { + name: "first-task-failed", + state: []*PipelineRunTaskRun{{ + Task: mytask1, + PipelineTask: &mypipelinetasks[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeFailed(mytaskruns[0]), + }, { + Task: mytask2, + PipelineTask: &mypipelinetasks[1], + TaskRunName: "pipelinerun-mytask2", + TaskRun: nil, + }}, + canSecondTaskRun: false, + }, + { + name: "first-task-finished", + state: []*PipelineRunTaskRun{{ + Task: mytask1, + PipelineTask: &mypipelinetasks[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeSucceeded(mytaskruns[0]), + }, { + Task: mytask2, + PipelineTask: &mypipelinetasks[1], + TaskRunName: "pipelinerun-mytask2", + TaskRun: nil, + }}, + canSecondTaskRun: true, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + cantaskrun := canTaskRun(&mypipelinetasks[1], tc.state) + if d := cmp.Diff(cantaskrun, tc.canSecondTaskRun); d != "" { + t.Fatalf("Expected second task availability to run should be %t, but different state returned: %s", tc.canSecondTaskRun, d) + } + }) + } +} diff --git a/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinestate.go b/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinestate.go index 107a8330c92..97ba67ce3e2 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinestate.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinestate.go @@ -45,7 +45,7 @@ func GetNextTask(prName string, state []*PipelineRunTaskRun, logger *zap.Sugared logger.Infof("TaskRun %s is still running so we shouldn't start more for PipelineRun %s", prtr.TaskRunName, prName) return nil } - } else if canTaskRun(prtr.PipelineTask) { + } else if canTaskRun(prtr.PipelineTask, state) { logger.Infof("TaskRun %s should be started for PipelineRun %s", prtr.TaskRunName, prName) return prtr } @@ -54,8 +54,32 @@ func GetNextTask(prName string, state []*PipelineRunTaskRun, logger *zap.Sugared return nil } -func canTaskRun(pt *v1alpha1.PipelineTask) bool { +func canTaskRun(pt *v1alpha1.PipelineTask, state []*PipelineRunTaskRun) bool { // Check if Task can run now. 
Go through all the input constraints + for _, input := range pt.InputSourceBindings { + if len(input.PassedConstraints) > 0 { + for _, constrainingTaskName := range input.PassedConstraints { + for _, prtr := range state { + // the constraining task must have a successful task run to allow this task to run + if prtr.Task.Name == constrainingTaskName { + if prtr.TaskRun == nil { + return false + } + c := prtr.TaskRun.Status.GetCondition(duckv1alpha1.ConditionSucceeded) + if c == nil { + return false + } + switch c.Status { + case corev1.ConditionFalse: + return false + case corev1.ConditionUnknown: + return false + } + } + } + } + } + } return true } diff --git a/test/crd_checks.go b/test/crd_checks.go index 5dc6784abad..56bea537c2d 100644 --- a/test/crd_checks.go +++ b/test/crd_checks.go @@ -86,3 +86,21 @@ func WaitForPipelineRunState(c *clients, name string, inState func(r *v1alpha1.P return inState(r) }) } + +// WaitForServiceExternalIPState polls the status of the k8s Service called name from client every +// interval until an external IP is assigned indicating it is done, returns an +// error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. +func WaitForServiceExternalIPState(c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error { + metricName := fmt.Sprintf("WaitForServiceExternalIPState/%s/%s", name, desc) + _, span := trace.StartSpan(context.Background(), metricName) + defer span.End() + + return wait.PollImmediate(interval, timeout, func() (bool, error) { + r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(r) + }) +} diff --git a/test/helm_task_test.go b/test/helm_task_test.go index e729fe4c4a4..95fe2d8699d 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -94,26 +94,36 @@ func TestHelmDeployPipelineRun(t *testing.T) { t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err) } + logger.Info("Waiting for service to get external IP") var serviceIp string - k8sService, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(helmDeployServiceName, metav1.GetOptions{}) - if err != nil { - t.Errorf("Error getting service at %s %s", helmDeployServiceName, err) - } - if k8sService != nil { - ingress := k8sService.Status.LoadBalancer.Ingress - if len(ingress) > 0 { - serviceIp = ingress[0].IP - t.Logf("Service IP is %s", serviceIp) + if err := WaitForServiceExternalIPState(c, namespace, helmDeployServiceName, func(svc *corev1.Service) (bool, error) { + ingress := svc.Status.LoadBalancer.Ingress + if ingress != nil { + if len(ingress) > 0 { + serviceIp = ingress[0].IP + return true, nil + } } + return false, nil + }, "ServiceExternalIPisReady"); err != nil { + t.Errorf("Error waiting for Service %s to get an external IP: %s", helmDeployServiceName, err) } - resp, err := http.Get(fmt.Sprintf("http://%s:8080", serviceIp)) - if err != nil { - t.Errorf("Error reaching service at http://%s:8080 %s", serviceIp, err) - } - if resp != nil && resp.StatusCode != http.StatusOK { - t.Errorf("Error from service at http://%s:8080 %s", serviceIp, err) + if serviceIp != "" { + resp, err := http.Get(fmt.Sprintf("http://%s:8080", serviceIp)) + if err != nil { + t.Errorf("Error reaching service at http://%s:8080 %s", serviceIp, err) + } + if resp != nil && resp.StatusCode != http.StatusOK { + t.Errorf("Error from
service at http://%s:8080 %s", serviceIp, err) + } + + } else { + t.Errorf("Service IP is empty.") } + + // cleanup task to remove helm from cluster, will not fail the test if it fails, just log + removeHelmFromCluster(c, t, namespace, logger) } func getGoHelloworldGitResource(namespace string) *v1alpha1.PipelineResource { return &v1alpha1.PipelineResource{ ObjectMeta: metav1.ObjectMeta{ @@ -144,7 +154,7 @@ func getCreateImageTask(namespace string, t *testing.T) *v1alpha1.Task { } imageName = fmt.Sprintf("%s/%s", dockerRepo, AppendRandomString(sourceImageName)) - t.Log("Image to be published: %s", imageName) + t.Logf("Image to be published: %s", imageName) return &v1alpha1.Task{ ObjectMeta: metav1.ObjectMeta{ @@ -203,7 +213,10 @@ func getHelmDeployTask(namespace string) *v1alpha1.Task { }, { Name: "helm-cleanup", //for local clusters, clean up from previous runs Image: "alpine/helm", - Command: []string{"/bin/sh", + Command: []string{ + "/bin/sh", + }, + Args: []string{ "-c", "helm ls --short --all | xargs -n1 helm del --purge", }, @@ -255,6 +268,7 @@ func getHelmDeployPipeline(namespace string) *v1alpha1.Pipeline { ResourceRef: v1alpha1.PipelineResourceRef{ Name: sourceResourceName, }, + PassedConstraints: []string{createImageTaskName}, }}, Params: []v1alpha1.Param{{ Name: "pathToHelmCharts", @@ -306,7 +320,92 @@ func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { }}, } + t.Logf("Creating Cluster Role binding in kube-system for helm in namespace %s", namespace) if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(defaultClusterRoleBinding); err != nil { t.Fatalf("Failed to create default Service account for Helm in namespace: %s - %s", namespace, err) } + + kubesystemClusterRoleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: AppendRandomString("default-tiller"), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "default", + Namespace: "kube-system", + }}, + } + + t.Logf("Creating Cluster Role binding in kube-system for helm") + if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(kubesystemClusterRoleBinding); err != nil { + t.Fatalf("Failed to create default Service account for Helm in kube-system - %s", err) + } +} + +func removeHelmFromCluster(c *clients, t *testing.T, namespace string, logger *logging.BaseLogger) { + helmResetTaskName := "helm-reset-task" + helmResetTask := &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: helmResetTaskName, + }, + Spec: v1alpha1.TaskSpec{ + BuildSpec: &buildv1alpha1.BuildSpec{ + Steps: []corev1.Container{{ + Name: "helm-reset", + Image: "alpine/helm", + Args: []string{"reset", "--force"}, + }, + }, + }, + }, + } + + helmResetTaskRunName := "helm-reset-taskrun" + helmResetTaskRun := &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: helmResetTaskRunName, + }, + Spec: v1alpha1.TaskRunSpec{ + TaskRef: v1alpha1.TaskRef{ + Name: helmResetTaskName, + }, + Trigger: v1alpha1.TaskTrigger{ + TriggerRef: v1alpha1.TaskTriggerRef{ + Type: v1alpha1.TaskTriggerTypeManual, + }, + }, + }, + } + + logger.Infof("Creating Task %s", helmResetTaskName) + if _, err := c.TaskClient.Create(helmResetTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", helmResetTaskName, err) + } + + logger.Infof("Creating TaskRun %s", helmResetTaskRunName) + if _, err := c.TaskRunClient.Create(helmResetTaskRun); err != nil { + t.Fatalf("Failed
to create TaskRun `%s`: %s", helmResetTaskRunName, err) + } + + logger.Infof("Waiting for TaskRun %s in namespace %s to complete", helmResetTaskRunName, namespace) + if err := WaitForTaskRunState(c, helmResetTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) { + c := tr.Status.GetCondition(duckv1alpha1.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue { + return true, nil + } else if c.Status == corev1.ConditionFalse { + return true, fmt.Errorf("pipeline run %s failed!", hwPipelineRunName) + } + } + return false, nil + }, "TaskRunSuccess"); err != nil { + logger.Infof("TaskRun %s failed to finish: %s", helmResetTaskRunName, err) + } } From 142733486fa5d75d25c03295d6cb78523d856efb Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Fri, 19 Oct 2018 09:13:30 -0400 Subject: [PATCH 4/8] better cleanup of helm artifacts from cluster in test - call helm cleanup if test is interrupted or fails - cleanup service account and cluster role binding --- test/helm_task_test.go | 162 ++++++++++++++++++++++++++++++++++------- 1 file changed, 135 insertions(+), 27 deletions(-) diff --git a/test/helm_task_test.go b/test/helm_task_test.go index 95fe2d8699d..9f3a186796f 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -42,14 +42,18 @@ const ( helmDeployServiceName = "gohelloworld-chart" ) -var imageName string +var ( + imageName string + clusterRoleBindings [3]*rbacv1.ClusterRoleBinding + tillerServiceAccount *corev1.ServiceAccount +) // TestHelmDeployPipelineRun is an integration test that will verify a pipeline build an image // and then using helm to deploy it func TestHelmDeployPipelineRun(t *testing.T) { logger := logging.GetContextLogger(t.Name()) c, namespace := setup(t, logger) - setupClusterBindingForHelm(c, t, namespace) + setupClusterBindingForHelm(c, t, namespace, logger) knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger) defer tearDown(logger, c.KubeClient, namespace) @@ -109,6 +113,10 @@ func TestHelmDeployPipelineRun(t *testing.T) { t.Errorf("Error waiting for Service %s to get an external IP: %s", helmDeployServiceName, err) } + // cleanup task to remove helm from cluster, will not fail the test if it fails, just log + knativetest.CleanupOnInterrupt(func() { helmCleanup(c, t, namespace, logger) }, logger) + defer helmCleanup(c, t, namespace, logger) + if serviceIp != "" { resp, err := http.Get(fmt.Sprintf("http://%s:8080", serviceIp)) if err != nil { @@ -121,9 +129,6 @@ func TestHelmDeployPipelineRun(t *testing.T) { } else { t.Errorf("Service IP is empty.") } - - // cleanup task to remove helm from cluster, will not fail the test if it fails, just log - removeHelmFromCluster(c, t, namespace, logger) } func getGoHelloworldGitResource(namespace string) *v1alpha1.PipelineResource { @@ -210,16 +215,6 @@ func getHelmDeployTask(namespace string) *v1alpha1.Task { Name: "helm-init", Image: "alpine/helm", Args: []string{"init"}, - }, { - Name: "helm-cleanup", //for local clusters, clean up from previous runs - Image: "alpine/helm", - Command: []string{ - "/bin/sh", - }, - Args: []string{ - "-c", - "helm ls --short --all | xargs -n1 helm del --purge", - }, }, { Name: "helm-deploy", Image: "alpine/helm", @@ -303,8 +298,36 @@ func getHelmDeployPipelineRun(namespace string) *v1alpha1.PipelineRun { } } -func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { - defaultClusterRoleBinding := &rbacv1.ClusterRoleBinding{ +func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string, logger 
*logging.BaseLogger) { + tillerServiceAccount = &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tiller", + Namespace: "kube-system", + }, + } + + logger.Infof("Creating tiller service account") + if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts("kube-system").Create(tillerServiceAccount); err != nil { + t.Fatalf("Failed to create default Service account for Helm %s", err) + } + + clusterRoleBindings[0] = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: AppendRandomString("tiller"), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "tiller", + Namespace: "kube-system", + }}, + } + + clusterRoleBindings[1] = &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: AppendRandomString("default-tiller"), }, @@ -320,12 +343,7 @@ func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { }}, } - t.Logf("Creating Cluster Role binding in kube-system for helm in namespace %s", namespace) - if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(defaultClusterRoleBinding); err != nil { - t.Fatalf("Failed to create default Service account for Helm in namespace: %s - %s", namespace, err) - } - - kubesystemClusterRoleBinding := &rbacv1.ClusterRoleBinding{ + clusterRoleBindings[2] = &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: AppendRandomString("default-tiller"), }, @@ -341,9 +359,99 @@ func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { }}, } - t.Logf("Creating Cluster Role binding in kube-system for helm") - if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(kubesystemClusterRoleBinding); err != nil { - t.Fatalf("Failed to create default Service account for Helm in kube-system - %s", err) + for _, crb := range clusterRoleBindings { + logger.Infof("Creating Cluster Role binding %s for helm", crb.Name) + if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(crb); err != nil { + t.Fatalf("Failed to create cluster role binding for Helm %s", err) + } + } +} + +func helmCleanup(c *clients, t *testing.T, namespace string, logger *logging.BaseLogger) { + logger.Infof("Cleaning up helm from cluster...") + + removeAllHelmReleases(c, t, namespace, logger) + removeHelmFromCluster(c, t, namespace, logger) + + logger.Infof("Deleting tiller service account") + if err := c.KubeClient.Kube.CoreV1().ServiceAccounts("kube-system").Delete("tiller", &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete default Service account for Helm %s", err) + } + + for _, crb := range clusterRoleBindings { + logger.Infof("Deleting Cluster Role binding %s for helm", crb.Name) + if err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Delete(crb.Name, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete cluster role binding for Helm %s", err) + } + } +} + +func removeAllHelmReleases(c *clients, t *testing.T, namespace string, logger *logging.BaseLogger) { + helmRemoveAllTaskName := "helm-remove-all-task" + helmRemoveAllTask := &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: helmRemoveAllTaskName, + }, + Spec: v1alpha1.TaskSpec{ + BuildSpec: &buildv1alpha1.BuildSpec{ + Steps: []corev1.Container{{ + Name: "helm-remove-all", + Image: "alpine/helm", + Command: []string{ + "/bin/sh", + }, + Args: []string{ + "-c", + "helm ls --short --all | xargs -n1 helm del 
--purge", + }, + }, + }, + }, + } + + helmRemoveAllTaskRunName := "helm-remove-all-taskrun" + helmRemoveAllTaskRun := &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: helmRemoveAllTaskRunName, + }, + Spec: v1alpha1.TaskRunSpec{ + TaskRef: v1alpha1.TaskRef{ + Name: helmRemoveAllTaskName, + }, + Trigger: v1alpha1.TaskTrigger{ + TriggerRef: v1alpha1.TaskTriggerRef{ + Type: v1alpha1.TaskTriggerTypeManual, + }, + }, + }, + } + + logger.Infof("Creating Task %s", helmRemoveAllTaskName) + if _, err := c.TaskClient.Create(helmRemoveAllTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", helmRemoveAllTaskName, err) + } + + logger.Infof("Creating TaskRun %s", helmRemoveAllTaskRunName) + if _, err := c.TaskRunClient.Create(helmRemoveAllTaskRun); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", helmRemoveAllTaskRunName, err) + } + + logger.Infof("Waiting for TaskRun %s in namespace %s to complete", helmRemoveAllTaskRunName, namespace) + if err := WaitForTaskRunState(c, helmRemoveAllTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) { + c := tr.Status.GetCondition(duckv1alpha1.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue { + return true, nil + } else if c.Status == corev1.ConditionFalse { + return true, fmt.Errorf("task run %s failed!", hwPipelineRunName) + } + } + return false, nil + }, "TaskRunSuccess"); err != nil { + logger.Infof("TaskRun %s failed to finish: %s", helmRemoveAllTaskRunName, err) } } @@ -401,7 +509,7 @@ func removeHelmFromCluster(c *clients, t *testing.T, namespace string, logger *l if c.Status == corev1.ConditionTrue { return true, nil } else if c.Status == corev1.ConditionFalse { - return true, fmt.Errorf("pipeline run %s failed!", hwPipelineRunName) + return true, fmt.Errorf("task run %s failed!", hwPipelineRunName) } } return false, nil From a5478f8ad8fb7c36c35009070c013cdd30c29243 Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Fri, 19 Oct 2018 11:52:04 -0400 Subject: [PATCH 5/8] dumping build logs to debug failure on Prow --- test/helm_task_test.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/helm_task_test.go b/test/helm_task_test.go index 9f3a186796f..07b8f3eec2a 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -28,6 +28,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" ) @@ -98,6 +99,23 @@ func TestHelmDeployPipelineRun(t *testing.T) { t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err) } + // The Build created by the TaskRun will have the same name + b, err := c.BuildClient.Get("helm-deploy-pipeline-run-helm-deploy", metav1.GetOptions{}) + if err != nil { + t.Errorf("Expected there to be a Build with the same name as TaskRun %s but got error: %s", "helm-deploy-pipeline-run-helm-deploy", err) + } + cluster := b.Status.Cluster + if cluster == nil || cluster.PodName == "" { + t.Fatalf("Expected build status to have a podname but it didn't!") + } + logs, err := getInitContainerLogsFromPod(c.KubeClient.Kube, cluster.PodName, namespace) + if err != nil { + t.Errorf("Expected there to be logs from build helm-deploy-pipeline-run-helm-deploy %s", err) + } + logger.Info("=========== build logs ===============") + logger.Info(logs) + logger.Info("=========== build logs ===============") + logger.Info("Waiting for service to
get external IP") var serviceIp string if err := WaitForServiceExternalIPState(c, namespace, helmDeployServiceName, func(svc *corev1.Service) (bool, error) { @@ -131,6 +149,20 @@ func TestHelmDeployPipelineRun(t *testing.T) { } } +func getInitContainerLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) { + p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) + if err != nil { + return "", err + } + + var containers []string + for _, initContainer := range p.Spec.InitContainers { + containers = append(containers, initContainer.Name) + } + + return getContainerLogs(c, pod, namespace, containers...) +} + func getGoHelloworldGitResource(namespace string) *v1alpha1.PipelineResource { return &v1alpha1.PipelineResource{ ObjectMeta: metav1.ObjectMeta{ From 5f218cb48b598f23b9b6901d14005cfb63e153da Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Fri, 19 Oct 2018 12:40:53 -0400 Subject: [PATCH 6/8] add --wait flag on helm init to block until tiller is ready --- test/helm_task_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/helm_task_test.go b/test/helm_task_test.go index 07b8f3eec2a..d29fe7288e3 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -246,7 +246,7 @@ func getHelmDeployTask(namespace string) *v1alpha1.Task { Steps: []corev1.Container{{ Name: "helm-init", Image: "alpine/helm", - Args: []string{"init"}, + Args: []string{"init", "--wait"}, }, { Name: "helm-deploy", Image: "alpine/helm", From 4adfd60bd33cc960bd48e1196ecf1658d46de607 Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Fri, 19 Oct 2018 14:28:06 -0400 Subject: [PATCH 7/8] cleanup of logging in helm task test --- test/build_logs.go | 66 ++++++++++++++++++++++++++++++++++++++++++ test/helm_task_test.go | 39 +++++-------------------- 2 files changed, 73 insertions(+), 32 deletions(-) create mode 100644 test/build_logs.go diff --git a/test/build_logs.go b/test/build_logs.go new file mode 100644 index 00000000000..3433278fe60 --- /dev/null +++ b/test/build_logs.go @@ -0,0 +1,66 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "io/ioutil" + "strings" + + "github.com/knative/pkg/test/logging" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +//CollectBuildLogs will get the build logs for a task run +func CollectBuildLogs(c *clients, buildName, namespace string, logger *logging.BaseLogger) { + b, err := c.BuildClient.Get(buildName, metav1.GetOptions{}) + if err != nil { + logger.Infof("Expected there to be a Build with the same name as TaskRun %s but got error: %s", buildName, err) + } + cluster := b.Status.Cluster + if cluster == nil || cluster.PodName == "" { + logger.Infof("Expected build status to have a podname but it didn't!") + } + logs, err := getInitContainerLogsFromPod(c.KubeClient.Kube, cluster.PodName, namespace) + if err != nil { + logger.Infof("Expected there to be logs from build helm-deploy-pipeline-run-helm-deploy %s", err) + } + logger.Infof("build logs %s", logs) +} + +func getInitContainerLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) { + p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) + if err != nil { + return "", err + } + + sb := strings.Builder{} + for _, initContainer := range p.Spec.InitContainers { + req := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Follow: true, Container: initContainer.Name}) + rc, err := req.Stream() + if err != nil { + return "", err + } + bs, err := ioutil.ReadAll(rc) + if err != nil { + return "", err + } + sb.Write(bs) + } + return sb.String(), nil +} diff --git a/test/helm_task_test.go b/test/helm_task_test.go index d29fe7288e3..50f7e806de3 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -28,7 +28,6 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" ) @@ -96,26 +95,16 @@ func TestHelmDeployPipelineRun(t *testing.T) { } return false, nil }, "PipelineRunCompleted"); err != nil { + taskruns, err := c.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun list for PipelineRun %s %s", helmDeployPipelineRunName, err) + } + for _, tr := range taskruns.Items { + CollectBuildLogs(c, tr.Name, namespace, logger) + } t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err) } - // The Build created by the TaskRun will have the same name - b, err := c.BuildClient.Get("helm-deploy-pipeline-run-helm-deploy", metav1.GetOptions{}) - if err != nil { - t.Errorf("Expected there to be a Build with the same name as TaskRun %s but got error: %s", "helm-deploy-pipeline-run-helm-deploy", err) - } - cluster := b.Status.Cluster - if cluster == nil || cluster.PodName == "" { - t.Fatalf("Expected build status to have a podname but it didn't!") - } - logs, err := getInitContainerLogsFromPod(c.KubeClient.Kube, cluster.PodName, namespace) - if err != nil { - t.Errorf("Expected there to be logs from build helm-deploy-pipeline-run-helm-deploy %s", err) - } - logger.Info("=========== build logs ===============") - logger.Info(logs) - logger.Info("=========== build logs ===============") - logger.Info("Waiting for service to get external IP") var serviceIp string if err := WaitForServiceExternalIPState(c, namespace, helmDeployServiceName, func(svc *corev1.Service) (bool, error) { @@ -149,20 +138,6 @@ func TestHelmDeployPipelineRun(t *testing.T) { } } -func getInitContainerLogsFromPod(c 
kubernetes.Interface, pod, namespace string) (string, error) { - p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var containers []string - for _, initContainer := range p.Spec.InitContainers { - containers = append(containers, initContainer.Name) - } - - return getContainerLogs(c, pod, namespace, containers...) -} - func getGoHelloworldGitResource(namespace string) *v1alpha1.PipelineResource { return &v1alpha1.PipelineResource{ ObjectMeta: metav1.ObjectMeta{ From 3f87858c3a0d8062e054f8c69a85ddf5542d4bb1 Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Fri, 19 Oct 2018 15:28:24 -0400 Subject: [PATCH 8/8] Add gohelloworld sample in test folder will follow up with another PR to update the helm test to use this instead of the external repo, but it needs to be in the git source first --- test/gohelloworld/Dockerfile | 11 ++++++ .../gohelloworld-chart/.helmignore | 21 +++++++++++ .../gohelloworld-chart/Chart.yaml | 5 +++ .../gohelloworld-chart/templates/_helpers.tpl | 32 ++++++++++++++++ .../templates/deployment.yaml | 37 +++++++++++++++++++ .../gohelloworld-chart/templates/service.yaml | 18 +++++++++ .../gohelloworld-chart/values.yaml | 23 ++++++++++++ test/gohelloworld/main.go | 19 ++++++++++ 8 files changed, 166 insertions(+) create mode 100644 test/gohelloworld/Dockerfile create mode 100644 test/gohelloworld/gohelloworld-chart/.helmignore create mode 100644 test/gohelloworld/gohelloworld-chart/Chart.yaml create mode 100644 test/gohelloworld/gohelloworld-chart/templates/_helpers.tpl create mode 100644 test/gohelloworld/gohelloworld-chart/templates/deployment.yaml create mode 100644 test/gohelloworld/gohelloworld-chart/templates/service.yaml create mode 100644 test/gohelloworld/gohelloworld-chart/values.yaml create mode 100644 test/gohelloworld/main.go diff --git a/test/gohelloworld/Dockerfile b/test/gohelloworld/Dockerfile new file mode 100644 index 00000000000..6c599e2168f --- /dev/null +++ b/test/gohelloworld/Dockerfile @@ -0,0 +1,11 @@ +FROM golang + +# Copy the local package files to the container's workspace. +ADD . /go/src/github.com/knative/build-pipeline/test/gohelloworld + +RUN go install github.com/knative/build-pipeline/test/gohelloworld + +ENTRYPOINT /go/bin/gohelloworld + +# Document that the service listens on port 8080. +EXPOSE 8080 \ No newline at end of file diff --git a/test/gohelloworld/gohelloworld-chart/.helmignore b/test/gohelloworld/gohelloworld-chart/.helmignore new file mode 100644 index 00000000000..f0c13194444 --- /dev/null +++ b/test/gohelloworld/gohelloworld-chart/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/test/gohelloworld/gohelloworld-chart/Chart.yaml b/test/gohelloworld/gohelloworld-chart/Chart.yaml new file mode 100644 index 00000000000..189a140a8f7 --- /dev/null +++ b/test/gohelloworld/gohelloworld-chart/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: gohelloworld-chart +version: 0.1.0 diff --git a/test/gohelloworld/gohelloworld-chart/templates/_helpers.tpl b/test/gohelloworld/gohelloworld-chart/templates/_helpers.tpl new file mode 100644 index 00000000000..9c9b9c3c987 --- /dev/null +++ b/test/gohelloworld/gohelloworld-chart/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "gohelloworld-chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "gohelloworld-chart.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "gohelloworld-chart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/test/gohelloworld/gohelloworld-chart/templates/deployment.yaml b/test/gohelloworld/gohelloworld-chart/templates/deployment.yaml new file mode 100644 index 00000000000..8862d5ecfe7 --- /dev/null +++ b/test/gohelloworld/gohelloworld-chart/templates/deployment.yaml @@ -0,0 +1,37 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "gohelloworld-chart.name" . }} + labels: + app: {{ template "gohelloworld-chart.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "gohelloworld-chart.name" . 
}} + release: {{ .Release.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.internalPort }} + livenessProbe: + httpGet: + path: / + port: {{ .Values.service.internalPort }} + readinessProbe: + httpGet: + path: / + port: {{ .Values.service.internalPort }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} \ No newline at end of file diff --git a/test/gohelloworld/gohelloworld-chart/templates/service.yaml b/test/gohelloworld/gohelloworld-chart/templates/service.yaml new file mode 100644 index 00000000000..409ed379b2e --- /dev/null +++ b/test/gohelloworld/gohelloworld-chart/templates/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "gohelloworld-chart.name" . }} + labels: + app: {{ template "gohelloworld-chart.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.externalPort }} + targetPort: {{ .Values.service.internalPort }} + name: {{ .Values.service.name }} + selector: + app: {{ template "gohelloworld-chart.name" . }} + release: {{ .Release.Name }} \ No newline at end of file diff --git a/test/gohelloworld/gohelloworld-chart/values.yaml b/test/gohelloworld/gohelloworld-chart/values.yaml new file mode 100644 index 00000000000..4608fb113f5 --- /dev/null +++ b/test/gohelloworld/gohelloworld-chart/values.yaml @@ -0,0 +1,23 @@ +# Default values for gohelloworld-chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + tag: latest + pullPolicy: Always + +service: + name: gohelloworld + type: LoadBalancer + externalPort: 8080 + internalPort: 8080 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi diff --git a/test/gohelloworld/main.go b/test/gohelloworld/main.go new file mode 100644 index 00000000000..eb7670ddc3f --- /dev/null +++ b/test/gohelloworld/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "fmt" + "log" + "net/http" +) + +func handler(w http.ResponseWriter, r *http.Request) { + log.Print("Hello world received a request.") + fmt.Fprintf(w, "Hello World! \n") +} + +func main() { + log.Print("Hello world sample started.") + + http.HandleFunc("/", handler) + http.ListenAndServe(":8080", nil) +}