From 19d6cee8149917c994b737510d9c8dbfc6dbdd27 Mon Sep 17 00:00:00 2001 From: Ed Lee Date: Fri, 19 Oct 2018 17:31:23 -0700 Subject: [PATCH 001/145] Updated ARTIFACT_REPO.md (#1049) --- ARTIFACT_REPO.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ARTIFACT_REPO.md b/ARTIFACT_REPO.md index d0b1ef7b0f30..df23309ba568 100644 --- a/ARTIFACT_REPO.md +++ b/ARTIFACT_REPO.md @@ -106,7 +106,7 @@ For Minio, the `accessKeySecret` and `secretKeySecret` naturally correspond the Example: ``` -$ kubectl edit configmap workflow-controller-configmap -n kube-system +$ kubectl edit configmap workflow-controller-configmap -n argo # assumes argo was installed in the argo namespace ... data: config: | From e297d19501a8116b5a18c925a3c72d7c7e106ea0 Mon Sep 17 00:00:00 2001 From: Ed Lee Date: Sat, 20 Oct 2018 21:42:50 -0700 Subject: [PATCH 002/145] Updated examples/README.md (#1051) --- examples/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/README.md b/examples/README.md index dd0a8d2d977b..4d476b832cf5 100644 --- a/examples/README.md +++ b/examples/README.md @@ -8,6 +8,8 @@ Argo is implemented as a Kubernetes CRD (Custom Resource Definition). As a resul Many of the Argo examples used in this walkthrough are available at https://github.com/argoproj/argo/tree/master/examples. If you like this project, please give us a star! +For a complete description of the Argo workflow spec, please refer to https://github.com/argoproj/argo/blob/master/pkg/apis/workflow/v1alpha1/types.go + ## Table of Content - [Argo CLI](#argo-cli) From f18716b74c6f52d0c8bf4d64c05eae9db75bfb1f Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Sat, 27 Oct 2018 06:29:15 +0900 Subject: [PATCH 003/145] Support for K8s API based Executor (#1010) --- cmd/argoexec/commands/root.go | 6 + docs/workflow-controller-configmap.yaml | 2 +- workflow/common/common.go | 3 + workflow/common/util.go | 6 +- workflow/controller/workflowpod.go | 7 ++ workflow/executor/common/common.go | 140 ++++++++++++++++++++++ workflow/executor/k8sapi/client.go | 149 ++++++++++++++++++++++++ workflow/executor/k8sapi/k8sapi.go | 63 ++++++++++ workflow/executor/kubelet/client.go | 102 +++++----------- workflow/executor/kubelet/kubelet.go | 51 +------- 10 files changed, 401 insertions(+), 128 deletions(-) create mode 100644 workflow/executor/common/common.go create mode 100644 workflow/executor/k8sapi/client.go create mode 100644 workflow/executor/k8sapi/k8sapi.go diff --git a/cmd/argoexec/commands/root.go b/cmd/argoexec/commands/root.go index 344e7f1ca618..c53dadcd2e1d 100644 --- a/cmd/argoexec/commands/root.go +++ b/cmd/argoexec/commands/root.go @@ -15,6 +15,7 @@ import ( "github.com/argoproj/argo/workflow/common" "github.com/argoproj/argo/workflow/executor" "github.com/argoproj/argo/workflow/executor/docker" + "github.com/argoproj/argo/workflow/executor/k8sapi" "github.com/argoproj/argo/workflow/executor/kubelet" ) @@ -77,6 +78,11 @@ func initExecutor() *executor.WorkflowExecutor { var cre executor.ContainerRuntimeExecutor switch os.Getenv(common.EnvVarContainerRuntimeExecutor) { + case common.ContainerRuntimeExecutorK8sAPI: + cre, err = k8sapi.NewK8sAPIExecutor(clientset, config, podName, namespace) + if err != nil { + panic(err.Error()) + } case common.ContainerRuntimeExecutorKubelet: cre, err = kubelet.NewKubeletExecutor() if err != nil { diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml index 8bd773d6e60e..771084761810 100644 --- a/docs/workflow-controller-configmap.yaml +++ 
b/docs/workflow-controller-configmap.yaml @@ -33,7 +33,7 @@ data: bucket: my-bucket region: us-west-2 # keyFormat is a format pattern to define how artifacts will be organized in a bucket. - # It can reference workflow metadata variables such as workflow.namespace, workflow.name, + # It can reference workflow metadata variables such as workflow.namespace, workflow.name, # pod.name. Can also use strftime formating of workflow.creationTimestamp so that workflow # artifacts can be organized by date. If omitted, will use `{{workflow.name}}/{{pod.name}}`, # which has potential for have collisions. diff --git a/workflow/common/common.go b/workflow/common/common.go index 29ab0d0725ec..432339e10cbe 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -97,6 +97,9 @@ const ( // ContainerRuntimeExecutorKubelet to use the kubelet as container runtime executor ContainerRuntimeExecutorKubelet = "kubelet" + // ContainerRuntimeExecutorK8sAPI to use the Kubernetes API server as container runtime executor + ContainerRuntimeExecutorK8sAPI = "k8sapi" + // Variables that are added to the scope during template execution and can be referenced using {{}} syntax // GlobalVarWorkflowName is a global workflow variable referencing the workflow's metadata.name field diff --git a/workflow/common/util.go b/workflow/common/util.go index 97ece971eab7..88f3431ecd0e 100644 --- a/workflow/common/util.go +++ b/workflow/common/util.go @@ -96,7 +96,7 @@ func ExecPodContainer(restConfig *rest.Config, namespace string, pod string, con } // GetExecutorOutput returns the output of an remotecommand.Executor -func GetExecutorOutput(exec remotecommand.Executor) (string, string, error) { +func GetExecutorOutput(exec remotecommand.Executor) (*bytes.Buffer, *bytes.Buffer, error) { var stdOut bytes.Buffer var stdErr bytes.Buffer err := exec.Stream(remotecommand.StreamOptions{ @@ -105,9 +105,9 @@ func GetExecutorOutput(exec remotecommand.Executor) (string, string, error) { Tty: false, }) if err != nil { - return "", "", errors.InternalWrapError(err) + return nil, nil, errors.InternalWrapError(err) } - return stdOut.String(), stdErr.String(), nil + return &stdOut, &stdErr, nil } // ProcessArgs sets in the inputs, the values either passed via arguments, or the hardwired values diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 3ca62c26a49a..6255ef259442 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -275,6 +275,13 @@ func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Contain func (woc *wfOperationCtx) createEnvVars() []apiv1.EnvVar { switch woc.controller.Config.ContainerRuntimeExecutor { + case common.ContainerRuntimeExecutorK8sAPI: + return append(execEnvVars, + apiv1.EnvVar{ + Name: common.EnvVarContainerRuntimeExecutor, + Value: woc.controller.Config.ContainerRuntimeExecutor, + }, + ) case common.ContainerRuntimeExecutorKubelet: return append(execEnvVars, apiv1.EnvVar{ diff --git a/workflow/executor/common/common.go b/workflow/executor/common/common.go new file mode 100644 index 000000000000..e5b94cc38f4b --- /dev/null +++ b/workflow/executor/common/common.go @@ -0,0 +1,140 @@ +package common + +import ( + "bytes" + "compress/gzip" + "fmt" + "os" + "strings" + "syscall" + "time" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" +) + +const ( + containerShimPrefix = "://" +) + +// killGracePeriod is the time in seconds after sending SIGTERM before +// forcefully killing the sidecar with SIGKILL 
(value matches k8s) +const killGracePeriod = 10 + +// GetContainerID returns container ID of a ContainerStatus resource +func GetContainerID(container *v1.ContainerStatus) string { + i := strings.Index(container.ContainerID, containerShimPrefix) + if i == -1 { + return "" + } + return container.ContainerID[i+len(containerShimPrefix):] +} + +// KubernetesClientInterface is the interface to implement getContainerStatus method +type KubernetesClientInterface interface { + getContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error) + killContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error + createArchive(containerID, sourcePath string) (*bytes.Buffer, error) +} + +// WaitForTermination of the given containerID, set the timeout to 0 to discard it +func WaitForTermination(c KubernetesClientInterface, containerID string, timeout time.Duration) error { + ticker := time.NewTicker(time.Second * 1) + defer ticker.Stop() + timer := time.NewTimer(timeout) + if timeout == 0 { + timer.Stop() + } else { + defer timer.Stop() + } + + log.Infof("Starting to wait completion of containerID %s ...", containerID) + for { + select { + case <-ticker.C: + _, containerStatus, err := c.getContainerStatus(containerID) + if err != nil { + return err + } + if containerStatus.State.Terminated == nil { + continue + } + log.Infof("ContainerID %q is terminated: %v", containerID, containerStatus.String()) + return nil + case <-timer.C: + return fmt.Errorf("timeout after %s", timeout.String()) + } + } +} + +// TerminatePodWithContainerID invoke the given SIG against the PID1 of the container. +// No-op if the container is on the hostPID +func TerminatePodWithContainerID(c KubernetesClientInterface, containerID string, sig syscall.Signal) error { + pod, container, err := c.getContainerStatus(containerID) + if err != nil { + return err + } + if container.State.Terminated != nil { + log.Infof("Container %s is already terminated: %v", container.ContainerID, container.State.Terminated.String()) + return nil + } + if pod.Spec.HostPID { + return fmt.Errorf("cannot terminate a hostPID Pod %s", pod.Name) + } + if pod.Spec.RestartPolicy != "Never" { + return fmt.Errorf("cannot terminate pod with a %q restart policy", pod.Spec.RestartPolicy) + } + return c.killContainer(pod, container, sig) +} + +// KillGracefully kills a container gracefully. +func KillGracefully(c KubernetesClientInterface, containerID string) error { + log.Infof("SIGTERM containerID %q: %s", containerID, syscall.SIGTERM.String()) + err := TerminatePodWithContainerID(c, containerID, syscall.SIGTERM) + if err != nil { + return err + } + err = WaitForTermination(c, containerID, time.Second*killGracePeriod) + if err == nil { + log.Infof("ContainerID %q successfully killed", containerID) + return nil + } + log.Infof("SIGKILL containerID %q: %s", containerID, syscall.SIGKILL.String()) + err = TerminatePodWithContainerID(c, containerID, syscall.SIGKILL) + if err != nil { + return err + } + err = WaitForTermination(c, containerID, time.Second*killGracePeriod) + if err != nil { + return err + } + log.Infof("ContainerID %q successfully killed", containerID) + return nil +} + +// CopyArchive downloads files and directories as a tarball and saves it to a specified path. 
+func CopyArchive(c KubernetesClientInterface, containerID, sourcePath, destPath string) error { + log.Infof("Archiving %s:%s to %s", containerID, sourcePath, destPath) + b, err := c.createArchive(containerID, sourcePath) + if err != nil { + return err + } + f, err := os.OpenFile(destPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666) + if err != nil { + return err + } + w := gzip.NewWriter(f) + _, err = w.Write(b.Bytes()) + if err != nil { + return err + } + err = w.Flush() + if err != nil { + return err + } + err = w.Close() + if err != nil { + return err + } + return nil +} diff --git a/workflow/executor/k8sapi/client.go b/workflow/executor/k8sapi/client.go new file mode 100644 index 000000000000..5a949595ac2f --- /dev/null +++ b/workflow/executor/k8sapi/client.go @@ -0,0 +1,149 @@ +package k8sapi + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "syscall" + "time" + + "github.com/argoproj/argo/errors" + "github.com/argoproj/argo/workflow/common" + execcommon "github.com/argoproj/argo/workflow/executor/common" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" +) + +type k8sAPIClient struct { + execcommon.KubernetesClientInterface + + clientset *kubernetes.Clientset + config *restclient.Config + podName string + namespace string +} + +func newK8sAPIClient(clientset *kubernetes.Clientset, config *restclient.Config, podName, namespace string) (*k8sAPIClient, error) { + return &k8sAPIClient{ + clientset: clientset, + config: config, + podName: podName, + namespace: namespace, + }, nil +} + +func (c *k8sAPIClient) getFileContents(containerID, sourcePath string) (string, error) { + _, containerStatus, err := c.getContainerStatus(containerID) + if err != nil { + return "", err + } + command := []string{"cat", sourcePath} + exec, err := common.ExecPodContainer(c.config, c.namespace, c.podName, containerStatus.Name, true, false, command...) + if err != nil { + return "", err + } + stdOut, _, err := common.GetExecutorOutput(exec) + if err != nil { + return "", err + } + return stdOut.String(), nil +} + +func (c *k8sAPIClient) createArchive(containerID, sourcePath string) (*bytes.Buffer, error) { + _, containerStatus, err := c.getContainerStatus(containerID) + if err != nil { + return nil, err + } + command := []string{"tar", "cf", "-", sourcePath} + exec, err := common.ExecPodContainer(c.config, c.namespace, c.podName, containerStatus.Name, true, false, command...) + if err != nil { + return nil, err + } + stdOut, _, err := common.GetExecutorOutput(exec) + if err != nil { + return nil, err + } + return stdOut, nil +} + +func (c *k8sAPIClient) getLogsAsStream(containerID string) (io.ReadCloser, error) { + _, containerStatus, err := c.getContainerStatus(containerID) + if err != nil { + return nil, err + } + return c.clientset.CoreV1().Pods(c.namespace). 
+ GetLogs(c.podName, &v1.PodLogOptions{Container: containerStatus.Name, SinceTime: &metav1.Time{}}).Stream() +} + +func (c *k8sAPIClient) getLogs(containerID string) (string, error) { + reader, err := c.getLogsAsStream(containerID) + if err != nil { + return "", err + } + bytes, err := ioutil.ReadAll(reader) + if err != nil { + return "", errors.InternalWrapError(err) + } + return string(bytes), nil +} + +func (c *k8sAPIClient) saveLogs(containerID, path string) error { + reader, err := c.getLogsAsStream(containerID) + if err != nil { + return err + } + outFile, err := os.Create(path) + if err != nil { + return errors.InternalWrapError(err) + } + defer outFile.Close() + _, err = io.Copy(outFile, reader) + if err != nil { + return errors.InternalWrapError(err) + } + return nil +} + +func (c *k8sAPIClient) getPod() (*v1.Pod, error) { + return c.clientset.CoreV1().Pods(c.namespace).Get(c.podName, metav1.GetOptions{}) +} + +func (c *k8sAPIClient) getContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error) { + pod, err := c.getPod() + if err != nil { + return nil, nil, err + } + for _, containerStatus := range pod.Status.ContainerStatuses { + if execcommon.GetContainerID(&containerStatus) != containerID { + continue + } + return pod, &containerStatus, nil + } + return nil, nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod %s", containerID, c.podName)) +} + +func (c *k8sAPIClient) waitForTermination(containerID string, timeout time.Duration) error { + return execcommon.WaitForTermination(c, containerID, timeout) +} + +func (c *k8sAPIClient) killContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error { + command := []string{"/bin/sh", "-c", fmt.Sprintf("kill -%d 1", sig)} + exec, err := common.ExecPodContainer(c.config, c.namespace, c.podName, container.Name, false, false, command...) 
+ if err != nil { + return err + } + _, _, err = common.GetExecutorOutput(exec) + return err +} + +func (c *k8sAPIClient) killGracefully(containerID string) error { + return execcommon.KillGracefully(c, containerID) +} + +func (c *k8sAPIClient) copyArchive(containerID, sourcePath, destPath string) error { + return execcommon.CopyArchive(c, containerID, sourcePath, destPath) +} diff --git a/workflow/executor/k8sapi/k8sapi.go b/workflow/executor/k8sapi/k8sapi.go new file mode 100644 index 000000000000..16d4ecef339f --- /dev/null +++ b/workflow/executor/k8sapi/k8sapi.go @@ -0,0 +1,63 @@ +package k8sapi + +import ( + "github.com/argoproj/argo/errors" + log "github.com/sirupsen/logrus" + "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" +) + +type K8sAPIExecutor struct { + client *k8sAPIClient +} + +func NewK8sAPIExecutor(clientset *kubernetes.Clientset, config *restclient.Config, podName, namespace string) (*K8sAPIExecutor, error) { + log.Infof("Creating a K8sAPI executor") + client, err := newK8sAPIClient(clientset, config, podName, namespace) + if err != nil { + return nil, errors.InternalWrapError(err) + } + return &K8sAPIExecutor{ + client: client, + }, nil +} + +func (k *K8sAPIExecutor) GetFileContents(containerID string, sourcePath string) (string, error) { + log.Infof("Getting file contents of %s:%s", containerID, sourcePath) + return k.client.getFileContents(containerID, sourcePath) +} + +func (k *K8sAPIExecutor) CopyFile(containerID string, sourcePath string, destPath string) error { + return k.client.copyArchive(containerID, sourcePath, destPath) +} + +// GetOutput returns the entirety of the container output as a string +// Used to capturing script results as an output parameter +func (k *K8sAPIExecutor) GetOutput(containerID string) (string, error) { + log.Infof("Getting output of %s", containerID) + return k.client.getLogs(containerID) +} + +// Logs copies logs to a given path +func (k *K8sAPIExecutor) Logs(containerID, path string) error { + log.Infof("Saving output of %s to %s", containerID, path) + return k.client.saveLogs(containerID, path) +} + +// Wait for the container to complete +func (k *K8sAPIExecutor) Wait(containerID string) error { + log.Infof("Waiting for container %s to complete", containerID) + return k.client.waitForTermination(containerID, 0) +} + +// Kill kills a list of containerIDs first with a SIGTERM then with a SIGKILL after a grace period +func (k *K8sAPIExecutor) Kill(containerIDs []string) error { + log.Infof("Killing containers %s", containerIDs) + for _, containerID := range containerIDs { + err := k.client.killGracefully(containerID) + if err != nil { + return err + } + } + return nil +} diff --git a/workflow/executor/kubelet/client.go b/workflow/executor/kubelet/client.go index 0d4332e3ca09..35178a19f584 100644 --- a/workflow/executor/kubelet/client.go +++ b/workflow/executor/kubelet/client.go @@ -12,7 +12,6 @@ import ( "net/url" "os" "strconv" - "strings" "syscall" "time" @@ -20,6 +19,7 @@ import ( "github.com/argoproj/argo/errors" "github.com/argoproj/argo/workflow/common" + execcommon "github.com/argoproj/argo/workflow/executor/common" "github.com/gorilla/websocket" log "github.com/sirupsen/logrus" "k8s.io/api/core/v1" @@ -27,10 +27,11 @@ import ( const ( readWSResponseTimeout = time.Minute * 1 - containerShimPrefix = "://" ) type kubeletClient struct { + execcommon.KubernetesClientInterface + httpClient *http.Client httpHeader http.Header websocketDialer *websocket.Dialer @@ -178,28 +179,20 @@ func (k *kubeletClient) 
saveLogsToFile(namespace, podName, containerName, path s return err } -func getContainerID(container *v1.ContainerStatus) string { - i := strings.Index(container.ContainerID, containerShimPrefix) - if i == -1 { - return "" - } - return container.ContainerID[i+len(containerShimPrefix):] -} - -func (k *kubeletClient) getContainerStatus(containerID string) (*v1.ContainerStatus, error) { +func (k *kubeletClient) getContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error) { podList, err := k.getPodList() if err != nil { - return nil, errors.InternalWrapError(err) + return nil, nil, errors.InternalWrapError(err) } for _, pod := range podList.Items { for _, container := range pod.Status.ContainerStatuses { - if getContainerID(&container) != containerID { + if execcommon.GetContainerID(&container) != containerID { continue } - return &container, nil + return &pod, &container, nil } } - return nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) + return nil, nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) } func (k *kubeletClient) GetContainerLogs(containerID string) (string, error) { @@ -209,7 +202,7 @@ func (k *kubeletClient) GetContainerLogs(containerID string) (string, error) { } for _, pod := range podList.Items { for _, container := range pod.Status.ContainerStatuses { - if getContainerID(&container) != containerID { + if execcommon.GetContainerID(&container) != containerID { continue } return k.getLogs(pod.Namespace, pod.Name, container.Name) @@ -225,7 +218,7 @@ func (k *kubeletClient) SaveLogsToFile(containerID, path string) error { } for _, pod := range podList.Items { for _, container := range pod.Status.ContainerStatuses { - if getContainerID(&container) != containerID { + if execcommon.GetContainerID(&container) != containerID { continue } return k.saveLogsToFile(pod.Namespace, pod.Name, container.Name, path) @@ -294,39 +287,6 @@ func (k *kubeletClient) readFileContents(u *url.URL) (*bytes.Buffer, error) { } } -// TerminatePodWithContainerID invoke the given SIG against the PID1 of the container. -// No-op if the container is on the hostPID -func (k *kubeletClient) TerminatePodWithContainerID(containerID string, sig syscall.Signal) error { - podList, err := k.getPodList() - if err != nil { - return errors.InternalWrapError(err) - } - for _, pod := range podList.Items { - for _, container := range pod.Status.ContainerStatuses { - if getContainerID(&container) != containerID { - continue - } - if container.State.Terminated != nil { - log.Infof("Container %s is already terminated: %v", container.ContainerID, container.State.Terminated.String()) - return nil - } - if pod.Spec.HostPID { - return fmt.Errorf("cannot terminate a hostPID Pod %s", pod.Name) - } - if pod.Spec.RestartPolicy != "Never" { - return fmt.Errorf("cannot terminate pod with a %q restart policy", pod.Spec.RestartPolicy) - } - u, err := url.ParseRequestURI(fmt.Sprintf("wss://%s/exec/%s/%s/%s?command=/bin/sh&&command=-c&command=kill+-%d+1&output=1&error=1", k.kubeletEndpoint, pod.Namespace, pod.Name, container.Name, sig)) - if err != nil { - return errors.InternalWrapError(err) - } - _, err = k.exec(u) - return err - } - } - return errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) -} - // CreateArchive exec in the given containerID and create a tarball of the given sourcePath. 
Works with directory func (k *kubeletClient) CreateArchive(containerID, sourcePath string) (*bytes.Buffer, error) { return k.getCommandOutput(containerID, fmt.Sprintf("command=tar&command=-cf&command=-&command=%s&output=1", sourcePath)) @@ -344,7 +304,7 @@ func (k *kubeletClient) getCommandOutput(containerID, command string) (*bytes.Bu } for _, pod := range podList.Items { for _, container := range pod.Status.ContainerStatuses { - if getContainerID(&container) != containerID { + if execcommon.GetContainerID(&container) != containerID { continue } if container.State.Terminated != nil { @@ -367,30 +327,22 @@ func (k *kubeletClient) getCommandOutput(containerID, command string) (*bytes.Bu // WaitForTermination of the given containerID, set the timeout to 0 to discard it func (k *kubeletClient) WaitForTermination(containerID string, timeout time.Duration) error { - ticker := time.NewTicker(time.Second * 1) - defer ticker.Stop() - timer := time.NewTimer(timeout) - if timeout == 0 { - timer.Stop() - } else { - defer timer.Stop() - } + return execcommon.WaitForTermination(k, containerID, timeout) +} - log.Infof("Starting to wait completion of containerID %s ...", containerID) - for { - select { - case <-ticker.C: - containerStatus, err := k.getContainerStatus(containerID) - if err != nil { - return err - } - if containerStatus.State.Terminated == nil { - continue - } - log.Infof("ContainerID %q is terminated: %v", containerID, containerStatus.String()) - return nil - case <-timer.C: - return fmt.Errorf("timeout after %s", timeout.String()) - } +func (k *kubeletClient) killContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error { + u, err := url.ParseRequestURI(fmt.Sprintf("wss://%s/exec/%s/%s/%s?command=/bin/sh&&command=-c&command=kill+-%d+1&output=1&error=1", k.kubeletEndpoint, pod.Namespace, pod.Name, container.Name, sig)) + if err != nil { + return errors.InternalWrapError(err) } + _, err = k.exec(u) + return err +} + +func (k *kubeletClient) KillGracefully(containerID string) error { + return execcommon.KillGracefully(k, containerID) +} + +func (k *kubeletClient) CopyArchive(containerID, sourcePath, destPath string) error { + return execcommon.CopyArchive(k, containerID, sourcePath, destPath) } diff --git a/workflow/executor/kubelet/kubelet.go b/workflow/executor/kubelet/kubelet.go index 638cf8c078b8..ca99b0b52033 100644 --- a/workflow/executor/kubelet/kubelet.go +++ b/workflow/executor/kubelet/kubelet.go @@ -1,19 +1,10 @@ package kubelet import ( - "compress/gzip" - "os" - "syscall" - "time" - "github.com/argoproj/argo/errors" log "github.com/sirupsen/logrus" ) -// killGracePeriod is the time in seconds after sending SIGTERM before -// forcefully killing the sidecar with SIGKILL (value matches k8s) -const killGracePeriod = 10 - type KubeletExecutor struct { cli *kubeletClient } @@ -38,29 +29,7 @@ func (k *KubeletExecutor) GetFileContents(containerID string, sourcePath string) } func (k *KubeletExecutor) CopyFile(containerID string, sourcePath string, destPath string) error { - log.Infof("Archiving %s:%s to %s", containerID, sourcePath, destPath) - b, err := k.cli.CreateArchive(containerID, sourcePath) - if err != nil { - return err - } - f, err := os.OpenFile(destPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666) - if err != nil { - return err - } - w := gzip.NewWriter(f) - _, err = w.Write(b.Bytes()) - if err != nil { - return err - } - err = w.Flush() - if err != nil { - return err - } - err = w.Close() - if err != nil { - return err - } - return f.Close() + return 
k.cli.CopyArchive(containerID, sourcePath, destPath) } // GetOutput returns the entirety of the container output as a string @@ -82,26 +51,10 @@ func (k *KubeletExecutor) Wait(containerID string) error { // Kill kills a list of containerIDs first with a SIGTERM then with a SIGKILL after a grace period func (k *KubeletExecutor) Kill(containerIDs []string) error { for _, containerID := range containerIDs { - log.Infof("SIGTERM containerID %q: %s", containerID, syscall.SIGTERM.String()) - err := k.cli.TerminatePodWithContainerID(containerID, syscall.SIGTERM) - if err != nil { - return err - } - err = k.cli.WaitForTermination(containerID, time.Second*killGracePeriod) - if err == nil { - log.Infof("ContainerID %q successfully killed", containerID) - continue - } - log.Infof("SIGKILL containerID %q: %s", containerID, syscall.SIGKILL.String()) - err = k.cli.TerminatePodWithContainerID(containerID, syscall.SIGKILL) - if err != nil { - return err - } - err = k.cli.WaitForTermination(containerID, time.Second*killGracePeriod) + err := k.cli.KillGracefully(containerID) if err != nil { return err } - log.Infof("ContainerID %q successfully killed", containerID) } return nil } From 6e6c59f13ff84fd6b4f1e7f836c783941c434ce7 Mon Sep 17 00:00:00 2001 From: Andrei Miulescu Date: Sat, 27 Oct 2018 08:34:18 +1100 Subject: [PATCH 004/145] Submodules are dirty after checkout -- need to update (#1052) --- workflow/artifacts/git/git.go | 55 ++++++++++++++++++++++++++++++--- workflow/controller/operator.go | 3 +- 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/workflow/artifacts/git/git.go b/workflow/artifacts/git/git.go index 2d70ea4ada74..3f1cf10ba6a7 100644 --- a/workflow/artifacts/git/git.go +++ b/workflow/artifacts/git/git.go @@ -1,7 +1,11 @@ package git import ( + "fmt" + "io/ioutil" + "os" "os/exec" + "os/user" "strings" log "github.com/sirupsen/logrus" @@ -31,13 +35,13 @@ func (g *GitArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string) erro } auth := &ssh2.PublicKeys{User: "git", Signer: signer} auth.HostKeyCallback = ssh.InsecureIgnoreHostKey() - return gitClone(path, inputArtifact, auth) + return gitClone(path, inputArtifact, auth, g.SSHPrivateKey) } if g.Username != "" || g.Password != "" { auth := &http.BasicAuth{Username: g.Username, Password: g.Password} - return gitClone(path, inputArtifact, auth) + return gitClone(path, inputArtifact, auth, "") } - return gitClone(path, inputArtifact, nil) + return gitClone(path, inputArtifact, nil, "") } // Save is unsupported for git output artifacts @@ -45,7 +49,33 @@ func (g *GitArtifactDriver) Save(path string, outputArtifact *wfv1.Artifact) err return errors.Errorf(errors.CodeBadRequest, "Git output artifacts unsupported") } -func gitClone(path string, inputArtifact *wfv1.Artifact, auth transport.AuthMethod) error { +func writePrivateKey(key string) error { + usr, err := user.Current() + if err != nil { + return errors.InternalWrapError(err) + } + sshDir := fmt.Sprintf("%s/.ssh", usr.HomeDir) + err = os.Mkdir(sshDir, 0700) + if err != nil { + return errors.InternalWrapError(err) + } + + sshConfig := `Host * + StrictHostKeyChecking no + UserKnownHostsFile /dev/null` + err = ioutil.WriteFile(fmt.Sprintf("%s/config", sshDir), []byte(sshConfig), 0644) + if err != nil { + return errors.InternalWrapError(err) + } + err = ioutil.WriteFile(fmt.Sprintf("%s/id_rsa", sshDir), []byte(key), 0600) + if err != nil { + return errors.InternalWrapError(err) + } + + return nil +} + +func gitClone(path string, inputArtifact *wfv1.Artifact, auth 
transport.AuthMethod, privateKey string) error { cloneOptions := git.CloneOptions{ URL: inputArtifact.Git.Repo, RecurseSubmodules: git.DefaultSubmoduleRecursionDepth, @@ -70,6 +100,23 @@ func gitClone(path string, inputArtifact *wfv1.Artifact, auth transport.AuthMeth return errors.InternalWrapError(err) } log.Errorf("`%s` stdout:\n%s", cmd.Args, string(output)) + if privateKey != "" { + err := writePrivateKey(privateKey) + if err != nil { + return errors.InternalWrapError(err) + } + } + submodulesCmd := exec.Command("git", "submodule", "update", "--init", "--recursive", "--force") + submodulesCmd.Dir = path + submoduleOutput, err := submodulesCmd.Output() + if err != nil { + if exErr, ok := err.(*exec.ExitError); ok { + log.Errorf("`%s` stderr:\n%s", submodulesCmd.Args, string(exErr.Stderr)) + return errors.InternalError(strings.Split(string(exErr.Stderr), "\n")[0]) + } + return errors.InternalWrapError(err) + } + log.Errorf("`%s` stdout:\n%s", submodulesCmd.Args, string(submoduleOutput)) } return nil } diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index ad7379cc34b9..86c768c3bb54 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -734,9 +734,10 @@ func inferFailedReason(pod *apiv1.Pod) (wfv1.NodePhase, string) { } errMsg := fmt.Sprintf("failed with exit code %d", ctr.State.Terminated.ExitCode) if ctr.Name != common.MainContainerName { - if ctr.State.Terminated.ExitCode == 137 { + if ctr.State.Terminated.ExitCode == 137 || ctr.State.Terminated.ExitCode == 143 { // if the sidecar was SIGKILL'd (exit code 137) assume it was because argoexec // forcibly killed the container, which we ignore the error for. + // Java code 143 is a normal exit 128 + 15 https://github.com/elastic/elasticsearch/issues/31847 log.Infof("Ignoring %d exit code of sidecar '%s'", ctr.State.Terminated.ExitCode, ctr.Name) continue } From 79ed7665d7419e7fbfe8b120c4cbcd486bebee57 Mon Sep 17 00:00:00 2001 From: Val Sichkovskyi Date: Fri, 26 Oct 2018 17:37:49 -0400 Subject: [PATCH 005/145] Parameter and Argument names should support snake case (#1048) --- workflow/validate/validate.go | 24 ++++++++++++--- workflow/validate/validate_test.go | 47 +++++++++++++++++++++++++----- 2 files changed, 60 insertions(+), 11 deletions(-) diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index c9dca2a47bd4..83d813b7a485 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -464,7 +464,7 @@ func validateOutputs(scope map[string]interface{}, tmpl *wfv1.Template) error { } } if art.GlobalName != "" && !isParameter(art.GlobalName) { - errs := isValidWorkflowFieldName(art.GlobalName) + errs := isValidParamOrArtifactName(art.GlobalName) if len(errs) > 0 { return errors.Errorf(errors.CodeBadRequest, "templates.%s.%s.globalName: %s", tmpl.Name, artRef, errs[0]) } @@ -492,7 +492,7 @@ func validateOutputs(scope map[string]interface{}, tmpl *wfv1.Template) error { } } if param.GlobalName != "" && !isParameter(param.GlobalName) { - errs := isValidWorkflowFieldName(param.GlobalName) + errs := isValidParamOrArtifactName(param.GlobalName) if len(errs) > 0 { return errors.Errorf(errors.CodeBadRequest, "%s.globalName: %s", paramRef, errs[0]) } @@ -556,7 +556,14 @@ func validateWorkflowFieldNames(slice interface{}) error { if name == "" { return errors.Errorf(errors.CodeBadRequest, "[%d].name is required", i) } - if errs := isValidWorkflowFieldName(name); len(errs) != 0 { + var errs []string + t := reflect.TypeOf(item) + if t == 
reflect.TypeOf(wfv1.Parameter{}) || t == reflect.TypeOf(wfv1.Artifact{}) { + errs = isValidParamOrArtifactName(name) + } else { + errs = isValidWorkflowFieldName(name) + } + if len(errs) != 0 { return errors.Errorf(errors.CodeBadRequest, "[%d].name: '%s' is invalid: %s", i, name, strings.Join(errs, ";")) } _, ok := names[name] @@ -715,13 +722,22 @@ func verifyNoCycles(tmpl *wfv1.Template, nameToTask map[string]wfv1.DAGTask) err var ( // paramRegex matches a parameter. e.g. {{inputs.parameters.blah}} - paramRegex = regexp.MustCompile(`{{[-a-zA-Z0-9]+(\.[-a-zA-Z0-9]+)*}}`) + paramRegex = regexp.MustCompile(`{{[-a-zA-Z0-9]+(\.[-a-zA-Z0-9_]+)*}}`) + paramOrArtifactNameRegex = regexp.MustCompile(`^[-a-zA-Z0-9_]+[-a-zA-Z0-9_]*$`) ) func isParameter(p string) bool { return paramRegex.MatchString(p) } +func isValidParamOrArtifactName(p string) []string { + var errs []string + if !paramOrArtifactNameRegex.MatchString(p) { + return append(errs, "Parameter/Artifact name must consist of alpha-numeric characters, '_' or '-' e.g. my_param_1, MY-PARAM-1") + } + return errs +} + const ( workflowFieldNameFmt string = "[a-zA-Z0-9][-a-zA-Z0-9]*" workflowFieldNameErrMsg string = "name must consist of alpha-numeric characters or '-', and must start with an alpha-numeric character" diff --git a/workflow/validate/validate_test.go b/workflow/validate/validate_test.go index 235c92b5faad..886dfd126bbf 100644 --- a/workflow/validate/validate_test.go +++ b/workflow/validate/validate_test.go @@ -322,9 +322,7 @@ spec: func TestInvalidArgParamName(t *testing.T) { err := validate(invalidArgParamNames) - if assert.NotNil(t, err) { - assert.Contains(t, err.Error(), invalidErr) - } + assert.NotNil(t, err) } var invalidArgArtNames = ` @@ -336,7 +334,7 @@ spec: entrypoint: kubectl-input-artifact arguments: artifacts: - - name: -kubectl + - name: "&-kubectl" http: url: https://storage.googleapis.com/kubernetes-release/release/v1.8.0/bin/linux/amd64/kubectl @@ -344,7 +342,7 @@ spec: - name: kubectl-input-artifact inputs: artifacts: - - name: -kubectl + - name: "&-kubectl" path: /usr/local/bin/kubectl mode: 0755 container: @@ -423,7 +421,7 @@ spec: container: image: docker/whalesay command: [cowsay] - args: ["{{inputs.parameters.message}}"] + args: ["{{inputs.parameters.message+123}}"] ` func TestInvalidInputParamName(t *testing.T) { @@ -500,7 +498,7 @@ spec: args: ["cowsay hello world | tee /tmp/hello_world.txt"] outputs: artifacts: - - name: __1 + - name: "!1" path: /tmp/hello_world.txt ` @@ -1074,6 +1072,41 @@ func TestSpecArgumentNoValue(t *testing.T) { assert.NotNil(t, err) } +var specArgumentSnakeCase = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: spec-arg-snake-case- +spec: + entrypoint: whalesay + arguments: + artifacts: + - name: __kubectl + http: + url: https://storage.googleapis.com/kubernetes-release/release/v1.8.0/bin/linux/amd64/kubectl + parameters: + - name: my_snake_case_param + value: "hello world" + templates: + - name: whalesay + inputs: + artifacts: + - name: __kubectl + path: /usr/local/bin/kubectl + mode: 0755 + container: + image: docker/whalesay:latest + command: [sh, -c] + args: ["cowsay {{workflow.parameters.my_snake_case_param}} | tee /tmp/hello_world.txt && ls /usr/local/bin/kubectl"] +` + +// TestSpecArgumentSnakeCase we allow parameter and artifact names to be snake case +func TestSpecArgumentSnakeCase(t *testing.T) { + wf := unmarshalWf(specArgumentSnakeCase) + err := ValidateWorkflow(wf, true) + assert.Nil(t, err) +} + var specBadSequenceCountAndEnd = ` 
apiVersion: argoproj.io/v1alpha1 kind: Workflow From 23d733bae386db44ec80639daf91b29dbf86b335 Mon Sep 17 00:00:00 2001 From: Divya Vavili <406488+dvavili@users.noreply.github.com> Date: Fri, 26 Oct 2018 14:39:02 -0700 Subject: [PATCH 006/145] Add namespace explicitly to pod metadata (#1059) --- workflow/controller/workflowpod.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 6255ef259442..7b794832cb48 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -110,7 +110,8 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont mainCtr.Name = common.MainContainerName pod := &apiv1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: nodeID, + Name: nodeID, + Namespace: woc.wf.ObjectMeta.Namespace, Labels: map[string]string{ common.LabelKeyWorkflow: woc.wf.ObjectMeta.Name, // Allows filtering by pods related to specific workflow common.LabelKeyCompleted: "false", // Allows filtering by incomplete workflow pods From 7ef1cea68c94f7f0e1e2f8bd75bedc5a7df8af90 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Fri, 26 Oct 2018 18:44:25 -0700 Subject: [PATCH 007/145] Update dependencies to K8s v1.12 and client-go 9.0 --- Gopkg.lock | 100 +++++++++++++----- Gopkg.toml | 12 +-- api/openapi-spec/swagger.json | 4 +- .../workflow/v1alpha1/openapi_generated.go | 4 +- pkg/client/clientset/versioned/clientset.go | 2 - .../versioned/fake/clientset_generated.go | 9 +- .../clientset/versioned/fake/register.go | 16 +-- .../clientset/versioned/scheme/register.go | 16 +-- .../workflow/v1alpha1/fake/fake_workflow.go | 2 +- .../informers/externalversions/factory.go | 61 +++++++++-- util/unstructured/unstructured.go | 9 +- workflow/util/util.go | 19 ++-- 12 files changed, 170 insertions(+), 84 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index d8bea715700a..82af60f29311 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -34,7 +34,7 @@ [[projects]] branch = "master" - digest = "1:0e8f4b1639b80be02aed21595cef2164bab203e660db4485c142e7cebf4f5e95" + digest = "1:c3b7ed058146643b16d3a9827550fba317dbff9f55249dfafac7eb6c3652ad23" name = "github.com/argoproj/pkg" packages = [ "errors", @@ -49,7 +49,7 @@ "time", ] pruneopts = "" - revision = "1aa3e0c55668da17703adba5c534fff6930db589" + revision = "a581a48d63014312c4f2762787f669e46bdb1fd9" [[projects]] branch = "master" @@ -69,14 +69,14 @@ [[projects]] branch = "master" - digest = "1:6c48291ff15f3c3b263ed6f7356acea45da69e7fca8d0d76d2c021a075fbd52a" + digest = "1:d6c13a378213e3de60445e49084b8a0a9ce582776dfc77927775dbeb3ff72a35" name = "github.com/docker/spdystream" packages = [ ".", "spdy", ] pruneopts = "" - revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db" + revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85" [[projects]] branch = "master" @@ -209,6 +209,14 @@ revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" +[[projects]] + branch = "master" + digest = "1:1e5b1e14524ed08301977b7b8e10c719ed853cbf3f24ecb66fae783a46f207a6" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "" + revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" + [[projects]] branch = "master" digest = "1:754f77e9c839b24778a4b64422236d38515301d2baeb63113aa3edc42e6af692" @@ -239,22 +247,25 @@ [[projects]] branch = "master" - digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94" - name = "github.com/hashicorp/golang-lru" + digest = 
"1:009a1928b8c096338b68b5822d838a72b4d8520715c1463614476359f3282ec8" + name = "github.com/gregjones/httpcache" packages = [ ".", - "simplelru", + "diskcache", ] pruneopts = "" - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" + revision = "9cad4c3443a7200dd6400aef47183728de563a38" [[projects]] branch = "master" - digest = "1:f81c8d7354cc0c6340f2f7a48724ee6c2b3db3e918ecd441c985b4d2d97dd3e7" - name = "github.com/howeyc/gopass" - packages = ["."] + digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] pruneopts = "" - revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8" + revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" [[projects]] digest = "1:7ab38c15bd21e056e3115c8b526d201eaf74e0308da9370997c6b3c187115d36" @@ -281,12 +292,11 @@ revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4" [[projects]] - digest = "1:b79fc583e4dc7055ed86742e22164ac41bf8c0940722dbcb600f1a3ace1a8cb5" + digest = "1:31c6f3c4f1e15fcc24fcfc9f5f24603ff3963c56d6fa162116493b4025fb6acc" name = "github.com/json-iterator/go" packages = ["."] pruneopts = "" - revision = "1624edc4454b8682399def8740d46db5e4362ba4" - version = "1.1.5" + revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" [[projects]] digest = "1:7fe04787f53bb61c1ba9c659b1a90ee3da16b4d6a1c41566bcb5077efbd30f97" @@ -363,6 +373,22 @@ revision = "c37440a7cf42ac63b919c752ca73a85067e05992" version = "v0.2.0" +[[projects]] + branch = "master" + digest = "1:c24598ffeadd2762552269271b3b1510df2d83ee6696c1e543a0ff653af494bc" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "" + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + digest = "1:b46305723171710475f2dd37547edd57b67b9de9f2a6267cafdd98331fd6897f" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "" + revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" + version = "v2.0.1" + [[projects]] digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca" name = "github.com/pkg/errors" @@ -749,8 +775,8 @@ version = "v2.2.1" [[projects]] - branch = "release-1.10" - digest = "1:e164e2b731f3e3eaf63220865e6271d27eb7b3e50e2e0b8ff2d6d1cd7a618fc3" + branch = "release-1.12" + digest = "1:ed04c5203ecbf6358fb6a774b0ecd40ea992d6dcc42adc1d3b7cf9eceb66b6c8" name = "k8s.io/api" packages = [ "admissionregistration/v1alpha1", @@ -764,10 +790,12 @@ "authorization/v1beta1", "autoscaling/v1", "autoscaling/v2beta1", + "autoscaling/v2beta2", "batch/v1", "batch/v1beta1", "batch/v2alpha1", "certificates/v1beta1", + "coordination/v1beta1", "core/v1", "events/v1beta1", "extensions/v1beta1", @@ -777,17 +805,18 @@ "rbac/v1alpha1", "rbac/v1beta1", "scheduling/v1alpha1", + "scheduling/v1beta1", "settings/v1alpha1", "storage/v1", "storage/v1alpha1", "storage/v1beta1", ] pruneopts = "" - revision = "0f11257a8a25954878633ebdc9841c67d8f83bdb" + revision = "475331a8afff5587f47d0470a93f79c60c573c03" [[projects]] - branch = "release-1.10" - digest = "1:33bcc98ed218289d68aeac0799649844075ee7a2fb1e686040b605b5b5a1523c" + branch = "release-1.12" + digest = "1:5899da40e41bcc8c1df101b72954096bba9d85b763bc17efc846062ccc111c7b" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -820,25 +849,29 @@ "pkg/util/httpstream/spdy", "pkg/util/intstr", "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", "pkg/util/net", "pkg/util/remotecommand", "pkg/util/runtime", "pkg/util/sets", + "pkg/util/strategicpatch", "pkg/util/validation", 
"pkg/util/validation/field", "pkg/util/wait", "pkg/util/yaml", "pkg/version", "pkg/watch", + "third_party/forked/golang/json", "third_party/forked/golang/netutil", "third_party/forked/golang/reflect", ] pruneopts = "" - revision = "e386b2658ed20923da8cc9250e552f082899a1ee" + revision = "f71dbbc36e126f5a371b85f6cca96bc8c57db2b6" [[projects]] - branch = "release-7.0" - digest = "1:e36c8f7f67a233f593e07faa6ccdedbef3de5675f15d139d2069a8cd57e0ac44" + branch = "release-9.0" + digest = "1:77bf3d9f18ec82e08ac6c4c7e2d9d1a2ef8d16b25d3ff72fcefcf9256d751573" name = "k8s.io/client-go" packages = [ "discovery", @@ -870,6 +903,8 @@ "kubernetes/typed/autoscaling/v1/fake", "kubernetes/typed/autoscaling/v2beta1", "kubernetes/typed/autoscaling/v2beta1/fake", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/autoscaling/v2beta2/fake", "kubernetes/typed/batch/v1", "kubernetes/typed/batch/v1/fake", "kubernetes/typed/batch/v1beta1", @@ -878,6 +913,8 @@ "kubernetes/typed/batch/v2alpha1/fake", "kubernetes/typed/certificates/v1beta1", "kubernetes/typed/certificates/v1beta1/fake", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/coordination/v1beta1/fake", "kubernetes/typed/core/v1", "kubernetes/typed/core/v1/fake", "kubernetes/typed/events/v1beta1", @@ -896,6 +933,8 @@ "kubernetes/typed/rbac/v1beta1/fake", "kubernetes/typed/scheduling/v1alpha1", "kubernetes/typed/scheduling/v1alpha1/fake", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/scheduling/v1beta1/fake", "kubernetes/typed/settings/v1alpha1", "kubernetes/typed/settings/v1alpha1/fake", "kubernetes/typed/storage/v1", @@ -906,6 +945,7 @@ "kubernetes/typed/storage/v1beta1/fake", "pkg/apis/clientauthentication", "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", "pkg/version", "plugin/pkg/client/auth/exec", "plugin/pkg/client/auth/gcp", @@ -928,6 +968,7 @@ "transport/spdy", "util/buffer", "util/cert", + "util/connrotation", "util/exec", "util/flowcontrol", "util/homedir", @@ -937,11 +978,11 @@ "util/workqueue", ] pruneopts = "" - revision = "a312bfe35c401f70e5ea0add48b50da283031dc3" + revision = "13596e875accbd333e0b5bd5fd9462185acd9958" [[projects]] - branch = "release-1.10" - digest = "1:34b0b3400ffdc2533ed4ea23721956638c2776ba49ca4c5def71dddcf0cdfd9b" + branch = "release-1.12" + digest = "1:e6fffdf0dfeb0d189a7c6d735e76e7564685d3b6513f8b19d3651191cb6b084b" name = "k8s.io/code-generator" packages = [ "cmd/client-gen", @@ -963,7 +1004,7 @@ "pkg/util", ] pruneopts = "" - revision = "9de8e796a74d16d2a285165727d04c185ebca6dc" + revision = "3dcf91f64f638563e5106f21f50c31fa361c918d" [[projects]] branch = "master" @@ -985,7 +1026,10 @@ branch = "master" digest = "1:951bc2047eea6d316a17850244274554f26fd59189360e45f4056b424dadf2c1" name = "k8s.io/kube-openapi" - packages = ["pkg/common"] + packages = [ + "pkg/common", + "pkg/util/proto", + ] pruneopts = "" revision = "e3762e86a74c878ffed47484592986685639c2cd" diff --git a/Gopkg.toml b/Gopkg.toml index 2a5f36b93050..36431d39c9e7 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -7,15 +7,15 @@ required = [ [[constraint]] name = "k8s.io/api" - branch = "release-1.10" + branch = "release-1.12" [[constraint]] name = "k8s.io/apimachinery" - branch = "release-1.10" + branch = "release-1.12" [[constraint]] name = "k8s.io/code-generator" - branch = "release-1.10" + branch = "release-1.12" [[constraint]] name = "k8s.io/kube-openapi" @@ -23,11 +23,7 @@ required = [ [[constraint]] name = "k8s.io/client-go" - branch = "release-7.0" - -[[override]] - name = 
"k8s.io/kubernetes" - version = "~1.10.0" + branch = "release-9.0" [[constraint]] name = "github.com/stretchr/testify" diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index e56cbbfad65f..92b07dd6be23 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -549,7 +549,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Probe" }, "resources": { - "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, "securityContext": { @@ -699,7 +699,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Probe" }, "resources": { - "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, "securityContext": { diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 2ebb01d7e668..5c5a6f65d529 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -1008,7 +1008,7 @@ func schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref common.ReferenceCallba }, "resources": { SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, @@ -1281,7 +1281,7 @@ func schema_pkg_apis_workflow_v1alpha1_Sidecar(ref common.ReferenceCallback) com }, "resources": { SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + Description: "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index f39d47d21a50..3a54ecc84fce 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -4,7 +4,6 @@ package versioned import ( argoprojv1alpha1 "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1" - glog "github.com/golang/glog" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -58,7 +57,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) return nil, err } return &cs, nil diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 9f7881309f06..4c0de7d99d6b 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -25,9 +25,10 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - fakePtr := testing.Fake{} - fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { gvr := action.GetResource() ns := action.GetNamespace() watch, err := o.Watch(gvr, ns) @@ -37,7 +38,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { return true, watch, nil }) - return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} + return cs } // Clientset implements clientset.Interface. Meant to be embedded into a diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index be2cfddc45f5..f2677c800686 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -8,15 +8,14 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var parameterCodec = runtime.NewParameterCodec(scheme) - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - AddToScheme(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + argoprojv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition @@ -29,10 +28,13 @@ func init() { // ) // // kclientset, _ := kubernetes.NewForConfig(c) -// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
-func AddToScheme(scheme *runtime.Scheme) { - argoprojv1alpha1.AddToScheme(scheme) +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) } diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index 4a000d537b23..0b62f7b19539 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -8,15 +8,14 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - AddToScheme(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + argoprojv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition @@ -29,10 +28,13 @@ func init() { // ) // // kclientset, _ := kubernetes.NewForConfig(c) -// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. -func AddToScheme(scheme *runtime.Scheme) { - argoprojv1alpha1.AddToScheme(scheme) +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) } diff --git a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_workflow.go b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_workflow.go index 0fb0224efef3..f403ed543a9b 100644 --- a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_workflow.go +++ b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_workflow.go @@ -46,7 +46,7 @@ func (c *FakeWorkflows) List(opts v1.ListOptions) (result *v1alpha1.WorkflowList if label == nil { label = labels.Everything() } - list := &v1alpha1.WorkflowList{} + list := &v1alpha1.WorkflowList{ListMeta: obj.(*v1alpha1.WorkflowList).ListMeta} for _, item := range obj.(*v1alpha1.WorkflowList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index 0ac90bb7c174..5209b380583f 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -16,12 +16,16 @@ import ( cache "k8s.io/client-go/tools/cache" ) +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + type sharedInformerFactory struct { client versioned.Interface namespace string tweakListOptions internalinterfaces.TweakListOptionsFunc lock sync.Mutex defaultResync time.Duration + customResync map[reflect.Type]time.Duration informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. 
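The `SharedInformerOption` type introduced in the hunk above is Go's functional-options pattern: each option is a function that receives the factory, mutates one field, and returns it, so a single constructor can accept any combination of settings without a combinatorial explosion of `NewXxxFactory` variants. A minimal, self-contained sketch of the pattern — the names here are illustrative stand-ins, not part of the generated client:

```go
package main

import "fmt"

// factory stands in for sharedInformerFactory.
type factory struct {
	namespace     string
	defaultResync int
}

// option mirrors SharedInformerOption: take the factory, mutate it, return it.
type option func(*factory) *factory

func withNamespace(ns string) option {
	return func(f *factory) *factory {
		f.namespace = ns
		return f
	}
}

func newFactory(defaultResync int, opts ...option) *factory {
	f := &factory{defaultResync: defaultResync}
	for _, opt := range opts {
		f = opt(f) // apply each option in order
	}
	return f
}

func main() {
	f := newFactory(30, withNamespace("argo"))
	fmt.Println(f.namespace, f.defaultResync) // argo 30
}
```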
@@ -29,23 +33,62 @@ type sharedInformerFactory struct { startedInformers map[reflect.Type]bool } -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) + return NewSharedInformerFactoryWithOptions(client, defaultResync) } // NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. // Listers obtained via this SharedInformerFactory will be subject to the same filters // as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return &sharedInformerFactory{ + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ client: client, - namespace: namespace, - tweakListOptions: tweakListOptions, + namespace: v1.NamespaceAll, defaultResync: defaultResync, informers: make(map[reflect.Type]cache.SharedIndexInformer), startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) } + + return factory } // Start initializes all requested informers. 
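With the options defined, callers can scope the generated factory at construction time instead of going through the now-deprecated `NewFilteredSharedInformerFactory`. A usage sketch, assuming an already-constructed `versioned.Interface` named `wfClient`, the usual imports (`externalversions`, `metav1`, `time`), and an illustrative namespace and label selector; the `Argoproj().V1alpha1().Workflows()` accessor chain follows the standard code-generator layout:

```go
factory := externalversions.NewSharedInformerFactoryWithOptions(
	wfClient,
	30*time.Second, // defaultResync, used unless a per-type override is configured
	externalversions.WithNamespace("argo"),
	externalversions.WithTweakListOptions(func(opts *metav1.ListOptions) {
		opts.LabelSelector = "workflows.argoproj.io/completed=false"
	}),
)
informer := factory.Argoproj().V1alpha1().Workflows().Informer()
```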
@@ -94,7 +137,13 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal if exists { return informer } - informer = newFunc(f.client, f.defaultResync) + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) f.informers[informerType] = informer return informer diff --git a/util/unstructured/unstructured.go b/util/unstructured/unstructured.go index b3073c4e82f3..e2c798c984b2 100644 --- a/util/unstructured/unstructured.go +++ b/util/unstructured/unstructured.go @@ -6,6 +6,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" "k8s.io/client-go/informers/internalinterfaces" @@ -15,27 +16,27 @@ import ( // NewUnstructuredInformer constructs a new informer for Unstructured type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewUnstructuredInformer(resource *metav1.APIResource, client dynamic.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { +func NewUnstructuredInformer(resource schema.GroupVersionResource, client dynamic.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { return NewFilteredUnstructuredInformer(resource, client, namespace, resyncPeriod, indexers, nil) } // NewFilteredUnstructuredInformer constructs a new informer for Unstructured type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredUnstructuredInformer(resource *metav1.APIResource, client dynamic.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredUnstructuredInformer(resource schema.GroupVersionResource, client dynamic.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.Resource(resource, namespace).List(options) + return client.Resource(resource).Namespace(namespace).List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.Resource(resource, namespace).Watch(options) + return client.Resource(resource).Namespace(namespace).Watch(options) }, }, &unstructured.Unstructured{}, diff --git a/workflow/util/util.go b/workflow/util/util.go index cfc9e4cff82d..9a6fc09ee452 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -17,6 +17,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -37,28 +38,20 @@ import ( "github.com/argoproj/argo/workflow/validate" ) -func NewDynamicWorkflowClient(config *rest.Config) (dynamic.Interface, error) { - dynClientPool := dynamic.NewDynamicClientPool(config) - return dynClientPool.ClientForGroupVersionKind(wfv1.SchemaGroupVersionKind) -} - // NewWorkflowInformer returns the workflow informer used by the controller. This is actually // a custom built UnstructuredInformer which is in actuality returning unstructured.Unstructured // objects. We no longer return WorkflowInformer due to: // https://github.com/kubernetes/kubernetes/issues/57705 // https://github.com/argoproj/argo/issues/632 func NewWorkflowInformer(cfg *rest.Config, ns string, resyncPeriod time.Duration, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - dclient, err := NewDynamicWorkflowClient(cfg) + dclient, err := dynamic.NewForConfig(cfg) if err != nil { panic(err) } - resource := &metav1.APIResource{ - Name: workflow.Plural, - SingularName: workflow.Singular, - Namespaced: true, - Group: workflow.Group, - Version: "v1alpha1", - ShortNames: []string{"wf"}, + resource := schema.GroupVersionResource{ + Group: workflow.Group, + Version: "v1alpha1", + Resource: "workflows", } informer := unstructutil.NewFilteredUnstructuredInformer( resource, From a07bbe431cecbb1d50356f94111d3bd2dbc48bb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Wed, 31 Oct 2018 00:21:51 +0100 Subject: [PATCH 008/145] Adding SAP Hybris in Who uses Argo (#1064) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d1ad3ed82d70..2f0214ac3878 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,7 @@ Currently **officially** using Argo: 1. [NVIDIA](https://www.nvidia.com/) 1. [KintoHub](https://www.kintohub.com/) 1. [Styra](https://www.styra.com/) +1. 
[SAP Hybris](https://cx.sap.com/)

## Community Blogs and Presentations
* [Producing 200 OpenStreetMap extracts in 35 minutes using a scalable data workflow](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/)

From 036969c0f4f6ce6a3c948b5d161c0367cf07176b Mon Sep 17 00:00:00 2001
From: Howie Benefiel
Date: Tue, 30 Oct 2018 18:23:10 -0500
Subject: [PATCH 009/145] Add Cratejoy to list of users (#1063)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 2f0214ac3878..b3c5a24a6ab8 100644
--- a/README.md
+++ b/README.md
@@ -66,6 +66,7 @@ Currently **officially** using Argo:
 1. [NVIDIA](https://www.nvidia.com/)
 1. [KintoHub](https://www.kintohub.com/)
 1. [Styra](https://www.styra.com/)
+1. [Cratejoy](https://www.cratejoy.com/)
 1. [SAP Hybris](https://cx.sap.com/)

 ## Community Blogs and Presentations

From eb48c23a2525a62bbc1b8b4c94e3d50fd91014bd Mon Sep 17 00:00:00 2001
From: Daisuke Taniwaki
Date: Wed, 31 Oct 2018 15:38:06 +0900
Subject: [PATCH 010/145] Raise not implemented error when artifact saving is
 unsupported (#1062)

---
 errors/errors.go                     | 13 +++++++------
 workflow/executor/k8sapi/k8sapi.go   |  5 ++---
 workflow/executor/kubelet/client.go  |  4 ++--
 workflow/executor/kubelet/kubelet.go |  8 ++------
 4 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/errors/errors.go b/errors/errors.go
index c21119bb0b10..22177ccaa4f1 100644
--- a/errors/errors.go
+++ b/errors/errors.go
@@ -10,12 +10,13 @@ import (

 // Externally visible error codes
 const (
-	CodeUnauthorized = "ERR_UNAUTHORIZED"
-	CodeBadRequest   = "ERR_BAD_REQUEST"
-	CodeForbidden    = "ERR_FORBIDDEN"
-	CodeNotFound     = "ERR_NOT_FOUND"
-	CodeTimeout      = "ERR_TIMEOUT"
-	CodeInternal     = "ERR_INTERNAL"
+	CodeUnauthorized   = "ERR_UNAUTHORIZED"
+	CodeBadRequest     = "ERR_BAD_REQUEST"
+	CodeForbidden      = "ERR_FORBIDDEN"
+	CodeNotFound       = "ERR_NOT_FOUND"
+	CodeNotImplemented = "ERR_NOT_IMPLEMENTED"
+	CodeTimeout        = "ERR_TIMEOUT"
+	CodeInternal       = "ERR_INTERNAL"
 )

 // ArgoError is an error interface that additionally adds support for
diff --git a/workflow/executor/k8sapi/k8sapi.go b/workflow/executor/k8sapi/k8sapi.go
index 16d4ecef339f..6f3fd932f705 100644
--- a/workflow/executor/k8sapi/k8sapi.go
+++ b/workflow/executor/k8sapi/k8sapi.go
@@ -23,12 +23,11 @@ func NewK8sAPIExecutor(clientset *kubernetes.Clientset, config *restclient.Confi
 }

 func (k *K8sAPIExecutor) GetFileContents(containerID string, sourcePath string) (string, error) {
-	log.Infof("Getting file contents of %s:%s", containerID, sourcePath)
-	return k.client.getFileContents(containerID, sourcePath)
+	return "", errors.Errorf(errors.CodeNotImplemented, "GetFileContents() is not implemented in the k8sapi executor.")
 }

 func (k *K8sAPIExecutor) CopyFile(containerID string, sourcePath string, destPath string) error {
-	return k.client.copyArchive(containerID, sourcePath, destPath)
+	return errors.Errorf(errors.CodeNotImplemented, "CopyFile() is not implemented in the k8sapi executor.")
 }

 // GetOutput returns the entirety of the container output as a string
diff --git a/workflow/executor/kubelet/client.go b/workflow/executor/kubelet/client.go
index 35178a19f584..49730c187535 100644
--- a/workflow/executor/kubelet/client.go
+++ b/workflow/executor/kubelet/client.go
@@ -287,8 +287,8 @@ func (k *kubeletClient) readFileContents(u *url.URL) (*bytes.Buffer, error) {
 	}
 }

-// CreateArchive exec in the given containerID and create a tarball of the given sourcePath. Works with directory
-func (k *kubeletClient) CreateArchive(containerID, sourcePath string) (*bytes.Buffer, error) {
+// createArchive execs into the given containerID and creates a tarball of the given sourcePath. Works with directories
+func (k *kubeletClient) createArchive(containerID, sourcePath string) (*bytes.Buffer, error) {
 	return k.getCommandOutput(containerID, fmt.Sprintf("command=tar&command=-cf&command=-&command=%s&output=1", sourcePath))
 }

diff --git a/workflow/executor/kubelet/kubelet.go b/workflow/executor/kubelet/kubelet.go
index ca99b0b52033..6cd8f9a482f0 100644
--- a/workflow/executor/kubelet/kubelet.go
+++ b/workflow/executor/kubelet/kubelet.go
@@ -21,15 +21,11 @@ func NewKubeletExecutor() (*KubeletExecutor, error) {
 }

 func (k *KubeletExecutor) GetFileContents(containerID string, sourcePath string) (string, error) {
-	b, err := k.cli.GetFileContents(containerID, sourcePath)
-	if err != nil {
-		return "", err
-	}
-	return b.String(), nil
+	return "", errors.Errorf(errors.CodeNotImplemented, "GetFileContents() is not implemented in the kubelet executor.")
 }

 func (k *KubeletExecutor) CopyFile(containerID string, sourcePath string, destPath string) error {
-	return k.cli.CopyArchive(containerID, sourcePath, destPath)
+	return errors.Errorf(errors.CodeNotImplemented, "CopyFile() is not implemented in the kubelet executor.")
 }

 // GetOutput returns the entirety of the container output as a string

From 2f3fc414f537023b836e61e87d2b0c320dbf22a9 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Fri, 28 Sep 2018 15:03:13 -0400
Subject: [PATCH 011/145] Adding native GCS support for artifact storage and
 retrieval

---
 Gopkg.lock                                 |  90 +++++++++++++
 api/openapi-spec/swagger.json              |  34 +++++
 .../workflow/v1alpha1/openapi_generated.go |  64 ++++++++-
 pkg/apis/workflow/v1alpha1/types.go        |  20 ++-
 .../v1alpha1/zz_generated.deepcopy.go      |  38 ++++++
 workflow/artifacts/gcs/gcs.go              | 126 ++++++++++++++++++
 workflow/common/common.go                  |  10 ++
 workflow/controller/config.go              |   6 +
 workflow/controller/workflowpod.go         |  44 +++++-
 workflow/executor/executor.go              |   9 ++
 10 files changed, 434 insertions(+), 7 deletions(-)
 create mode 100644 workflow/artifacts/gcs/gcs.go

diff --git a/Gopkg.lock b/Gopkg.lock
index 82af60f29311..233bb7612a7c 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -200,6 +200,7 @@
   name = "github.com/golang/protobuf"
   packages = [
     "proto",
+    "protoc-gen-go/descriptor",
     "ptypes",
     "ptypes/any",
     "ptypes/duration",
@@ -225,6 +226,12 @@
   pruneopts = ""
   revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"

+[[projects]]
+  name = "github.com/googleapis/gax-go"
+  packages = ["."]
+  revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
+  version = "v2.0.0"
+
 [[projects]]
   digest = "1:16b2837c8b3cf045fa2cdc82af0cf78b19582701394484ae76b2c3bc3c99ad73"
   name = "github.com/googleapis/gnostic"
@@ -555,6 +562,26 @@
   revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c"
   version = "v0.2.0"

+[[projects]]
+  name = "go.opencensus.io"
+  packages = [
+    ".",
+    "internal",
+    "internal/tagencoding",
+    "plugin/ochttp",
+    "plugin/ochttp/propagation/b3",
+    "stats",
+    "stats/internal",
+    "stats/view",
+    "tag",
+    "trace",
+    "trace/internal",
+    "trace/propagation",
+    "trace/tracestate"
+  ]
+  revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6"
+  version = "v0.17.0"
+
 [[projects]]
   branch = "master"
   digest = "1:53c4b75f22ea7757dea07eae380ea42de547ae6865a5e3b41866754a8a8219c9"
@@ -594,6 +621,9 @@
     "http2",
     "http2/hpack",
     "idna",
+    "internal/timeseries",
+    "lex/httplex",
+    "trace"
   ]
   pruneopts = ""
   revision =
"f9ce57c11b242f0f1599cf25c89d8cb02c45295a" @@ -668,6 +698,23 @@ pruneopts = "" revision = "ca6481ae56504398949d597084558e50ad07117a" +[[projects]] + branch = "master" + name = "google.golang.org/api" + packages = [ + "gensupport", + "googleapi", + "googleapi/internal/uritemplates", + "googleapi/transport", + "internal", + "iterator", + "option", + "storage/v1", + "transport/http", + "transport/http/internal/propagation" + ] + revision = "44c6748ece026e0fe668793d8f92e521356400a3" + [[projects]] digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" name = "google.golang.org/appengine" @@ -687,6 +734,49 @@ revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" version = "v1.1.0" +[[projects]] + branch = "master" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/annotations", + "googleapis/iam/v1", + "googleapis/rpc/status" + ] + revision = "0e822944c569bf5c9afd034adaa56208bd2906ac" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap" + ] + revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" + version = "v1.15.0" + [[projects]] digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" name = "gopkg.in/inf.v0" diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 92b07dd6be23..29254c385f40 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -58,6 +58,10 @@ "description": "From allows an artifact to reference an artifact from a previous step", "type": "string" }, + "gcs": { + "description": "GCS contains GCS artifact location details", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GCSArtifact" + }, "git": { "description": "Git contains git artifact location details", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GitArtifact" @@ -104,6 +108,10 @@ "description": "Artifactory contains artifactory artifact location details", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifact" }, + "gcs": { + "description": "GCS contains GCS artifact location details", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GCSArtifact" + }, "git": { "description": "Git contains git artifact location details", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GitArtifact" @@ -221,6 +229,32 @@ } } }, + "io.argoproj.workflow.v1alpha1.GCSArtifact": { + "description": "GCSArtifact is the location of a GCS artifact", + "required": [ + "bucket", + "key" + ], + "properties": { + "bucket": { + "type": "string" + }, + "key": { + "type": "string" + } + } + }, + "io.argoproj.workflow.v1alpha1.GCSBucket": { + "description": "GCSBucket contains the access information required for acting with a GCS bucket", + "required": [ + "bucket" + ], + "properties": { + "bucket": { + "type": "string" + } + } + }, "io.argoproj.workflow.v1alpha1.GitArtifact": { "description": "GitArtifact is the location of an git artifact", "required": [ diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 5c5a6f65d529..0f898464dbd7 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ 
b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -21,6 +21,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryAuth": schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTask": schema_pkg_apis_workflow_v1alpha1_DAGTask(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate": schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GCSArtifact": schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GCSBucket": schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact": schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact": schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs": schema_pkg_apis_workflow_v1alpha1_Inputs(ref), @@ -183,6 +185,12 @@ func schema_pkg_apis_workflow_v1alpha1_Artifact(ref common.ReferenceCallback) co Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact"), }, }, + "gcs": { + SchemaProps: spec.SchemaProps{ + Description: "GCS contains GCS artifact location details", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GCSArtifact"), + }, + }, "globalName": { SchemaProps: spec.SchemaProps{ Description: "GlobalName exports an output artifact to the global scope, making it available as '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts", @@ -201,7 +209,7 @@ func schema_pkg_apis_workflow_v1alpha1_Artifact(ref common.ReferenceCallback) co }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Artifact"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Artifact"}, } } @@ -248,11 +256,17 @@ func schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref common.ReferenceCall Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact"), }, }, + "gcs": { + SchemaProps: spec.SchemaProps{ + Description: "GCS contains GCS artifact location details", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GCSArtifact"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Artifact"}, + 
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Artifact"}, } } @@ -433,6 +447,52 @@ func schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref common.ReferenceCallback) } } +func schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GCSArtifact is the location of a GCS artifact", + Properties: map[string]spec.Schema{ + "bucket": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"bucket", "key"}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GCSBucket contains the access information required for acting with a GCS bucket", + Properties: map[string]spec.Schema{ + "bucket": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"bucket"}, + }, + }, + Dependencies: []string{}, + } +} + func schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index fbd07fe54174..d2815274acd8 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -318,6 +318,9 @@ type ArtifactLocation struct { // Raw contains raw artifact location details Raw *RawArtifact `json:"raw,omitempty"` + + // GCS contains GCS artifact location details + GCS *GCSArtifact `json:"gcs,omitempty"` } // Outputs hold parameters, artifacts, and results from a step @@ -594,6 +597,21 @@ func (s *S3Artifact) String() string { return fmt.Sprintf("%s://%s/%s/%s", protocol, s.Endpoint, s.Bucket, s.Key) } +// GCSBucket contains the access information required for acting with a GCS bucket +type GCSBucket struct { + Bucket string `json:"bucket"` +} + +// GCSArtifact is the location of a GCS artifact +type GCSArtifact struct { + GCSBucket `json:",inline"` + Key string `json:"key"` +} + +func (s *GCSArtifact) String() string { + return fmt.Sprintf("gs://%s/%s", s.Bucket, s.Key) +} + // GitArtifact is the location of an git artifact type GitArtifact struct { // Repo is the git repository @@ -818,7 +836,7 @@ func (args *Arguments) GetParameterByName(name string) *Parameter { // HasLocation whether or not an artifact has a location defined func (a *Artifact) HasLocation() bool { - return a.S3 != nil || a.Git != nil || a.HTTP != nil || a.Artifactory != nil || a.Raw != nil + return a.S3 != nil || a.Git != nil || a.HTTP != nil || a.Artifactory != nil || a.Raw != nil || a.GCS != nil } // GetTemplate retrieves a defined template by its name diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index ffa11d640f1b..6b6ccec7ba76 100644 --- 
a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go
@@ -125,6 +125,11 @@ func (in *ArtifactLocation) DeepCopyInto(out *ArtifactLocation) {
 		*out = new(RawArtifact)
 		**out = **in
 	}
+	if in.GCS != nil {
+		in, out := &in.GCS, &out.GCS
+		*out = new(GCSArtifact)
+		**out = **in
+	}
 	return
 }

@@ -238,6 +243,39 @@ func (in *DAGTemplate) DeepCopy() *DAGTemplate {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCSArtifact) DeepCopyInto(out *GCSArtifact) {
+	*out = *in
+	out.GCSBucket = in.GCSBucket
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSArtifact.
+func (in *GCSArtifact) DeepCopy() *GCSArtifact {
+	if in == nil {
+		return nil
+	}
+	out := new(GCSArtifact)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCSBucket) DeepCopyInto(out *GCSBucket) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSBucket.
+func (in *GCSBucket) DeepCopy() *GCSBucket {
+	if in == nil {
+		return nil
+	}
+	out := new(GCSBucket)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *GitArtifact) DeepCopyInto(out *GitArtifact) {
 	*out = *in
diff --git a/workflow/artifacts/gcs/gcs.go b/workflow/artifacts/gcs/gcs.go
new file mode 100644
index 000000000000..5802076c3ae8
--- /dev/null
+++ b/workflow/artifacts/gcs/gcs.go
@@ -0,0 +1,126 @@
+package gcs
+
+import (
+	"cloud.google.com/go/storage"
+	"context"
+	"errors"
+	argoErrors "github.com/argoproj/argo/errors"
+	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
+	log "github.com/sirupsen/logrus"
+	"io"
+	"os"
+)
+
+type GCSArtifactDriver struct {
+	Context context.Context
+}
+
+func (gcsDriver *GCSArtifactDriver) newGcsClient() (client *storage.Client, err error) {
+	gcsDriver.Context = context.Background()
+	client, err = storage.NewClient(gcsDriver.Context)
+	if err != nil {
+		return nil, argoErrors.InternalWrapError(err)
+	}
+	return
+
+}
+
+func (gcsDriver *GCSArtifactDriver) saveToFile(inputArtifact *wfv1.Artifact, filePath string) error {
+
+	log.Infof("Loading from GCS (gs://%s/%s) to %s",
+		inputArtifact.GCS.Bucket, inputArtifact.GCS.Key, filePath)
+
+	stat, err := os.Stat(filePath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	if err == nil && stat.IsDir() {
+		return errors.New("output artifact path is a directory")
+	}
+
+	outputFile, err := os.OpenFile(filePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+	if err != nil {
+		return err
+	}
+
+	gcsClient, err := gcsDriver.newGcsClient()
+	if err != nil {
+		return err
+	}
+
+	bucket := gcsClient.Bucket(inputArtifact.GCS.Bucket)
+	object := bucket.Object(inputArtifact.GCS.Key)
+
+	r, err := object.NewReader(gcsDriver.Context)
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	_, err = io.Copy(outputFile, r)
+	if err != nil {
+		return err
+	}
+
+	err = outputFile.Close()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (gcsDriver *GCSArtifactDriver) saveToGCS(outputArtifact *wfv1.Artifact, filePath string) error {
+
+	log.Infof("Saving to GCS (gs://%s/%s)",
+		outputArtifact.GCS.Bucket, outputArtifact.GCS.Key)
+
+	gcsClient, err := gcsDriver.newGcsClient()
+	if err != nil {
+		return err
+	}
+
+	inputFile, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+
+	stat, err := os.Stat(filePath)
+	if err != nil {
+		return err
+	}
+
+	if stat.IsDir() {
+		return errors.New("only single files can be saved to GCS, not entire directories")
+	}
+
+	defer inputFile.Close()
+
+	bucket := gcsClient.Bucket(outputArtifact.GCS.Bucket)
+	object := bucket.Object(outputArtifact.GCS.Key)
+
+	w := object.NewWriter(gcsDriver.Context)
+	_, err = io.Copy(w, inputFile)
+	if err != nil {
+		return err
+	}
+
+	err = w.Close()
+	if err != nil {
+		return err
+	}
+	return nil
+
+}
+
+func (gcsDriver *GCSArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string) error {
+
+	err := gcsDriver.saveToFile(inputArtifact, path)
+	return err
+}
+
+func (gcsDriver *GCSArtifactDriver) Save(path string, outputArtifact *wfv1.Artifact) error {
+
+	err := gcsDriver.saveToGCS(outputArtifact, path)
+	return err
+}
diff --git a/workflow/common/common.go b/workflow/common/common.go
index 432339e10cbe..e3dbb079169b 100644
--- a/workflow/common/common.go
+++ b/workflow/common/common.go
@@ -1,6 +1,7 @@
 package common

 import (
+	"os"
 	"time"

 	"github.com/argoproj/argo/pkg/apis/workflow"
@@ -36,6 +37,11 @@ const (
 	// DockerSockVolumeName is the volume name for the /var/run/docker.sock host path volume
 	DockerSockVolumeName = "docker-sock"

+	// GoogleSecretVolumeName is the volume name for the /var/secrets/google volume
+	GoogleSecretVolumeName = "google-cloud-key"
+	// EnvVarGoogleSecret contains the name of the google credentials file used for GCS access
+	EnvVarGoogleSecret = "GOOGLE_CREDENTIALS_SECRET"
+
 	// AnnotationKeyNodeName is the pod metadata annotation key containing the workflow node name
 	AnnotationKeyNodeName = workflow.FullName + "/node-name"
 	// AnnotationKeyNodeMessage is the pod metadata annotation key the executor will use to
@@ -116,6 +122,10 @@ const (
 	LocalVarPodName = "pod.name"
 )

+var (
+	GoogleSecretName = os.Getenv(EnvVarGoogleSecret)
+)
+
 // ExecutionControl contains execution control parameters for executor to decide how to execute the container
 type ExecutionControl struct {
 	// Deadline is a max timestamp in which an executor can run the container before terminating it
diff --git a/workflow/controller/config.go b/workflow/controller/config.go
index 1559eb93b02d..0caa9141fb4e 100644
--- a/workflow/controller/config.go
+++ b/workflow/controller/config.go
@@ -67,6 +67,7 @@ type ArtifactRepository struct {
 	S3 *S3ArtifactRepository `json:"s3,omitempty"`
 	// Artifactory stores artifacts to JFrog Artifactory
 	Artifactory *ArtifactoryArtifactRepository `json:"artifactory,omitempty"`
+	GCS *GCSArtifactRepository `json:"gcs,omitempty"`
 }

 // S3ArtifactRepository defines the controller configuration for an S3 artifact repository
@@ -88,6 +89,11 @@ type ArtifactoryArtifactRepository struct {
 	RepoURL string `json:"repoURL,omitempty"`
 }

+// GCSArtifactRepository defines the controller configuration for a GCS artifact repository
+type GCSArtifactRepository struct {
+	wfv1.GCSBucket `json:",inline"`
+}
+
 // ResyncConfig reloads the controller config from the configmap
 func (wfc *WorkflowController) ResyncConfig() error {
 	cmClient := wfc.kubeclientset.CoreV1().ConfigMaps(wfc.namespace)
diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index 7b794832cb48..4700ecd996a0 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -87,6 +87,25 @@ var (
 	execEnvVars = []apiv1.EnvVar{
 		envFromField(common.EnvVarPodName, "metadata.name"),
 	}
+
+
volumeMountGoogleSecret = apiv1.VolumeMount{ + Name: common.GoogleSecretVolumeName, + MountPath: "/var/secrets/google", + } + + googleCredentialSecretEnvVar = apiv1.EnvVar{ + Name: "GOOGLE_APPLICATION_CREDENTIALS", + Value: "/var/secrets/google/key.json", + } + + volumeGoogleSecret = apiv1.Volume{ + Name: common.GoogleSecretVolumeName, + VolumeSource: apiv1.VolumeSource{ + Secret: &apiv1.SecretVolumeSource{ + SecretName: common.GoogleSecretName, + }, + }, + } ) // envFromField is a helper to return a EnvVar with the name and field @@ -138,6 +157,10 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont pod.ObjectMeta.Labels[common.LabelKeyControllerInstanceID] = woc.controller.Config.InstanceID } + if common.GoogleSecretName != "" { + pod.Spec.Volumes = append(pod.Spec.Volumes, volumeGoogleSecret) + } + if tmpl.GetType() != wfv1.TemplateTypeResource { // we do not need the wait container for resource templates because // argoexec runs as the main container and will perform the job of @@ -260,9 +283,8 @@ func (woc *wfOperationCtx) newInitContainer(tmpl *wfv1.Template) apiv1.Container ctr := woc.newExecContainer(common.InitContainerName, false) ctr.Command = []string{"argoexec"} ctr.Args = []string{"init"} - ctr.VolumeMounts = []apiv1.VolumeMount{ - volumeMountPodMetadata, - } + ctr.VolumeMounts = append(ctr.VolumeMounts, volumeMountPodMetadata) + return *ctr } @@ -270,7 +292,8 @@ func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Contain ctr := woc.newExecContainer(common.WaitContainerName, false) ctr.Command = []string{"argoexec"} ctr.Args = []string{"wait"} - ctr.VolumeMounts = woc.createVolumeMounts() + ctr.VolumeMounts = append(ctr.VolumeMounts, woc.createVolumeMounts()...) + return ctr, nil } @@ -340,6 +363,7 @@ func (woc *wfOperationCtx) newExecContainer(name string, privileged bool) *apiv1 Name: name, Image: woc.controller.executorImage(), ImagePullPolicy: woc.controller.executorImagePullPolicy(), + VolumeMounts: []apiv1.VolumeMount{}, Env: woc.createEnvVars(), SecurityContext: &apiv1.SecurityContext{ Privileged: &privileged, @@ -348,6 +372,11 @@ func (woc *wfOperationCtx) newExecContainer(name string, privileged bool) *apiv1 if woc.controller.Config.ExecutorResources != nil { exec.Resources = *woc.controller.Config.ExecutorResources } + + if common.GoogleSecretName != "" { + exec.VolumeMounts = append(exec.VolumeMounts, volumeMountGoogleSecret) + exec.Env = append(exec.Env, googleCredentialSecretEnvVar) + } return &exec } @@ -594,6 +623,13 @@ func (woc *wfOperationCtx) addArchiveLocation(pod *apiv1.Pod, tmpl *wfv1.Templat ArtifactoryAuth: woc.controller.Config.ArtifactRepository.Artifactory.ArtifactoryAuth, URL: artURL, } + } else if woc.controller.Config.ArtifactRepository.GCS != nil { + log.Debugf("Setting GCS artifact repository information") + artLocationKey := fmt.Sprintf("%s/%s", woc.wf.ObjectMeta.Name, pod.ObjectMeta.Name) + tmpl.ArchiveLocation.GCS = &wfv1.GCSArtifact{ + GCSBucket: woc.controller.Config.ArtifactRepository.GCS.GCSBucket, + Key: artLocationKey, + } } else { for _, art := range tmpl.Outputs.Artifacts { if !art.HasLocation() { diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index 017d89dfad09..a545e7ef58aa 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -24,6 +24,7 @@ import ( "github.com/argoproj/argo/util/retry" artifact "github.com/argoproj/argo/workflow/artifacts" "github.com/argoproj/argo/workflow/artifacts/artifactory" + 
"github.com/argoproj/argo/workflow/artifacts/gcs" "github.com/argoproj/argo/workflow/artifacts/git" "github.com/argoproj/argo/workflow/artifacts/http" "github.com/argoproj/argo/workflow/artifacts/raw" @@ -253,6 +254,10 @@ func (we *WorkflowExecutor) saveArtifact(tempOutArtDir string, mainCtrID string, } artifactoryURL.Path = path.Join(artifactoryURL.Path, fileName) art.Artifactory.URL = artifactoryURL.String() + } else if we.Template.ArchiveLocation.GCS != nil { + shallowCopy := *we.Template.ArchiveLocation.GCS + art.GCS = &shallowCopy + art.GCS.Key = path.Join(art.GCS.Key, fileName) } else { return errors.Errorf(errors.CodeBadRequest, "Unable to determine path to store %s. Archive location provided no information", art.Name) } @@ -435,6 +440,10 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv } return &driver, nil } + if art.GCS != nil { + driver := gcs.GCSArtifactDriver{} + return &driver, nil + } if art.HTTP != nil { return &http.HTTPArtifactDriver{}, nil } From f2914d63e9c8b41a13b5932f7962f208b7e5a0da Mon Sep 17 00:00:00 2001 From: Tang Lee Date: Sat, 3 Nov 2018 03:43:24 +0800 Subject: [PATCH 012/145] Support nested steps workflow parallelism (#1046) --- examples/parallelism-nested-dag.yaml | 87 +++++++++++++++++++++++ examples/parallelism-nested-workflow.yaml | 52 ++++++++++++++ workflow/controller/operator.go | 39 ++++++++-- 3 files changed, 172 insertions(+), 6 deletions(-) create mode 100644 examples/parallelism-nested-dag.yaml create mode 100644 examples/parallelism-nested-workflow.yaml diff --git a/examples/parallelism-nested-dag.yaml b/examples/parallelism-nested-dag.yaml new file mode 100644 index 000000000000..bcc7bd6ca064 --- /dev/null +++ b/examples/parallelism-nested-dag.yaml @@ -0,0 +1,87 @@ +# Example on specifying parallelism on the outer DAG and limiting the number of its +# children DAGs to be run at the same time. +# +# As the parallelism of A is 2, only two of the three DAGs (b2, b3, b4) will start +# running after b1 is finished, and the left DAG will run after either one is finished. 
+
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: parallelism-nested-dag-
+spec:
+  entrypoint: A
+  templates:
+  - name: A
+    parallelism: 2
+    dag:
+      tasks:
+      - name: b1
+        template: B
+        arguments:
+          parameters:
+          - name: msg
+            value: "1"
+      - name: b2
+        template: B
+        dependencies: [b1]
+        arguments:
+          parameters:
+          - name: msg
+            value: "2"
+      - name: b3
+        template: B
+        dependencies: [b1]
+        arguments:
+          parameters:
+          - name: msg
+            value: "3"
+      - name: b4
+        template: B
+        dependencies: [b1]
+        arguments:
+          parameters:
+          - name: msg
+            value: "4"
+      - name: b5
+        template: B
+        dependencies: [b2, b3, b4]
+        arguments:
+          parameters:
+          - name: msg
+            value: "5"
+
+  - name: B
+    inputs:
+      parameters:
+      - name: msg
+    dag:
+      tasks:
+      - name: c1
+        template: one-job
+        arguments:
+          parameters:
+          - name: msg
+            value: "{{inputs.parameters.msg}} c1"
+      - name: c2
+        template: one-job
+        dependencies: [c1]
+        arguments:
+          parameters:
+          - name: msg
+            value: "{{inputs.parameters.msg}} c2"
+      - name: c3
+        template: one-job
+        dependencies: [c1]
+        arguments:
+          parameters:
+          - name: msg
+            value: "{{inputs.parameters.msg}} c3"
+
+  - name: one-job
+    inputs:
+      parameters:
+      - name: msg
+    container:
+      image: alpine
+      command: ['/bin/sh', '-c']
+      args: ["echo {{inputs.parameters.msg}}; sleep 10"]
diff --git a/examples/parallelism-nested-workflow.yaml b/examples/parallelism-nested-workflow.yaml
new file mode 100644
index 000000000000..5cba4de3391b
--- /dev/null
+++ b/examples/parallelism-nested-workflow.yaml
@@ -0,0 +1,52 @@
+# Example of specifying parallelism on the outer workflow to limit the number of its
+# child workflows run at the same time.
+#
+# As the parallelism of A is 1, the four steps of seq-step will run sequentially.
+
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: parallelism-nested-workflow-
+spec:
+  arguments:
+    parameters:
+    - name: seq-list
+      value: |
+        ["a","b","c","d"]
+  entrypoint: A
+  templates:
+  - name: A
+    parallelism: 1
+    inputs:
+      parameters:
+      - name: seq-list
+    steps:
+    - - name: seq-step
+        template: B
+        arguments:
+          parameters:
+          - name: seq-id
+            value: "{{item}}"
+        withParam: "{{inputs.parameters.seq-list}}"
+
+  - name: B
+    inputs:
+      parameters:
+      - name: seq-id
+    steps:
+    - - name: jobs
+        template: one-job
+        arguments:
+          parameters:
+          - name: seq-id
+            value: "{{inputs.parameters.seq-id}}"
+        withParam: "[1, 2]"
+
+  - name: one-job
+    inputs:
+      parameters:
+      - name: seq-id
+    container:
+      image: alpine
+      command: ['/bin/sh', '-c']
+      args: ["echo {{inputs.parameters.seq-id}}; sleep 30"]
diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go
index 86c768c3bb54..ed03abc2f000 100644
--- a/workflow/controller/operator.go
+++ b/workflow/controller/operator.go
@@ -495,6 +495,31 @@ func (woc *wfOperationCtx) countActivePods(boundaryIDs ...string) int64 {
 	return activePods
 }

+// countActiveChildren counts the number of active (Pending/Running) child nodes within the given boundary
+func (woc *wfOperationCtx) countActiveChildren(boundaryIDs ...string) int64 {
+	var boundaryID = ""
+	if len(boundaryIDs) > 0 {
+		boundaryID = boundaryIDs[0]
+	}
+	var activeChildren int64
+	// if we care about parallelism, count the active children at the template level
+	for _, node := range woc.wf.Status.Nodes {
+		if boundaryID != "" && node.BoundaryID != boundaryID {
+			continue
+		}
+		switch node.Type {
+		case wfv1.NodeTypePod, wfv1.NodeTypeSteps, wfv1.NodeTypeDAG:
+		default:
+			continue
+		}
+		switch node.Phase {
+		case wfv1.NodePending,
wfv1.NodeRunning: + activeChildren++ + } + } + return activeChildren +} + // getAllWorkflowPods returns all pods related to the current workflow func (woc *wfOperationCtx) getAllWorkflowPods() (*apiv1.PodList, error) { options := metav1.ListOptions{ @@ -868,7 +893,8 @@ func (woc *wfOperationCtx) getLastChildNode(node *wfv1.NodeStatus) (*wfv1.NodeSt // nodeName is the name to be used as the name of the node, and boundaryID indicates which template // boundary this node belongs to. func (woc *wfOperationCtx) executeTemplate(templateName string, args wfv1.Arguments, nodeName string, boundaryID string) (*wfv1.NodeStatus, error) { - woc.log.Debugf("Evaluating node %s: template: %s", nodeName, templateName) + woc.log.Debugf("Evaluating node %s: template: %s, boundaryID: %s", nodeName, templateName, boundaryID) + node := woc.getNodeByName(nodeName) if node != nil && node.Completed() { woc.log.Debugf("Node %s already completed", nodeName) @@ -1113,16 +1139,17 @@ func (woc *wfOperationCtx) checkParallelism(tmpl *wfv1.Template, node *wfv1.Node return ErrParallelismReached } } + fallthrough default: // if we are about to execute a pod, make our parent hasn't reached it's limit - if boundaryID != "" { + if boundaryID != "" && (node == nil || (node.Phase != wfv1.NodePending && node.Phase != wfv1.NodeRunning)) { boundaryNode := woc.wf.Status.Nodes[boundaryID] boundaryTemplate := woc.wf.GetTemplate(boundaryNode.TemplateName) if boundaryTemplate.Parallelism != nil { - templateActivePods := woc.countActivePods(boundaryID) - woc.log.Debugf("counted %d/%d active pods in boundary %s", templateActivePods, *boundaryTemplate.Parallelism, boundaryID) - if templateActivePods >= *boundaryTemplate.Parallelism { - woc.log.Infof("template (node %s) active pod parallelism reached %d/%d", boundaryID, templateActivePods, *boundaryTemplate.Parallelism) + activeSiblings := woc.countActiveChildren(boundaryID) + woc.log.Debugf("counted %d/%d active children in boundary %s", activeSiblings, *boundaryTemplate.Parallelism, boundaryID) + if activeSiblings >= *boundaryTemplate.Parallelism { + woc.log.Infof("template (node %s) active children parallelism reached %d/%d", boundaryID, activeSiblings, *boundaryTemplate.Parallelism) return ErrParallelismReached } } From 76b14f54520a92b81ced78d4cae2632655f396fc Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Sat, 3 Nov 2018 15:08:07 +0900 Subject: [PATCH 013/145] Auto-complete workflow names (#1061) * Auto-complete workflow names * Use cobra revision at fe5e611709b0c57fa4a89136deaa8e1d4004d053 --- Gopkg.lock | 6 ++---- Gopkg.toml | 2 +- cmd/argo/commands/completion.go | 25 +++++++++++++++++++++++++ 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 82af60f29311..797653a4c341 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -466,12 +466,11 @@ version = "v1.0.6" [[projects]] - branch = "master" - digest = "1:c8f6919ab9f140506fd4ad3f4a9c9c2af9ee7921e190af0c67b2fca2f903083c" + digest = "1:9ba49264cef4386aded205f9cb5b1f2d30f983d7dc37a21c780d9db3edfac9a7" name = "github.com/spf13/cobra" packages = ["."] pruneopts = "" - revision = "7c4570c3ebeb8129a1f7456d0908a8b676b6f9f1" + revision = "fe5e611709b0c57fa4a89136deaa8e1d4004d053" [[projects]] digest = "1:8e243c568f36b09031ec18dff5f7d2769dcf5ca4d624ea511c8e3197dc3d352d" @@ -1052,7 +1051,6 @@ "github.com/fsnotify/fsnotify", "github.com/ghodss/yaml", "github.com/go-openapi/spec", - "github.com/golang/glog", "github.com/gorilla/websocket", "github.com/pkg/errors", 
"github.com/prometheus/client_golang/prometheus", diff --git a/Gopkg.toml b/Gopkg.toml index 36431d39c9e7..25aee72f4b95 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -31,7 +31,7 @@ required = [ [[constraint]] name = "github.com/spf13/cobra" - branch = "master" + revision = "fe5e611709b0c57fa4a89136deaa8e1d4004d053" [[constraint]] name = "gopkg.in/src-d/go-git.v4" diff --git a/cmd/argo/commands/completion.go b/cmd/argo/commands/completion.go index 53f5bf8fc9f7..d6cba691292a 100644 --- a/cmd/argo/commands/completion.go +++ b/cmd/argo/commands/completion.go @@ -9,6 +9,30 @@ import ( "github.com/spf13/cobra" ) +const ( + bashCompletionFunc = ` +__argo_get_workflow() { + local argo_out + if argo_out=$(argo list --output name 2>/dev/null); then + COMPREPLY+=( $( compgen -W "${argo_out[*]}" -- "$cur" ) ) + fi +} + +__argo_custom_func() { + case ${last_command} in + argo_delete | argo_get | argo_logs |\ + argo_resubmit | argo_resume | argo_retry | argo_suspend |\ + argo_terminate | argo_wait | argo_watch) + __argo_get_workflow + return + ;; + *) + ;; + esac +} + ` +) + func NewCompletionCommand() *cobra.Command { var command = &cobra.Command{ Use: "completion SHELL", @@ -30,6 +54,7 @@ variable. } shell := args[0] rootCommand := NewCommand() + rootCommand.BashCompletionFunction = bashCompletionFunc availableCompletions := map[string]func(io.Writer) error{ "bash": rootCommand.GenBashCompletion, "zsh": rootCommand.GenZshCompletion, From 79b3e30746f779e3cec3a28beaecb9c0df7024e1 Mon Sep 17 00:00:00 2001 From: Joshua Carp Date: Sat, 3 Nov 2018 02:09:17 -0400 Subject: [PATCH 014/145] Fix string format arguments in workflow utilities. (#1070) --- workflow/util/util.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/workflow/util/util.go b/workflow/util/util.go index 9a6fc09ee452..40d0bb4ffe24 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -412,7 +412,7 @@ func FormulateResubmitWorkflow(wf *wfv1.Workflow, memoized bool) (*wfv1.Workflow // NOTE: NodeRunning shouldn't really happen except in weird scenarios where controller // mismanages state (e.g. panic when operating on a workflow) default: - return nil, errors.InternalErrorf("Workflow cannot be resubmitted with nodes in %s phase", node, node.Phase) + return nil, errors.InternalErrorf("Workflow cannot be resubmitted with node %s in %s phase", node, node.Phase) } } return &newWF, nil @@ -460,7 +460,7 @@ func RetryWorkflow(kubeClient kubernetes.Interface, wfClient v1alpha1.WorkflowIn // do not add this status to the node. pretend as if this node never existed. 
default: // Do not allow retry of workflows with pods in Running/Pending phase - return nil, errors.InternalErrorf("Workflow cannot be retried with nodes in %s phase", node, node.Phase) + return nil, errors.InternalErrorf("Workflow cannot be retried with node %s in %s phase", node, node.Phase) } if node.Type == wfv1.NodeTypePod { log.Infof("Deleting pod: %s", node.ID) From a53a76e9401fab701eaa150307b21a28825c97ce Mon Sep 17 00:00:00 2001 From: gerardaus Date: Tue, 6 Nov 2018 15:53:54 -0800 Subject: [PATCH 015/145] fix #1078 Azure AKS authentication issues (#1079) --- Gopkg.lock | 22 ++++++++++++++++++++++ Gopkg.toml | 7 +++++++ cmd/argo/main.go | 2 ++ cmd/argoexec/main.go | 2 ++ cmd/workflow-controller/main.go | 1 + 5 files changed, 34 insertions(+) diff --git a/Gopkg.lock b/Gopkg.lock index 797653a4c341..eb7e6c92a286 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -9,6 +9,18 @@ revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239" version = "v0.26.0" +[[projects]] + digest = "1:d62e9a41f2e45c103f6c15ffabb3466b3548db41b8cc135a4669794033ee761f" + name = "github.com/Azure/go-autorest" + packages = [ + "autorest", + "autorest/adal", + "autorest/azure", + "autorest/date", + ] + pruneopts = "" + revision = "1ff28809256a84bb6966640ff3d0371af82ccba4" + [[projects]] digest = "1:b9660f5e3522b899d32b1f9bb98056203d6f76f673e1843eaa00869330103ba5" name = "github.com/Knetic/govaluate" @@ -67,6 +79,14 @@ revision = "346938d642f2ec3594ed81d874461961cd0faa76" version = "v1.1.0" +[[projects]] + digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + [[projects]] branch = "master" digest = "1:d6c13a378213e3de60445e49084b8a0a9ce582776dfc77927775dbeb3ff72a35" @@ -946,6 +966,7 @@ "pkg/apis/clientauthentication/v1alpha1", "pkg/apis/clientauthentication/v1beta1", "pkg/version", + "plugin/pkg/client/auth/azure", "plugin/pkg/client/auth/exec", "plugin/pkg/client/auth/gcp", "plugin/pkg/client/auth/oidc", @@ -1090,6 +1111,7 @@ "k8s.io/client-go/informers/internalinterfaces", "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/fake", + "k8s.io/client-go/plugin/pkg/client/auth/azure", "k8s.io/client-go/plugin/pkg/client/auth/gcp", "k8s.io/client-go/plugin/pkg/client/auth/oidc", "k8s.io/client-go/rest", diff --git a/Gopkg.toml b/Gopkg.toml index 25aee72f4b95..facaf7e03133 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -48,3 +48,10 @@ required = [ [[constraint]] name = "github.com/ghodss/yaml" branch = "master" + +# vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go:300:25: +# cannot call non-function spt.Token (type adal.Token) +[[override]] + name = "github.com/Azure/go-autorest" + revision = "1ff28809256a84bb6966640ff3d0371af82ccba4" + diff --git a/cmd/argo/main.go b/cmd/argo/main.go index d799b6a42d14..7f45d54b17f8 100644 --- a/cmd/argo/main.go +++ b/cmd/argo/main.go @@ -5,6 +5,8 @@ import ( "os" "github.com/argoproj/argo/cmd/argo/commands" + // load the azure plugin (required to authenticate against AKS clusters). + _ "k8s.io/client-go/plugin/pkg/client/auth/azure" // load the gcp plugin (required to authenticate against GKE clusters). _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // load the oidc plugin (required to authenticate with OpenID Connect). 
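Background on the fix: client-go auth providers register themselves from an init() function, so the side-effect (blank) imports above are the entire change; no call sites need to be touched. A minimal standalone sketch of the same pattern (the kubeconfig location here is an illustrative assumption, not part of this patch):

    package main

    import (
    	"fmt"

    	// Side-effect import: registers the "azure" auth provider so kubeconfig
    	// user entries with auth-provider name "azure" (AKS) can authenticate.
    	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"

    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Loading a kubeconfig that requests the azure provider now succeeds,
    	// because the provider factory has been registered by the import above.
    	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("loaded config for", config.Host)
    }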
diff --git a/cmd/argoexec/main.go b/cmd/argoexec/main.go index dbebde6e829f..629e1b0806fd 100644 --- a/cmd/argoexec/main.go +++ b/cmd/argoexec/main.go @@ -5,6 +5,8 @@ import ( "os" "github.com/argoproj/argo/cmd/argoexec/commands" + // load the azure plugin (required to authenticate against AKS clusters). + _ "k8s.io/client-go/plugin/pkg/client/auth/azure" // load the gcp plugin (required to authenticate against GKE clusters). _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // load the oidc plugin (required to authenticate with OpenID Connect). diff --git a/cmd/workflow-controller/main.go b/cmd/workflow-controller/main.go index a43c1cf2866e..f881739dbf23 100644 --- a/cmd/workflow-controller/main.go +++ b/cmd/workflow-controller/main.go @@ -12,6 +12,7 @@ import ( "github.com/argoproj/pkg/stats" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth/azure" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" "k8s.io/client-go/tools/clientcmd" From afdac9bb34fe8a01ad511323a00ccf6c07e41137 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Wed, 7 Nov 2018 13:52:38 -0800 Subject: [PATCH 016/145] Issue #740 - System level workflow parallelism limits & priorities (#1065) * Issue #740 - System level workflow parallelism limits & priorities * Apply reviewer notes --- api/openapi-spec/swagger.json | 5 + cmd/argo/commands/list.go | 4 +- cmd/argo/commands/submit.go | 16 +- docs/workflow-controller-configmap.yaml | 5 + .../workflow/v1alpha1/openapi_generated.go | 7 + pkg/apis/workflow/v1alpha1/types.go | 21 ++- .../v1alpha1/zz_generated.deepcopy.go | 5 + workflow/controller/config.go | 4 + workflow/controller/controller.go | 36 +++++ workflow/controller/throttler.go | 153 ++++++++++++++++++ workflow/controller/throttler_test.go | 86 ++++++++++ 11 files changed, 331 insertions(+), 11 deletions(-) create mode 100644 workflow/controller/throttler.go create mode 100644 workflow/controller/throttler_test.go diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 92b07dd6be23..92b599d03dcd 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -978,6 +978,11 @@ "type": "integer", "format": "int64" }, + "priority": { + "description": "Priority is used if controller is configured to process limited number of workflows in parallel. 
Workflows with higher priority are processed first.", + "type": "integer", + "format": "int32" + }, "serviceAccountName": { "description": "ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.", "type": "string" diff --git a/cmd/argo/commands/list.go b/cmd/argo/commands/list.go index 1fa2a92b9178..471a3f356f26 100644 --- a/cmd/argo/commands/list.go +++ b/cmd/argo/commands/list.go @@ -108,7 +108,7 @@ func printTable(wfList []wfv1.Workflow, listArgs *listFlags) { if listArgs.allNamespaces { fmt.Fprint(w, "NAMESPACE\t") } - fmt.Fprint(w, "NAME\tSTATUS\tAGE\tDURATION") + fmt.Fprint(w, "NAME\tSTATUS\tAGE\tDURATION\tPRIORITY") if listArgs.output == "wide" { fmt.Fprint(w, "\tP/R/C\tPARAMETERS") } @@ -119,7 +119,7 @@ func printTable(wfList []wfv1.Workflow, listArgs *listFlags) { if listArgs.allNamespaces { fmt.Fprintf(w, "%s\t", wf.ObjectMeta.Namespace) } - fmt.Fprintf(w, "%s\t%s\t%s\t%s", wf.ObjectMeta.Name, worklowStatus(&wf), ageStr, durationStr) + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d", wf.ObjectMeta.Name, worklowStatus(&wf), ageStr, durationStr, wf.Spec.Priority) if listArgs.output == "wide" { pending, running, completed := countPendingRunningCompleted(&wf) fmt.Fprintf(w, "\t%d/%d/%d", pending, running, completed) diff --git a/cmd/argo/commands/submit.go b/cmd/argo/commands/submit.go index 5a2f04fa87ad..d9cd5c3a58ec 100644 --- a/cmd/argo/commands/submit.go +++ b/cmd/argo/commands/submit.go @@ -18,16 +18,18 @@ import ( // cliSubmitOpts holds submition options specific to CLI submission (e.g. controlling output) type cliSubmitOpts struct { - output string // --output - wait bool // --wait - watch bool // --watch - strict bool // --strict + output string // --output + wait bool // --wait + watch bool // --watch + strict bool // --strict + priority *int32 // --priority } func NewSubmitCommand() *cobra.Command { var ( submitOpts util.SubmitOpts cliSubmitOpts cliSubmitOpts + priority int32 ) var command = &cobra.Command{ Use: "submit FILE1 FILE2...", @@ -37,6 +39,10 @@ func NewSubmitCommand() *cobra.Command { cmd.HelpFunc()(cmd, args) os.Exit(1) } + if cmd.Flag("priority").Changed { + cliSubmitOpts.priority = &priority + } + SubmitWorkflows(args, &submitOpts, &cliSubmitOpts) }, } @@ -51,6 +57,7 @@ func NewSubmitCommand() *cobra.Command { command.Flags().BoolVarP(&cliSubmitOpts.wait, "wait", "w", false, "wait for the workflow to complete") command.Flags().BoolVar(&cliSubmitOpts.watch, "watch", false, "watch the workflow until it completes") command.Flags().BoolVar(&cliSubmitOpts.strict, "strict", true, "perform strict workflow validation") + command.Flags().Int32Var(&priority, "priority", 0, "workflow priority") return command } @@ -106,6 +113,7 @@ func SubmitWorkflows(filePaths []string, submitOpts *util.SubmitOpts, cliOpts *c var workflowNames []string for _, wf := range workflows { + wf.Spec.Priority = cliOpts.priority created, err := util.SubmitWorkflow(wfClient, &wf, submitOpts) if err != nil { log.Fatalf("Failed to submit workflow: %v", err) diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml index 771084761810..de5a64893096 100644 --- a/docs/workflow-controller-configmap.yaml +++ b/docs/workflow-controller-configmap.yaml @@ -18,6 +18,11 @@ data: # controller to run with namespace scope (role), instead of cluster scope (clusterrole). 
  namespace: argo
+  # Parallelism limits the max total parallel workflows that can execute at the same time
+
+  parallelism: 10
+
+
   # artifactRepository defines the default location to be used as the artifact repository for
   # container artifacts.
   artifactRepository:
diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go
index 5c5a6f65d529..d7401fb91e99 100644
--- a/pkg/apis/workflow/v1alpha1/openapi_generated.go
+++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go
@@ -1875,6 +1875,13 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback
 						Format: "int64",
 					},
 				},
+				"priority": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.",
+						Type:        []string{"integer"},
+						Format:      "int32",
+					},
+				},
 			},
 			Required: []string{"templates", "entrypoint"},
 		},
diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go
index fbd07fe54174..fe2c99073341 100644
--- a/pkg/apis/workflow/v1alpha1/types.go
+++ b/pkg/apis/workflow/v1alpha1/types.go
@@ -133,6 +133,8 @@ type WorkflowSpec struct {
 	// allowed to run before the controller terminates the workflow. A value of zero is used to
 	// terminate a Running workflow
 	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+	// Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.
+	Priority *int32 `json:"priority,omitempty"`
 }

 // Template is a reusable and composable unit of execution in a workflow
@@ -530,12 +532,21 @@ func (n NodeStatus) String() string {
 	return fmt.Sprintf("%s (%s)", n.Name, n.ID)
 }

-// Completed returns whether or not the node has completed execution
+func isCompletedPhase(phase NodePhase) bool {
+	return phase == NodeSucceeded ||
+		phase == NodeFailed ||
+		phase == NodeError ||
+		phase == NodeSkipped
+}
+
+// Completed returns whether or not the workflow has completed execution
+func (ws *WorkflowStatus) Completed() bool {
+	return isCompletedPhase(ws.Phase)
+}
+
+// Completed returns whether or not the node has completed execution
 func (n NodeStatus) Completed() bool {
-	return n.Phase == NodeSucceeded ||
-		n.Phase == NodeFailed ||
-		n.Phase == NodeError ||
-		n.Phase == NodeSkipped
+	return isCompletedPhase(n.Phase)
 }

 // IsDaemoned returns whether or not the node is deamoned
diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go
index ffa11d640f1b..e8e1f77e4cca 100644
--- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go
@@ -914,6 +914,11 @@ func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) {
 		*out = new(int64)
 		**out = **in
 	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(int32)
+		**out = **in
+	}
 	return
 }

diff --git a/workflow/controller/config.go b/workflow/controller/config.go
index 1559eb93b02d..52ff479a66a6 100644
--- a/workflow/controller/config.go
+++ b/workflow/controller/config.go
@@ -57,6 +57,9 @@ type WorkflowControllerConfig struct {
 	MetricsConfig metrics.PrometheusConfig `json:"metricsConfig,omitempty"`

 	TelemetryConfig metrics.PrometheusConfig `json:"telemetryConfig,omitempty"`
+
+	// Parallelism limits the max total parallel workflows that can execute at the same time
+	Parallelism int `json:"parallelism,omitempty"`
 }

 // 
ArtifactRepository represents an artifact repository in which a controller will store its artifacts @@ -114,6 +117,7 @@ func (wfc *WorkflowController) updateConfig(cm *apiv1.ConfigMap) error { return errors.Errorf(errors.CodeBadRequest, "ConfigMap '%s' does not have executorImage", wfc.configMap) } wfc.Config = config + wfc.throttler.SetParallelism(config.Parallelism) return nil }
diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index 4d45fb1d7807..c58c8d3117b1 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -56,6 +56,7 @@ type WorkflowController struct { wfQueue workqueue.RateLimitingInterface podQueue workqueue.RateLimitingInterface completedPods chan string + throttler Throttler } const ( @@ -86,6 +87,7 @@ func NewWorkflowController( podQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), completedPods: make(chan string, 512), } + wfc.throttler = NewThrottler(0, wfc.wfQueue) return &wfc } @@ -217,22 +219,35 @@ func (wfc *WorkflowController) processNextItem() bool { log.Warnf("Key '%s' in index is not an unstructured", key) return true } + + if key, ok = wfc.throttler.Next(key); !ok { + log.Warnf("Workflow %s processing has been postponed due to max parallelism limit", key) + return true + } + wf, err := util.FromUnstructured(un) if err != nil { log.Warnf("Failed to unmarshal key '%s' to workflow object: %v", key, err) woc := newWorkflowOperationCtx(wf, wfc) woc.markWorkflowFailed(fmt.Sprintf("invalid spec: %s", err.Error())) woc.persistUpdates() + wfc.throttler.Remove(key) return true } if wf.ObjectMeta.Labels[common.LabelKeyCompleted] == "true" { + wfc.throttler.Remove(key) // can get here if we already added the completed=true label, // but we are still draining the controller's workflow workqueue return true } + woc := newWorkflowOperationCtx(wf, wfc) woc.operate() + if woc.wf.Status.Completed() { + wfc.throttler.Remove(key) + } + // TODO: operate should return error if it was unable to operate properly // so we can requeue the work for a later time // See: https://github.com/kubernetes/client-go/blob/master/examples/workqueue/main.go @@ -307,6 +322,22 @@ func (wfc *WorkflowController) tweakWorkflowMetricslist(options *metav1.ListOpti options.LabelSelector = labelSelector.String() } +// getWfPriority reads a workflow's spec.priority (defaulting to 0) along with its creation timestamp, for ordering in the throttler +func getWfPriority(obj interface{}) (int32, time.Time) { + un, ok := obj.(*unstructured.Unstructured) + if !ok { + return 0, time.Now() + } + priority, hasPriority, err := unstructured.NestedInt64(un.Object, "spec", "priority") + if err != nil { + return 0, un.GetCreationTimestamp().Time + } + if !hasPriority { + priority = 0 + } + + return int32(priority), un.GetCreationTimestamp().Time +} + func (wfc *WorkflowController) addWorkflowInformerHandler() { wfc.wfInformer.AddEventHandler( cache.ResourceEventHandlerFuncs{ @@ -314,12 +345,16 @@ func (wfc *WorkflowController) addWorkflowInformerHandler() { key, err := cache.MetaNamespaceKeyFunc(obj) if err == nil { wfc.wfQueue.Add(key) + priority, creation := getWfPriority(obj) + wfc.throttler.Add(key, priority, creation) } }, UpdateFunc: func(old, new interface{}) { key, err := cache.MetaNamespaceKeyFunc(new) if err == nil { wfc.wfQueue.Add(key) + priority, creation := getWfPriority(new) + wfc.throttler.Add(key, priority, creation) } }, DeleteFunc: func(obj interface{}) { @@ -328,6 +363,7 @@ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err == nil { wfc.wfQueue.Add(key) + 
wfc.throttler.Remove(key) } }, },
diff --git a/workflow/controller/throttler.go b/workflow/controller/throttler.go new file mode 100644 index 000000000000..8224f4af900e --- /dev/null +++ b/workflow/controller/throttler.go @@ -0,0 +1,153 @@ +package controller + +import ( + "container/heap" + "sync" + "time" + + "k8s.io/client-go/util/workqueue" +) + +// Throttler allows the CRD controller to limit the number of items it processes in parallel. +type Throttler interface { + // Add registers a new item with the given priority and creation time. + Add(key interface{}, priority int32, creationTime time.Time) + // Next returns the key of the next item to process, and true if the item should be processed by the controller now; otherwise it returns false. + Next(key interface{}) (interface{}, bool) + // Remove notifies the throttler that item processing is done. In response, the throttler triggers processing of previously throttled items. + Remove(key interface{}) + // SetParallelism updates the throttler's parallelism limit. + SetParallelism(parallelism int) +} + +type throttler struct { + queue workqueue.RateLimitingInterface + inProgress map[interface{}]bool + pending *priorityQueue + lock *sync.Mutex + parallelism int +} + +// NewThrottler returns a Throttler that re-queues previously throttled items on the given workqueue. +func NewThrottler(parallelism int, queue workqueue.RateLimitingInterface) Throttler { + return &throttler{ + queue: queue, + inProgress: make(map[interface{}]bool), + lock: &sync.Mutex{}, + parallelism: parallelism, + pending: &priorityQueue{itemByKey: make(map[interface{}]*item)}, + } +} + +func (t *throttler) SetParallelism(parallelism int) { + t.lock.Lock() + defer t.lock.Unlock() + if t.parallelism != parallelism { + t.parallelism = parallelism + t.queueThrottled() + } +} + +func (t *throttler) Add(key interface{}, priority int32, creationTime time.Time) { + t.lock.Lock() + defer t.lock.Unlock() + t.pending.add(key, priority, creationTime) +} + +func (t *throttler) Next(key interface{}) (interface{}, bool) { + t.lock.Lock() + defer t.lock.Unlock() + + if _, isInProgress := t.inProgress[key]; isInProgress || t.pending.Len() == 0 { + return key, true + } + if t.parallelism < 1 || t.parallelism > len(t.inProgress) { + next := t.pending.pop() + t.inProgress[next.key] = true + return next.key, true + } + return key, false + +} + +func (t *throttler) Remove(key interface{}) { + t.lock.Lock() + defer t.lock.Unlock() + delete(t.inProgress, key) + t.pending.remove(key) + + t.queueThrottled() +} + +func (t *throttler) queueThrottled() { + for t.pending.Len() > 0 && (t.parallelism < 1 || t.parallelism > len(t.inProgress)) { + next := t.pending.pop() + t.inProgress[next.key] = true + t.queue.Add(next.key) + } +} + +type item struct { + key interface{} + creationTime time.Time + priority int32 + index int +} + +type priorityQueue struct { + items []*item + itemByKey map[interface{}]*item +} + +func (pq *priorityQueue) pop() *item { + return heap.Pop(pq).(*item) +} + +func (pq *priorityQueue) add(key interface{}, priority int32, creationTime time.Time) { + if res, ok := pq.itemByKey[key]; ok { + if res.priority != priority { + res.priority = priority + heap.Fix(pq, res.index) + } + } else { + heap.Push(pq, &item{key: key, priority: priority, creationTime: creationTime}) + } +} + +func (pq *priorityQueue) remove(key interface{}) { + if item, ok := pq.itemByKey[key]; ok { + heap.Remove(pq, item.index) + delete(pq.itemByKey, key) + } +} + +func (pq priorityQueue) Len() int { return len(pq.items) } + +func (pq priorityQueue) Less(i, j int) bool { + if pq.items[i].priority == pq.items[j].priority { + return pq.items[i].creationTime.Before(pq.items[j].creationTime) + } + return pq.items[i].priority > pq.items[j].priority +} + +func (pq 
priorityQueue) Swap(i, j int) { + pq.items[i], pq.items[j] = pq.items[j], pq.items[i] + pq.items[i].index = i + pq.items[j].index = j +} + +func (pq *priorityQueue) Push(x interface{}) { + n := len(pq.items) + item := x.(*item) + item.index = n + pq.items = append(pq.items, item) + pq.itemByKey[item.key] = item +} + +func (pq *priorityQueue) Pop() interface{} { + old := pq.items + n := len(old) + item := old[n-1] + item.index = -1 + pq.items = old[0 : n-1] + delete(pq.itemByKey, item.key) + return item +} diff --git a/workflow/controller/throttler_test.go b/workflow/controller/throttler_test.go new file mode 100644 index 000000000000..454a6bca1392 --- /dev/null +++ b/workflow/controller/throttler_test.go @@ -0,0 +1,86 @@ +package controller + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "k8s.io/client-go/util/workqueue" +) + +func TestNoParallelismSamePriority(t *testing.T) { + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + throttler := NewThrottler(0, queue) + + throttler.Add("c", 0, time.Now().Add(2*time.Hour)) + throttler.Add("b", 0, time.Now().Add(1*time.Hour)) + throttler.Add("a", 0, time.Now()) + + next, ok := throttler.Next("b") + assert.True(t, ok) + assert.Equal(t, "a", next) + + next, ok = throttler.Next("c") + assert.True(t, ok) + assert.Equal(t, "b", next) +} + +func TestWithParallelismLimitAndPriority(t *testing.T) { + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + throttler := NewThrottler(2, queue) + + throttler.Add("a", 1, time.Now()) + throttler.Add("b", 2, time.Now()) + throttler.Add("c", 3, time.Now()) + throttler.Add("d", 4, time.Now()) + + next, ok := throttler.Next("a") + assert.True(t, ok) + assert.Equal(t, "d", next) + + next, ok = throttler.Next("a") + assert.True(t, ok) + assert.Equal(t, "c", next) + + _, ok = throttler.Next("a") + assert.False(t, ok) + + next, ok = throttler.Next("c") + assert.True(t, ok) + assert.Equal(t, "c", next) + + throttler.Remove("c") + + assert.Equal(t, 1, queue.Len()) + queued, _ := queue.Get() + assert.Equal(t, "b", queued) +} + +func TestChangeParallelism(t *testing.T) { + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + throttler := NewThrottler(1, queue) + + throttler.Add("a", 1, time.Now()) + throttler.Add("b", 2, time.Now()) + throttler.Add("c", 3, time.Now()) + throttler.Add("d", 4, time.Now()) + + next, ok := throttler.Next("a") + assert.True(t, ok) + assert.Equal(t, "d", next) + + _, ok = throttler.Next("b") + assert.False(t, ok) + + _, ok = throttler.Next("c") + assert.False(t, ok) + + throttler.SetParallelism(3) + + assert.Equal(t, 2, queue.Len()) + queued, _ := queue.Get() + assert.Equal(t, "c", queued) + queued, _ = queue.Get() + assert.Equal(t, "b", queued) +} From d4ef6e944c302b5d2b75d4c49e1833c3a28c1f9a Mon Sep 17 00:00:00 2001 From: Ed Lee Date: Fri, 9 Nov 2018 00:16:57 -0800 Subject: [PATCH 017/145] Add new article and minor edits. (#1083) --- README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b3c5a24a6ab8..90c838865cc3 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ ## News -We are excited to welcome [Adobe](https://www.adobe.com/) and [BlackRock](https://www.blackrock.com/) as the latest corporate members of the Argo Community! We are also thrilled that BlackRock has developed an eventing framework for Argo and has decided to contribute it to the Argo Community. 
Please check out the new repo and try [Argo Events](https://github.com/argoproj/argo-events)! +We are thrilled that BlackRock has developed an eventing framework for Argo and has decided to contribute it to the Argo Community. Please check out the new project and try [Argo Events](https://github.com/argoproj/argo-events)! If you actively use Argo in your organization and believe that your organization may be interested in actively participating in the Argo Community, please ask a representative to contact saradhi_sreegiriraju@intuit.com for additional information. @@ -56,20 +56,21 @@ Currently **officially** using Argo: 1. [Adobe](https://www.adobe.com/) 1. [BlackRock](https://www.blackrock.com/) 1. [CoreFiling](https://www.corefiling.com/) +1. [Cratejoy](https://www.cratejoy.com/) 1. [Cyrus Biotechnology](https://cyrusbio.com/) 1. [Datadog](https://www.datadoghq.com/) 1. [Gladly](https://gladly.com/) 1. [Google](https://www.google.com/intl/en/about/our-company/) 1. [Interline Technologies](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/) 1. [Intuit](https://www.intuit.com/) +1. [KintoHub](https://www.kintohub.com/) 1. [Localytics](https://www.localytics.com/) 1. [NVIDIA](https://www.nvidia.com/) -1. [KintoHub](https://www.kintohub.com/) -1. [Styra](https://www.styra.com/) -1. [Cratejoy](https://www.cratejoy.com/) 1. [SAP Hybris](https://cx.sap.com/) +1. [Styra](https://www.styra.com/) ## Community Blogs and Presentations +* [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/) * [Producing 200 OpenStreetMap extracts in 35 minutes using a scalable data workflow](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/) * [Argo integration review](http://dev.matt.hillsdon.net/2018/03/24/argo-integration-review.html) * TGI Kubernetes with Joe Beda: [Argo workflow system](https://www.youtube.com/watch?v=M_rxPPLG8pU&start=859) From 95b72f38c94d12735e79bb8bec1a46b10514603c Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Sat, 10 Nov 2018 21:46:24 -0800 Subject: [PATCH 018/145] Update docs to outline bare minimum set of privileges for a workflow --- demo.md | 11 +++++++---- docs/variables.md | 11 +++-------- docs/workflow-rbac.md | 44 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 12 deletions(-) create mode 100644 docs/workflow-rbac.md diff --git a/demo.md b/demo.md index b615916b4e05..6a536b56e5e5 100644 --- a/demo.md +++ b/demo.md @@ -32,13 +32,16 @@ kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=c ``` ## 3. Configure the service account to run workflows -For clusters with RBAC enabled, the 'default' service account is too limited to support features -like artifacts, outputs, access to secrets, etc... Run the following command to grant admin -privileges to the 'default' service account in the namespace 'default': + +To run all of the examples in this guide, the 'default' service account is too limited to support +features such as artifacts, outputs, access to secrets, etc... 
For demo purposes, run the following +command to grant admin privileges to the 'default' service account in the namespace 'default': ``` kubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=default:default ``` -NOTE: You can also submit workflows which run with a different service account using: +For the bare minimum set of privileges which a workflow needs to function, see +[Workflow RBAC](docs/workflow-rbac.md). You can also submit workflows which run with a different +service account using: ``` argo submit --serviceaccount <name> ```
diff --git a/docs/variables.md b/docs/variables.md index bb08d36426c8..cabf49084437 100644 --- a/docs/variables.md +++ b/docs/variables.md @@ -43,16 +43,11 @@ The following variables are made available to reference various metadata of a wo | `workflow.uid` | Workflow UID. Useful for setting ownership reference to a resource, or a unique artifact location | | `workflow.parameters.` | Input parameter to the workflow | | `workflow.outputs.parameters.` | Output parameter of the workflow | +| `workflow.creationTimestamp` | Workflow creation timestamp formatted in RFC 3339 (e.g. `2018-08-23T05:42:49Z`) | +| `workflow.creationTimestamp.` | Creation timestamp formatted with a [strftime](http://strftime.org) format character | + ## Exit Handler: | Variable | Description| |----------|------------| | `workflow.status` | Workflow status. One of: `Succeeded`, `Failed`, `Error` | -## Coming in v2.2: -| Variable | Description| -|----------|------------| -| `workflow.artifacts.` | Input artifact to the workflow | -| `workflow.outputs.artifacts.` | Output artifact to the workflow | -| `workflow.creationTimestamp` | Workflow creation timestamp formatted in RFC 3339 (e.g. `2018-08-23T05:42:49Z`) | -| `workflow.creationTimestamp.` | Creation timestamp formatted with a [strftime](http://strftime.org) format character |
diff --git a/docs/workflow-rbac.md b/docs/workflow-rbac.md new file mode 100644 index 000000000000..18ecc54aacb9 --- /dev/null +++ b/docs/workflow-rbac.md @@ -0,0 +1,44 @@ +# Workflow RBAC + +All pods in a workflow run with the service account specified in `workflow.spec.serviceAccountName`, +or if omitted, the `default` service account of the workflow's namespace. The amount of access which +a workflow needs depends on what the workflow needs to do. For example, if your workflow needs +to deploy a resource, then the workflow's service account will require 'create' privileges on that +resource. + +The bare minimum for a workflow to function is outlined below: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: workflow-role +rules: +# pod get/watch is used to identify the container IDs of the current pod +# pod patch is used to annotate the step's outputs back to controller (e.g. artifact location) +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - patch +# logs get/watch are used to get the pods logs for script outputs, and for log archival +- apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - watch +# secrets get is used to retrieve credentials to artifact repository. 
NOTE: starting in Argo v2.3, +# the API secret access will be removed in favor of volume mounting the secrets to the workflow pod +# (issue #1072) +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +```
From b5dfa0217470c97d8e83716a22cf3bd274c4a2d5 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Sun, 11 Nov 2018 01:41:28 -0800 Subject: [PATCH 019/145] Use relative links on README file (#1087) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md index 90c838865cc3..06adc1001b90 100644 --- a/README.md +++ b/README.md @@ -22,9 +22,9 @@ Argo is an open source container-native workflow engine for getting work done on * Argo with Kubernetes puts a cloud-scale supercomputer at your fingertips. ## Documentation -* [Get started here](https://github.com/argoproj/argo/blob/master/demo.md) -* [How to write Argo workflow specs](https://github.com/argoproj/argo/blob/master/examples/README.md) -* [How to configure your artifact repository](https://github.com/argoproj/argo/blob/master/ARTIFACT_REPO.md) +* [Get started here](demo.md) +* [How to write Argo workflow specs](examples/README.md) +* [How to configure your artifact repository](ARTIFACT_REPO.md) ## Features * DAG or Steps based declaration of workflows
From 62b24368a93d57eb505bf226e042a8eb0bf72da4 Mon Sep 17 00:00:00 2001 From: Dan Norris Date: Tue, 13 Nov 2018 18:41:51 -0500 Subject: [PATCH 020/145] Fix typo in demo.md (#1089) Fix a small typo in demo.md that I encountered when reading through the getting started guide. --- demo.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/demo.md b/demo.md index 6a536b56e5e5..7684293561a9 100644 --- a/demo.md +++ b/demo.md @@ -67,7 +67,7 @@ kubectl get po --selector=workflows.argoproj.io/workflow=hello-world-xxx --show- kubectl logs hello-world-yyy -c main ``` -Additional examples are availabe [here](https://github.com/argoproj/argo/blob/master/examples/README.md). +Additional examples are available [here](https://github.com/argoproj/argo/blob/master/examples/README.md). ## 5. Install an Artifact Repository
From 60b508dd9ec36ef45013d72ec6166dd9a30d77fe Mon Sep 17 00:00:00 2001 From: Joshua Carp Date: Thu, 15 Nov 2018 14:58:17 -0500 Subject: [PATCH 021/145] Drop reference to removed `argo install` command. (#1074) --- CONTRIBUTING.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 96f3d7ed5b35..58c833e48377 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,5 +49,6 @@ $ ./dist/argo version ### Deploying controller with alternative controller/executor images ``` -$ argo install --controller-image jessesuen/workflow-controller:latest --executor-image jessesuen/argoexec:latest +$ helm install argo/argo --set images.namespace=jessesuen --set +images.controller=workflow-controller:latest ```
From cb8b036b8db3ebeb6ef73d9f2070a1ddaf0d2150 Mon Sep 17 00:00:00 2001 From: Joshua Carp Date: Thu, 15 Nov 2018 14:59:14 -0500 Subject: [PATCH 022/145] Initialize child node before marking phase. 
Fixes panic on invalid `When` (#1075) --- workflow/controller/steps.go | 1 + 1 file changed, 1 insertion(+) diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index c1803f333349..571206cfd84b 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -174,6 +174,7 @@ func (woc *wfOperationCtx) executeStepGroup(stepGroup []wfv1.WorkflowStep, sgNod // Check the step's when clause to decide if it should execute proceed, err := shouldExecute(step.When) if err != nil { + woc.initializeNode(childNodeName, wfv1.NodeTypeSkipped, "", stepsCtx.boundaryID, wfv1.NodeError, err.Error()) woc.addChildNode(sgNodeName, childNodeName) woc.markNodeError(childNodeName, err) return woc.markNodeError(sgNodeName, err) From 438330c38da69a68d6b0b0b24f6aae0053fc35ee Mon Sep 17 00:00:00 2001 From: kshamajain99 Date: Thu, 15 Nov 2018 13:40:13 -0800 Subject: [PATCH 023/145] #1081 added retry logic to s3 load and save function (#1082) --- workflow/artifacts/s3/s3.go | 92 +++++++++++++++++++++++-------------- 1 file changed, 58 insertions(+), 34 deletions(-) diff --git a/workflow/artifacts/s3/s3.go b/workflow/artifacts/s3/s3.go index 9ea6e4ac68c7..cbbe325d9f3d 100644 --- a/workflow/artifacts/s3/s3.go +++ b/workflow/artifacts/s3/s3.go @@ -6,6 +6,8 @@ import ( log "github.com/sirupsen/logrus" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "k8s.io/apimachinery/pkg/util/wait" + "time" ) // S3ArtifactDriver is a driver for AWS S3 @@ -31,42 +33,64 @@ func (s3Driver *S3ArtifactDriver) newS3Client() (argos3.S3Client, error) { // Load downloads artifacts from S3 compliant storage func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string) error { - s3cli, err := s3Driver.newS3Client() - if err != nil { - return err - } - origErr := s3cli.GetFile(inputArtifact.S3.Bucket, inputArtifact.S3.Key, path) - if origErr == nil { - return nil - } - if !argos3.IsS3ErrCode(origErr, "NoSuchKey") { - return origErr - } - // If we get here, the error was a NoSuchKey. The key might be a s3 "directory" - isDir, err := s3cli.IsDirectory(inputArtifact.S3.Bucket, inputArtifact.S3.Key) - if err != nil { - log.Warnf("Failed to test if %s is a directory: %v", inputArtifact.S3.Bucket, err) - return origErr - } - if !isDir { - // It's neither a file, nor a directory. Return the original NoSuchKey error - return origErr - } - return s3cli.GetDirectory(inputArtifact.S3.Bucket, inputArtifact.S3.Key, path) + err := wait.ExponentialBackoff(wait.Backoff{Duration: time.Millisecond * 10, Factor: 2.0, Steps: 5, Jitter: 0.1}, + func() (bool, error) { + + s3cli, err := s3Driver.newS3Client() + if err != nil { + log.Warnf("Failed to create new S3 client: %v", err) + return false, nil + } + origErr := s3cli.GetFile(inputArtifact.S3.Bucket, inputArtifact.S3.Key, path) + if origErr == nil { + return true, nil + } + if !argos3.IsS3ErrCode(origErr, "NoSuchKey") { + return false, origErr + } + // If we get here, the error was a NoSuchKey. The key might be a s3 "directory" + isDir, err := s3cli.IsDirectory(inputArtifact.S3.Bucket, inputArtifact.S3.Key) + if err != nil { + log.Warnf("Failed to test if %s is a directory: %v", inputArtifact.S3.Bucket, err) + return false, nil + } + if !isDir { + // It's neither a file, nor a directory. 
Return the original NoSuchKey error + return false, origErr + } + + if err = s3cli.GetDirectory(inputArtifact.S3.Bucket, inputArtifact.S3.Key, path); err != nil { + return false, nil + } + return true, nil + }) + + return err } // Save saves an artifact to S3 compliant storage func (s3Driver *S3ArtifactDriver) Save(path string, outputArtifact *wfv1.Artifact) error { - s3cli, err := s3Driver.newS3Client() - if err != nil { - return err - } - isDir, err := file.IsDirectory(path) - if err != nil { - return err - } - if isDir { - return s3cli.PutDirectory(outputArtifact.S3.Bucket, outputArtifact.S3.Key, path) - } - return s3cli.PutFile(outputArtifact.S3.Bucket, outputArtifact.S3.Key, path) + err := wait.ExponentialBackoff(wait.Backoff{Duration: time.Millisecond * 10, Factor: 2.0, Steps: 5, Jitter: 0.1}, + func() (bool, error) { + s3cli, err := s3Driver.newS3Client() + if err != nil { + log.Warnf("Failed to create new S3 client: %v", err) + return false, nil + } + isDir, err := file.IsDirectory(path) + if err != nil { + log.Warnf("Failed to test if %s is a directory: %v", path, err) + return false, nil + } + if isDir { + if err = s3cli.PutDirectory(outputArtifact.S3.Bucket, outputArtifact.S3.Key, path); err != nil { + return false, nil + } + // the directory has been uploaded in full; do not fall through to the single-file path + return true, nil + } + if err = s3cli.PutFile(outputArtifact.S3.Bucket, outputArtifact.S3.Key, path); err != nil { + return false, nil + } + return true, nil + }) + return err }
From 46855dcde1d9ba904a1c94a97e602d0510f5e0d4 Mon Sep 17 00:00:00 2001 From: Rocio Montes Date: Tue, 20 Nov 2018 13:57:56 -0800 Subject: [PATCH 024/145] adding logo to be used by the OS Site (#1099) --- os-project-logo.svg | 140 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 os-project-logo.svg
diff --git a/os-project-logo.svg b/os-project-logo.svg new file mode 100644 index 000000000000..5de0423e0c8c --- /dev/null +++ b/os-project-logo.svg @@ -0,0 +1,140 @@ [140 lines of SVG markup, stripped in extraction]
From bb8a6a58fee8170d6db65c73a50c5fe640f3cb7d Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Tue, 20 Nov 2018 14:30:42 -0800 Subject: [PATCH 025/145] Update ROADMAP.md --- ROADMAP.md | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/ROADMAP.md b/ROADMAP.md index fbc7e0ddf529..425b1d2583bd 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,14 +1,20 @@ # Roadmap -## v2.2 +## v2.3 +* Priority - ability to set a priority per workflow +* Queuing - ability to limit number of concurrent workflows +* PNS (Process Namespace Sharing) Executor -### Proposed Items +## v2.4 +* Persistence - support offloading of workflow state into database layer +* Large workflow support (enabled by persistence feature) +* Argo API server (integration with argo events, persistence layer) -The following are candidate items for v2.2 release +## v2.5 +* Argo API server enhancements (pagination, SSO, etc...) -* Workflow composability - support for Jsonnet in CLI -* Queuing / Admission control - ability to limit number of concurrent workflows -* Scheduling - investigate k8s PriorityClasses and re-use in workflows -* Persistence - workflow history/state -* `argo run` to run workflows against clusters without a controller - #794 -* UI – filtering to improve performance +### Proposed Items +* Best effort workflow steps +* Template level finalizers +* Artifact loop aggregation +* Pod reclamation controls
From 49c1fa4f42e1c19ce3b8f4ac2c339894e1ed90d7 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Thu, 15 Nov 2018 17:55:50 -0800 Subject: [PATCH 026/145] Update docs with examples using the K8s REST API --- docs/README.md | 9 +++ docs/example-golang/main.go | 77 +++++++++++++++++++++++++ docs/rest-api.md | 35 +++++++++++ docs/workflow-controller-configmap.yaml | 5 +- 4 files changed, 123 insertions(+), 3 deletions(-) create mode 100644 docs/README.md create mode 100644 docs/example-golang/main.go create mode 100644 docs/rest-api.md
diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000000..d0f3d87ad6ed --- /dev/null +++ b/docs/README.md @@ -0,0 +1,9 @@ +# Argo Documentation + +## [Getting Started](../demo.md) + +## Features +* [Controller Configuration](workflow-controller-configmap.yaml) +* [RBAC](workflow-rbac.md) +* [REST API](rest-api.md) +* [Workflow Variables](variables.md)
diff --git a/docs/example-golang/main.go b/docs/example-golang/main.go new file mode 100644 index 000000000000..29fc9ee6601b --- /dev/null +++ b/docs/example-golang/main.go @@ -0,0 +1,77 @@ +package main + +import ( + "flag" + "fmt" + "os" + "path/filepath" + + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" + "github.com/argoproj/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/client-go/tools/clientcmd" +) + +var ( + helloWorldWorkflow = wfv1.Workflow{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "hello-world-", + }, + Spec: wfv1.WorkflowSpec{ + Entrypoint: "whalesay", + Templates: []wfv1.Template{ + { + Name: "whalesay", + Container: &corev1.Container{ + Image: "docker/whalesay:latest", + Command: []string{"cowsay", "hello world"}, + }, + }, + }, + }, + } +) + +func main() { + // resolve the default kubeconfig path and parse command-line flags + kubeconfig := flag.String("kubeconfig", filepath.Join(os.Getenv("HOME"), ".kube", "config"), "(optional) absolute path to the kubeconfig file") + flag.Parse() + + // use the current context in kubeconfig + config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig) + checkErr(err) + namespace := "default" + + // create the workflow client + wfClient := wfclientset.NewForConfigOrDie(config).ArgoprojV1alpha1().Workflows(namespace) + + // submit the hello world workflow + createdWf, err := wfClient.Create(&helloWorldWorkflow) + checkErr(err) + fmt.Printf("Workflow %s submitted\n", createdWf.Name) + + // wait for the workflow to complete + fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", createdWf.Name)) + watchIf, err := wfClient.Watch(metav1.ListOptions{FieldSelector: fieldSelector.String()}) + errors.CheckError(err) + defer watchIf.Stop() + for next := range watchIf.ResultChan() { + wf, ok := next.Object.(*wfv1.Workflow) + if !ok { + continue + } + if !wf.Status.FinishedAt.IsZero() { + fmt.Printf("Workflow %s %s at %v\n", wf.Name, wf.Status.Phase, wf.Status.FinishedAt) + break + } + } +} + +func checkErr(err error) { + if err != nil { + panic(err.Error()) + } +}
diff --git a/docs/rest-api.md b/docs/rest-api.md new file mode 100644 index 000000000000..ba0ae8f615bb --- /dev/null +++ b/docs/rest-api.md @@ -0,0 +1,35 @@ +# REST API + +Argo is implemented as a kubernetes controller and Workflow [Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/). +Argo itself does not run an API server, and as with all CRDs, it extends the Kubernetes API server by +introducing a new API Group/Version (argoproj.io/v1alpha1) and Kind (Workflow). When CRDs are +registered in a cluster, access to those resources is made available by exposing new endpoints in +the kubernetes API server. For example, to list workflows in the default namespace, a client would +make an HTTP GET request to: `https:///apis/argoproj.io/v1alpha1/namespaces/default/workflows` + +> NOTE: the optional argo-ui does run a thin API layer to power the UI, but is not intended for + programmatic interaction. + +A common scenario is to programmatically submit and retrieve workflows. To do this, you would use the +existing Kubernetes REST client in the language of preference, which often provides libraries for performing +CRUD operations on custom resource objects. + +## Examples + +### Golang + +A kubernetes Workflow clientset library is auto-generated under [argoproj/argo/pkg/client](https://github.com/argoproj/argo/tree/master/pkg/client) and can be imported by golang +applications. See the [golang code example](example-golang/main.go) on how to make use of this client. + +### Python +The python kubernetes client has libraries for interacting with custom objects. See: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CustomObjectsApi.md + +### Java +The Java kubernetes client has libraries for interacting with custom objects. See: +https://github.com/kubernetes-client/java/blob/master/kubernetes/docs/CustomObjectsApi.md + +## OpenAPI + +An OpenAPI Spec is generated under [argoproj/argo/api/openapi-spec](https://github.com/argoproj/argo/blob/master/api/openapi-spec/swagger.json). This spec may be +used to auto-generate concrete data structures in other languages.
diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml index de5a64893096..b5d9c7b31471 100644 --- a/docs/workflow-controller-configmap.yaml +++ b/docs/workflow-controller-configmap.yaml @@ -15,14 +15,13 @@ data: instanceID: my-ci-controller # namespace limits the controller's watch/queries to a specific namespace. This allows the - # controller to run with namespace scope (role), instead of cluster scope (clusterrole). + # controller to run with namespace scope (Role), instead of cluster scope (ClusterRole). namespace: argo # Parallelism limits the max total parallel workflows that can execute at the same time - + # (available since Argo v2.3) parallelism: 10 - # artifactRepository defines the default location to be used as the artifact repository for # container artifacts. 
artifactRepository: From ec20d94b6f1d0d88d579c8a27b964f6e9915ff55 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Wed, 5 Dec 2018 23:21:54 -0800 Subject: [PATCH 027/145] Issue #1114 - Set FORCE_NAMESPACE_ISOLATION env variable in namespace install manifests (#1116) --- manifests/install.yaml | 4 ++-- manifests/namespace-install.yaml | 4 ++-- .../overlays/03d_argo-ui-deployment.yaml | 12 ++++++++++++ 3 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 manifests/namespace-install/overlays/03d_argo-ui-deployment.yaml diff --git a/manifests/install.yaml b/manifests/install.yaml index 0c802d582237..5a12b2a15c17 100644 --- a/manifests/install.yaml +++ b/manifests/install.yaml @@ -16,13 +16,13 @@ spec: apiVersion: v1 kind: ServiceAccount metadata: - name: argo + name: argo-ui namespace: argo --- apiVersion: v1 kind: ServiceAccount metadata: - name: argo-ui + name: argo namespace: argo --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml index 9029aa9f4df0..8453fec54ca2 100644 --- a/manifests/namespace-install.yaml +++ b/manifests/namespace-install.yaml @@ -16,12 +16,12 @@ spec: apiVersion: v1 kind: ServiceAccount metadata: - name: argo + name: argo-ui --- apiVersion: v1 kind: ServiceAccount metadata: - name: argo-ui + name: argo --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/manifests/namespace-install/overlays/03d_argo-ui-deployment.yaml b/manifests/namespace-install/overlays/03d_argo-ui-deployment.yaml new file mode 100644 index 000000000000..1efc2479f270 --- /dev/null +++ b/manifests/namespace-install/overlays/03d_argo-ui-deployment.yaml @@ -0,0 +1,12 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: argo-ui +spec: + template: + spec: + containers: + - name: argo-ui + env: + - name: FORCE_NAMESPACE_ISOLATION + value: "true" From e59398adf39b8ef1d0ce273263e80d49e370c510 Mon Sep 17 00:00:00 2001 From: Miyamae Yuuya Date: Fri, 7 Dec 2018 16:39:51 +0900 Subject: [PATCH 028/145] Fix examples docs of parameters. (#1110) --- examples/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/README.md b/examples/README.md index 4d476b832cf5..1e2b8eb0066a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -167,7 +167,7 @@ spec: entrypoint: A arguments: parameters: - - name: log_level + - name: log-level value: INFO templates: @@ -176,15 +176,15 @@ spec: image: containerA env: - name: LOG_LEVEL - value: "{{workflow.parameters.log_level}}" + value: "{{workflow.parameters.log-level}}" command: [runA] - - - name: B - container: - image: containerB - env: - - name: LOG_LEVEL - value: "{{workflow.parameters.log_level}}" - command: [runB] + - name: B + container: + image: containerB + env: + - name: LOG_LEVEL + value: "{{workflow.parameters.log-level}}" + command: [runB] ``` In this workflow, both steps `A` and `B` would have the same log level set to `INFO` and can easily be changed between workflow submissions using the `-p` flag. 
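As a quick illustration of that override, here is a minimal sketch of the submission command. It assumes the spec above has been saved to a file named `global-parameters.yaml` (a hypothetical filename; any path works):

```
# Override the workflow-level 'log-level' parameter at submission time.
# Steps A and B will then resolve {{workflow.parameters.log-level}} to DEBUG.
argo submit global-parameters.yaml -p log-level=DEBUG
```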
From 78142837836cb100f6858d246d84100b74794cc6 Mon Sep 17 00:00:00 2001 From: WeiYan Date: Fri, 7 Dec 2018 17:56:16 +0800 Subject: [PATCH 029/145] Remove docker_lib mount volume which is not needed anymore (#1115) * Remove docker_lib mount volume which is not needed anymore * Remove unused hostPathDir --- workflow/common/common.go | 4 ---- workflow/controller/workflowpod.go | 23 ++--------------------- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/workflow/common/common.go b/workflow/common/common.go index 432339e10cbe..ce5ce1602097 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -29,10 +29,6 @@ const ( // PodMetadataAnnotationsPath is the file path containing pod metadata annotations. Examined by executor PodMetadataAnnotationsPath = PodMetadataMountPath + "/" + PodMetadataAnnotationsVolumePath - // DockerLibVolumeName is the volume name for the /var/lib/docker host path volume - DockerLibVolumeName = "docker-lib" - // DockerLibHostPath is the host directory path containing docker runtime state - DockerLibHostPath = "/var/lib/docker" // DockerSockVolumeName is the volume name for the /var/run/docker.sock host path volume DockerSockVolumeName = "docker-sock" diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 7b794832cb48..cd4d29ddd5d2 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -43,27 +43,8 @@ var ( MountPath: common.PodMetadataMountPath, } - hostPathDir = apiv1.HostPathDirectory hostPathSocket = apiv1.HostPathSocket - // volumeDockerLib provides the wait container access to the minion's host docker containers - // runtime files (e.g. /var/lib/docker/container). This is used by the executor to access - // the main container's logs (and potentially storage to upload output artifacts) - volumeDockerLib = apiv1.Volume{ - Name: common.DockerLibVolumeName, - VolumeSource: apiv1.VolumeSource{ - HostPath: &apiv1.HostPathVolumeSource{ - Path: common.DockerLibHostPath, - Type: &hostPathDir, - }, - }, - } - volumeMountDockerLib = apiv1.VolumeMount{ - Name: volumeDockerLib.Name, - MountPath: volumeDockerLib.VolumeSource.HostPath.Path, - ReadOnly: true, - } - // volumeDockerSock provides the wait container direct access to the minion's host docker daemon. // The primary purpose of this is to make available `docker cp` to collect an output artifact // from a container. 
Alternatively, we could use `kubectl cp`, but `docker cp` avoids the extra @@ -319,7 +300,7 @@ func (woc *wfOperationCtx) createVolumeMounts() []apiv1.VolumeMount { case common.ContainerRuntimeExecutorKubelet: return volumeMounts default: - return append(volumeMounts, volumeMountDockerLib, volumeMountDockerSock) + return append(volumeMounts, volumeMountDockerSock) } } @@ -331,7 +312,7 @@ func (woc *wfOperationCtx) createVolumes() []apiv1.Volume { case common.ContainerRuntimeExecutorKubelet: return volumes default: - return append(volumes, volumeDockerLib, volumeDockerSock) + return append(volumes, volumeDockerSock) } } From 515a9005057dfd260a8b60c4ba1ab8c3aa614f48 Mon Sep 17 00:00:00 2001 From: Chen Zhiwei Date: Fri, 7 Dec 2018 17:58:24 +0800 Subject: [PATCH 030/145] add support for ppc64le and s390x (#1102) --- Makefile | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 73d2fa25338b..a9dee64bece5 100644 --- a/Makefile +++ b/Makefile @@ -60,15 +60,42 @@ builder: cli: CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${ARGO_CLI_NAME} ./cmd/argo -.PHONY: cli-linux -cli-linux: builder +.PHONY: cli-linux-amd64 +cli-linux-amd64: builder ${BUILDER_CMD} make cli \ CGO_ENABLED=0 \ + GOOS=linux \ + GOARCH=amd64 \ IMAGE_TAG=$(IMAGE_TAG) \ IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) \ LDFLAGS='-extldflags "-static"' \ ARGO_CLI_NAME=argo-linux-amd64 +.PHONY: cli-linux-ppc64le +cli-linux-ppc64le: builder + ${BUILDER_CMD} make cli \ + CGO_ENABLED=0 \ + GOOS=linux \ + GOARCH=ppc64le \ + IMAGE_TAG=$(IMAGE_TAG) \ + IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) \ + LDFLAGS='-extldflags "-static"' \ + ARGO_CLI_NAME=argo-linux-ppc64le + +.PHONY: cli-linux-s390x +cli-linux-s390x: builder + ${BUILDER_CMD} make cli \ + CGO_ENABLED=0 \ + GOOS=linux \ + GOARCH=s390x \ + IMAGE_TAG=$(IMAGE_TAG) \ + IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) \ + LDFLAGS='-extldflags "-static"' \ + ARGO_CLI_NAME=argo-linux-s390x + +.PHONY: cli-linux +cli-linux: cli-linux-amd64 cli-linux-ppc64le cli-linux-s390x + .PHONY: cli-darwin cli-darwin: builder ${BUILDER_CMD} make cli \ From 1ae3696c27f343c947d9225c5cc2294c8b7c45e5 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Fri, 7 Dec 2018 02:19:37 -0800 Subject: [PATCH 031/145] Install mime-support in argoexec to set proper mime types for S3 artifacts (resolves #1119) --- Dockerfile-argoexec | 2 +- VERSION | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile-argoexec b/Dockerfile-argoexec index 2461140bbc25..e02426635e7b 100644 --- a/Dockerfile-argoexec +++ b/Dockerfile-argoexec @@ -1,7 +1,7 @@ FROM debian:9.5-slim RUN apt-get update && \ - apt-get install -y curl jq procps git tar && \ + apt-get install -y curl jq procps git tar mime-support && \ rm -rf /var/lib/apt/lists/* && \ curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ chmod +x ./kubectl && \ diff --git a/VERSION b/VERSION index c043eea7767e..276cbf9e2858 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2.1 +2.3.0 From 6bb3adbc596349100c4f19155cfe976f4ea0e6fb Mon Sep 17 00:00:00 2001 From: jacky Date: Sun, 9 Dec 2018 04:35:04 +0800 Subject: [PATCH 032/145] Adding Quantibio in Who uses Argo (#1111) * Adding Quantibio in Who uses Argo * fix spelling mistake --- README.md | 1 + cmd/argo/commands/get.go | 2 +- cmd/argo/commands/list.go | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md 
b/README.md index 06adc1001b90..153807b01833 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,7 @@ Currently **officially** using Argo: 1. [NVIDIA](https://www.nvidia.com/) 1. [SAP Hybris](https://cx.sap.com/) 1. [Styra](https://www.styra.com/) +1. [Quantibio](http://quantibio.com/us/en/) ## Community Blogs and Presentations * [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/) diff --git a/cmd/argo/commands/get.go b/cmd/argo/commands/get.go index 58b80b2439e6..e57f52bba706 100644 --- a/cmd/argo/commands/get.go +++ b/cmd/argo/commands/get.go @@ -71,7 +71,7 @@ func printWorkflowHelper(wf *wfv1.Workflow, outFmt string) { serviceAccount = "default" } fmt.Printf(fmtStr, "ServiceAccount:", serviceAccount) - fmt.Printf(fmtStr, "Status:", worklowStatus(wf)) + fmt.Printf(fmtStr, "Status:", workflowStatus(wf)) if wf.Status.Message != "" { fmt.Printf(fmtStr, "Message:", wf.Status.Message) } diff --git a/cmd/argo/commands/list.go b/cmd/argo/commands/list.go index 471a3f356f26..096426f8ecf6 100644 --- a/cmd/argo/commands/list.go +++ b/cmd/argo/commands/list.go @@ -119,7 +119,7 @@ func printTable(wfList []wfv1.Workflow, listArgs *listFlags) { if listArgs.allNamespaces { fmt.Fprintf(w, "%s\t", wf.ObjectMeta.Namespace) } - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d", wf.ObjectMeta.Name, worklowStatus(&wf), ageStr, durationStr, wf.Spec.Priority) + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d", wf.ObjectMeta.Name, workflowStatus(&wf), ageStr, durationStr, wf.Spec.Priority) if listArgs.output == "wide" { pending, running, completed := countPendingRunningCompleted(&wf) fmt.Fprintf(w, "\t%d/%d/%d", pending, running, completed) @@ -196,7 +196,7 @@ func (f ByFinishedAt) Less(i, j int) bool { } // workflowStatus returns a human readable inferred workflow status based on workflow phase and conditions -func worklowStatus(wf *wfv1.Workflow) wfv1.NodePhase { +func workflowStatus(wf *wfv1.Workflow) wfv1.NodePhase { switch wf.Status.Phase { case wfv1.NodeRunning: if util.IsWorkflowSuspended(wf) { From 587ab1a02772cd9b7ae7cd94f91b815ac4774297 Mon Sep 17 00:00:00 2001 From: Alexey Volkov Date: Fri, 14 Dec 2018 11:01:58 -0800 Subject: [PATCH 033/145] Fix output artifact and parameter conflict (#1125) `SaveArtifacts` deletes the files that `SaveParameters` might still need, so we're calling `SaveParameters` first. 
Fixes https://github.com/argoproj/argo/issues/1124 --- cmd/argoexec/commands/wait.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/argoexec/commands/wait.go b/cmd/argoexec/commands/wait.go index 04eb7091fc26..32d9b1114579 100644 --- a/cmd/argoexec/commands/wait.go +++ b/cmd/argoexec/commands/wait.go @@ -40,13 +40,14 @@ func waitContainer() error { wfExecutor.AddError(err) return err } - err = wfExecutor.SaveArtifacts() + // Saving output parameters + err = wfExecutor.SaveParameters() if err != nil { wfExecutor.AddError(err) return err } - // Saving output parameters - err = wfExecutor.SaveParameters() + // Saving output artifacts + err = wfExecutor.SaveArtifacts() if err != nil { wfExecutor.AddError(err) return err From 3484099c856716f6da5e02ad75a48b568f547695 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Mon, 17 Dec 2018 11:50:54 -0800 Subject: [PATCH 034/145] Update generated swagger to fix verify-codegen (#1131) --- api/openapi-spec/swagger.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 92b599d03dcd..42fddd83d69b 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -2,7 +2,7 @@ "swagger": "2.0", "info": { "title": "Argo", - "version": "v2.2.1" + "version": "v2.3.0" }, "paths": {}, "definitions": { From 0f84e5148dd34c225a35eab7a1f5953afb45e724 Mon Sep 17 00:00:00 2001 From: Naresh Kumar Amrutham Date: Mon, 17 Dec 2018 17:11:48 -0800 Subject: [PATCH 035/145] Allow owner reference to be set in submit util (#1120) --- workflow/util/util.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/workflow/util/util.go b/workflow/util/util.go index 40d0bb4ffe24..156f32a365d8 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -132,13 +132,14 @@ func IsWorkflowCompleted(wf *wfv1.Workflow) bool { // SubmitOpts are workflow submission options type SubmitOpts struct { - Name string // --name - GenerateName string // --generate-name - InstanceID string // --instanceid - Entrypoint string // --entrypoint - Parameters []string // --parameter - ParameterFile string // --parameter-file - ServiceAccount string // --serviceaccount + Name string // --name + GenerateName string // --generate-name + InstanceID string // --instanceid + Entrypoint string // --entrypoint + Parameters []string // --parameter + ParameterFile string // --parameter-file + ServiceAccount string // --serviceaccount + OwnerReference *metav1.OwnerReference // useful if your custom controller creates argo workflow resources } // SubmitWorkflow validates and submit a single workflow and override some of the fields of the workflow @@ -233,6 +234,10 @@ func SubmitWorkflow(wfIf v1alpha1.WorkflowInterface, wf *wfv1.Workflow, opts *Su if opts.Name != "" { wf.ObjectMeta.Name = opts.Name } + if opts.OwnerReference != nil { + wf.SetOwnerReferences(append(wf.GetOwnerReferences(), *opts.OwnerReference)) + } + err := validate.ValidateWorkflow(wf) if err != nil { return nil, err From e09d9ade25535ae7e78ca23636e4d158a98bba84 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Fri, 28 Dec 2018 15:23:48 -0800 Subject: [PATCH 036/145] Issue #1104 - Remove container wait timeout from 'argo logs --follow' (#1142) --- cmd/argo/commands/logs.go | 152 ++++++++++++++++------------- workflow/executor/k8sapi/client.go | 4 +- 2 files changed, 88 insertions(+), 68 deletions(-) diff --git a/cmd/argo/commands/logs.go b/cmd/argo/commands/logs.go 
index e17be636efdb..a41ba6830b9c 100644 --- a/cmd/argo/commands/logs.go +++ b/cmd/argo/commands/logs.go @@ -2,8 +2,10 @@ package commands import ( "bufio" + "context" "fmt" "hash/fnv" + "math" "os" "strconv" @@ -11,16 +13,21 @@ import ( "sync" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + pkgwatch "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/watch" + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" - wfinformers "github.com/argoproj/argo/pkg/client/informers/externalversions" + workflowv1 "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1" "github.com/argoproj/pkg/errors" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" ) type logEntry struct { @@ -101,8 +108,8 @@ func (p *logPrinter) PrintWorkflowLogs(workflow string) error { return err } timeByPod := p.printRecentWorkflowLogs(wf) - if p.follow && wf.Status.Phase == v1alpha1.NodeRunning { - p.printLiveWorkflowLogs(wf, timeByPod) + if p.follow { + p.printLiveWorkflowLogs(wf.Name, wfClient, timeByPod) } return nil } @@ -114,7 +121,7 @@ func (p *logPrinter) PrintPodLogs(podName string) error { return err } var logs []logEntry - err = p.getPodLogs("", podName, namespace, p.follow, p.tail, p.sinceSeconds, p.sinceTime, func(entry logEntry) { + err = p.getPodLogs(context.Background(), "", podName, namespace, p.follow, p.tail, p.sinceSeconds, p.sinceTime, func(entry logEntry) { logs = append(logs, entry) }) if err != nil { @@ -144,7 +151,7 @@ func (p *logPrinter) printRecentWorkflowLogs(wf *v1alpha1.Workflow) map[string]* go func() { defer wg.Done() var podLogs []logEntry - err := p.getPodLogs(getDisplayName(node), node.ID, wf.Namespace, false, p.tail, p.sinceSeconds, p.sinceTime, func(entry logEntry) { + err := p.getPodLogs(context.Background(), getDisplayName(node), node.ID, wf.Namespace, false, p.tail, p.sinceSeconds, p.sinceTime, func(entry logEntry) { podLogs = append(podLogs, entry) }) @@ -178,33 +185,12 @@ func (p *logPrinter) printRecentWorkflowLogs(wf *v1alpha1.Workflow) map[string]* return timeByPod } -func (p *logPrinter) setupWorkflowInformer(namespace string, name string, callback func(wf *v1alpha1.Workflow, done bool)) cache.SharedIndexInformer { - wfcClientset := wfclientset.NewForConfigOrDie(restConfig) - wfInformerFactory := wfinformers.NewFilteredSharedInformerFactory(wfcClientset, 20*time.Minute, namespace, nil) - informer := wfInformerFactory.Argoproj().V1alpha1().Workflows().Informer() - informer.AddEventHandler( - cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(old, new interface{}) { - updatedWf := new.(*v1alpha1.Workflow) - if updatedWf.Name == name { - callback(updatedWf, updatedWf.Status.Phase != v1alpha1.NodeRunning) - } - }, - DeleteFunc: func(obj interface{}) { - deletedWf := obj.(*v1alpha1.Workflow) - if deletedWf.Name == name { - callback(deletedWf, true) - } - }, - }, - ) - return informer -} - // Prints live logs for workflow pods, starting from time specified in timeByPod name. 
-func (p *logPrinter) printLiveWorkflowLogs(workflow *v1alpha1.Workflow, timeByPod map[string]*time.Time) { +func (p *logPrinter) printLiveWorkflowLogs(workflowName string, wfClient workflowv1.WorkflowInterface, timeByPod map[string]*time.Time) { logs := make(chan logEntry) streamedPods := make(map[string]bool) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() processPods := func(wf *v1alpha1.Workflow) { for id := range wf.Status.Nodes { @@ -218,7 +204,7 @@ func (p *logPrinter) printLiveWorkflowLogs(workflow *v1alpha1.Workflow, timeByPo sinceTime := metav1.NewTime(podTime.Add(time.Second)) sinceTimePtr = &sinceTime } - err := p.getPodLogs(getDisplayName(node), node.ID, wf.Namespace, true, nil, nil, sinceTimePtr, func(entry logEntry) { + err := p.getPodLogs(ctx, getDisplayName(node), node.ID, wf.Namespace, true, nil, nil, sinceTimePtr, func(entry logEntry) { logs <- entry }) if err != nil { @@ -229,20 +215,31 @@ func (p *logPrinter) printLiveWorkflowLogs(workflow *v1alpha1.Workflow, timeByPo } } - processPods(workflow) - informer := p.setupWorkflowInformer(workflow.Namespace, workflow.Name, func(wf *v1alpha1.Workflow, done bool) { - if done { - close(logs) - } else { - processPods(wf) - } - }) - - stopChannel := make(chan struct{}) go func() { - informer.Run(stopChannel) + defer close(logs) + fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", workflowName)) + listOpts := metav1.ListOptions{FieldSelector: fieldSelector.String()} + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return wfClient.List(listOpts) + }, + WatchFunc: func(options metav1.ListOptions) (pkgwatch.Interface, error) { + return wfClient.Watch(listOpts) + }, + } + _, err := watch.UntilWithSync(ctx, lw, &v1alpha1.Workflow{}, nil, func(event pkgwatch.Event) (b bool, e error) { + if wf, ok := event.Object.(*v1alpha1.Workflow); ok { + if !wf.Status.Completed() { + processPods(wf) + } + return wf.Status.Completed(), nil + } + return true, nil + }) + if err != nil { + log.Fatal(err) + } }() - defer close(stopChannel) for entry := range logs { p.printLogEntry(entry) @@ -273,35 +270,56 @@ func (p *logPrinter) printLogEntry(entry logEntry) { fmt.Println(line) } -func (p *logPrinter) ensureContainerStarted(podName string, podNamespace string, container string, retryCnt int, retryTimeout time.Duration) error { - for retryCnt > 0 { - pod, err := p.kubeClient.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) +func (p *logPrinter) hasContainerStarted(podName string, podNamespace string, container string) (bool, error) { + pod, err := p.kubeClient.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + var containerStatus *v1.ContainerStatus + for _, status := range pod.Status.ContainerStatuses { + if status.Name == container { + containerStatus = &status + break + } + } + if containerStatus == nil { + return false, nil + } + + if containerStatus.State.Waiting != nil { + return false, nil + } + return true, nil +} + +func (p *logPrinter) getPodLogs( + ctx context.Context, + displayName string, + podName string, + podNamespace string, + follow bool, + tail *int64, + sinceSeconds *int64, + sinceTime *metav1.Time, + callback func(entry logEntry)) error { + + for ctx.Err() == nil { + hasStarted, err := p.hasContainerStarted(podName, podNamespace, p.container) + if err != nil { return err } - var containerStatus *v1.ContainerStatus - for _, status := range pod.Status.ContainerStatuses { 
- if status.Name == container { - containerStatus = &status - break + if !hasStarted { + if follow { + time.Sleep(1 * time.Second) + } else { + return nil } - } - if containerStatus == nil || containerStatus.State.Waiting != nil { - time.Sleep(retryTimeout) - retryCnt-- } else { - return nil + break } } - return fmt.Errorf("container '%s' of pod '%s' has not started within expected timeout", container, podName) -} -func (p *logPrinter) getPodLogs( - displayName string, podName string, podNamespace string, follow bool, tail *int64, sinceSeconds *int64, sinceTime *metav1.Time, callback func(entry logEntry)) error { - err := p.ensureContainerStarted(podName, podNamespace, p.container, 10, time.Second) - if err != nil { - return err - } stream, err := p.kubeClient.CoreV1().Pods(podNamespace).GetLogs(podName, &v1.PodLogOptions{ Container: p.container, Follow: follow, diff --git a/workflow/executor/k8sapi/client.go b/workflow/executor/k8sapi/client.go index 5a949595ac2f..025e69a86686 100644 --- a/workflow/executor/k8sapi/client.go +++ b/workflow/executor/k8sapi/client.go @@ -9,6 +9,8 @@ import ( "syscall" "time" + "github.com/argoproj/argo/util" + "github.com/argoproj/argo/errors" "github.com/argoproj/argo/workflow/common" execcommon "github.com/argoproj/argo/workflow/executor/common" @@ -100,7 +102,7 @@ func (c *k8sAPIClient) saveLogs(containerID, path string) error { if err != nil { return errors.InternalWrapError(err) } - defer outFile.Close() + defer util.Close(outFile) _, err = io.Copy(outFile, reader) if err != nil { return errors.InternalWrapError(err) From e078032e469effdfc492c8eea97eb2701ceda0c2 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Fri, 28 Dec 2018 15:31:55 -0800 Subject: [PATCH 037/145] Issue #1132 - Fix panic in ttl controller (#1143) --- workflow/ttlcontroller/ttlcontroller.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/workflow/ttlcontroller/ttlcontroller.go b/workflow/ttlcontroller/ttlcontroller.go index 91d2b992bd52..d5862127a321 100644 --- a/workflow/ttlcontroller/ttlcontroller.go +++ b/workflow/ttlcontroller/ttlcontroller.go @@ -130,7 +130,12 @@ func (c *Controller) processNextWorkItem() bool { // enqueueWF conditionally queues a workflow to the ttl queue if it is within the deletion period func (c *Controller) enqueueWF(obj interface{}) { - wf, err := util.FromUnstructured(obj.(*unstructured.Unstructured)) + un, ok := obj.(*unstructured.Unstructured) + if !ok { + log.Warnf("'%v' is not an unstructured", obj) + return + } + wf, err := util.FromUnstructured(un) if err != nil { log.Warnf("Failed to unmarshal workflow %v object: %v", obj, err) return From 174eb20a6a110c9bf647b040460df83b6ab031c4 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Fri, 28 Dec 2018 15:32:15 -0800 Subject: [PATCH 038/145] Issue #1040 - Kill daemoned step if workflow consist of single daemoned step (#1144) --- workflow/controller/dag.go | 2 +- workflow/controller/exec_control.go | 6 +++--- workflow/controller/operator.go | 7 ++++++- workflow/controller/steps.go | 2 +- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index dcd9c263209a..438dc46c2be0 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -126,7 +126,7 @@ func (woc *wfOperationCtx) executeDAG(nodeName string, tmpl *wfv1.Template, boun } defer func() { if node != nil && woc.wf.Status.Nodes[node.ID].Completed() { - _ = woc.killDeamonedChildren(node.ID) + _ = woc.killDaemonedChildren(node.ID) 
} }() diff --git a/workflow/controller/exec_control.go b/workflow/controller/exec_control.go index 3f332d36f485..bc438c713e06 100644 --- a/workflow/controller/exec_control.go +++ b/workflow/controller/exec_control.go @@ -64,9 +64,9 @@ func (woc *wfOperationCtx) applyExecutionControl(pod *apiv1.Pod) error { return woc.updateExecutionControl(pod.Name, desiredExecCtl) } -// killDeamonedChildren kill any daemoned pods of a steps or DAG template node. -func (woc *wfOperationCtx) killDeamonedChildren(nodeID string) error { - woc.log.Infof("Checking deamoned children of %s", nodeID) +// killDaemonedChildren kill any daemoned pods of a steps or DAG template node. +func (woc *wfOperationCtx) killDaemonedChildren(nodeID string) error { + woc.log.Infof("Checking daemoned children of %s", nodeID) var firstErr error execCtl := common.ExecutionControl{ Deadline: &time.Time{}, diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index ed03abc2f000..792f63a9aef9 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -103,7 +103,12 @@ func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOper // TODO: an error returned by this method should result in requeuing the workflow to be retried at a // later time func (woc *wfOperationCtx) operate() { - defer woc.persistUpdates() + defer func() { + if woc.wf.Status.Completed() { + _ = woc.killDaemonedChildren("") + } + woc.persistUpdates() + }() defer func() { if r := recover(); r != nil { if rerr, ok := r.(error); ok { diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index 571206cfd84b..54ee0fdf1643 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -28,7 +28,7 @@ func (woc *wfOperationCtx) executeSteps(nodeName string, tmpl *wfv1.Template, bo } defer func() { if woc.wf.Status.Nodes[node.ID].Completed() { - _ = woc.killDeamonedChildren(node.ID) + _ = woc.killDaemonedChildren(node.ID) } }() stepsCtx := stepsContext{ From e400b65c5eca2de2aa891f8489dcd835ef0e161c Mon Sep 17 00:00:00 2001 From: Tang Lee Date: Thu, 3 Jan 2019 09:55:27 +0800 Subject: [PATCH 039/145] Fix global artifact overwriting in nested workflow (#1086) --- workflow/controller/operator.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 792f63a9aef9..57fc5ee475d5 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -1281,7 +1281,7 @@ func (woc *wfOperationCtx) addOutputsToScope(prefix string, outputs *wfv1.Output if scope != nil { scope.addArtifactToScope(key, art) } - woc.addArtifactToGlobalScope(art) + woc.addArtifactToGlobalScope(art, scope) } } @@ -1388,7 +1388,7 @@ func (woc *wfOperationCtx) addParamToGlobalScope(param wfv1.Parameter) { // addArtifactToGlobalScope exports any desired node outputs to the global scope // Optionally adds to a local scope if supplied -func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact) { +func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact, scope *wfScope) { if art.GlobalName == "" { return } @@ -1402,6 +1402,9 @@ func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact) { art.Path = "" if !reflect.DeepEqual(woc.wf.Status.Outputs.Artifacts[i], art) { woc.wf.Status.Outputs.Artifacts[i] = art + if scope != nil { + scope.addArtifactToScope(globalArtName, art) + } woc.log.Infof("overwriting %s: %v", globalArtName, art) woc.updated = true } @@ -1417,6 +1420,9 
@@ func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact) { art.Path = "" woc.log.Infof("setting %s: %v", globalArtName, art) woc.wf.Status.Outputs.Artifacts = append(woc.wf.Status.Outputs.Artifacts, art) + if scope != nil { + scope.addArtifactToScope(globalArtName, art) + } woc.updated = true } From cb538489a187134577e2146afcf9367f45088ff7 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Wed, 2 Jan 2019 18:10:42 -0800 Subject: [PATCH 040/145] Fix issue where steps with exhausted retries would not complete (#1148) --- test/e2e/expectedfailures/failed-retries.yaml | 30 ++++ workflow/controller/steps.go | 3 - workflow/controller/steps_test.go | 18 +++ .../testdata/steps-failed-retries.yaml | 153 ++++++++++++++++++ 4 files changed, 201 insertions(+), 3 deletions(-) create mode 100644 test/e2e/expectedfailures/failed-retries.yaml create mode 100644 workflow/controller/steps_test.go create mode 100644 workflow/controller/testdata/steps-failed-retries.yaml diff --git a/test/e2e/expectedfailures/failed-retries.yaml b/test/e2e/expectedfailures/failed-retries.yaml new file mode 100644 index 000000000000..2930d2677f88 --- /dev/null +++ b/test/e2e/expectedfailures/failed-retries.yaml @@ -0,0 +1,30 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: failed-retries- +spec: + entrypoint: failed-retries + + templates: + - name: failed-retries + steps: + - - name: fail + template: fail + - name: delayed-fail + template: delayed-fail + + - name: fail + retryStrategy: + limit: 1 + container: + image: alpine:latest + command: [sh, -c] + args: ["exit 1"] + + - name: delayed-fail + retryStrategy: + limit: 1 + container: + image: alpine:latest + command: [sh, -c] + args: ["sleep 1; exit 1"] diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index 54ee0fdf1643..a4b5c8adff63 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -203,9 +203,6 @@ func (woc *wfOperationCtx) executeStepGroup(stepGroup []wfv1.WorkflowStep, sgNod } if childNode != nil { woc.addChildNode(sgNodeName, childNodeName) - if childNode.Completed() && !childNode.Successful() { - break - } } } diff --git a/workflow/controller/steps_test.go b/workflow/controller/steps_test.go new file mode 100644 index 000000000000..65d2f93cb128 --- /dev/null +++ b/workflow/controller/steps_test.go @@ -0,0 +1,18 @@ +package controller + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/test" +) + +// TestStepsFailedRetries ensures a steps template will recognize exhausted retries +func TestStepsFailedRetries(t *testing.T) { + wf := test.LoadTestWorkflow("testdata/steps-failed-retries.yaml") + woc := newWoc(*wf) + woc.operate() + assert.Equal(t, string(wfv1.NodeFailed), string(woc.wf.Status.Phase)) +} diff --git a/workflow/controller/testdata/steps-failed-retries.yaml b/workflow/controller/testdata/steps-failed-retries.yaml new file mode 100644 index 000000000000..bd249586e311 --- /dev/null +++ b/workflow/controller/testdata/steps-failed-retries.yaml @@ -0,0 +1,153 @@ +metadata: + creationTimestamp: "2018-12-28T19:21:20Z" + generateName: failed-retries- + generation: 1 + labels: + workflows.argoproj.io/phase: Running + name: failed-retries-tjjsc + namespace: default + resourceVersion: "85216" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/workflows/failed-retries-tjjsc + uid: c18bba2a-0ad5-11e9-b44e-ea782c392741 +spec: + arguments: {} + entrypoint:
failed-retries + templates: + - inputs: {} + metadata: {} + name: failed-retries + outputs: {} + steps: + - - arguments: {} + name: fail + template: fail + - arguments: {} + name: delayed-fail + template: delayed-fail + - container: + args: + - exit 1 + command: + - sh + - -c + image: alpine:latest + name: "" + resources: {} + inputs: {} + metadata: {} + name: fail + outputs: {} + retryStrategy: + limit: 1 + - container: + args: + - sleep 1; exit 1 + command: + - sh + - -c + image: alpine:latest + name: "" + resources: {} + inputs: {} + metadata: {} + name: delayed-fail + outputs: {} + retryStrategy: + limit: 1 +status: + finishedAt: null + nodes: + failed-retries-tjjsc: + children: + - failed-retries-tjjsc-2095973878 + displayName: failed-retries-tjjsc + finishedAt: null + id: failed-retries-tjjsc + name: failed-retries-tjjsc + phase: Running + startedAt: "2019-01-03T01:23:18Z" + templateName: failed-retries + type: Steps + failed-retries-tjjsc-20069324: + boundaryID: failed-retries-tjjsc + children: + - failed-retries-tjjsc-1229492679 + - failed-retries-tjjsc-759866442 + displayName: fail + finishedAt: "2019-01-03T01:23:32Z" + id: failed-retries-tjjsc-20069324 + message: No more retries left + name: failed-retries-tjjsc[0].fail + phase: Failed + startedAt: "2019-01-03T01:23:18Z" + type: Retry + failed-retries-tjjsc-759866442: + boundaryID: failed-retries-tjjsc + displayName: fail(1) + finishedAt: "2018-12-28T19:21:32Z" + id: failed-retries-tjjsc-759866442 + message: failed with exit code 1 + name: failed-retries-tjjsc[0].fail(1) + phase: Failed + startedAt: "2019-01-03T01:23:27Z" + templateName: fail + type: Pod + failed-retries-tjjsc-1229492679: + boundaryID: failed-retries-tjjsc + displayName: fail(0) + finishedAt: "2018-12-28T19:21:26Z" + id: failed-retries-tjjsc-1229492679 + message: failed with exit code 1 + name: failed-retries-tjjsc[0].fail(0) + phase: Failed + startedAt: "2019-01-03T01:23:18Z" + templateName: fail + type: Pod + failed-retries-tjjsc-1375221696: + boundaryID: failed-retries-tjjsc + displayName: delayed-fail(0) + finishedAt: "2018-12-28T19:21:27Z" + id: failed-retries-tjjsc-1375221696 + message: failed with exit code 1 + name: failed-retries-tjjsc[0].delayed-fail(0) + phase: Failed + startedAt: "2019-01-03T01:23:18Z" + templateName: delayed-fail + type: Pod + failed-retries-tjjsc-1574533273: + boundaryID: failed-retries-tjjsc + children: + - failed-retries-tjjsc-1375221696 + - failed-retries-tjjsc-2113289837 + displayName: delayed-fail + finishedAt: null + id: failed-retries-tjjsc-1574533273 + name: failed-retries-tjjsc[0].delayed-fail + phase: Running + startedAt: "2019-01-03T01:23:18Z" + type: Retry + failed-retries-tjjsc-2095973878: + boundaryID: failed-retries-tjjsc + children: + - failed-retries-tjjsc-20069324 + - failed-retries-tjjsc-1574533273 + displayName: '[0]' + finishedAt: null + id: failed-retries-tjjsc-2095973878 + name: failed-retries-tjjsc[0] + phase: Running + startedAt: "2019-01-03T01:23:18Z" + type: StepGroup + failed-retries-tjjsc-2113289837: + boundaryID: failed-retries-tjjsc + displayName: delayed-fail(1) + finishedAt: "2018-12-28T19:21:33Z" + id: failed-retries-tjjsc-2113289837 + message: failed with exit code 1 + name: failed-retries-tjjsc[0].delayed-fail(1) + phase: Failed + startedAt: "2019-01-03T01:23:28Z" + templateName: delayed-fail + type: Pod + phase: Running + startedAt: "2019-01-03T01:23:18Z" From f6ce78334762cbc3c6de1604c11ea4f5f618c275 Mon Sep 17 00:00:00 2001 From: Chen Zhiwei Date: Thu, 3 Jan 2019 10:20:21 +0800 Subject: [PATCH 
041/145] add support for other archs (#1137) --- Dockerfile-argoexec | 4 ++-- Dockerfile-builder | 12 ++++++------ Dockerfile-ci-builder | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Dockerfile-argoexec b/Dockerfile-argoexec index e02426635e7b..159290c6f759 100644 --- a/Dockerfile-argoexec +++ b/Dockerfile-argoexec @@ -3,12 +3,12 @@ FROM debian:9.5-slim RUN apt-get update && \ apt-get install -y curl jq procps git tar mime-support && \ rm -rf /var/lib/apt/lists/* && \ - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ + curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(uname -m|sed 's/x86_64/amd64/g')/kubectl && \ chmod +x ./kubectl && \ mv ./kubectl /bin/ ENV DOCKER_VERSION=18.06.0 -RUN curl -O https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}-ce.tgz && \ +RUN curl -O https://download.docker.com/linux/static/stable/$(uname -m)/docker-${DOCKER_VERSION}-ce.tgz && \ tar -xzf docker-${DOCKER_VERSION}-ce.tgz && \ mv docker/docker /usr/local/bin/docker && \ rm -rf ./docker diff --git a/Dockerfile-builder b/Dockerfile-builder index 8cb721fcd932..5eb5caacd77d 100644 --- a/Dockerfile-builder +++ b/Dockerfile-builder @@ -10,16 +10,16 @@ RUN apt-get update && apt-get install -y \ # Install go ENV GO_VERSION 1.10.3 -ENV GO_ARCH amd64 ENV GOPATH /root/go ENV PATH ${GOPATH}/bin:/usr/local/go/bin:${PATH} -RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ - tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ - rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ - wget https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -O /usr/local/bin/dep && \ +RUN ARCH=$(uname -m|sed 's/x86_64/amd64/g') && \ + wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${ARCH}.tar.gz && \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${ARCH}.tar.gz && \ + rm /go${GO_VERSION}.linux-${ARCH}.tar.gz && \ + wget https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-${ARCH} -O /usr/local/bin/dep && \ chmod +x /usr/local/bin/dep && \ mkdir -p ${GOPATH}/bin && \ - curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v2.0.5/gometalinter-2.0.5-linux-amd64.tar.gz | \ + curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v2.0.5/gometalinter-2.0.5-linux-${ARCH}.tar.gz | \ tar -xzC "$GOPATH/bin" --exclude COPYING --exclude README.md --strip-components 1 -f- # A dummy directory is created under $GOPATH/src/dummy so we are able to use dep diff --git a/Dockerfile-ci-builder b/Dockerfile-ci-builder index 943176928518..37a3c8a712cd 100644 --- a/Dockerfile-ci-builder +++ b/Dockerfile-ci-builder @@ -2,11 +2,11 @@ FROM golang:1.10.3 WORKDIR /tmp -RUN curl -O https://download.docker.com/linux/static/stable/x86_64/docker-18.06.0-ce.tgz && \ +RUN curl -O https://download.docker.com/linux/static/stable/$(uname -m)/docker-18.06.0-ce.tgz && \ tar -xzf docker-18.06.0-ce.tgz && \ mv docker/docker /usr/local/bin/docker && \ rm -rf ./docker && \ - wget https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -O /usr/local/bin/dep && \ + wget https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-$(uname -m|sed 's/x86_64/amd64/g') -O /usr/local/bin/dep && \ chmod +x /usr/local/bin/dep && \ - curl -sLo- 
https://github.com/alecthomas/gometalinter/releases/download/v2.0.5/gometalinter-2.0.5-linux-amd64.tar.gz | \ + curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v2.0.5/gometalinter-2.0.5-linux-$(uname -m|sed 's/x86_64/amd64/g').tar.gz | \ tar -xzC "$GOPATH/bin" --exclude COPYING --exclude README.md --strip-components 1 -f- From c7fec9d41c0e2d3369e111f8b1d0f1d0ca77edae Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Wed, 2 Jan 2019 18:21:28 -0800 Subject: [PATCH 042/145] Reflect minio chart changes in documentation (#1147) --- ARTIFACT_REPO.md | 4 ++-- demo.md | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ARTIFACT_REPO.md b/ARTIFACT_REPO.md index df23309ba568..c126fa5ace86 100644 --- a/ARTIFACT_REPO.md +++ b/ARTIFACT_REPO.md @@ -14,12 +14,12 @@ $ helm install stable/minio --name argo-artifacts --set service.type=LoadBalance Login to the Minio UI using a web browser (port 9000) after obtaining the external IP using `kubectl`. ``` -$ kubectl get service argo-artifacts-minio +$ kubectl get service argo-artifacts ``` On Minikube: ``` -$ minikube service --url argo-artifacts-minio +$ minikube service --url argo-artifacts ``` NOTE: When minio is installed via Helm, it uses the following hard-wired default credentials, diff --git a/demo.md b/demo.md index 7684293561a9..63555f63f7ae 100644 --- a/demo.md +++ b/demo.md @@ -82,11 +82,11 @@ helm install stable/minio --name argo-artifacts --set service.type=LoadBalancer Login to the Minio UI using a web browser (port 9000) after obtaining the external IP using `kubectl`. ``` -kubectl get service argo-artifacts-minio -o wide +kubectl get service argo-artifacts -o wide ``` On Minikube: ``` -minikube service --url argo-artifacts-minio +minikube service --url argo-artifacts ``` NOTE: When minio is installed via Helm, it uses the following hard-wired default credentials, @@ -98,8 +98,8 @@ Create a bucket named `my-bucket` from the Minio UI. ## 6. Reconfigure the workflow controller to use the Minio artifact repository -Edit the workflow-controller config map to reference the service name (argo-artifacts-minio) and -secret (argo-artifacts-minio) created by the helm install: +Edit the workflow-controller config map to reference the service name (argo-artifacts) and +secret (argo-artifacts) created by the helm install: ``` kubectl edit cm -n argo workflow-controller-configmap ... data: config: | artifactRepository: s3: bucket: my-bucket - endpoint: argo-artifacts-minio.default:9000 + endpoint: argo-artifacts.default:9000 insecure: true # accessKeySecret and secretKeySecret are secret selectors. - # It references the k8s secret named 'argo-artifacts-minio' + # It references the k8s secret named 'argo-artifacts' # which was created during the minio helm install. The keys, # 'accesskey' and 'secretkey', inside that secret are where the # actual minio credentials are stored.
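# Both selectors below may reference the same secret, as they do here.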
accessKeySecret: - name: argo-artifacts-minio + name: argo-artifacts key: accesskey secretKeySecret: - name: argo-artifacts-minio + name: argo-artifacts key: secretkey ``` From 3561bff70ad6bfeca8967be6aa4ac24fbbc8ac27 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Thu, 3 Jan 2019 15:08:24 -0800 Subject: [PATCH 043/145] Issue #1136 - Fix metadata for DAG with loops (#1149) * Issue #1136 - Fix metadata for DAG with loops --- test/e2e/ui/ui-dag-with-params.yaml | 57 ++++++++++++++++++++++------- workflow/controller/dag.go | 42 ++++++++++++--------- workflow/controller/operator.go | 11 +++++- 3 files changed, 78 insertions(+), 32 deletions(-) diff --git a/test/e2e/ui/ui-dag-with-params.yaml b/test/e2e/ui/ui-dag-with-params.yaml index a954c0a8bb94..9756cda593e3 100644 --- a/test/e2e/ui/ui-dag-with-params.yaml +++ b/test/e2e/ui/ui-dag-with-params.yaml @@ -3,24 +3,53 @@ kind: Workflow metadata: generateName: ui-dag-with-params- spec: - entrypoint: diamond + entrypoint: pipeline + templates: - - name: diamond - dag: - tasks: - - name: A - template: nested-diamond - arguments: - parameters: [{name: message, value: A}] - - name: nested-diamond + - name: echo inputs: parameters: - name: message + container: + image: alpine:latest + command: [echo, "{{inputs.parameters.message}}"] + + - name: subpipeline-a dag: tasks: - - name: A + - name: A1 template: echo - - name: echo - container: - image: alpine:3.7 - command: [echo, "hello"] + arguments: + parameters: [{name: message, value: "Hello World!"}] + - name: A2 + template: echo + arguments: + parameters: [{name: message, value: "Hello World!"}] + + - name: subpipeline-b + dag: + tasks: + - name: B1 + template: echo + arguments: + parameters: [{name: message, value: "Hello World!"}] + - name: B2 + template: echo + dependencies: [B1] + arguments: + parameters: [{name: message, value: "Hello World!"}] + withItems: + - 0 + - 1 + + - name: pipeline + dag: + tasks: + - name: A + template: subpipeline-a + withItems: + - 0 + - 1 + - name: B + dependencies: [A] + template: subpipeline-b diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index 438dc46c2be0..1f5601830751 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -251,12 +251,21 @@ func (woc *wfOperationCtx) executeDAGTask(dagCtx *dagContext, taskName string) { // All our dependencies were satisfied and successful. It's our turn to run + taskGroupNode := woc.getNodeByName(nodeName) + if taskGroupNode != nil && taskGroupNode.Type != wfv1.NodeTypeTaskGroup { + taskGroupNode = nil + } // connectDependencies is a helper to connect our dependencies to current task as children connectDependencies := func(taskNodeName string) { - if len(task.Dependencies) == 0 { + if len(task.Dependencies) == 0 || taskGroupNode != nil { // if we had no dependencies, then we are a root task, and we should connect the // boundary node as our parent - woc.addChildNode(dagCtx.boundaryName, taskNodeName) + if taskGroupNode == nil { + woc.addChildNode(dagCtx.boundaryName, taskNodeName) + } else { + woc.addChildNode(taskGroupNode.Name, taskNodeName) + } + } else { // Otherwise, add all outbound nodes of our dependencies as parents to this node for _, depName := range task.Dependencies { @@ -287,6 +296,16 @@ func (woc *wfOperationCtx) executeDAGTask(dagCtx *dagContext, taskName string) { return } + // If DAG task has withItems, withParam or withSequence then we need to create virtual node of type TaskGroup.
+ // For example, if we had task A with withItems of ['foo', 'bar'] which expanded to ['A(0:foo)', 'A(1:bar)'], we still + // need to create a node for A. + if len(task.WithItems) > 0 || task.WithParam != "" || task.WithSequence != nil { + if taskGroupNode == nil { + connectDependencies(nodeName) + taskGroupNode = woc.initializeNode(nodeName, wfv1.NodeTypeTaskGroup, task.Template, dagCtx.boundaryID, wfv1.NodeRunning, "") + } + } + for _, t := range expandedTasks { node = dagCtx.getTaskNode(t.Name) taskNodeName := dagCtx.taskNodeName(t.Name) @@ -311,12 +330,8 @@ func (woc *wfOperationCtx) executeDAGTask(dagCtx *dagContext, taskName string) { _, _ = woc.executeTemplate(t.Template, t.Arguments, taskNodeName, dagCtx.boundaryID) } - // If we expanded the task, we still need to create the task entry for the non-expanded node, - // since dependant tasks will look to it, when deciding when to execute. For example, if we had - // task A with withItems of ['foo', 'bar'] which expanded to ['A(0:foo)', 'A(1:bar)'], we still - // need to create a node for A, after the withItems have completed. - if len(task.WithItems) > 0 || task.WithParam != "" || task.WithSequence != nil { - nodeStatus := wfv1.NodeSucceeded + if taskGroupNode != nil { + groupPhase := wfv1.NodeSucceeded for _, t := range expandedTasks { // Add the child relationship from our dependency's outbound nodes to this node. node := dagCtx.getTaskNode(t.Name) @@ -324,17 +339,10 @@ func (woc *wfOperationCtx) executeDAGTask(dagCtx *dagContext, taskName string) { return } if !node.Successful() { - nodeStatus = node.Phase - } - } - woc.initializeNode(nodeName, wfv1.NodeTypeTaskGroup, task.Template, dagCtx.boundaryID, nodeStatus, "") - if len(expandedTasks) > 0 { - for _, t := range expandedTasks { - woc.addChildNode(dagCtx.taskNodeName(t.Name), nodeName) + groupPhase = node.Phase } - } else { - connectDependencies(nodeName) } + woc.markNodePhase(taskGroupNode.Name, groupPhase) } } diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 57fc5ee475d5..b6c39e75882b 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -1180,8 +1180,17 @@ func (woc *wfOperationCtx) executeContainer(nodeName string, tmpl *wfv1.Template func (woc *wfOperationCtx) getOutboundNodes(nodeID string) []string { node := woc.wf.Status.Nodes[nodeID] switch node.Type { - case wfv1.NodeTypePod, wfv1.NodeTypeSkipped, wfv1.NodeTypeSuspend, wfv1.NodeTypeTaskGroup: + case wfv1.NodeTypePod, wfv1.NodeTypeSkipped, wfv1.NodeTypeSuspend: return []string{node.ID} + case wfv1.NodeTypeTaskGroup: + if len(node.Children) == 0 { + return []string{node.ID} + } + outboundNodes := make([]string, 0) + for _, child := range node.Children { + outboundNodes = append(outboundNodes, woc.getOutboundNodes(child)...) 
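+			// recursing here flattens nested task groups down to the leaf outbound nodes of each expanded child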
+ } + return outboundNodes case wfv1.NodeTypeRetry: numChildren := len(node.Children) if numChildren > 0 { From 4fd758c38fc232bf26bb5e1d4e7e23321ba91416 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Wed, 9 Jan 2019 13:06:27 -0800 Subject: [PATCH 044/145] Add slack badge to README (#1164) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 153807b01833..1db20285c04f 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) + # Argo - The Workflow Engine for Kubernetes ![Argo Image](argo.png) From 6726d9a961a2c3ed5467430d3631a36cfbf361de Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Wed, 9 Jan 2019 14:55:08 -0800 Subject: [PATCH 045/145] Fix failing TestAddGlobalArtifactToScope unit test --- workflow/controller/operator_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index e320535da3db..42e43e22fa1e 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -765,19 +765,19 @@ func TestAddGlobalArtifactToScope(t *testing.T) { }, } // Make sure if the artifact is not global, don't add to scope - woc.addArtifactToGlobalScope(art) + woc.addArtifactToGlobalScope(art, nil) assert.Nil(t, woc.wf.Status.Outputs) // Now mark it as global. Verify it is added to workflow outputs art.GlobalName = "global-art" - woc.addArtifactToGlobalScope(art) + woc.addArtifactToGlobalScope(art, nil) assert.Equal(t, 1, len(woc.wf.Status.Outputs.Artifacts)) assert.Equal(t, art.GlobalName, woc.wf.Status.Outputs.Artifacts[0].Name) assert.Equal(t, "some/key", woc.wf.Status.Outputs.Artifacts[0].S3.Key) // Change the value and verify update is reflected art.S3.Key = "new/key" - woc.addArtifactToGlobalScope(art) + woc.addArtifactToGlobalScope(art, nil) assert.Equal(t, 1, len(woc.wf.Status.Outputs.Artifacts)) assert.Equal(t, art.GlobalName, woc.wf.Status.Outputs.Artifacts[0].Name) assert.Equal(t, "new/key", woc.wf.Status.Outputs.Artifacts[0].S3.Key) @@ -785,7 +785,7 @@ func TestAddGlobalArtifactToScope(t *testing.T) { // Add a new global artifact art.GlobalName = "global-art2" art.S3.Key = "new/new/key" - woc.addArtifactToGlobalScope(art) + woc.addArtifactToGlobalScope(art, nil) assert.Equal(t, 2, len(woc.wf.Status.Outputs.Artifacts)) assert.Equal(t, art.GlobalName, woc.wf.Status.Outputs.Artifacts[1].Name) assert.Equal(t, "new/new/key", woc.wf.Status.Outputs.Artifacts[1].S3.Key) From 31e5f63cba89b06abc2cdce0d778c6b8d937a23e Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Wed, 9 Jan 2019 14:56:59 -0800 Subject: [PATCH 046/145] Fix tests compilation error (#1157) --- Gopkg.lock | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Gopkg.lock b/Gopkg.lock index eb7e6c92a286..768463856c7e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -984,6 +984,7 @@ "tools/pager", "tools/reference", "tools/remotecommand", + "tools/watch", "transport", "transport/spdy", "util/buffer", @@ -1119,6 +1120,7 @@ "k8s.io/client-go/tools/cache", "k8s.io/client-go/tools/clientcmd", "k8s.io/client-go/tools/remotecommand", + "k8s.io/client-go/tools/watch", "k8s.io/client-go/util/flowcontrol", "k8s.io/client-go/util/workqueue", "k8s.io/code-generator/cmd/client-gen", From 149d176fdf3560d74afa91fe91a0ee38bf7ec3bd Mon Sep 17 00:00:00 2001 From: Konstantin Zadorozhny Date: Wed, 9 Jan 2019 15:52:58 -0800 Subject: [PATCH 
047/145] Replace exponential retry with poll (#1166) --- workflow/executor/resource.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/workflow/executor/resource.go b/workflow/executor/resource.go index 086834509f70..00fc98fcbae6 100644 --- a/workflow/executor/resource.go +++ b/workflow/executor/resource.go @@ -82,10 +82,9 @@ func (we *WorkflowExecutor) WaitResource(resourceName string) error { failReqs, _ = failSelector.Requirements() } - // Start the condition result reader using ExponentialBackoff - // Exponential backoff is for steps of 0, 5, 20, 80, 320 seconds since the first step is without - // delay in the ExponentialBackoff - err := wait.ExponentialBackoff(wait.Backoff{Duration: (time.Second * 5), Factor: 4.0, Steps: 5}, + // Start the condition result reader using PollImmediateInfinite + // Poll interval of 5 seconds serves as a backoff interval in case of immediate result reader failure + err := wait.PollImmediateInfinite(time.Duration(time.Second*5), func() (bool, error) { isErrRetry, err := checkResourceState(resourceName, successReqs, failReqs) From 8db0006667dec74c58cbab744b014c67fda55c65 Mon Sep 17 00:00:00 2001 From: Pengfei Zhao Date: Thu, 10 Jan 2019 07:59:10 +0800 Subject: [PATCH 048/145] add support for hostNetwork & dnsPolicy config (#1161) --- pkg/apis/workflow/v1alpha1/types.go | 11 +++++++++++ workflow/controller/workflowpod.go | 9 +++++++++ 2 files changed, 20 insertions(+) diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index fe2c99073341..3b0bef13553b 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -117,6 +117,17 @@ type WorkflowSpec struct { // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []apiv1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + // Host networking requested for this workflow pod. Defaults to false. + HostNetwork *bool `json:"hostNetwork,omitempty"` + + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + DNSPolicy *apiv1.DNSPolicy `json:"dnsPolicy,omitempty"` + + // OnExit is a template reference which is invoked at the end of the // workflow, irrespective of the success, failure, or error of the // primary workflow.
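For reference, a minimal workflow spec exercising the two new fields might look like the following sketch (the template body and names are illustrative, not part of the patch; the field names and the ClusterFirstWithHostNet value follow the JSON tags and doc comments above):

```
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hostnetwork-example-
spec:
  entrypoint: main
  hostNetwork: true                   # run workflow pods on the host network
  dnsPolicy: ClusterFirstWithHostNet  # per the field comment above, needed for cluster DNS alongside hostNetwork
  templates:
  - name: main
    container:
      image: alpine:latest
      command: [sh, -c]
      args: ["hostname -i"]
```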
diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index cd4d29ddd5d2..9bbc803b167f 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -115,6 +115,15 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont ImagePullSecrets: woc.wf.Spec.ImagePullSecrets, }, } + + if woc.wf.Spec.HostNetwork != nil { + pod.Spec.HostNetwork = *woc.wf.Spec.HostNetwork + } + + if woc.wf.Spec.DNSPolicy != nil { + pod.Spec.DNSPolicy = *woc.wf.Spec.DNSPolicy + } + if woc.controller.Config.InstanceID != "" { pod.ObjectMeta.Labels[common.LabelKeyControllerInstanceID] = woc.controller.Config.InstanceID } From c3cc51be2e14e931d6e212aa30842a2c514082d1 Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Sat, 12 Jan 2019 10:18:30 +0900 Subject: [PATCH 049/145] Support HDFS Artifact (#1159) Support HDFS Artifact (#1159) --- Gopkg.lock | 102 ++++++++ Gopkg.toml | 7 + api/openapi-spec/swagger.json | 132 ++++++++++ examples/hdfs-artifact.yaml | 81 ++++++ .../workflow/v1alpha1/openapi_generated.go | 236 +++++++++++++++++- pkg/apis/workflow/v1alpha1/types.go | 68 ++++- .../v1alpha1/zz_generated.deepcopy.go | 75 ++++++ workflow/artifacts/hdfs/hdfs.go | 217 ++++++++++++++++ workflow/artifacts/hdfs/util.go | 53 ++++ workflow/common/common.go | 6 + workflow/controller/config.go | 13 + workflow/controller/workflowpod.go | 9 +- workflow/executor/executor.go | 98 ++++++-- workflow/validate/validate.go | 7 + 14 files changed, 1080 insertions(+), 24 deletions(-) create mode 100644 examples/hdfs-artifact.yaml create mode 100644 workflow/artifacts/hdfs/hdfs.go create mode 100644 workflow/artifacts/hdfs/util.go diff --git a/Gopkg.lock b/Gopkg.lock index 768463856c7e..08a10cc5dbfd 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -71,6 +71,18 @@ pruneopts = "" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" +[[projects]] + digest = "1:5cf8a8393124ac3d5632a8c51d08d8ff2aa29b6b328306cb8b7560a7e83cf760" + name = "github.com/colinmarc/hdfs" + packages = [ + ".", + "protocol/hadoop_common", + "protocol/hadoop_hdfs", + "rpc", + ] + pruneopts = "" + revision = "48eb8d6c34a97ffc73b406356f0f2e1c569b42a5" + [[projects]] digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" name = "github.com/davecgh/go-spew" @@ -276,6 +288,14 @@ pruneopts = "" revision = "9cad4c3443a7200dd6400aef47183728de563a38" +[[projects]] + digest = "1:d35e07e002ccc51cb01fa77e932ea62206c4d3b2fb0fa1f1b052885942108a96" + name = "github.com/hashicorp/go-uuid" + packages = ["."] + pruneopts = "" + revision = "de160f5c59f693fed329e73e291bb751fe4ea4dc" + version = "v1.0.0" + [[projects]] branch = "master" digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94" @@ -311,6 +331,17 @@ pruneopts = "" revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4" +[[projects]] + branch = "master" + digest = "1:1c030807110db46f33e7abd02c08dd98dc2c1c6620eea6941185025f16ad8bbb" + name = "github.com/jcmturner/gofork" + packages = [ + "encoding/asn1", + "x/crypto/pbkdf2", + ] + pruneopts = "" + revision = "2aebee971930cd0dd525873330952ab7df5ac95c" + [[projects]] digest = "1:31c6f3c4f1e15fcc24fcfc9f5f24603ff3963c56d6fa162116493b4025fb6acc" name = "github.com/json-iterator/go" @@ -587,12 +618,14 @@ "ed25519/internal/edwards25519", "internal/chacha20", "internal/subtle", + "md4", "openpgp", "openpgp/armor", "openpgp/elgamal", "openpgp/errors", "openpgp/packet", "openpgp/s2k", + "pbkdf2", "poly1305", "ssh", "ssh/agent", @@ -714,6 +747,70 @@ revision = 
"d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" +[[projects]] + digest = "1:4777ba481cc12866b89aafb0a67529e7ac48b9aea06a25f3737b2cf5a3ffda12" + name = "gopkg.in/jcmturner/aescts.v1" + packages = ["."] + pruneopts = "" + revision = "f6abebb3171c4c1b1fea279cb7c7325020a26290" + version = "v1.0.1" + +[[projects]] + digest = "1:84c5b1392ef65ad1bb64da4b4d0beb2f204eefc769d6d96082347bb7057cb7b1" + name = "gopkg.in/jcmturner/dnsutils.v1" + packages = ["."] + pruneopts = "" + revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c" + version = "v1.0.1" + +[[projects]] + digest = "1:f727cb776135c090d4043eca9cd921b9967f75704a97309172fde92591b3c828" + name = "gopkg.in/jcmturner/gokrb5.v5" + packages = [ + "asn1tools", + "client", + "config", + "credentials", + "crypto", + "crypto/common", + "crypto/etype", + "crypto/rfc3961", + "crypto/rfc3962", + "crypto/rfc4757", + "crypto/rfc8009", + "gssapi", + "iana", + "iana/addrtype", + "iana/adtype", + "iana/asnAppTag", + "iana/chksumtype", + "iana/errorcode", + "iana/etypeID", + "iana/flags", + "iana/keyusage", + "iana/msgtype", + "iana/nametype", + "iana/patype", + "kadmin", + "keytab", + "krberror", + "messages", + "mstypes", + "pac", + "types", + ] + pruneopts = "" + revision = "32ba44ca5b42f17a4a9f33ff4305e70665a1bc0f" + version = "v5.3.0" + +[[projects]] + digest = "1:269a70a6997455a9130b3005af6d2983323e4b8c712f3288a0df0e6013c18ee1" + name = "gopkg.in/jcmturner/rpc.v0" + packages = ["ndr"] + pruneopts = "" + revision = "4480c480c9cd343b54b0acb5b62261cbd33d7adf" + version = "v0.0.2" + [[projects]] digest = "1:6715e0bec216255ab784fe04aa4d5a0a626ae07a3a209080182e469bc142761a" name = "gopkg.in/src-d/go-billy.v4" @@ -1069,6 +1166,7 @@ "github.com/argoproj/pkg/stats", "github.com/argoproj/pkg/strftime", "github.com/argoproj/pkg/time", + "github.com/colinmarc/hdfs", "github.com/evanphx/json-patch", "github.com/fsnotify/fsnotify", "github.com/ghodss/yaml", @@ -1085,6 +1183,10 @@ "github.com/tidwall/gjson", "github.com/valyala/fasttemplate", "golang.org/x/crypto/ssh", + "gopkg.in/jcmturner/gokrb5.v5/client", + "gopkg.in/jcmturner/gokrb5.v5/config", + "gopkg.in/jcmturner/gokrb5.v5/credentials", + "gopkg.in/jcmturner/gokrb5.v5/keytab", "gopkg.in/src-d/go-git.v4", "gopkg.in/src-d/go-git.v4/plumbing/transport", "gopkg.in/src-d/go-git.v4/plumbing/transport/http", diff --git a/Gopkg.toml b/Gopkg.toml index facaf7e03133..888ef9a90b8e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -55,3 +55,10 @@ required = [ name = "github.com/Azure/go-autorest" revision = "1ff28809256a84bb6966640ff3d0371af82ccba4" +[[constraint]] + name = "github.com/colinmarc/hdfs" + revision = "48eb8d6c34a97ffc73b406356f0f2e1c569b42a5" + +[[constraint]] + name = "gopkg.in/jcmturner/gokrb5.v5" + version = "5.3.0" diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 42fddd83d69b..221876f56de6 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -66,6 +66,10 @@ "description": "GlobalName exports an output artifact to the global scope, making it available as '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts", "type": "string" }, + "hdfs": { + "description": "HDFS contains HDFS artifact location details", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.HDFSArtifact" + }, "http": { "description": "HTTP contains HTTP artifact location details", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.HTTPArtifact" @@ -108,6 +112,10 @@ "description": "Git contains git artifact location details", "$ref": 
"#/definitions/io.argoproj.workflow.v1alpha1.GitArtifact" }, + "hdfs": { + "description": "HDFS contains HDFS artifact location details", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.HDFSArtifact" + }, "http": { "description": "HTTP contains HTTP artifact location details", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.HTTPArtifact" @@ -249,6 +257,130 @@ } } }, + "io.argoproj.workflow.v1alpha1.HDFSArtifact": { + "description": "HDFSArtifact is the location of an HDFS artifact", + "required": [ + "addresses", + "path" + ], + "properties": { + "addresses": { + "description": "Addresses is accessible addresses of HDFS name nodes", + "type": "array", + "items": { + "type": "string" + } + }, + "force": { + "description": "Force copies a file forcibly even if it exists (default: false)", + "type": "boolean" + }, + "hdfsUser": { + "description": "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", + "type": "string" + }, + "krbCCacheSecret": { + "description": "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "krbConfigConfigMap": { + "description": "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector" + }, + "krbKeytabSecret": { + "description": "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "krbRealm": { + "description": "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + "type": "string" + }, + "krbServicePrincipalName": { + "description": "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + "type": "string" + }, + "krbUsername": { + "description": "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + "type": "string" + }, + "path": { + "description": "Path is a file path in HDFS", + "type": "string" + } + } + }, + "io.argoproj.workflow.v1alpha1.HDFSConfig": { + "description": "HDFSConfig is configurations for HDFS", + "required": [ + "addresses" + ], + "properties": { + "addresses": { + "description": "Addresses is accessible addresses of HDFS name nodes", + "type": "array", + "items": { + "type": "string" + } + }, + "hdfsUser": { + "description": "HDFSUser is the user to access HDFS file system. 
It is ignored if either ccache or keytab is used.", + "type": "string" + }, + "krbCCacheSecret": { + "description": "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "krbConfigConfigMap": { + "description": "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector" + }, + "krbKeytabSecret": { + "description": "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "krbRealm": { + "description": "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + "type": "string" + }, + "krbServicePrincipalName": { + "description": "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + "type": "string" + }, + "krbUsername": { + "description": "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + "type": "string" + } + } + }, + "io.argoproj.workflow.v1alpha1.HDFSKrbConfig": { + "description": "HDFSKrbConfig is auth configurations for Kerberos", + "properties": { + "krbCCacheSecret": { + "description": "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "krbConfigConfigMap": { + "description": "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector" + }, + "krbKeytabSecret": { + "description": "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "krbRealm": { + "description": "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + "type": "string" + }, + "krbServicePrincipalName": { + "description": "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + "type": "string" + }, + "krbUsername": { + "description": "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + "type": "string" + } + } + }, "io.argoproj.workflow.v1alpha1.HTTPArtifact": { "description": "HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container", "required": [ diff --git a/examples/hdfs-artifact.yaml b/examples/hdfs-artifact.yaml new file mode 100644 index 000000000000..0031b756387f --- /dev/null +++ b/examples/hdfs-artifact.yaml @@ -0,0 +1,81 @@ +# This example demonstrates the use of hdfs as the store for artifacts. This example assumes the following: +# 1. you have hdfs running in the same namespace as where this workflow will be run and you have created a repo with the name "generic-local" +# 2. you have created a kubernetes secret for storing hdfs username/password. 
To create kubernetes secret required for this example, +# run the following command: +# $ kubectl create secret generic my-hdfs-credentials --from-literal=username= --from-literal=password= + +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: hdfs-artifact- +spec: + entrypoint: artifact-example + templates: + - name: artifact-example + steps: + - - name: generate-artifact + template: whalesay + - - name: consume-artifact + template: print-message + arguments: + artifacts: + - name: message + from: "{{steps.generate-artifact.outputs.artifacts.hello-art}}" + + - name: whalesay + container: + image: docker/whalesay:latest + command: [sh, -c] + args: ["cowsay hello world | tee /tmp/hello_world.txt"] + outputs: + artifacts: + - name: hello-art + path: /tmp/hello_world.txt + hdfs: + addresses: + - my-hdfs-namenode-0.my-hdfs-namenode.default.svc.cluster.local:8020 + - my-hdfs-namenode-1.my-hdfs-namenode.default.svc.cluster.local:8020 + path: "/tmp/argo/foo" + hdfsUser: root + force: true + # krbCCacheSecret: + # name: krb + # key: krb5cc_0 + # krbKeytabSecret: + # name: krb + # key: user1.keytab + # krbUsername: "user1" + # krbRealm: "MYCOMPANY.COM" + # krbConfigConfigMap: + # name: my-hdfs-krb5-config + # key: krb5.conf + # krbServicePrincipalName: hdfs/_HOST + + - name: print-message + inputs: + artifacts: + - name: message + path: /tmp/message + hdfs: + addresses: + - my-hdfs-namenode-0.my-hdfs-namenode.default.svc.cluster.local:8020 + - my-hdfs-namenode-1.my-hdfs-namenode.default.svc.cluster.local:8020 + path: "/tmp/argo/foo" + hdfsUser: root + force: true + # krbCCacheSecret: + # name: krb + # key: krb5cc_0 + # krbKeytabSecret: + # name: krb + # key: user1.keytab + # krbUsername: "user1" + # krbRealm: "MYCOMPANY.COM" + # krbConfigConfigMap: + # name: my-hdfs-krb5-config + # key: krb5.conf + # krbServicePrincipalName: hdfs/_HOST + container: + image: alpine:latest + command: [sh, -c] + args: ["cat /tmp/message"] diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index d7401fb91e99..81f606863664 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -22,6 +22,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTask": schema_pkg_apis_workflow_v1alpha1_DAGTask(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate": schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact": schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSArtifact": schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSConfig": schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSKrbConfig": schema_pkg_apis_workflow_v1alpha1_HDFSKrbConfig(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact": schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs": schema_pkg_apis_workflow_v1alpha1_Inputs(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Item": schema_pkg_apis_workflow_v1alpha1_Item(ref), @@ -177,6 +180,12 @@ func schema_pkg_apis_workflow_v1alpha1_Artifact(ref common.ReferenceCallback) co Ref: 
ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), }, }, + "hdfs": { + SchemaProps: spec.SchemaProps{ + Description: "HDFS contains HDFS artifact location details", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSArtifact"), + }, + }, "raw": { SchemaProps: spec.SchemaProps{ Description: "Raw contains raw artifact location details", @@ -201,7 +210,7 @@ func schema_pkg_apis_workflow_v1alpha1_Artifact(ref common.ReferenceCallback) co }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Artifact"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Artifact"}, } } @@ -242,6 +251,12 @@ func schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref common.ReferenceCall Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), }, }, + "hdfs": { + SchemaProps: spec.SchemaProps{ + Description: "HDFS contains HDFS artifact location details", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSArtifact"), + }, + }, "raw": { SchemaProps: spec.SchemaProps{ Description: "Raw contains raw artifact location details", @@ -252,7 +267,7 @@ func schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref common.ReferenceCall }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Artifact"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Artifact"}, } } @@ -480,6 +495,223 @@ func schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref common.ReferenceCallback) } } +func schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HDFSArtifact is the location of an HDFS artifact", + Properties: map[string]spec.Schema{ + "krbCCacheSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbKeytabSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbKeytabSecret 
is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbUsername": { + SchemaProps: spec.SchemaProps{ + Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbRealm": { + SchemaProps: spec.SchemaProps{ + Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbConfigConfigMap": { + SchemaProps: spec.SchemaProps{ + Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "krbServicePrincipalName": { + SchemaProps: spec.SchemaProps{ + Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "addresses": { + SchemaProps: spec.SchemaProps{ + Description: "Addresses is accessible addresses of HDFS name nodes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "hdfsUser": { + SchemaProps: spec.SchemaProps{ + Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is a file path in HDFS", + Type: []string{"string"}, + Format: "", + }, + }, + "force": { + SchemaProps: spec.SchemaProps{ + Description: "Force copies a file forcibly even if it exists (default: false)", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"addresses", "path"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HDFSConfig is configurations for HDFS", + Properties: map[string]spec.Schema{ + "krbCCacheSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbKeytabSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbUsername": { + SchemaProps: spec.SchemaProps{ + Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbRealm": { + SchemaProps: spec.SchemaProps{ + Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbConfigConfigMap": { + SchemaProps: spec.SchemaProps{ + Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + Ref: 
ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "krbServicePrincipalName": { + SchemaProps: spec.SchemaProps{ + Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "addresses": { + SchemaProps: spec.SchemaProps{ + Description: "Addresses is accessible addresses of HDFS name nodes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "hdfsUser": { + SchemaProps: spec.SchemaProps{ + Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"addresses"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_HDFSKrbConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HDFSKrbConfig is auth configurations for Kerberos", + Properties: map[string]spec.Schema{ + "krbCCacheSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbKeytabSecret": { + SchemaProps: spec.SchemaProps{ + Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "krbUsername": { + SchemaProps: spec.SchemaProps{ + Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbRealm": { + SchemaProps: spec.SchemaProps{ + Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + "krbConfigConfigMap": { + SchemaProps: spec.SchemaProps{ + Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "krbServicePrincipalName": { + SchemaProps: spec.SchemaProps{ + Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + func schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index 3b0bef13553b..22700b5b5ff9 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "hash/fnv" + "strings" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -329,6 +330,9 @@ type ArtifactLocation struct { // Artifactory contains artifactory artifact location details Artifactory *ArtifactoryArtifact 
`json:"artifactory,omitempty"` + // HDFS contains HDFS artifact location details + HDFS *HDFSArtifact `json:"hdfs,omitempty"` + // Raw contains raw artifact location details Raw *RawArtifact `json:"raw,omitempty"` } @@ -653,6 +657,68 @@ func (a *ArtifactoryArtifact) String() string { return a.URL } +// HDFSArtifact is the location of an HDFS artifact +type HDFSArtifact struct { + HDFSConfig `json:",inline"` + + // Path is a file path in HDFS + Path string `json:"path"` + + // Force copies a file forcibly even if it exists (default: false) + Force bool `json:"force,omitempty"` +} + +// HDFSConfig is configurations for HDFS +type HDFSConfig struct { + HDFSKrbConfig `json:",inline"` + + // Addresses is accessible addresses of HDFS name nodes + Addresses []string `json:"addresses"` + + // HDFSUser is the user to access HDFS file system. + // It is ignored if either ccache or keytab is used. + HDFSUser string `json:"hdfsUser,omitempty"` +} + +// HDFSKrbConfig is auth configurations for Kerberos +type HDFSKrbConfig struct { + // KrbCCacheSecret is the secret selector for Kerberos ccache + // Either ccache or keytab can be set to use Kerberos. + KrbCCacheSecret *apiv1.SecretKeySelector `json:"krbCCacheSecret,omitempty"` + + // KrbKeytabSecret is the secret selector for Kerberos keytab + // Either ccache or keytab can be set to use Kerberos. + KrbKeytabSecret *apiv1.SecretKeySelector `json:"krbKeytabSecret,omitempty"` + + // KrbUsername is the Kerberos username used with Kerberos keytab + // It must be set if keytab is used. + KrbUsername string `json:"krbUsername,omitempty"` + + // KrbRealm is the Kerberos realm used with Kerberos keytab + // It must be set if keytab is used. + KrbRealm string `json:"krbRealm,omitempty"` + + // KrbConfig is the configmap selector for Kerberos config as string + // It must be set if either ccache or keytab is used. + KrbConfigConfigMap *apiv1.ConfigMapKeySelector `json:"krbConfigConfigMap,omitempty"` + + // KrbServicePrincipalName is the principal name of Kerberos service + // It must be set if either ccache or keytab is used. 
+ KrbServicePrincipalName string `json:"krbServicePrincipalName,omitempty"` +} + +func (a *HDFSArtifact) String() string { + var cred string + if a.HDFSUser != "" { + cred = fmt.Sprintf("HDFS user %s", a.HDFSUser) + } else if a.KrbCCacheSecret != nil { + cred = fmt.Sprintf("ccache %v", a.KrbCCacheSecret.Name) + } else if a.KrbKeytabSecret != nil { + cred = fmt.Sprintf("keytab %v (%s/%s)", a.KrbKeytabSecret.Name, a.KrbUsername, a.KrbRealm) + } + return fmt.Sprintf("hdfs://%s/%s with %s", strings.Join(a.Addresses, ", "), a.Path, cred) +} + // RawArtifact allows raw string content to be placed as an artifact in a container type RawArtifact struct { // Data is the string contents of the artifact @@ -840,7 +906,7 @@ func (args *Arguments) GetParameterByName(name string) *Parameter { // HasLocation whether or not an artifact has a location defined func (a *Artifact) HasLocation() bool { - return a.S3 != nil || a.Git != nil || a.HTTP != nil || a.Artifactory != nil || a.Raw != nil + return a.S3 != nil || a.Git != nil || a.HTTP != nil || a.Artifactory != nil || a.Raw != nil || a.HDFS != nil } // GetTemplate retrieves a defined template by its name diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index e8e1f77e4cca..a95538470e8f 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -120,6 +120,11 @@ func (in *ArtifactLocation) DeepCopyInto(out *ArtifactLocation) { *out = new(ArtifactoryArtifact) (*in).DeepCopyInto(*out) } + if in.HDFS != nil { + in, out := &in.HDFS, &out.HDFS + *out = new(HDFSArtifact) + (*in).DeepCopyInto(*out) + } if in.Raw != nil { in, out := &in.Raw, &out.Raw *out = new(RawArtifact) @@ -269,6 +274,76 @@ func (in *GitArtifact) DeepCopy() *GitArtifact { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HDFSArtifact) DeepCopyInto(out *HDFSArtifact) { + *out = *in + in.HDFSConfig.DeepCopyInto(&out.HDFSConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSArtifact. +func (in *HDFSArtifact) DeepCopy() *HDFSArtifact { + if in == nil { + return nil + } + out := new(HDFSArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HDFSConfig) DeepCopyInto(out *HDFSConfig) { + *out = *in + in.HDFSKrbConfig.DeepCopyInto(&out.HDFSKrbConfig) + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSConfig. +func (in *HDFSConfig) DeepCopy() *HDFSConfig { + if in == nil { + return nil + } + out := new(HDFSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HDFSKrbConfig) DeepCopyInto(out *HDFSKrbConfig) {
+	*out = *in
+	if in.KrbCCacheSecret != nil {
+		in, out := &in.KrbCCacheSecret, &out.KrbCCacheSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.KrbKeytabSecret != nil {
+		in, out := &in.KrbKeytabSecret, &out.KrbKeytabSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.KrbConfigConfigMap != nil {
+		in, out := &in.KrbConfigConfigMap, &out.KrbConfigConfigMap
+		*out = new(v1.ConfigMapKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSKrbConfig.
+func (in *HDFSKrbConfig) DeepCopy() *HDFSKrbConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(HDFSKrbConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HTTPArtifact) DeepCopyInto(out *HTTPArtifact) {
 	*out = *in
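Taken together, the new `HDFSArtifact`, `HDFSConfig`, and `HDFSKrbConfig` types above map onto workflow YAML roughly as follows. This is an illustrative sketch only: the addresses, secret names, and keys are hypothetical, and the keytab variant is shown because it exercises the largest set of fields required by `ValidateArtifact` in the new HDFS driver below.

```
outputs:
  artifacts:
  - name: hdfs-message
    path: /tmp/message.txt
    hdfs:
      # HDFSConfig.Addresses: HDFS name node addresses (required)
      addresses:
      - my-hdfs-namenode-0.my-hdfs.svc:8020
      - my-hdfs-namenode-1.my-hdfs.svc:8020
      # remote HDFS path; must be an absolute file path
      path: "/tmp/argo/message.txt"
      # overwrite an existing remote file on save
      force: true
      # keytab auth: krbUsername, krbRealm, krbConfigConfigMap and
      # krbServicePrincipalName are all required alongside krbKeytabSecret
      krbKeytabSecret:
        name: my-krb-keytab
        key: keytab
      krbUsername: "user1"
      krbRealm: "MYCOMPANY.COM"
      krbConfigConfigMap:
        name: my-krb-config
        key: krb5.conf
      krbServicePrincipalName: hdfs/_HOST
```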
%s.krbCCacheSecret", errPrefix, errPrefix, errPrefix) + } + return nil +} + +// CreateDriver constructs ArtifactDriver +func CreateDriver(ci common.ResourceInterface, art *wfv1.HDFSArtifact) (*ArtifactDriver, error) { + var krbConfig string + var krbOptions *KrbOptions + var err error + + namespace := ci.GetNamespace() + + if art.KrbConfigConfigMap != nil && art.KrbConfigConfigMap.Name != "" { + krbConfig, err = ci.GetConfigMapKey(namespace, art.KrbConfigConfigMap.Name, art.KrbConfigConfigMap.Key) + if err != nil { + return nil, err + } + } + if art.KrbCCacheSecret != nil && art.KrbCCacheSecret.Name != "" { + bytes, err := ci.GetSecrets(namespace, art.KrbCCacheSecret.Name, art.KrbCCacheSecret.Key) + if err != nil { + return nil, err + } + ccache, err := credentials.ParseCCache(bytes) + if err != nil { + return nil, err + } + krbOptions = &KrbOptions{ + CCacheOptions: &CCacheOptions{ + CCache: ccache, + }, + Config: krbConfig, + ServicePrincipalName: art.KrbServicePrincipalName, + } + } + if art.KrbKeytabSecret != nil && art.KrbKeytabSecret.Name != "" { + bytes, err := ci.GetSecrets(namespace, art.KrbKeytabSecret.Name, art.KrbKeytabSecret.Key) + if err != nil { + return nil, err + } + ktb, err := keytab.Parse(bytes) + if err != nil { + return nil, err + } + krbOptions = &KrbOptions{ + KeytabOptions: &KeytabOptions{ + Keytab: ktb, + Username: art.KrbUsername, + Realm: art.KrbRealm, + }, + Config: krbConfig, + ServicePrincipalName: art.KrbServicePrincipalName, + } + } + + driver := ArtifactDriver{ + Addresses: art.Addresses, + Path: art.Path, + Force: art.Force, + HDFSUser: art.HDFSUser, + KrbOptions: krbOptions, + } + return &driver, nil +} + +// Load downloads artifacts from HDFS compliant storage +func (driver *ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string) error { + hdfscli, err := createHDFSClient(driver.Addresses, driver.HDFSUser, driver.KrbOptions) + if err != nil { + return err + } + defer util.Close(hdfscli) + + srcStat, err := hdfscli.Stat(driver.Path) + if err != nil { + return err + } + if srcStat.IsDir() { + return fmt.Errorf("HDFS artifact does not suppot directory copy") + } + + _, err = os.Stat(path) + if err != nil && !os.IsNotExist(err) { + return err + } + + if os.IsNotExist(err) { + dirPath := filepath.Dir(driver.Path) + if dirPath != "." && dirPath != "/" { + // Follow umask for the permission + err = os.MkdirAll(dirPath, 0777) + if err != nil { + return err + } + } + } else { + if driver.Force { + err = os.Remove(path) + if err != nil && !os.IsNotExist(err) { + return err + } + } + } + + return hdfscli.CopyToLocal(driver.Path, path) +} + +// Save saves an artifact to HDFS compliant storage +func (driver *ArtifactDriver) Save(path string, outputArtifact *wfv1.Artifact) error { + hdfscli, err := createHDFSClient(driver.Addresses, driver.HDFSUser, driver.KrbOptions) + if err != nil { + return err + } + defer util.Close(hdfscli) + + isDir, err := file.IsDirectory(path) + if err != nil { + return err + } + if isDir { + return fmt.Errorf("HDFS artifact does not suppot directory copy") + } + + _, err = hdfscli.Stat(driver.Path) + if err != nil && !os.IsNotExist(err) { + return err + } + + if os.IsNotExist(err) { + dirPath := filepath.Dir(driver.Path) + if dirPath != "." 
&& dirPath != "/" { + // Follow umask for the permission + err = hdfscli.MkdirAll(dirPath, 0777) + if err != nil { + return err + } + } + } else { + if driver.Force { + err = hdfscli.Remove(driver.Path) + if err != nil && !os.IsNotExist(err) { + return err + } + } + } + + return hdfscli.CopyToRemote(path, driver.Path) +} diff --git a/workflow/artifacts/hdfs/util.go b/workflow/artifacts/hdfs/util.go new file mode 100644 index 000000000000..3af330ae012e --- /dev/null +++ b/workflow/artifacts/hdfs/util.go @@ -0,0 +1,53 @@ +package hdfs + +import ( + "fmt" + + "github.com/colinmarc/hdfs" + krb "gopkg.in/jcmturner/gokrb5.v5/client" + "gopkg.in/jcmturner/gokrb5.v5/config" +) + +func createHDFSClient(addresses []string, user string, krbOptions *KrbOptions) (*hdfs.Client, error) { + options := hdfs.ClientOptions{ + Addresses: addresses, + } + + if krbOptions != nil { + krbClient, err := createKrbClient(krbOptions) + if err != nil { + return nil, err + } + options.KerberosClient = krbClient + options.KerberosServicePrincipleName = krbOptions.ServicePrincipalName + } else { + options.User = user + } + + return hdfs.NewClient(options) +} + +func createKrbClient(krbOptions *KrbOptions) (*krb.Client, error) { + krbConfig, err := config.NewConfigFromString(krbOptions.Config) + if err != nil { + return nil, err + } + + if krbOptions.CCacheOptions != nil { + client, err := krb.NewClientFromCCache(krbOptions.CCacheOptions.CCache) + if err != nil { + return nil, err + } + return client.WithConfig(krbConfig), nil + } else if krbOptions.KeytabOptions != nil { + client := krb.NewClientWithKeytab(krbOptions.KeytabOptions.Username, krbOptions.KeytabOptions.Realm, krbOptions.KeytabOptions.Keytab) + client = *client.WithConfig(krbConfig) + err = client.Login() + if err != nil { + return nil, err + } + return &client, nil + } + + return nil, fmt.Errorf("Failed to get a Kerberos client") +} diff --git a/workflow/common/common.go b/workflow/common/common.go index ce5ce1602097..7b326186464e 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -119,3 +119,9 @@ type ExecutionControl struct { // used to support workflow or steps/dag level timeouts. Deadline *time.Time `json:"deadline,omitempty"` } + +type ResourceInterface interface { + GetNamespace() string + GetSecrets(namespace, name, key string) ([]byte, error) + GetConfigMapKey(namespace, name, key string) (string, error) +} diff --git a/workflow/controller/config.go b/workflow/controller/config.go index 52ff479a66a6..31874163b581 100644 --- a/workflow/controller/config.go +++ b/workflow/controller/config.go @@ -70,6 +70,8 @@ type ArtifactRepository struct { S3 *S3ArtifactRepository `json:"s3,omitempty"` // Artifactory stores artifacts to JFrog Artifactory Artifactory *ArtifactoryArtifactRepository `json:"artifactory,omitempty"` + // HDFS stores artifacts in HDFS + HDFS *HDFSArtifactRepository `json:"hdfs,omitempty"` } // S3ArtifactRepository defines the controller configuration for an S3 artifact repository @@ -91,6 +93,17 @@ type ArtifactoryArtifactRepository struct { RepoURL string `json:"repoURL,omitempty"` } +// HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository +type HDFSArtifactRepository struct { + wfv1.HDFSConfig `json:",inline"` + + // PathFormat is defines the format of path to store a file. 
Can reference workflow variables + PathFormat string `json:"pathFormat,omitempty"` + + // Force copies a file forcibly even if it exists (default: false) + Force bool `json:"force,omitempty"` +} + // ResyncConfig reloads the controller config from the configmap func (wfc *WorkflowController) ResyncConfig() error { cmClient := wfc.kubeclientset.CoreV1().ConfigMaps(wfc.namespace) diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 9bbc803b167f..4c47e23b3b8d 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -548,7 +548,7 @@ func (woc *wfOperationCtx) addArchiveLocation(pod *apiv1.Pod, tmpl *wfv1.Templat ArchiveLogs: woc.controller.Config.ArtifactRepository.ArchiveLogs, } } - if tmpl.ArchiveLocation.S3 != nil || tmpl.ArchiveLocation.Artifactory != nil { + if tmpl.ArchiveLocation.S3 != nil || tmpl.ArchiveLocation.Artifactory != nil || tmpl.ArchiveLocation.HDFS != nil { // User explicitly set the location. nothing else to do. return nil } @@ -584,6 +584,13 @@ func (woc *wfOperationCtx) addArchiveLocation(pod *apiv1.Pod, tmpl *wfv1.Templat ArtifactoryAuth: woc.controller.Config.ArtifactRepository.Artifactory.ArtifactoryAuth, URL: artURL, } + } else if hdfsLocation := woc.controller.Config.ArtifactRepository.HDFS; hdfsLocation != nil { + log.Debugf("Setting HDFS artifact repository information") + tmpl.ArchiveLocation.HDFS = &wfv1.HDFSArtifact{ + HDFSConfig: hdfsLocation.HDFSConfig, + Path: hdfsLocation.PathFormat, + Force: hdfsLocation.Force, + } } else { for _, art := range tmpl.Outputs.Artifacts { if !art.HasLocation() { diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index 017d89dfad09..cc55964e265c 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -25,6 +25,7 @@ import ( artifact "github.com/argoproj/argo/workflow/artifacts" "github.com/argoproj/argo/workflow/artifacts/artifactory" "github.com/argoproj/argo/workflow/artifacts/git" + "github.com/argoproj/argo/workflow/artifacts/hdfs" "github.com/argoproj/argo/workflow/artifacts/http" "github.com/argoproj/argo/workflow/artifacts/raw" "github.com/argoproj/argo/workflow/artifacts/s3" @@ -40,6 +41,8 @@ import ( // WorkflowExecutor is program which runs as the init/wait container type WorkflowExecutor struct { + common.ResourceInterface + PodName string Template wfv1.Template ClientSet kubernetes.Interface @@ -50,8 +53,10 @@ type WorkflowExecutor struct { // memoized container ID to prevent multiple lookups mainContainerID string + // memoized configmaps + memoizedConfigMaps map[string]string // memoized secrets - memoizedSecrets map[string]string + memoizedSecrets map[string][]byte // list of errors that occurred during execution. 
// the first of these is used as the overall message of the node errors []error @@ -87,7 +92,8 @@ func NewExecutor(clientset kubernetes.Interface, podName, namespace, podAnnotati Namespace: namespace, PodAnnotationsPath: podAnnotationsPath, RuntimeExecutor: cre, - memoizedSecrets: map[string]string{}, + memoizedConfigMaps: map[string]string{}, + memoizedSecrets: map[string][]byte{}, errors: []error{}, } } @@ -253,6 +259,10 @@ func (we *WorkflowExecutor) saveArtifact(tempOutArtDir string, mainCtrID string, } artifactoryURL.Path = path.Join(artifactoryURL.Path, fileName) art.Artifactory.URL = artifactoryURL.String() + } else if we.Template.ArchiveLocation.HDFS != nil { + shallowCopy := *we.Template.ArchiveLocation.HDFS + art.HDFS = &shallowCopy + art.HDFS.Path = path.Join(art.HDFS.Path, fileName) } else { return errors.Errorf(errors.CodeBadRequest, "Unable to determine path to store %s. Archive location provided no information", art.Name) } @@ -393,6 +403,10 @@ func (we *WorkflowExecutor) SaveLogs() (*wfv1.Artifact, error) { } artifactoryURL.Path = path.Join(artifactoryURL.Path, fileName) art.Artifactory.URL = artifactoryURL.String() + } else if we.Template.ArchiveLocation.HDFS != nil { + shallowCopy := *we.Template.ArchiveLocation.HDFS + art.HDFS = &shallowCopy + art.HDFS.Path = path.Join(art.HDFS.Path, fileName) } else { return nil, errors.Errorf(errors.CodeBadRequest, "Unable to determine path to store %s. Archive location provided no information", art.Name) } @@ -415,15 +429,16 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv var secretKey string if art.S3.AccessKeySecret.Name != "" { - var err error - accessKey, err = we.getSecrets(we.Namespace, art.S3.AccessKeySecret.Name, art.S3.AccessKeySecret.Key) + accessKeyBytes, err := we.GetSecrets(we.Namespace, art.S3.AccessKeySecret.Name, art.S3.AccessKeySecret.Key) if err != nil { return nil, err } - secretKey, err = we.getSecrets(we.Namespace, art.S3.SecretKeySecret.Name, art.S3.SecretKeySecret.Key) + accessKey = string(accessKeyBytes) + secretKeyBytes, err := we.GetSecrets(we.Namespace, art.S3.SecretKeySecret.Name, art.S3.SecretKeySecret.Key) if err != nil { return nil, err } + secretKey = string(secretKeyBytes) } driver := s3.S3ArtifactDriver{ @@ -441,45 +456,48 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv if art.Git != nil { gitDriver := git.GitArtifactDriver{} if art.Git.UsernameSecret != nil { - username, err := we.getSecrets(we.Namespace, art.Git.UsernameSecret.Name, art.Git.UsernameSecret.Key) + usernameBytes, err := we.GetSecrets(we.Namespace, art.Git.UsernameSecret.Name, art.Git.UsernameSecret.Key) if err != nil { return nil, err } - gitDriver.Username = username + gitDriver.Username = string(usernameBytes) } if art.Git.PasswordSecret != nil { - password, err := we.getSecrets(we.Namespace, art.Git.PasswordSecret.Name, art.Git.PasswordSecret.Key) + passwordBytes, err := we.GetSecrets(we.Namespace, art.Git.PasswordSecret.Name, art.Git.PasswordSecret.Key) if err != nil { return nil, err } - gitDriver.Password = password + gitDriver.Password = string(passwordBytes) } if art.Git.SSHPrivateKeySecret != nil { - sshPrivateKey, err := we.getSecrets(we.Namespace, art.Git.SSHPrivateKeySecret.Name, art.Git.SSHPrivateKeySecret.Key) + sshPrivateKeyBytes, err := we.GetSecrets(we.Namespace, art.Git.SSHPrivateKeySecret.Name, art.Git.SSHPrivateKeySecret.Key) if err != nil { return nil, err } - gitDriver.SSHPrivateKey = sshPrivateKey + gitDriver.SSHPrivateKey = 
string(sshPrivateKeyBytes) } return &gitDriver, nil } if art.Artifactory != nil { - username, err := we.getSecrets(we.Namespace, art.Artifactory.UsernameSecret.Name, art.Artifactory.UsernameSecret.Key) + usernameBytes, err := we.GetSecrets(we.Namespace, art.Artifactory.UsernameSecret.Name, art.Artifactory.UsernameSecret.Key) if err != nil { return nil, err } - password, err := we.getSecrets(we.Namespace, art.Artifactory.PasswordSecret.Name, art.Artifactory.PasswordSecret.Key) + passwordBytes, err := we.GetSecrets(we.Namespace, art.Artifactory.PasswordSecret.Name, art.Artifactory.PasswordSecret.Key) if err != nil { return nil, err } driver := artifactory.ArtifactoryArtifactDriver{ - Username: username, - Password: password, + Username: string(usernameBytes), + Password: string(passwordBytes), } return &driver, nil } + if art.HDFS != nil { + return hdfs.CreateDriver(we, art.HDFS) + } if art.Raw != nil { return &raw.RawArtifactDriver{}, nil } @@ -508,8 +526,48 @@ func (we *WorkflowExecutor) getPod() (*apiv1.Pod, error) { return pod, nil } -// getSecrets retrieves a secret value and memoizes the result -func (we *WorkflowExecutor) getSecrets(namespace, name, key string) (string, error) { +// GetNamespace returns the namespace +func (we *WorkflowExecutor) GetNamespace() string { + return we.Namespace +} + +// GetConfigMapKey retrieves a configmap value and memoizes the result +func (we *WorkflowExecutor) GetConfigMapKey(namespace, name, key string) (string, error) { + cachedKey := fmt.Sprintf("%s/%s/%s", namespace, name, key) + if val, ok := we.memoizedConfigMaps[cachedKey]; ok { + return val, nil + } + configmapsIf := we.ClientSet.CoreV1().ConfigMaps(namespace) + var configmap *apiv1.ConfigMap + var err error + _ = wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) { + configmap, err = configmapsIf.Get(name, metav1.GetOptions{}) + if err != nil { + log.Warnf("Failed to get configmap '%s': %v", name, err) + if !retry.IsRetryableKubeAPIError(err) { + return false, err + } + return false, nil + } + return true, nil + }) + if err != nil { + return "", errors.InternalWrapError(err) + } + // memoize all keys in the configmap since it's highly likely we will need to get a + // subsequent key in the configmap (e.g. username + password) and we can save an API call + for k, v := range configmap.Data { + we.memoizedConfigMaps[fmt.Sprintf("%s/%s/%s", namespace, name, k)] = v + } + val, ok := we.memoizedConfigMaps[cachedKey] + if !ok { + return "", errors.Errorf(errors.CodeBadRequest, "configmap '%s' does not have the key '%s'", name, key) + } + return val, nil +} + +// GetSecrets retrieves a secret value and memoizes the result +func (we *WorkflowExecutor) GetSecrets(namespace, name, key string) ([]byte, error) { cachedKey := fmt.Sprintf("%s/%s/%s", namespace, name, key) if val, ok := we.memoizedSecrets[cachedKey]; ok { return val, nil @@ -529,16 +587,16 @@ func (we *WorkflowExecutor) getSecrets(namespace, name, key string) (string, err return true, nil }) if err != nil { - return "", errors.InternalWrapError(err) + return []byte{}, errors.InternalWrapError(err) } // memoize all keys in the secret since it's highly likely we will need to get a // subsequent key in the secret (e.g. 
username + password) and we can save an API call for k, v := range secret.Data { - we.memoizedSecrets[fmt.Sprintf("%s/%s/%s", namespace, name, k)] = string(v) + we.memoizedSecrets[fmt.Sprintf("%s/%s/%s", namespace, name, k)] = v } val, ok := we.memoizedSecrets[cachedKey] if !ok { - return "", errors.Errorf(errors.CodeBadRequest, "secret '%s' does not have the key '%s'", name, key) + return []byte{}, errors.Errorf(errors.CodeBadRequest, "secret '%s' does not have the key '%s'", name, key) } return val, nil } diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index 83d813b7a485..8e39608af0a5 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -10,6 +10,7 @@ import ( "github.com/argoproj/argo/errors" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/workflow/artifacts/hdfs" "github.com/argoproj/argo/workflow/common" "github.com/valyala/fasttemplate" apivalidation "k8s.io/apimachinery/pkg/util/validation" @@ -197,6 +198,12 @@ func validateArtifactLocation(errPrefix string, art wfv1.Artifact) error { return errors.Errorf(errors.CodeBadRequest, "%s.git.repo is required", errPrefix) } } + if art.HDFS != nil { + err := hdfs.ValidateArtifact(fmt.Sprintf("%s.hdfs", errPrefix), art.HDFS) + if err != nil { + return err + } + } // TODO: validate other artifact locations return nil } From 864c7090a0bfcaa12237ff6e894a9d26ab463a7a Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Sat, 12 Jan 2019 13:45:33 +0900 Subject: [PATCH 050/145] Update codegen for network config (#1168) --- api/openapi-spec/swagger.json | 8 ++++++++ pkg/apis/workflow/v1alpha1/openapi_generated.go | 14 ++++++++++++++ .../workflow/v1alpha1/zz_generated.deepcopy.go | 10 ++++++++++ 3 files changed, 32 insertions(+) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 221876f56de6..023aec707924 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -1083,10 +1083,18 @@ "description": "Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{workflow.parameters.myparam}}", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Arguments" }, + "dnsPolicy": { + "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "type": "string" + }, "entrypoint": { "description": "Entrypoint is a template reference to the starting point of the workflow", "type": "string" }, + "hostNetwork": { + "description": "Host networking requested for this workflow pod. Default to false.", + "type": "boolean" + }, "imagePullSecrets": { "description": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "type": "array", diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 81f606863664..47392e025d05 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -2086,6 +2086,20 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, }, }, + "hostNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "Host networking requested for this workflow pod. Default to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + Type: []string{"string"}, + Format: "", + }, + }, "onExit": { SchemaProps: spec.SchemaProps{ Description: "OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary workflow.", diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index a95538470e8f..72e00d152e1c 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -979,6 +979,16 @@ func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { *out = make([]v1.LocalObjectReference, len(*in)) copy(*out, *in) } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(v1.DNSPolicy) + **out = **in + } if in.TTLSecondsAfterFinished != nil { in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished *out = new(int32) From 649d64d1bd375f779cd150446bddce94582067d2 Mon Sep 17 00:00:00 2001 From: Hamel Husain Date: Mon, 14 Jan 2019 02:16:15 -0800 Subject: [PATCH 051/145] Add GitHub to users in README.md (#1151) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 1db20285c04f..77fb0a68ab44 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,7 @@ Currently **officially** using Argo: 1. [Cyrus Biotechnology](https://cyrusbio.com/) 1. [Datadog](https://www.datadoghq.com/) 1. [Gladly](https://gladly.com/) +1. [GitHub](https://github.com/) 1. [Google](https://www.google.com/intl/en/about/our-company/) 1. [Interline Technologies](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/) 1. [Intuit](https://www.intuit.com/) From 0b41ca0a2410b01205712a2186dd12851eecb707 Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Tue, 15 Jan 2019 00:54:04 +0900 Subject: [PATCH 052/145] Add Preferred Networks to users in README.md (#1172) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 77fb0a68ab44..50255b425e91 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,7 @@ Currently **officially** using Argo: 1. [KintoHub](https://www.kintohub.com/) 1. [Localytics](https://www.localytics.com/) 1. [NVIDIA](https://www.nvidia.com/) +1. [Preferred Networks](https://www.preferred-networks.jp/en/) 1. [SAP Hybris](https://cx.sap.com/) 1. 
[Styra](https://www.styra.com/) 1. [Quantibio](http://quantibio.com/us/en/) From 528e8f803683ee462ccc05fc9b00dc57858c0e93 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Mon, 14 Jan 2019 08:00:28 -0800 Subject: [PATCH 053/145] Add missing patch in namespace kustomization.yaml (#1170) --- manifests/namespace-install/kustomization.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/manifests/namespace-install/kustomization.yaml b/manifests/namespace-install/kustomization.yaml index 9b33fa715a44..5533dbdabe13 100644 --- a/manifests/namespace-install/kustomization.yaml +++ b/manifests/namespace-install/kustomization.yaml @@ -10,3 +10,6 @@ resources: - ./03c_argo-ui-rolebinding.yaml - ../base/03d_argo-ui-deployment.yaml - ../base/03e_argo-ui-service.yaml + +patches: +- ./overlays/03d_argo-ui-deployment.yaml From 521eb25aeb2b8351d72bad4a3d3aa2d1fa55eb23 Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Tue, 15 Jan 2019 03:05:47 +0900 Subject: [PATCH 054/145] Validate ArchiveLocation artifacts (#1167) --- workflow/validate/validate.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index 8e39608af0a5..95524577d8ea 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -133,6 +133,12 @@ func (ctx *wfValidationCtx) validateTemplate(tmpl *wfv1.Template, args wfv1.Argu if err != nil { return err } + if tmpl.ArchiveLocation != nil { + err = validateArtifactLocation("templates.archiveLocation", *tmpl.ArchiveLocation) + if err != nil { + return err + } + } return nil } @@ -184,7 +190,7 @@ func validateInputs(tmpl *wfv1.Template) (map[string]interface{}, error) { return nil, errors.Errorf(errors.CodeBadRequest, "templates.%s.%s.from not valid in inputs", tmpl.Name, artRef) } errPrefix := fmt.Sprintf("templates.%s.%s", tmpl.Name, artRef) - err = validateArtifactLocation(errPrefix, art) + err = validateArtifactLocation(errPrefix, art.ArtifactLocation) if err != nil { return nil, err } @@ -192,7 +198,7 @@ func validateInputs(tmpl *wfv1.Template) (map[string]interface{}, error) { return scope, nil } -func validateArtifactLocation(errPrefix string, art wfv1.Artifact) error { +func validateArtifactLocation(errPrefix string, art wfv1.ArtifactLocation) error { if art.Git != nil { if art.Git.Repo == "" { return errors.Errorf(errors.CodeBadRequest, "%s.git.repo is required", errPrefix) From 5a917140cb56a27e7b6f3b1d5068f4838863c273 Mon Sep 17 00:00:00 2001 From: Edward Lee Date: Mon, 14 Jan 2019 14:38:30 -0800 Subject: [PATCH 055/145] Update README and preview notice in CLA. --- README.md | 29 +++++++++++++++++++---------- community/Argo Individual CLA.pdf | Bin 65876 -> 60326 bytes 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 50255b425e91..6b3890b949c9 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ [![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) -# Argo - The Workflow Engine for Kubernetes +# ArgoProj - Get stuff done with Kubernetes ![Argo Image](argo.png) @@ -10,22 +10,32 @@ We are thrilled that BlackRock has developed an eventing framework for Argo and If you actively use Argo in your organization and believe that your organization may be interested in actively participating in the Argo Community, please ask a representative to contact saradhi_sreegiriraju@intuit.com for additional information. -## What is Argo? 
-Argo is an open source container-native workflow engine for getting work done on Kubernetes. Argo is implemented as a Kubernetes CRD (Custom Resource Definition).
+## What is ArgoProj?
+
+ArgoProj is a collection of tools for getting work done with Kubernetes.
+* [Argo Workflows](https://github.com/argoproj/argo) - Container-native Workflow Engine
+* [Argo CD](https://github.com/argoproj/argo-cd) - Declarative GitOps Continuous Delivery
+* [Argo Events](https://github.com/argoproj/argo-events) - Event-based Dependency Manager
+* [Argo CI](https://github.com/argoproj/argo-ci) - Simple CI based on GitHub and Argo Workflows
+
+
+## What is Argo Workflows?
+Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
 * Define workflows where each step in the workflow is a container.
 * Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a graph (DAG).
-* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo workflows on Kubernetes.
+* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo Workflows on Kubernetes.
 * Run CI/CD pipelines natively on Kubernetes without configuring complex software development products.
 
-## Why Argo?
-* Argo is designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments.
-* Argo is cloud agnostic and can run on any kubernetes cluster.
-* Argo with Kubernetes puts a cloud-scale supercomputer at your fingertips.
+## Why Argo Workflows?
+* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments.
+* Cloud agnostic and can run on any Kubernetes cluster.
+* Easily orchestrate highly parallel jobs on Kubernetes.
+* Argo Workflows puts a cloud-scale supercomputer at your fingertips!
## Documentation * [Get started here](demo.md) -* [How to write Argo workflow specs](examples/README.md) +* [How to write Argo Workflow specs](examples/README.md) * [How to configure your artifact repository](ARTIFACT_REPO.md) ## Features @@ -85,4 +95,3 @@ Currently **officially** using Argo: * Argo GitHub: https://github.com/argoproj * Argo Slack: [click here to join](https://join.slack.com/t/argoproj/shared_invite/enQtMzExODU3MzIyNjYzLTA5MTFjNjI0Nzg3NzNiMDZiNmRiODM4Y2M1NWQxOGYzMzZkNTc1YWVkYTZkNzdlNmYyZjMxNWI3NjY2MDc1MzI) * Argo website: https://argoproj.github.io/ -* Argo forum: https://groups.google.com/forum/#!forum/argoproj diff --git a/community/Argo Individual CLA.pdf b/community/Argo Individual CLA.pdf index f91c4a5a3048e0671f00c91822fa4054a586cf50..e25d08bc473881d9a80b6fcd9feb5f4defa96421 100644 GIT binary patch delta 25818 zcmZ^}V{jmE@U|OgV<#JD!;Nijw6SeF8ygebwrwXH+qP}nIQ#p*=bWndU~1~>>1Xcg z>go?uHTN^~eh@`V;3#Br!lJZ{bgZysT~B$Ru#8Nx$aoA4%uH-V^kPJsMC>f=M2xIV zMA}63vP6vk7Db3y*qAukiRfjBSc&MBh*&u}IXPmr+`(ZPnPXQ7K{?`m-mv14L4N^^ zOhin~|2-)F$HeBpO9mnZB6 zkD6W{+!f&BlJ#BYJ((h&Hl4GQ?# zL4_=XlLl0Q!I0P?tKy3lDErMnFfuT{UO`ft=#gfEfXJIeDj8Kw!fD zPtP_67O*@#|N7N8HFh9kVvipKBLroQ-`OQHRwQC$CZd-z{$uKB_HS%Ne0;DDj`qg- z*08P_hI)E>V0laMGcclnOu<=9s1o2_uu%PVmiQqr~u@jt}kK=!a$;U@T zFKYA0k?6nvME}kH|D3TXGZ7Q(|E2g(r08hxWaucTZ*Tm^(Se8|UVjoRUVfMoU}528 z`ro>~&|Zt$Z$k0Pu0B2jNg608CHl6F@9~kL=j$neiu-LF#O{+Yq*aqy`p?<@d~6u8 zDX&gH23CG80b#d3%$$j>V zI{Wi>rqkX*$2;(Km^^Xveq_U+UG;VIRxP{rwVv($v2~E1p9!4h^8%O2&|MtNgJ<+W z<`>Pw$7(_NF?!iU zw$6`}F&o)Q*}Jcom##0jSBEXlujIL8fFFBH9XIIn9cVoBN2Wsxnj8mAvq0&)-UzGc zQ~pGd%WQ6J)*Opp(a8$3uORpFjOh??BqtCA|ER#anuD%E|JpkUy@FzYeVJ>9Ch;WG z$-dW)@XiVnXY4x|k<~>cARsh6YEQ?_fS;WOf>j4o?zBSZ#KhEs1WStDX3%oD?gZ;)~`n7TC;e(=&83OR%REriZG5<(sDa?X+%u zwCAXiFj=fK|0bUfuqux?br#w>5%^>(olT@y*S;lN%V~mFy=vh$>a6OP#E*xOX#@wu zAamWJoSERtD}LV|z}(>bc>8J*znV+#;?vYVpwaV|>#LcJbA?yLf(Cx<}M9F7ssWwj09maPs~2XB$v6 zzLv=DtwEk!pD%7QKa1c^K+TNsgVQ~0Nmp$$ATjUvH3;Sog%TSL35`vJA)yy?Lhceg z(0P7fs6*c!p-&0o) zsdx40?gldk!{T%oY52JT`+5aE4UF`kP&CAm-@U%s@BKx`hjM|wybKQ#p?S)DBl!KX zv(lpzg<^oYY|2t{Pg9dKw1bRLl0Cf+VA1Xvo$K-Ew(bsqi5mQMuYfN#^&ruXqN8*Z zvxLfN{ZUf5Yky7KWpy262Ysez|_m z;RH{{vw%o5jP6o|{)KpFO`cH=1rt?)WUl*Cetw^@gMl-9t4GPpV1&&2er=Bdz~wjh z@mkk{UFtO=rv(?Tg-8rtCtlL7_{KHa?xcJ_hkt{3%q#F+$&F5;OkYyVM_AxQt zQu_NN|B+Nt)>yOR9w!j+M6AUc%yXKdE7n{!`zCr4;P@Ki8W4Ydmc%E9R%^37#` z?8Doju3Tctp^#{@SwV&qV@a~s1`_r$|BTr<86-XWD$|-dJdn-gu4&<*cPpUeO5=trY&;CCCCI}d(7z|_q(Bsi- zSLzb*z+Dbmafe*wJyqH&AY@{Yy;(?1^4Bs%a5eM)9pUu?D^Su4*Vt;Nc1X`&P}3CdiS zgPV9TDAOF>_|(kOh0}=?ip)qXSUni3{l2pw_n$}E2BreKkwZf&04=WLmip0AGTO2V z{;yV5tf{n4AQd|BSg}Q=Ju|(q#Rq2BkKu0%1e&3_#ia8sKbf= zHbihgXywnSZeblw;xD;P+^9y6oqU<_I`_V)g@jUcd*9`I%4@15Cp7bsf?QjLx{LZ8 z2w$OZouMx}0ZrmR0o6Hu|HkdSx6ilRZ!cOHE>W53uWssOdewZF4dopw!(}wLwlO<0 z@(?3@W(CXen5zaJY`df0F zePCbsT#KtLC8C6uWU&y?mSi}>+RQf_`o37O#cJcDxrw4Ygj|s=UAF|fXI-T^PDV^GKe0igEPT1M| zdM-p_H*nU(0RGzjLH3WaR{H%WTi{LE;2%ypOWFmWx6RKw20^$z&INDPfPps2`aMg+ z^Nw^0A#0k9-tNInv!F%jEd=Z?M#lQ|b;Si(9&xM{io1v<08!r7R5M(n8`x%)*n(4kQMxgz{C;8VNew%(Uo=>l<)@blI z!K-bE9l>Nxlu)D|%z{(%8nV1G!MyjMwqBc%^p7tECXbwWyJBrvm_t10D599-xMd4? 
zDu{On2@XOhZZWt+qyD%m=3$NO_&F$Yo{ZfvKC6ST*6mJLm4U~H*qqwPnKnb;xA`ML zgTI64_ybT7{52aXetC_}h%>N?%vKVlRz`n%VkYyDX2;upAww6Jz)9SxIJ)Si!<3yj zSDr!08qXg1sjgKVn<}hl3OEA6qMcqo(nVjjaOT#wf}jcP2r*Q7o~$TVX-!_py1qHq zZUef!2*K=rr&Oa=I`ufu&K2XI3!^lxKFNYjF+imMJk&*=e8?l!enDiZ3=X{E98bIK z^dFuYt5A}bpH$gP`MHrpX|c9(9lSQpNorKqdW9J9V`wx;lN4El;71cdwQ~+e2Y2ix zq0}Au*Mms3lg-UthlbyoIjS-Ge!^YGh}jY*eJ@1K=bFgwA_bGo(eavpv8Za^vnIL= zzW|CK{#048@DZQQqoDX$gxCt@WT&|tbqb@ry(8@$3TV1Rq%&4G!%WeHDq#f-Rrmzp zbZN#<(fm1EBhnHj@zk>)uoZ>dxyw;N`ien>Tt0-w9jZ4Yw%Pj`a=W{MXJ(}p8?U^k zEgz}IRzE@;&PHmi0&%odok>li8qScZ!vgera$jcV>S_})J%Hng{K^u@gX)?SNAVNB z*Av1+q#cHnuQ4$J+2jQ4{1q&6vp{<=+B&9ENKv6qj!Ro=tjR+c@O!&$T^M#HUPK%f zk&Pl8HM0}f(q5T~^1i68XdmVRf-}Gw53;XZdnt#P^DYq8`pQ{sNiyj-Y8n`d>;O=V zv1PlT_(W9E`fdEUZ3l;zV6}EH*m-!v7W~XyC!P^oV8WJE2}(*7p6+4usk$7nw(JB-?C+!fS!J7CU}P zR$h`$@+kN>tfs}urslYdq;9#HZ4_WqZj`gbb)sEY=MAxD>=0lga)dx;?$Z}=W>L6o zkDX8!d1}zb<%3P+i+%ONq7nRk@`v4^2ULmp6AuVGvc`|U$e&HYW=keYQ3}K`e+j+( zDf1N6h+(DZ_i$G7u&w-^FB=j*oJRAI`c)xrZvH@-QJ66veQ*oTa%6*Ln8pG5g$2VZ zWY-5m>_I11?ok&E8#jq3pCLjNILjqaF~o$65wBJxxcSH3C?gA}Gd@;93%i1x?GFh^ zGV)TP`NaY+SzIyDaRb<71pc-6tDPgYW0i4}1cB=enp3HX{4=B7&gZkJflu?}Y#7C* z;r^Us*aOb(=)2_$IB3qeVJoYH$tg z7pdc=Z+1&iKzG^fZ>Aw|`BqoY&(P;c^sQglueRy;-mnvg7%bVw`Ol|U##8)y|&{e=zOO=a>EeR3Qqc!j@ng;Yv+Eu+f*G%Zfmr*+| zGMON4I7WU(Uj!7w`_-W1c!w5Q**!l;ISoP(#hgZ{-nZOLFnCu_ zs~6gK+5w&w%;cP0h!8PcIe{cY+c!PtO6&!X&p&ii+edm|g&uMXm%0jd22~sDV<3jT zVegT(f8i$44u@w&US2!K#d}wA_yU5wP7EsHS0KjrG6V2EOberPb8b=!nY)HhclZjH zu)VF9pUhyu2!uKl1AK7z?M1_N>sr;FhmqJ?yhE94?_1O3TTZ4ocuyDIG;g@U7> zf|D9DYlP+vu7qV$mb;Io)eT$?U}Q|;6A4z*f7DT`a5L+{KH$!u0xo+?%#OQl8$d|D zCAuzkD#IKsMcFL4-c|bjaPvH5;%kkbOcO=(;K>2x;bWBI5eHe$*i(L*B%ir8jSTd3 zt{j1aHVHyRL1;-7$lU7qX>!}R_$~Xf2F@@+6k1!qmBd+<`#WrZn{sCnXvkZy>aQh! z;nsKtT`&jlAQms6>{T4rj90O(`dRa|-78|MIZaX^ByH32U27@bHps+~d(%DN%fYQz z2>lX}K+2j>Zv?F ze>_9nLQ6|Ga4y*qWQ6apEz1ZD9;+?|k52)Fojva9H*q%Z$BvS6V2<$tfqi^D-BW*r zcJ4t=1HJ1!h%1wSz--Xhd4)H`z>&pOrc(MnmI#^uFb8Ge8cH`K2v=~cDbsG)Nm2$1 zKd>I&ueNEwp_rKksz|CY3@T0Z2_1)o@P=AD69eMT(TQ(%WIDFT+_=^4dff6jhFU-- z8k3&i3`79aHxs@2A=BwRr%?+kakC-{A%vmK^6y#cI2n)E&dc@#P})YMCKCO%Kj4;A z+JgI~VwdR9@&eXr_U(Sk_n?m4kVHp&dM4aIN7oe1W)N6_!_=tdEb7#h6PpdZV@Ou^ zl?Feb-fq{nyxxL#`twM3Mb=h*f7k}-dT9;0<&p9Q^sz+zB%}zi4}|(!g-Xhi(4N`Q zTvNTouLMhy+eE49f4n04ameKb27-baC_?1k6Kk{gv)0(HgsrU4_i{>zag6D)#7*xo zmS}83Rr*K;BrS`jfE#3LJ)txiw`g}a56Cb-$oQ86Fc+&?nPa6z^NrHJo0xwdOwX5Fh292<{Ymjz75E&o4i@jtN0bF)-h%sdT(|xP`4~izp@XHG!n$jqIlK~S-#T_Dry}x-u6s^H2hi-AS`#;5# z^{i0z!q{Sjz7Kve`}V=_TAIpqVVi7%nAPW#1gRu!h^&ycF(RclP!tO zs>(UE5GU%Hl=p?%jn|FSJrEL|AnTiff6YI{XD~Sl-GseP z61_jaq<}F&ce^tPO`eZvfV?g9dxp`9s7r1pY)Ve|Zz(IH>PK+U_olxIg(P}!E~^*dDQFeI|3ZXR^y!Td&`Qj zMp{jE$J|EKEc|Wi=jW_W{OiJ3VfFuB=D39RYuFsj@veGnP*|!gYJMAQ8P%=;^f`h#C=fy;t1K&aNN$K!wb(QaePaKPK7)(VE87TKe(E|6aN zp0b6c#@$fL0LFUgbM5mBZJSW7X&P)=SMw3tKV_=YUV1sI4&8^K_qZCA1{xktQqLuj z*1)OB4c#0VSb@5RrHE|iFpnrH7SChnM4^CwJyM@ie7od21|snb8;6&4tve`Xxcxd% znx3IK2tgG1uOrx;=Bp0578Sk)pEi;tv64J#>%rOTR~aDx!0UQ=!}&l`qnbaCMB ze^yLFpU?O-igrLnH_jj7Hh?}C$$2@K%<1Ew&MgA9ble7RO~W7BnY3!f*M`<+pY}k4 zN5(qI0UyOwCo=GRUG&rW93N19yC!HI)>;w*rSvg6UN*lZ;cjT%MpDbyRo{ZUtdWMj zoZf`Ltl7k0H`7G-+-Rk30lgzwwJ%!4^+?r{&8=4&6weCJb(lw$g&Y4kCm4WxJH^Ya zegr`4=z3n;{BCoetlg?col2~`TZMRXZ335tMRu7rptsu0H6y(llTAi>m$+I?A#NO>C}$lT*%!BI+#TvbP`w5eIy; z6RzvI2J+Pgj%{W7{eq_roz<7PMw}p@(8p;twyhLa43EWxrP&A`p>@jASV5f1>w#R= zUy%=X=&WqfD(dc#fbzqaFhfaZHx5_W1DUUW1^YuIUZF>Al1l0Gch^WQ{#E%?TEG(G zZcO1RG{5BLcjpF$s81Uau* zC=&>#{l#aNp}u5ON!l$S>9#H*2FAENRxCH z`Qp3ulDGWP6he|MPX6=jHr`hllL+GeS*gIhoG&H>K0*ur!#7Szd~KtVA>K2lV@BWT 
zASsH`hAPlZJWs(}O584pH!)<%pUNYRpa+)U3Z{@6Z(f}HA2sIv@39;;0idv&);o?o(b6#8Zp+3|ae8=J1ACW!Jo$$H?I}zTDa- zBJ0emF4_=4uC0kIF-d$o6lg&spDlfVniI3bc-%qnN6Votl7d;R+HVYuO$LIz|57-a z?Rg!^>=gb2_NNFz*CZ*af}l9oCK(fuw)wt5@htqELTfwY_FvxlA8xuJ_g%2{a>D=& z1Xb31j9r_287m6yK^!>%_ZSSugj;ps&bz{cS@u}B@RfTXsgX$oNEe!n`p8ig`_{;P z_z=B1-PTAOa7qNxq_i#qvdi!cioeEz^hwo2Wl1C7SRIRO(Y*XYJDe9AGN+`cz`B@g z9a&%x(~f@8CbsBS8<6CTd8w=}>Lnpy#fyq86z9_!h>FY|2eNra)(~ph6hUnzp$>aM zp>%|QNAiJ6?|%gctf$tO=>A?;*gEu{sX8>peb}5$@)$|Rchy)DK3nb9c%jYwq6b}^ zO!rv=ss8-3mb_%Lj}A$A>-T7}fv{Ee`hJySGoz6gUmk*^OKQ=I_lCS#mYyZN#WQ`= z{RJhQKLs8!SAwFv7+_)LO;`ghNA^0WeArP(zlt4nJ`!03@CE;%KakmOn&ViFv1tdj zr9XB7DXAOW=xzr6`t}L3Z|{?wX9IaH*1!y^IOC0V5ekCn&a?du#G7pgXD$K$k#Km*z+rd|~?XmVQy44`4OD8*7 zgWJ=~hD!!AAj$`@?m%}p8#rB)UM8?PMGtDLZ2Ud=nCU<5?9knyNPS7o>%G0CxvKsj zbmRJ{YCQH55^57|-4*Ko^qL3&wfH4h%xUHeJ4HTY-tZ2u;telhXct9m$X>6x;!o3! zSW7guQ+>n!JcKjHQW=j~OuInUh}#I==pr1D0+7+6B!@rj%GhPQs?CSt?+V%V*KO3T z-8LRTTLv_CE&Rc^$u!h9qHWSK*S7d5f1o=fSsbd=thCX5(gg4u?5C^U**lu~#G)7W z9(LTryqoxRvU6nT=bWlLr2t_-3LqXtontPNm9moZ9KTMfNEtKu6TN~yW>{IfVc^jZ zJVsq$Sw`dIq$7KHS5PnF5A6}}8Q|@2M}7NUke^Jl%Ghm}L^8uT%h)5ouXfq8SvzW% zZ`iT-YX~h0XUHKcGb$P1hVzk&&+(naTdnADv_aO$W_GA_;8sS4>qXl^v4-+{LbjY@;=ixzwb92Kw2<}k zkaf^V4PqBwVwd>G8oj*o6k0m8vwr+tf}0puiTP0QU24hhy1u$8`}GK|hz0WrUhDlR zBn`e2iRfvMQ~B1YVFaOlg$Fy6{VIWd0?~c8=J56PlyJkvM&T-M;w5(H80^36z%m#H z=Lb7+@(Su-rzonS!=&Mk&|!Rm{lRA8Nqk^HRZ1iZA1e|*7I%>YGuDAk#yn${qwD5$ zaC9&oCq0dhn-e8gla2%{;YzU?+T(`(g^rffHKZ#NV+H}kjk;VfTuogOW0gR?egbBq zFD;l23G@Z4H4Re{1!ESHrS@bGC=!x>OWU-tFniUU(-~QDbeYqZmSy1$xq6TuORKWc znAtbyNO;cU5ps0RPxv4`y-p0S?IV=oJm>Lz+^I*D=1s6O>&>QR-fu2Lma${$hVo zH**2V^nB?Hv_0CjGpFhT8lWGO$!v<6pSUFKptemRsRmKQkdv0&VNU|RZ|Qzpuc)c0 z{do1PcTexfMA-B}4b!F{bf(WXQCibgr89kwyp{8G`P|2Z_o}U-i_>2B9K3$if@Hn! z_?5+b_crIVF0)kbp@=ihj{Md|S0~)Bz65y`$4>5vSp__!#ZF3qfK6PwPColFzI*s) z5%0aMhQQUV&gyU38NpYq4Z*9qtz+K#Qxq~F^y z#JQ~#!$(@~$Cj7!NpeS?C++Fr!xiB3LE86k$vk@(CFnC; z`Ia=gx4iOBbGd!v$w1f}e5=pnTfJLO8=G|bu6dzx!DU_O?BArJS-w$Gx%6l0a`Ccn z^_SKuLkKsP z^o+Di^U!9YOzmc@&}H>;b|{{sPK9F$ceu*8lRAed4^Zza-=w)}xBu8gw+U??Ah?NI zrhWuMT?V%LJ$Ainbv=nbqZChtZelrpB8El}=--5zA!mn8^&joR-4s3tKNmks;GyV- z=kMa$Nxe&YQb&aY;aB}nyU;fzZy8=cyixhXb^EJ#1I!96%61a1NO`5FrvJ3#FU+#J zr>abc-#SoRC$aq=n>Vq%Yj{}=)M#Q;9m@j!F~Wa zDLbMv?R`!DzogYFnrdWrGD%t;FECd+eoyIiya|guT`U3J-M^f9$=NPxe!p-35DF$E zB}b5z!H#u*9G1@?ioj?8%z!zP$=H`l=ON=YJas#geVe<7Ga1iFyf<;4fuT@)8($WuTx*MlX>2|j4mXWD`z0fbwXg9sDVk~KQ-Bz<# zXlVaE9BgFYXunL-T-tJe!*|(qdg6Wh({l9*T}I$}yz`r2<9WGgh}~u;%k{Rj()ToN z)moPS&T8ZF$oYKz;T`UQzO&)&Kvl=<;(>kB^X!86>U`t()#pXW(-Uz01WpKeJ6=x@ z1@m({xbXb{obOYYQ8L92v;7Vf8O+}6%Z&M=S(#kKWNxks2E8@8LQq58lVRbJGZ+3) zh{s#iHjYZ!n;q_t#{?^tmdPQLJDI&!D&f-{`TY?srGMRt_}Gbd_a^!I#hdoDj%|U) zlVOD85y!obyxsN>5#93xfr;3EPvlfLI6WoS(Op~>h?Rp~pjNPz zh^VR>(nO?}p0uBdaxEpAC z?7YZM4_Gsm(m$^l$=-sR@;@3!Bg~@d(i0Qek9}WPR;+&;?lV(Ba}kZ?ww#l82D z-*)f+a#h?Ua;|_jp^`gsqH$6idSAA{uf%%<5*(wmb5_;dhG`WP=^FbS(u9_tEKU98 zT^9IYx*x4LZ%OGc__X7!x(8k8D6yZ6TkVwTuz8Cu=sz946xAzlb7l?=p|gf@8B57S zZ!0L8G7qJRCm4_x`UP?CiJt`+4e84m9`x}hQQZysA?R|#EBJyL^`jlLkTo;ex1T59 zf`)^u_Ky{ctP#P-0)m2i%M9woO_alwo*5;p`EjUN zOE)uig|S@7y}wgKDydC9nR*G)NYL7gCdJ*%Ad;pg*;cs@9;$zs+6w|QK1ZXmuhU{z zZuUlQW(6WV?1+i&(jE5?hyer5?@P3GvQrVgH=v70E3hiA*qVfsJ4Gd$V&cXs5*V)P#PQL_%#fXL`vU}`{Z>tzEv@hrqnZKM``ZAGGf z0K$pS0Z3%owNcpxgxDu*Bk!Bo!_tD|S=IihRy@-zkp)S4%+l?%xsmI%CmI8SF)OEMA25HxR~{1f$QFB?_#6kT4)o(Eh>sr!`;w@O;~gc z6FAO`&;kT%qQ`satbN$TL=1Am1c)Z0*z*6RWq#M;c^WysetPy!)$PQx4&7q9AwcUg z+}7jWQiY?QgX4~$ULzZEg{1d-tIJ?)R-kG*2Ez|hNvU6+y{Y|7@25k6h18L5u{TxcvIgoE!9V)J3Vy*m+xy~All}oxM8!qK_^^$%5D_b} zxB#5bU#z`wbxvaQ4xZSObFy1dMkN#^M5bL%$KNx)it^INZ3$ySAi*V7_c*|x;4OgL 
zf-T-Guo_4aU63A<9u1gtW+{h_4%92j2bda-9ovaW?^2s%`UnjvbTf6^cQ=w=x8q!! zcI$#Hl@Rdf;K})3R=|?O~s1(_M5Hk`aRuumSG}KgyUKkh((u^2es{a)PdrSYz{tF63 zv0$O-Vg*0{g8(rkDy77Kh!K-VLNy1Rk`-w>EfE~bNz}zz9LNxLMzaEkYw<7A7~Iw zP=Fr9Dr^<*83K7fKchA;-8$(4bWLRqj%`du(?8OsezJ~FZ^Oc?$7038y2hTjlK(32 zQQnHyvEH4o?@kj05rv0>(gKwbrj4X_DysKMjBj@3Bjxjgp~>aDhyM39u80kuh(f2` zBgR~3uFS!0?Xk(aOq3h63uq9-U1-_n_PvCen*<*<@)s7>sNlJ}P;=PjA?FCYL6?6T~@1!4x}b z*W&yLWMFZ5Nc@rkhX!q`BB5Zt9)xt!VrRN}O}$BPa`Mse@!`>i$Q#{P#Lcs02_~l_ zii>Rv`_ERYBY(==Iji|j9O02rBM=IX%5S}OtY+PX)ta)0{kHMA^OzoY+B@BW>0PZt zZgEb1ty43wGS|7DW$;AdrT?_@W#qDQv`p`m(>3TF>Q(F*Z?DF-bsNEp23xF;8HMxa z7R7b2tMrE2HpjQjC$mMlCuxY#VHoE;`c1A(pBLo@|F9&UmCr^B|LhXa1mOOoY1+2T zw&EN`JBewDYlQpnrM*Z(!&G6ZCamVFrk27g6>!42}6&S5oq^{2I+1w_j3O?e&t@ki%)uL4xE1?vfOZ|#aVNgyj~#sgoW4AUeBuc zo=ekKMo+5u-iP*X_uOLe4>0=6#;3Z>FdR6k!`R9O6)TG%u4Dlesv#c#5gHf+mx1<)@THYb`CoJz<-M zmqlt_D6VNfyGVLOYE0cna>mEp#uY(+v<<1Uqp`_}$C|Om50IPMXAjSY*5x#3qoH*) zd^z|nlb=Q`y@a%k_|k!JCL|;!kO$XH?#rpZl*V$Rcid;Bd`{I$AIg!owhc)a$(e~W zNh3w1@$w=wKjm4eQ-Kzr-E-cJ(6i!6V`n+O2RG@0cCOjST{Y-_&Uqy!C%^6I^Yr{? z;bpM8yze$^2LRPID~81MEdS1(Ts~<o1-Tg^qOFZ+9Gb@jj6gM(rNe!3$VWZ3BLECw43J5SXv{;4V8z= z4S#-~1!3^d!kleoRE{buC}UK)=c`A_e2l#PSt-5=l*ID)V8KF&0f>GG%>^CMmQe6Y zLFJ^eF){JmhEzJmQ0akwo2Y8pr3=OLZ$dDPuURS|f90j5GJGL7dDA~W(7NSJHV>B#fV?cHq@uH-+3!G2S97(El;yHa%ub+>!;@%#D+d1YVe)Kk^XSH*6leC8}!wttS{%^$oVcF~|FU`Hy3;*0iJ_O-SY0itR#g z|LnRfZ-ZDX8fv93+L`XoolIAn>;cIad{b-O??!iRJ*NIwdCskZ0FShYaJ7gsj|Ba+$Mok9FRAR2SKm*EWs!3xwVPT%`JC;M z)FZHCtaFe?+-vC8!Ii7R-3@!U!LRzLx#v~qg%Rp21Q?ILC1ef9ERS5QnkSeC)5vU+ zeG3jjzcFa}{=+0RTo^f3yfMQx)pEsX#lUUEex0jFLyxx6+gMY-jqm&-zE6hWPtjrT zp@0Le1v`AABP+2TKW{+mRuCT09vadK9*uhpZInwGuJ6C|#KD#*ZOXhHc#e8#LcNS| zuJX@0Oy=#+>V;>3{^NfI!xK4mKzDh#bcg$_(TT7%^>1SV`W=KHYAU~hgZ3A0uCyZm z5M>YE`zbr+pPnZZ>*~j>N7)PNi|=)qWKf~@c8x8XJmgKGF0@!`VK*N@%mJii$APwF zeBsDwfV(Ax@?E+b|O8G9u1^POh;^)y^UXPDv2{ zE`k9&fAV9!)}ks;QKA~KxzE@D**3$~8Gof0Z;qSYeRPL_Q%i5R?-lCa752w4JSDoR z#?XqVEhtid?AUAbcluaWF-%Wzi(ck3i*v>{s5Uk1aoyHeDi`kW;A647kG$|_xW{6y zR&Z=DKR*5FIT#ZdZ~qPHzCZiDbPgn^USuQMR|*yN#%j@Cwf^XU`qM1iOPgX7<6`Kp z1(!_MEXtTeNy*5v_}9ug%2W81zoDPFn!cKG1I$@f%w%8K3#xJ*ob^g81T`<7hd7mr z6S=l9q$7fJho+zJcF?v{57{sJ4#*WWn%GP|Wi=G1m=uBPy*HEBn93e5o~x&$7fsy> zNF`a_db2~lQZ*!Ch$fu&j3%9Sjb=*oQSCR;>e5_C$?=rj#7@dDz79#zbBHR zEU2GRfmbU^{SFPq?2Z>wF- zIPXNjpoL0Vt;Q3`{J)GzX`?$vzUANk)c1 zyufd^9Y$iZA`dASXo3g`))Nh{c*ReDYVG+xL~Xh%^1bR>WY+TH0_?#0Q1S`Nar+cO zppi`Tb3z;tfjc#RNf+1rxq6!@u1UFyWBtz;T}d;T)kn9{{4<+bQa45(kBCK2ITm** z<)*DD&E<#b*fCMirq-+uFq~95iZ7wjeyzKyH@~32uB{~f;ZI|sm?61uu4k1vXIw3S z417v4n#Bw|zM+V3@|$Ef$31xX$#}wWx>6r4?ve*Y@byV?Wwfm#C9RY6(lrJAQv9$-Dh0a-0tgyEHmn%87i(X^7SJXS=nW7WGD09j^X` z`vS0>@Be?_O>}GtTgaBy7#uA>o;lm+K(5VnU0w)>egy;xY_*3_s7`x znLR0-cVI?5?6Um%D6L_iUv}oh7(w_=3WpWBLv98;aB3Y8K-r%E^fK)NJ=R-rKbPs>40KjzH*q^XKRx(ZBo(OjRwmUwi^n16J zJ1N`xx9gR#zMCrJz^G90$y?sDC$z<)*SGl%C-@3of~iz#Ass4|c$+gu{Tt!CgazXL z3y5*U_bIRgA(+8+rS)BL?DNxY`_v?OZzSRZA;D3}NV0^IieZ%Gx&)ikxUMlhF-+=6 zdjKl%x>L>kUa!Bp1ul^m+o{XEr`RN=+UUcgUD!NU?R8(7&vpE70@cQBzUM)rta?02 zT$g};-4r--w+@M9eeQYP0>`(Bko|mP;Ubc3Laq%e zDgr$rm(CT%Dq>oP1r0Kt#AGIn8DcVeULbKn-bx~nW}o_Hph!8IQ<2j|xSkajuA8zE zyQ9+XA!X(vjrr#1r~QJ(M47CzRqXBMJu8X^XJ^vi;aY>jYUSV9=QK-3N-)~Yl6L>_1JLE}pc(4qtYO$?_gw8aaOSP~Ua?8ITrr$l4Q~(&1 zSqhDl5f(J^af4E;D$T`(b&e6b#wJB-ZX}j-!|796BO1j5N zIE{vJ0qf>R{z8k6ajmz3GH=Yr{g*GtEq43*>2-vSLxQDNRcwu;9jR$bMPTF%z50Bj z+X2&xUTy(5EUiK|JIpn=)S6=gLKjmerqqWrCAPpcS9Mf&;0|md+c)d4>!_10flL&< z=agKB`g~w7hEeJ0LuOIcLy~u&;Xh~7#9MQzuk?m-l9%26ZS_NlHL+MU z*{7?0z{Gou&!WFdYozHC0RA`x4r`Y=O|d<~M*ro18~-rOo!$Mj0vKC4NLkubs9RmG zxzSQ&>(700$HFM9i;T$}v#B<>8Gj^{c8uCw;=_%v@@uyXQtOD0Z-`2sMcI{gfYo;z 
z(H@m*FJE&_jZ1}=zBppzRG2LADGXi$7RnhGoDI>V_;ZDSSn1Sg0jB&5=BM(lI$o^> zZ}D_quMx_>w(0C?P1Hjilv<lqb@N9Wcb@C!gj6v?~t#}YMSm3fF$o|Emh33?%^9(lZBWLG

hHU1tr|7vT=RK~rS5-S zyQi;T!z&J{e-&OZ&}8=We`vrOj3?k>evgC*|?Uu3Y%_^Ecn1 z-KX?S9#nDf@0|$PdQOtNr&o1M+51il-t+J7N_>tjN-$GNMyg(~B+j#CA;R8!h zW-U1AtTW^{$Yt58W;r(-o{W;af6o0J?sjPDw7q(!kYamDX;Gdq$>_=CZgfHyY}YIz zGKE0Yi!9hV{cZuvRveH|Z(;B+u2OG-OM`0Jzr8Cm8#314WxAnv{3X%Izm~CN;jGqu zcaQP;eHOpJL%Fa;1Z~$S12(5-Ewx-4p*;oSF*9{wqNl!OXArXL@v|myaht_-Q}nk<&j2)IC`iU8e0T zwC||1o6LV`XJDaIy^bswl`9*T*78=J_~^yN`04FCg|_gCLVI8Q!ux7_Gqk8HhlAY$ zw+>-dw%K48G@1jP$#^h|f(@QON8d8LGvH|vFuBQouoJFcL7#J(iS<~t5)Tca1gX9- z<=bGUah0=p;-wQ$Va;y1Z*jhcCbEfxaNW^28zd*OZ zPII^w2}zKBpfrcQ(c}H_DLbw~Bdxnj`Jl#eGIj`NpUwurWU$R4Z=h9s2-f~)HGCEG zHj8Bo-x}wFU)a7OqOR2{$s@lOr|Ks`Pd0Ej)%GNZB=~Q;YyS|=j&c~L1a2+yJp~c*ckkJ-i zhTv)Va`723U0dN4k|SsL+|I~<$iOuJqAp!h#yI>Gg0(d!4GaEkrQau4{Ry1^`7{g^0a!SFk- zaMS75bx84gO5&?8?59TJ3k|-b5c3z=wl&zprNt)e@_Sc}EMdpgCIvq|Kgfy08U3ql zvlA%Qe*0jZ__StXAr+%r@#>h%@X`)=nIQZrl_cSimnQS8RLSMsD_rABtLZ@MnJ0t- zLn9Kcj>Ea_=62`3;F{RzO%gV#I6U)kjckUJSl1KkdS&8~1FU+?edl$1f6eC74%GP^ z;h9aBIXiN$>idm@SNK(+V#dnN_M@hDB-ul;b}8{ zA5~gWgV~FdqyU>GXx^sJL`1(ojK6EHK8~Q%{22PQMw?q){cNs685+2?4>!sRdl_fH zvPc=n`$kZK^LM%5KRm(9O2PA9T!>Rp(a>~6{3O;iK>_b{a z-jvemDmMI4b0K)ZS&l|%8El3RV^sU;ObXpc1o>+3ZV_8t<8j292w$jbh~GkiqkLZ0 zV^Ubo18L}W2Hh@uC4J|x6Tg`)zU*|X!(|6(R&_E@6ZAmjl8wedaQ2gD!!6Gy7d3b0 z`?uWetZ%tL)I*u6SV6I~{j~=5K7&?}Q0(kqHijsmdH6X0hT0GhqXJz2L`dn=QczhI z#pt}%8O21?JA0(0EDFPkqW|!&fux*7v0dpqjpsJ6`qi008)B1$1Oo;xd^P#iU1yH7 z)flYmA1oN?svtTvLuej677R3Jydv(Oq_P8|s;r7I)-_xT1hSQ-ZQOFYSzWvJYChr2S(!T#v-#>#gW@+Ix)?c3dx0qGDK|bL=@4Yk->hU zZn;9C<0i4=D_v78(;hj@m!Ur|8-HH%t<<~411w0NRebO$G7@r^>C0L&TUurI9TT$L z->Q=L46;=tcFE(EUrPwuAJ@={OS{#iS}@h$8Md~A?~i5TPYR-Ta0INHr-*zPse0Mu zxZJWq^|;ds7Vx2$_-;qGOwsB%Sv9mR)DcAVK^jicKNWwLvWBVAI{YDonz;Hg(7)=b z06e+nF#CfVr9Jp3K+k5}xBFk>?7Qxeu)gv!uqlB)23w@E9MwH2fO6v_w#JaY#KoMw zhXGA!?N(6CP8F&SB&|H?b(UA4l3dqcc9X;Jq>cPA38PmXCFfX8#V_2@YIWf1S6_wC zRJl7h(&4%^cX2`7zR%qs5=u+nwA3s`1khU8diyukFyyhU%Q&aZzEde`97iJBO}Ipa zQ{dPqQYGvKAB36dY~_DTq48D>Q}c?N%%r8@M1a<}-z1T7uB>$V$SVW0g)%5w#$_+f zhJy<+Lv5SWma?3Ix5_QJoWZR^EGBRhx)`4t@+Cyqwqfm8_BWn=W-3Q9uOAsTY=D&Q zo(4!!X;I9+ZDW_t!ft&jUKs*^IlhUW1V3H^MG!`&O-nSNkQcK{`DGd9lLU#EZb>d{ zrYuu3?=FU(!Ojy5A+uSEQiUGV` zytjc&$i@;$g)&JQ(Ggy?iKv01Kt@fky#et@o8k+ck5Gx*cP*f7Z+gS4P`|8S&2qQO zZqfK7jXBjHCY^Vw0&Tyn3_o#d?k zMJEbByQSOZ55iT$xrT;;^jsh_q1B~8PwIt(;uX!lg`ehFT~^|Mc3eo7XBuxYr*M(B zm3zYU@jjd_JD(+cxo0MMvCnRMgArYBh>#nGczIrJYO8#v2?vWZ6t~`sffJZx?D}7tMC-71x0dp2nUcYIg$nX&VpFT zU+z|f!aE74{qH8g;2jY920QH%h^5Ov1Bm?fieP>V-XT{3?3R;Wp1?aMhj=Vf5Pf3%Q zTK@?DRU7X3rKFSi(fg5-ZaRFT8o!^$Q)B;geszLKlu~-cek*Jc9<*FFj<&`coL-pYPZ1i|mzeez| zaWkUMgDYWSD|r#RDem=bbS;2;Mdy6IeFM{i9f(sV6qsx@(>TN1G&5!!%A!>%bzg64 zd{ek+M`A}naoybq%1{wLb$SX1?&RI9XP{%a_+Epz9Qvdy;`bj$T9f;|^_XIDzm~Yl zu8#R6u1m0;Jl@>oxNMg`E?Ij}Z(8r8Wh)JMJ#UOu1=$cpTFGB+F!M3KM;qU+xuiib7BiQAnqU&vQj^ z^~o8nQ~djeox$RgtL;GYio#^W3!$&dfajAIRf|NWg0mF%Juw3k^IQYkq}=rnxCWll zPaKNo^sFXQq7BHajp*$`Sc9SA)nncP#;q6alZ{Gwn;RXmdrdn=P90n;;Zv8iK^QPh zmt}@Q9R4A&ML1rW8}Q(;&E+P_P2x~n()ubTC4vFUN<#J)L^DADP(_fA5}`ky^kB|I z;-h$5)RRs9__B_;l)k4=+dU&HAQ)#zvgNFekKx%%$sClo=o4ki2$_PrQb}&#b?V}LFYF36 z8Gmj-5hsTnx&-*}*J)>!+$0|?2JArkg>KZ^_b&){)nRJ}N0@s#s(&xCv?Pi0j<^$( zDUFV*=1RNz;qf% zV&D<$3ChrPgpFv#a^f;QeZ?ICqKm;B0dFO+dvJ=8BM!($UDt-60-F!*Zbm$^pTQb; zZ~V^m^+FGyfp6f!J9;dMeYTH!g7UNLF*`U?U`pOUn9?^Yj8yXoi{cNQAucs5O{KIT z_HJO@>!Z>{OUkl_x-8+qVfMo#an}E`O`B0rc3)!r?T*tV$L2j~M-3!ey98$O>vhL}XrPmu0B^<`5`+kkOCeXmmWu@U^{u_!fLJukI zhR@v&cH;&m0)WudI3DN~a!z@(U_ST*X0XJq!6xF;R-|#`Cg+q9fu&VK2=)q}+Nvny zVYvs_$ZX5+UDd^Vec@&a#vmOj8&+dZ9(1;r9Alu=1jR{U=<)hAL=aBTHe3ubmLi*5 zN<`>Rx^0Ih#05F_JSkLU`wuwumIj`si0&|cEsvyK)cD?@k!>}5u#23}+b+wMtfUM= 
[GIT binary patch data omitted]
z|L)-3k>Y_V%Qu|VOJxU=os(sch`gTscgpOZ;dng}eE)QMCZ7TZQ#G3w?TM5f^$)eB2Unt4WzcSQCzB5cPeRDmv@co5@ z4fNlk?EANKe8pTYND_YuwoCt$moCfzrsG*r*|&T?n6q(pFVfy~5&ApQi2~mtfgUzl zMZsmYZakW!(bA`aZwxK;M@^=)L7D<%hlrjUS7)13*@FR^W)ey&>Jo%mpYtH7kLv0# z3G17Pn*{$!^M!m;Kc{yB6N2j6gGXbrAz;Xzd)g(X*!1k|&X&ASTO7@X z+k5HDDMu(cfuJ1LQ7J+i<&uuM*F#N zDE_J0P|@sT>p)bdRa;ji{!4X)2v9>XxuiL{;^@?@*>!`nqRpN#g^H}0`nNBckF|}9 z&IOu9KEk656HzCH9QN$3O`m~DU#u_L@XJ3+ z68aaz`{Ce>-UtpfQ#F>hBzKsYO=Q~*e@am}w-IhO4h$Wr2T`MPxGQOnWIz>8;_#*a zRmZ&5fm)@S3#XFYz`$`Z#!hUKe{ZMu{MT<>TLT$!&Xj-NK5XU0^(F)rv_EJUFc?Zi!WU{gKn3k3t*xS>$O^T&fn)_{`Yd5367Pp2Ga(Q}l z-fvJF6Kv>$CCjSLZA0jfp@2ob>LdY?zq1QhqjSMj;4PQaR(FcJalOl`!Wwd)MI_0^ zeWw<*p_gy-5ZN!(zII85Jb!xm@ykrvl5FEbe`-LljpwDmxys>HSZ{`|T4OAJbJtt* zz-m8sH13+qz9%(4*sI;o3d@No1O;Ul{8FljdI#vx=_k@cFJnL!^mXVF9 z3evQZZkU<#zOMElLswzsrySLrwjdCgsk|s_ncR4yt4|)CfRhwt6(+n={*y)wzci4` zB`|Wtdp0XRia#w2X9B#^PSKZdapPO6WEwIYDDI+}T%*=Xeue)!d9TXb2Z`!os&xrk zIwDT4of~fY@-Yei?E5IyT>QJAnmt`OCoCuT01O&&gqONTm6H!dtIp17kjaGb;#Fcn z%`trKSTH z)8~0(M5`sENPm_LZG2L#_sgT116paMoZQq|b5}cXys^G0jU`0EBB02B$S#%6amoE*0D9!#^`~3D=sFw+D=cT9+XwBG!mO;sv^vAvY+IGxuyN|q+&Fvbl8C-r?;E} zi01_;rA0jDBIxN~CMF_$vLG-8iZvCBd;B+2#PAV$H#x>td+-bwUa*vqzODj%Q!QO|hxP);{qCJmdIW7Jn_M zWJvA}Y>yJK9B>w$IuYqgP049Xlcd%b1S&@;HWMNFWQF1oOi56fW=^dYjIgKYzV;p5pi$hI)i6;3> zOI!@~+CYwG#dp`npzWiBXIB<{LZ|0}ZwdU<`Q-;d89t_Z0Sp>-wIQ_l!b?F_5AYJm zccIx*vT$()PabVIlIMLkn-O@{rN8of61GLAhHoF51WF_GZ}6==GQK6sHwT6OrT?vj zta~#V#~U4-;GYs5ZTs(3ujQeiU&619m87@9^~AVkwKH6gO#{KvMPQQ}8N^TV>E*jj z26Fs)6L(okMubN zS9-sE9eQ@qtm|NM&>lILiHh%3mFciAFwm4poLTOL{3%h!Jy|A9nB610sOG9i#h;Xc z86OVw3J;_+-L&y>*;7~?;fRJb{G*cFCf%^3+kfp?RFEo z`kRJs;6_CIUFK-?tduFjS#VRnqB5|JJ36HqlL_3~0k3I9wW$t|F+N}Cr&9K$CNqHH zr@JCwBs`&z{9g`K7uB@nxueO8PVKjB6^Kl)CnBVkQEMqhIXH@~lGsG+YmG zT-b1>qOhlVYbntm>tW0qrGA8uC}1!Oo#k>&0l&c{sS;(M%9k*goq&qEE?26IUi4-n z6vLG|{c73P9Dzu6pQ0L)P^ze?+{Iacb$iBnqZ)IU^u@7v8STsO-=}?yNg|x(D$-gJ zn@#Ay+ag}Mf4tz!YJ}R*ks6ZgLL&tM`*Vc>&2(XOCq~Wx+Bs`hTIU6JW)lwhuoLNu z3nrs%tE*+2C$L!;y+tmr8fdFwiD`Ovg>@vhMLdnp`6evj>9S%oJaI>d0p;>qR(sv> zNmr(bWysYV%kT>@Z+0|>#Ev`n2C(sF`zGe8&idWBS(9aRe#V26zwpS|vrLQu|1;0~ zd8362@etWv?YTcRT4K}FRsH2_WSs~7w@7D;Hh0QfJ7C*nB!e}A1@gN3$i-#*QSC!< zd+vBZg3s1hLmluIs{~N$ZJtTcAU2`fSDrv7HII1}ZAMic2b3o?RR~kn6mA*pynga^ z54&R0ls;M=dqomb>C{lUZo6bPs(eoUY2FiYx*udgD&q6*Eq)#j~N9l^3OY1!(#UUD#Yw`cDmetJW0#hjTW@(6^FyhzRwe77jl zcy!3~o>I4fw%VIC3-{t2P{<4$nd?DGXQR`v4c_3=*^D=u+5G($Zn=)+{Vjcjk)0o5 z#s*!KM1Vy%M_xldD9_dC1uweWc zQ3SP7SDNP*)6W)cyA^tz6KypttfbaC8go;=7OJ?b2Z2+MLT)e!8E43;f75lKkEM<9 zj2&qDT8fnvj)(kCa+PDewt+4jxWm7nv#>9=fz98}@ia ze!)`e$B`6LIa#@dMWGCjJU42DA~EkTKNjc?sPJmp?YHiH2*Ux&)vrsxNHjc$b8Bx& z9B}>GPngh?2qtmksgTsJi#rBi1Z|*_;aRB5bxpk`T0O*CouBty{zMxgugTdgp^!F# zK7tptzn<&36StlKAnvtBT){Td{+~V<0+CuBi`-s~2sMrYN+4IoSWe+W)7h>;DhZHUEEj&HodE+UUzy3bEk2JFHYJEG#=e li8y# Date: Tue, 15 Jan 2019 16:46:19 -0800 Subject: [PATCH 056/145] Update README. (#1173) (#1176) --- README.md | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 6b3890b949c9..a98430a47301 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,26 @@ [![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) -# ArgoProj - Get stuff done with Kubernetes +# Argoproj - Get stuff done with Kubernetes ![Argo Image](argo.png) ## News -We are thrilled that BlackRock has developed an eventing framework for Argo and has decided to contribute it to the Argo Community. Please check out the new project and try [Argo Events](https://github.com/argoproj/argo-events)! 
+KubeCon 2018 in Seattle was the biggest KubeCon yet with 8000 developers attending. We connected with many existing and new Argoproj users and contributions, and gave away a lot of Argo T-shirts at our booth sponsored by Intuit! -If you actively use Argo in your organization and believe that your organization may be interested in actively participating in the Argo Community, please ask a representative to contact saradhi_sreegiriraju@intuit.com for additional information. +We were also super excited to see KubeCon presentations about Argo by Argo developers, users and partners. +* [CI/CD in Light Speed with K8s and Argo CD](https://www.youtube.com/watch?v=OdzH82VpMwI&feature=youtu.be) + * How Intuit uses Argo CD. +* [Automating Research Workflows at BlackRock](https://www.youtube.com/watch?v=ZK510prml8o&t=0s&index=169&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU) + * Why BlackRock created Argo Events and how they use it. +* [Machine Learning as Code](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU) + * How Kubeflow uses Argo Workflows as its core workflow engine and Argo CD to declaratively deploy ML pipelines and models. -## What is ArgoProj? +If you actively use Argo in your organization and your organization would be interested in participating in the Argo Community, please ask a representative to contact saradhi_sreegiriraju@intuit.com for additional information. -ArgoProj is a collection of tools for getting work done with Kubernetes. +## What is Argoproj? + +Argoproj is a collection of tools for getting work done with Kubernetes. * [Argo Workflows](https://github.com/argoproj/argo) - Container-native Workflow Engine * [Argo CD](https://github.com/argoproj/argo-cd) - Declarative GitOps Continuous Delivery * [Argo Events](https://github.com/argoproj/argo-events) - Event-based Dependency Manager @@ -93,5 +101,5 @@ Currently **officially** using Argo: ## Project Resources * Argo GitHub: https://github.com/argoproj -* Argo Slack: [click here to join](https://join.slack.com/t/argoproj/shared_invite/enQtMzExODU3MzIyNjYzLTA5MTFjNjI0Nzg3NzNiMDZiNmRiODM4Y2M1NWQxOGYzMzZkNTc1YWVkYTZkNzdlNmYyZjMxNWI3NjY2MDc1MzI) * Argo website: https://argoproj.github.io/ +* Argo Slack: [click here to join](https://join.slack.com/t/argoproj/shared_invite/enQtMzExODU3MzIyNjYzLTA5MTFjNjI0Nzg3NzNiMDZiNmRiODM4Y2M1NWQxOGYzMzZkNTc1YWVkYTZkNzdlNmYyZjMxNWI3NjY2MDc1MzI) From 1fc03144c55f987993c7777b190b1848fc3833cd Mon Sep 17 00:00:00 2001 From: Erik Parmann Date: Wed, 16 Jan 2019 23:30:46 +0100 Subject: [PATCH 057/145] Argo users: Equinor (#1175) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a98430a47301..3d7a5972bb07 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,7 @@ Currently **officially** using Argo: 1. [Cratejoy](https://www.cratejoy.com/) 1. [Cyrus Biotechnology](https://cyrusbio.com/) 1. [Datadog](https://www.datadoghq.com/) +1. [Equinor](https://www.equinor.com/) 1. [Gladly](https://gladly.com/) 1. [GitHub](https://github.com/) 1. 
From 1fc03144c55f987993c7777b190b1848fc3833cd Mon Sep 17 00:00:00 2001
From: Erik Parmann
Date: Wed, 16 Jan 2019 23:30:46 +0100
Subject: [PATCH 057/145] Argo users: Equinor (#1175)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index a98430a47301..3d7a5972bb07 100644
--- a/README.md
+++ b/README.md
@@ -79,6 +79,7 @@ Currently **officially** using Argo:
 1. [Cratejoy](https://www.cratejoy.com/)
 1. [Cyrus Biotechnology](https://cyrusbio.com/)
 1. [Datadog](https://www.datadoghq.com/)
+1. [Equinor](https://www.equinor.com/)
 1. [Gladly](https://gladly.com/)
 1. [GitHub](https://github.com/)
 1. [Google](https://www.google.com/intl/en/about/our-company/)

From 2b2651b0a7f5d6873c8470fad137d42f9b7d7240 Mon Sep 17 00:00:00 2001
From: Daisuke Taniwaki
Date: Thu, 17 Jan 2019 19:05:21 +0900
Subject: [PATCH 058/145] Do not mount unnecessary docker socket (#1178)

---
 workflow/controller/workflowpod.go      |  4 +-
 workflow/controller/workflowpod_test.go | 74 +++++++++++++++++++++++++
 2 files changed, 76 insertions(+), 2 deletions(-)

diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index 4c47e23b3b8d..b205c515b168 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -306,7 +306,7 @@ func (woc *wfOperationCtx) createVolumeMounts() []apiv1.VolumeMount {
 		volumeMountPodMetadata,
 	}
 	switch woc.controller.Config.ContainerRuntimeExecutor {
-	case common.ContainerRuntimeExecutorKubelet:
+	case common.ContainerRuntimeExecutorKubelet, common.ContainerRuntimeExecutorK8sAPI:
 		return volumeMounts
 	default:
 		return append(volumeMounts, volumeMountDockerSock)
@@ -318,7 +318,7 @@ func (woc *wfOperationCtx) createVolumes() []apiv1.Volume {
 		volumePodMetadata,
 	}
 	switch woc.controller.Config.ContainerRuntimeExecutor {
-	case common.ContainerRuntimeExecutorKubelet:
+	case common.ContainerRuntimeExecutorKubelet, common.ContainerRuntimeExecutorK8sAPI:
 		return volumes
 	default:
 		return append(volumes, volumeDockerSock)
diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go
index 5039994e0c9d..c5dfe4b8f121 100644
--- a/workflow/controller/workflowpod_test.go
+++ b/workflow/controller/workflowpod_test.go
@@ -4,6 +4,7 @@ import (
 	"testing"
 
 	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
+	"github.com/argoproj/argo/workflow/common"
 	"github.com/ghodss/yaml"
 	"github.com/stretchr/testify/assert"
 	apiv1 "k8s.io/api/core/v1"
@@ -192,3 +193,76 @@ func TestWorkflowControllerArchiveConfigUnresolvable(t *testing.T) {
 	_, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{})
 	assert.Error(t, err)
 }
+
+// TestVolumeAndVolumeMounts verifies the ability to carry forward volumes and volumeMounts from workflow.spec
+func TestVolumeAndVolumeMounts(t *testing.T) {
+	volumes := []apiv1.Volume{
+		{
+			Name: "volume-name",
+			VolumeSource: apiv1.VolumeSource{
+				EmptyDir: &apiv1.EmptyDirVolumeSource{},
+			},
+		},
+	}
+	volumeMounts := []apiv1.VolumeMount{
+		{
+			Name:      "volume-name",
+			MountPath: "/test",
+		},
+	}
+
+	// For Docker executor
+	{
+		woc := newWoc()
+		woc.wf.Spec.Volumes = volumes
+		woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
+		woc.controller.Config.ContainerRuntimeExecutor = common.ContainerRuntimeExecutorDocker
+
+		woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "")
+		podName := getPodName(woc.wf)
+		pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{})
+		assert.Nil(t, err)
+		assert.Equal(t, 3, len(pod.Spec.Volumes))
+		assert.Equal(t, "podmetadata", pod.Spec.Volumes[0].Name)
+		assert.Equal(t, "docker-sock", pod.Spec.Volumes[1].Name)
+		assert.Equal(t, "volume-name", pod.Spec.Volumes[2].Name)
+		assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts))
+		assert.Equal(t, "volume-name", pod.Spec.Containers[0].VolumeMounts[0].Name)
+	}
+
+	// For Kubelet executor
+	{
+		woc := newWoc()
+		woc.wf.Spec.Volumes = volumes
+		woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
+		woc.controller.Config.ContainerRuntimeExecutor = common.ContainerRuntimeExecutorKubelet
+
+		woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "")
+		podName := getPodName(woc.wf)
+		pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{})
+		assert.Nil(t, err)
+		assert.Equal(t, 2, len(pod.Spec.Volumes))
+		assert.Equal(t, "podmetadata", pod.Spec.Volumes[0].Name)
+		assert.Equal(t, "volume-name", pod.Spec.Volumes[1].Name)
+		assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts))
+		assert.Equal(t, "volume-name", pod.Spec.Containers[0].VolumeMounts[0].Name)
+	}
+
+	// For K8sAPI executor
+	{
+		woc := newWoc()
+		woc.wf.Spec.Volumes = volumes
+		woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
+		woc.controller.Config.ContainerRuntimeExecutor = common.ContainerRuntimeExecutorK8sAPI
+
+		woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "")
+		podName := getPodName(woc.wf)
+		pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{})
+		assert.Nil(t, err)
+		assert.Equal(t, 2, len(pod.Spec.Volumes))
+		assert.Equal(t, "podmetadata", pod.Spec.Volumes[0].Name)
+		assert.Equal(t, "volume-name", pod.Spec.Volumes[1].Name)
+		assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts))
+		assert.Equal(t, "volume-name", pod.Spec.Containers[0].VolumeMounts[0].Name)
+	}
+}
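Editor's note: the two switch statements in workflowpod.go above reduce to a single predicate, namely that only the default (docker) executor needs /var/run/docker.sock mounted into the pod; the kubelet and k8sapi executors inspect containers over HTTP APIs instead. A minimal standalone sketch of that predicate follows (the needsDockerSocket helper and the main harness are illustrative, not part of the patch):

```
package main

import "fmt"

// Executor names, mirroring the constants used by the workflow controller
// configuration (ContainerRuntimeExecutor* in workflow/common/common.go).
const (
	executorDocker  = "docker"
	executorKubelet = "kubelet"
	executorK8sAPI  = "k8sapi"
)

// needsDockerSocket reports whether a workflow pod must mount the docker
// socket. Only the docker executor talks to the docker daemon directly.
func needsDockerSocket(executor string) bool {
	switch executor {
	case executorKubelet, executorK8sAPI:
		return false
	default: // "docker", or unset, falls back to the docker executor
		return true
	}
}

func main() {
	for _, e := range []string{executorDocker, executorKubelet, executorK8sAPI, ""} {
		fmt.Printf("executor=%q mounts docker.sock: %v\n", e, needsDockerSocket(e))
	}
}
```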
From f07b5afeaf950f49f87cdffb5116e82c8b0d43a1 Mon Sep 17 00:00:00 2001
From: Alexander Matyushentsev
Date: Fri, 18 Jan 2019 12:59:09 -0800
Subject: [PATCH 059/145] Issue #1113 - Wait for daemon pods completion to handle annotations (#1177)

* Issue #1113 - Wait for daemon pods completion to handle annotations

* Add output artifacts to influxdb-ci example
---
 examples/influxdb-ci.yaml           |  4 ++++
 pkg/apis/workflow/v1alpha1/types.go |  4 ++--
 workflow/controller/operator.go     | 26 +++++++++++++++++++++-----
 3 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/examples/influxdb-ci.yaml b/examples/influxdb-ci.yaml
index 121a6fd2d8a0..d26c765fdd78 100644
--- a/examples/influxdb-ci.yaml
+++ b/examples/influxdb-ci.yaml
@@ -194,6 +194,10 @@ spec:
       - name: influxd
         path: /app
     daemon: true
+    outputs:
+      artifacts:
+      - name: data
+        path: /var/lib/influxdb/data
     container:
       image: debian:9.4
       readinessProbe:
diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go
index 22700b5b5ff9..02d9ea9067ab 100644
--- a/pkg/apis/workflow/v1alpha1/types.go
+++ b/pkg/apis/workflow/v1alpha1/types.go
@@ -561,7 +561,7 @@ func (ws *WorkflowStatus) Completed() bool {
 
 // Remove returns whether or not the node has completed execution
 func (n NodeStatus) Completed() bool {
-	return isCompletedPhase(n.Phase)
+	return isCompletedPhase(n.Phase) || n.IsDaemoned() && n.Phase != NodePending
 }
 
 // IsDaemoned returns whether or not the node is deamoned
@@ -574,7 +574,7 @@ func (n NodeStatus) IsDaemoned() bool {
 
 // Successful returns whether or not this node completed successfully
 func (n NodeStatus) Successful() bool {
-	return n.Phase == NodeSucceeded || n.Phase == NodeSkipped
+	return n.Phase == NodeSucceeded || n.Phase == NodeSkipped || n.IsDaemoned() && n.Phase != NodePending
 }
 
 // CanRetry returns whether the node should be retried or not.
diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go
index b6c39e75882b..ba16443e5ed1 100644
--- a/workflow/controller/operator.go
+++ b/workflow/controller/operator.go
@@ -442,7 +442,8 @@ func (woc *wfOperationCtx) podReconciliation() error {
 				woc.addOutputsToScope("workflow", node.Outputs, nil)
 				woc.updated = true
 			}
-			if woc.wf.Status.Nodes[pod.ObjectMeta.Name].Completed() {
+			node := woc.wf.Status.Nodes[pod.ObjectMeta.Name]
+			if node.Completed() && !node.IsDaemoned() {
 				woc.completedPods[pod.ObjectMeta.Name] = true
 			}
 		}
@@ -556,7 +557,12 @@ func assessNodeStatus(pod *apiv1.Pod, node *wfv1.NodeStatus) *wfv1.NodeStatus {
 		newPhase = wfv1.NodeSucceeded
 		newDaemonStatus = &f
 	case apiv1.PodFailed:
-		newPhase, message = inferFailedReason(pod)
+		// ignore pod failure for daemoned steps
+		if node.IsDaemoned() {
+			newPhase = wfv1.NodeSucceeded
+		} else {
+			newPhase, message = inferFailedReason(pod)
+		}
 		newDaemonStatus = &f
 	case apiv1.PodRunning:
 		newPhase = wfv1.NodeRunning
@@ -578,8 +584,8 @@ func assessNodeStatus(pod *apiv1.Pod, node *wfv1.NodeStatus) *wfv1.NodeStatus {
 				return nil
 			}
 		}
-		// proceed to mark node status as succeeded (and daemoned)
-		newPhase = wfv1.NodeSucceeded
+		// proceed to mark node status as running (and daemoned)
+		newPhase = wfv1.NodeRunning
 		t := true
 		newDaemonStatus = &t
 		log.Infof("Processing ready daemon pod: %v", pod.ObjectMeta.SelfLink)
@@ -1025,7 +1031,8 @@ func (woc *wfOperationCtx) markWorkflowPhase(phase wfv1.NodePhase, markCompleted
 
 	switch phase {
 	case wfv1.NodeSucceeded, wfv1.NodeFailed, wfv1.NodeError:
-		if markCompleted {
+		// wait for all daemon nodes to get terminated before marking workflow completed
+		if markCompleted && !woc.hasDaemonNodes() {
 			woc.log.Infof("Marking workflow completed")
 			woc.wf.Status.FinishedAt = metav1.Time{Time: time.Now().UTC()}
 			if woc.wf.ObjectMeta.Labels == nil {
@@ -1037,6 +1044,15 @@ func (woc *wfOperationCtx) markWorkflowPhase(phase wfv1.NodePhase, markCompleted
 	}
 }
 
+func (woc *wfOperationCtx) hasDaemonNodes() bool {
+	for _, node := range woc.wf.Status.Nodes {
+		if node.IsDaemoned() {
+			return true
+		}
+	}
+	return false
+}
+
 func (woc *wfOperationCtx) markWorkflowRunning() {
 	woc.markWorkflowPhase(wfv1.NodeRunning, false)
 }
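Editor's note: one subtlety in the Completed() and Successful() changes above is operator precedence. Go gives && higher precedence than ||, so the new expression parses as isCompletedPhase(n.Phase) || (n.IsDaemoned() && n.Phase != NodePending); a daemoned node counts as completed (and successful) as soon as it leaves Pending, even while its pod is still running. A self-contained sketch of that truth table, using simplified stand-ins for the real NodeStatus types:

```
package main

import "fmt"

type phase string

const (
	pending   phase = "Pending"
	running   phase = "Running"
	succeeded phase = "Succeeded"
	failed    phase = "Failed"
)

func isCompletedPhase(p phase) bool { return p == succeeded || p == failed }

// completed mirrors the patched NodeStatus.Completed: since && binds tighter
// than ||, a daemoned node is "completed" once it is past Pending.
func completed(p phase, daemoned bool) bool {
	return isCompletedPhase(p) || daemoned && p != pending
}

func main() {
	for _, p := range []phase{pending, running, succeeded} {
		fmt.Printf("phase=%-9s daemoned=false -> %v, daemoned=true -> %v\n",
			p, completed(p, false), completed(p, true))
	}
}
```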
From eaaad7d47257302f203bab24bce1b7d479453351 Mon Sep 17 00:00:00 2001
From: kshamajain99
Date: Thu, 24 Jan 2019 11:26:39 -0800
Subject: [PATCH 060/145] Increased S3 artifact retry time and added log (#1138)

---
 workflow/artifacts/s3/s3.go | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/workflow/artifacts/s3/s3.go b/workflow/artifacts/s3/s3.go
index cbbe325d9f3d..e58f2f404e16 100644
--- a/workflow/artifacts/s3/s3.go
+++ b/workflow/artifacts/s3/s3.go
@@ -33,9 +33,9 @@ func (s3Driver *S3ArtifactDriver) newS3Client() (argos3.S3Client, error) {
 
 // Load downloads artifacts from S3 compliant storage
 func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string) error {
-	err := wait.ExponentialBackoff(wait.Backoff{Duration: time.Millisecond * 10, Factor: 2.0, Steps: 5, Jitter: 0.1},
+	err := wait.ExponentialBackoff(wait.Backoff{Duration: time.Second * 2, Factor: 2.0, Steps: 5, Jitter: 0.1},
 		func() (bool, error) {
-
+			log.Infof("ExponentialBackoff in S3 Load for path: %s", path)
 			s3cli, err := s3Driver.newS3Client()
 			if err != nil {
 				log.Warnf("Failed to create new S3 client: %v", err)
@@ -46,7 +46,7 @@ func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string
 				return true, nil
 			}
 			if !argos3.IsS3ErrCode(origErr, "NoSuchKey") {
-				return false, origErr
+				return false, nil
 			}
 			// If we get here, the error was a NoSuchKey. The key might be a s3 "directory"
 			isDir, err := s3cli.IsDirectory(inputArtifact.S3.Bucket, inputArtifact.S3.Key)
@@ -70,8 +70,9 @@ func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string
 
 // Save saves an artifact to S3 compliant storage
 func (s3Driver *S3ArtifactDriver) Save(path string, outputArtifact *wfv1.Artifact) error {
-	err := wait.ExponentialBackoff(wait.Backoff{Duration: time.Millisecond * 10, Factor: 2.0, Steps: 5, Jitter: 0.1},
+	err := wait.ExponentialBackoff(wait.Backoff{Duration: time.Second * 2, Factor: 2.0, Steps: 5, Jitter: 0.1},
 		func() (bool, error) {
+			log.Infof("ExponentialBackoff in S3 Save for path: %s", path)
 			s3cli, err := s3Driver.newS3Client()
 			if err != nil {
 				log.Warnf("Failed to create new S3 client: %v", err)
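Editor's note: to put numbers on "increased retry time", and assuming the apimachinery wait.ExponentialBackoff semantics of the era (the condition is attempted Steps times, with a doubling, jittered sleep between attempts), the old 10 ms base gave roughly 10+20+40+80 = 150 ms of cumulative sleep, while the new 2 s base gives roughly 2+4+8+16 = 30 s. A small sketch of that arithmetic, with jitter ignored:

```
package main

import (
	"fmt"
	"time"
)

// backoff mirrors the wait.Backoff fields used in the patch.
type backoff struct {
	duration time.Duration // initial interval
	factor   float64       // multiplier applied after each attempt
	steps    int           // number of condition attempts
}

// totalSleep sums the sleeps between attempts (one fewer sleep than
// attempts), ignoring jitter, to compare the two retry windows.
func totalSleep(b backoff) time.Duration {
	var total time.Duration
	d := b.duration
	for i := 1; i < b.steps; i++ {
		total += d
		d = time.Duration(float64(d) * b.factor)
	}
	return total
}

func main() {
	before := backoff{duration: 10 * time.Millisecond, factor: 2.0, steps: 5}
	after := backoff{duration: 2 * time.Second, factor: 2.0, steps: 5}
	fmt.Println("old worst-case sleep:", totalSleep(before)) // 150ms
	fmt.Println("new worst-case sleep:", totalSleep(after))  // 30s
}
```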
From 8eb4c66639c5fd1a607c73a4d765468a99c43da1 Mon Sep 17 00:00:00 2001
From: Alexander Matyushentsev
Date: Thu, 24 Jan 2019 16:26:36 -0800
Subject: [PATCH 061/145] Issue #1123 - Fix 'kubectl get' failure if resource namespace is different from workflow namespace (#1171)

---
 cmd/argoexec/commands/resource.go | 23 +++++++++----
 workflow/executor/resource.go     | 57 ++++++++++++++++++++++---------
 2 files changed, 57 insertions(+), 23 deletions(-)

diff --git a/cmd/argoexec/commands/resource.go b/cmd/argoexec/commands/resource.go
index 240b72cb3664..d83c767ab18a 100644
--- a/cmd/argoexec/commands/resource.go
+++ b/cmd/argoexec/commands/resource.go
@@ -1,6 +1,7 @@
 package commands
 
 import (
+	"fmt"
 	"os"
 
 	"github.com/argoproj/argo/workflow/common"
@@ -35,20 +36,28 @@ func execResource(action string) error {
 		wfExecutor.AddError(err)
 		return err
 	}
-	resourceName, err := wfExecutor.ExecResource(action, common.ExecutorResourceManifestPath)
-	if err != nil {
+	isDelete := action == "delete"
+	if isDelete && (wfExecutor.Template.Resource.SuccessCondition != "" || wfExecutor.Template.Resource.FailureCondition != "" || len(wfExecutor.Template.Outputs.Parameters) > 0) {
+		err = fmt.Errorf("successCondition, failureCondition and outputs are not supported for delete action")
 		wfExecutor.AddError(err)
 		return err
 	}
-	err = wfExecutor.WaitResource(resourceName)
+	resourceNamespace, resourceName, err := wfExecutor.ExecResource(action, common.ExecutorResourceManifestPath, isDelete)
 	if err != nil {
 		wfExecutor.AddError(err)
 		return err
 	}
-	err = wfExecutor.SaveResourceParameters(resourceName)
-	if err != nil {
-		wfExecutor.AddError(err)
-		return err
+	if !isDelete {
+		err = wfExecutor.WaitResource(resourceNamespace, resourceName)
+		if err != nil {
+			wfExecutor.AddError(err)
+			return err
+		}
+		err = wfExecutor.SaveResourceParameters(resourceNamespace, resourceName)
+		if err != nil {
+			wfExecutor.AddError(err)
+			return err
+		}
 	}
 	return nil
 }
diff --git a/workflow/executor/resource.go b/workflow/executor/resource.go
index 00fc98fcbae6..2072eb23e215 100644
--- a/workflow/executor/resource.go
+++ b/workflow/executor/resource.go
@@ -3,11 +3,14 @@ package executor
 import (
 	"bufio"
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"os/exec"
 	"strings"
 	"time"
 
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
 	"github.com/argoproj/argo/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"
@@ -16,28 +19,38 @@ import (
 )
 
 // ExecResource will run kubectl action against a manifest
-func (we *WorkflowExecutor) ExecResource(action string, manifestPath string) (string, error) {
+func (we *WorkflowExecutor) ExecResource(action string, manifestPath string, isDelete bool) (string, string, error) {
 	args := []string{
 		action,
 	}
-	if action == "delete" {
+	output := "json"
+	if isDelete {
 		args = append(args, "--ignore-not-found")
+		output = "name"
 	}
 	args = append(args, "-f")
 	args = append(args, manifestPath)
 	args = append(args, "-o")
-	args = append(args, "name")
+	args = append(args, output)
 	cmd := exec.Command("kubectl", args...)
 	log.Info(strings.Join(cmd.Args, " "))
 	out, err := cmd.Output()
 	if err != nil {
 		exErr := err.(*exec.ExitError)
 		errMsg := strings.TrimSpace(string(exErr.Stderr))
-		return "", errors.New(errors.CodeBadRequest, errMsg)
+		return "", "", errors.New(errors.CodeBadRequest, errMsg)
+	}
+	if action == "delete" {
+		return "", "", nil
+	}
+	obj := unstructured.Unstructured{}
+	err = json.Unmarshal(out, &obj)
+	if err != nil {
+		return "", "", err
 	}
-	resourceName := strings.TrimSpace(string(out))
-	log.Infof(resourceName)
-	return resourceName, nil
+	resourceName := fmt.Sprintf("%s.%s/%s", obj.GroupVersionKind().Kind, obj.GroupVersionKind().Group, obj.GetName())
+	log.Infof("%s/%s", obj.GetNamespace(), resourceName)
+	return obj.GetNamespace(), resourceName, nil
 }
 
 // gjsonLabels is an implementation of labels.Labels interface
@@ -58,7 +71,7 @@ func (g gjsonLabels) Get(label string) string {
 }
 
 // WaitResource waits for a specific resource to satisfy either the success or failure condition
-func (we *WorkflowExecutor) WaitResource(resourceName string) error {
+func (we *WorkflowExecutor) WaitResource(resourceNamespace string, resourceName string) error {
 	if we.Template.Resource.SuccessCondition == "" && we.Template.Resource.FailureCondition == "" {
 		return nil
 	}
@@ -86,7 +99,7 @@ func (we *WorkflowExecutor) WaitResource(resourceName string) error {
 	// Poll intervall of 5 seconds serves as a backoff intervall in case of immediate result reader failure
 	err := wait.PollImmediateInfinite(time.Duration(time.Second*5),
 		func() (bool, error) {
-			isErrRetry, err := checkResourceState(resourceName, successReqs, failReqs)
+			isErrRetry, err := checkResourceState(resourceNamespace, resourceName, successReqs, failReqs)
 
 			if err == nil {
 				log.Infof("Returning from successful wait for resource %s", resourceName)
@@ -114,9 +127,9 @@ func (we *WorkflowExecutor) WaitResource(resourceName string) error {
 }
 
 // Function to do the kubectl get -w command and then waiting on json reading.
-func checkResourceState(resourceName string, successReqs labels.Requirements, failReqs labels.Requirements) (bool, error) {
+func checkResourceState(resourceNamespace string, resourceName string, successReqs labels.Requirements, failReqs labels.Requirements) (bool, error) {
 
-	cmd, reader, err := startKubectlWaitCmd(resourceName)
+	cmd, reader, err := startKubectlWaitCmd(resourceNamespace, resourceName)
 	if err != nil {
 		return false, err
 	}
@@ -179,8 +192,12 @@ func checkResourceState(resourceName string, successReqs labels.Requirements, fa
 }
 
 // Start Kubectl command Get with -w return error if unable to start command
-func startKubectlWaitCmd(resourceName string) (*exec.Cmd, *bufio.Reader, error) {
-	cmd := exec.Command("kubectl", "get", resourceName, "-w", "-o", "json")
+func startKubectlWaitCmd(resourceNamespace string, resourceName string) (*exec.Cmd, *bufio.Reader, error) {
+	args := []string{"get", resourceName, "-w", "-o", "json"}
+	if resourceNamespace != "" {
+		args = append(args, "-n", resourceNamespace)
+	}
+	cmd := exec.Command("kubectl", args...)
 	stdout, err := cmd.StdoutPipe()
 	if err != nil {
 		return nil, nil, errors.InternalWrapError(err)
@@ -216,7 +233,7 @@ func readJSON(reader *bufio.Reader) ([]byte, error) {
 }
 
 // SaveResourceParameters will save any resource output parameters
-func (we *WorkflowExecutor) SaveResourceParameters(resourceName string) error {
+func (we *WorkflowExecutor) SaveResourceParameters(resourceNamespace string, resourceName string) error {
 	if len(we.Template.Outputs.Parameters) == 0 {
 		log.Infof("No output parameters")
 		return nil
@@ -228,9 +245,17 @@ func (we *WorkflowExecutor) SaveResourceParameters(resourceName string) error {
 		}
 		var cmd *exec.Cmd
 		if param.ValueFrom.JSONPath != "" {
-			cmd = exec.Command("kubectl", "get", resourceName, "-o", fmt.Sprintf("jsonpath='%s'", param.ValueFrom.JSONPath))
+			args := []string{"get", resourceName, "-o", fmt.Sprintf("jsonpath='%s'", param.ValueFrom.JSONPath)}
+			if resourceNamespace != "" {
+				args = append(args, "-n", resourceNamespace)
+			}
+			cmd = exec.Command("kubectl", args...)
 		} else if param.ValueFrom.JQFilter != "" {
-			cmdStr := fmt.Sprintf("kubectl get %s -o json | jq -c '%s'", resourceName, param.ValueFrom.JQFilter)
+			resArgs := []string{resourceName}
+			if resourceNamespace != "" {
+				resArgs = append(resArgs, "-n", resourceNamespace)
+			}
+			cmdStr := fmt.Sprintf("kubectl get %s -o json | jq -c '%s'", strings.Join(resArgs, " "), param.ValueFrom.JQFilter)
 			cmd = exec.Command("sh", "-c", cmdStr)
 		} else {
 			continue
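Editor's note: the recurring pattern in this patch is to scope each kubectl invocation to the resource's own namespace when the manifest declares one, and otherwise leave the lookup to kubectl's current context, which matches the old behavior. A minimal sketch of that argument assembly (the kubectlGetArgs helper is illustrative, not part of the patch):

```
package main

import "fmt"

// kubectlGetArgs builds a `kubectl get` argument list, adding -n only when
// the resource's namespace is known. An empty namespace defers to kubectl's
// current context, preserving the pre-patch behavior.
func kubectlGetArgs(resourceNamespace, resourceName string) []string {
	args := []string{"get", resourceName, "-o", "json"}
	if resourceNamespace != "" {
		args = append(args, "-n", resourceNamespace)
	}
	return args
}

func main() {
	fmt.Println(kubectlGetArgs("", "job.batch/pi"))         // namespace from context
	fmt.Println(kubectlGetArgs("other-ns", "job.batch/pi")) // explicit namespace
}
```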
pulling unexpected cruft from +# a user's workspace, and are only building off of what is locked by dep. +vendor +dist \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 58c833e48377..2732baaa1dda 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,17 +15,17 @@ Go to https://github.com/argoproj/ ## How to suggest a new feature -Go to https://groups.google.com/forum/#!forum/argoproj -* Create a new topic to discuss your feature. +Go to https://github.com/argoproj/ +* Open an issue and discuss it. ## How to setup your dev environment ### Requirements -* Golang 1.10 +* Golang 1.11 * Docker * dep v0.5 * Mac Install: `brew install dep` -* gometalinter v2.0.5 +* gometalinter v2.0.12 ### Quickstart ``` @@ -36,9 +36,16 @@ $ make ``` ### Build workflow-controller and executor images -The following will build the workflow-controller and executor images tagged with the `latest` tag, then push to a personal dockerhub repository: +The following will build the release versions of workflow-controller and executor images tagged +with the `latest` tag, then push to a personal dockerhub repository, `mydockerrepo`: +``` +$ make controller-image executor-image IMAGE_TAG=latest IMAGE_NAMESPACE=mydockerrepo DOCKER_PUSH=true +``` +Building release versions of the images will be slow during development, since the build happens +inside a docker build context, which cannot re-use the golang build cache between builds. To build +images quicker (for development purposes), images can be built by adding DEV_IMAGE=true. ``` -$ make controller-image executor-image IMAGE_TAG=latest IMAGE_NAMESPACE=jessesuen DOCKER_PUSH=true +$ make controller-image executor-image IMAGE_TAG=latest IMAGE_NAMESPACE=mydockerrepo DOCKER_PUSH=true DEV_IMAGE=true ``` ### Build argo cli @@ -49,6 +56,6 @@ $ ./dist/argo version ### Deploying controller with alternative controller/executor images ``` -$ helm install argo/argo --set images.namespace=jessesuen --set +$ helm install argo/argo --set images.namespace=mydockerrepo --set images.controller workflow-controller:latest ``` diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000000..79330882d7db --- /dev/null +++ b/Dockerfile @@ -0,0 +1,87 @@ +#################################################################################################### +# Builder image +# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image +# Also used as the image in CI jobs so needs all dependencies +#################################################################################################### +FROM golang:1.11.4 as builder + +RUN apt-get update && apt-get install -y \ + git \ + make \ + wget \ + gcc \ + zip && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +WORKDIR /tmp + +# Install docker +ENV DOCKER_CHANNEL stable +ENV DOCKER_VERSION 18.09.1 +RUN wget -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/x86_64/docker-${DOCKER_VERSION}.tgz" && \ + tar --extract --file docker.tgz --strip-components 1 --directory /usr/local/bin/ && \ + rm docker.tgz + +# Install dep +ENV DEP_VERSION=0.5.0 +RUN wget https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -O /usr/local/bin/dep && \ + chmod +x /usr/local/bin/dep + +# Install gometalinter +ENV GOMETALINTER_VERSION=2.0.12 +RUN curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v${GOMETALINTER_VERSION}/gometalinter-${GOMETALINTER_VERSION}-linux-amd64.tar.gz | \ + tar -xzC 
"$GOPATH/bin" --exclude COPYING --exclude README.md --strip-components 1 -f- && \ + ln -s $GOPATH/bin/gometalinter $GOPATH/bin/gometalinter.v2 + + +#################################################################################################### +# Argo Build stage which performs the actual build of Argo binaries +#################################################################################################### +FROM builder as argo-build + +# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep +# to install all the packages of our dep lock file +COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml +COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock + +RUN cd ${GOPATH}/src/dummy && \ + dep ensure -vendor-only && \ + mv vendor/* ${GOPATH}/src/ && \ + rmdir vendor + +# Perform the build +WORKDIR /go/src/github.com/argoproj/argo +COPY . . +ARG MAKE_TARGET="controller executor cli-linux-amd64" +RUN make $MAKE_TARGET + + +#################################################################################################### +# argoexec +#################################################################################################### +FROM debian:9.6-slim as argoexec +# NOTE: keep the version synced with https://storage.googleapis.com/kubernetes-release/release/stable.txt +ENV KUBECTL_VERSION=1.13.1 +RUN apt-get update && \ + apt-get install -y curl jq procps git tar mime-support && \ + rm -rf /var/lib/apt/lists/* && \ + curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \ + chmod +x /usr/local/bin/kubectl +COPY --from=argo-build /go/src/github.com/argoproj/argo/dist/argoexec /usr/local/bin/ + + +#################################################################################################### +# workflow-controller +#################################################################################################### +FROM scratch as workflow-controller +COPY --from=argo-build /go/src/github.com/argoproj/argo/dist/workflow-controller /bin/ +ENTRYPOINT [ "workflow-controller" ] + + +#################################################################################################### +# argocli +#################################################################################################### +FROM scratch as argocli +COPY --from=argo-build /go/src/github.com/argoproj/argo/dist/argo-linux-amd64 /bin/argo +ENTRYPOINT [ "argo" ] diff --git a/Dockerfile-argoexec b/Dockerfile-argoexec deleted file mode 100644 index 159290c6f759..000000000000 --- a/Dockerfile-argoexec +++ /dev/null @@ -1,16 +0,0 @@ -FROM debian:9.5-slim - -RUN apt-get update && \ - apt-get install -y curl jq procps git tar mime-support && \ - rm -rf /var/lib/apt/lists/* && \ - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(uname -m|sed 's/x86_64/amd64/g')/kubectl && \ - chmod +x ./kubectl && \ - mv ./kubectl /bin/ - -ENV DOCKER_VERSION=18.06.0 -RUN curl -O https://download.docker.com/linux/static/stable/$(uname -m)/docker-${DOCKER_VERSION}-ce.tgz && \ - tar -xzf docker-${DOCKER_VERSION}-ce.tgz && \ - mv docker/docker /usr/local/bin/docker && \ - rm -rf ./docker - -COPY dist/argoexec /bin/ diff --git a/Dockerfile-builder b/Dockerfile-builder deleted file mode 100644 index 5eb5caacd77d..000000000000 --- a/Dockerfile-builder +++ /dev/null @@ -1,32 +0,0 @@ -FROM debian:9.5-slim - -RUN apt-get update && apt-get 
install -y \ - git \ - make \ - curl \ - wget && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -# Install go -ENV GO_VERSION 1.10.3 -ENV GOPATH /root/go -ENV PATH ${GOPATH}/bin:/usr/local/go/bin:${PATH} -RUN ARCH=$(uname -m|sed 's/x86_64/amd64/g') && \ - wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${ARCH}.tar.gz && \ - tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${ARCH}.tar.gz && \ - rm /go${GO_VERSION}.linux-${ARCH}.tar.gz && \ - wget https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-${ARCH} -O /usr/local/bin/dep && \ - chmod +x /usr/local/bin/dep && \ - mkdir -p ${GOPATH}/bin && \ - curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v2.0.5/gometalinter-2.0.5-linux-${ARCH}.tar.gz | \ - tar -xzC "$GOPATH/bin" --exclude COPYING --exclude README.md --strip-components 1 -f- - -# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep -# to install all the packages of our dep lock file -COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml -COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock -RUN cd ${GOPATH}/src/dummy && \ - dep ensure -vendor-only && \ - mv vendor/* ${GOPATH}/src/ && \ - rmdir vendor diff --git a/Dockerfile-ci-builder b/Dockerfile-ci-builder deleted file mode 100644 index 37a3c8a712cd..000000000000 --- a/Dockerfile-ci-builder +++ /dev/null @@ -1,12 +0,0 @@ -FROM golang:1.10.3 - -WORKDIR /tmp - -RUN curl -O https://download.docker.com/linux/static/stable/$(uname -m)/docker-18.06.0-ce.tgz && \ - tar -xzf docker-18.06.0-ce.tgz && \ - mv docker/docker /usr/local/bin/docker && \ - rm -rf ./docker && \ - wget https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-$(uname -m|sed 's/x86_64/amd64/g') -O /usr/local/bin/dep && \ - chmod +x /usr/local/bin/dep && \ - curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v2.0.5/gometalinter-2.0.5-linux-$(uname -m|sed 's/x86_64/amd64/g').tar.gz | \ - tar -xzC "$GOPATH/bin" --exclude COPYING --exclude README.md --strip-components 1 -f- diff --git a/Dockerfile-cli b/Dockerfile-cli deleted file mode 100644 index 39f6c45b9523..000000000000 --- a/Dockerfile-cli +++ /dev/null @@ -1,4 +0,0 @@ -FROM alpine:3.7 - -COPY dist/argo-linux-amd64 /bin/argo -ENTRYPOINT [ "/bin/argo" ] diff --git a/Dockerfile-workflow-controller b/Dockerfile-workflow-controller deleted file mode 100644 index b7694f7d0dc6..000000000000 --- a/Dockerfile-workflow-controller +++ /dev/null @@ -1,5 +0,0 @@ -FROM debian:9.4 - -COPY dist/workflow-controller /bin/ - -ENTRYPOINT [ "/bin/workflow-controller" ] diff --git a/Dockerfile.argoexec-dev b/Dockerfile.argoexec-dev new file mode 100644 index 000000000000..06df127ffc8b --- /dev/null +++ b/Dockerfile.argoexec-dev @@ -0,0 +1,12 @@ +#################################################################################################### +# argoexec-dev +#################################################################################################### +FROM debian:9.6-slim as argoexec-dev +# NOTE: keep the version synced with https://storage.googleapis.com/kubernetes-release/release/stable.txt +ENV KUBECTL_VERSION=1.13.1 +RUN apt-get update && \ + apt-get install -y curl jq procps git tar mime-support && \ + rm -rf /var/lib/apt/lists/* && \ + curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \ + chmod +x /usr/local/bin/kubectl +COPY argoexec /usr/local/bin/ diff --git a/Dockerfile.workflow-controller-dev 
b/Dockerfile.workflow-controller-dev new file mode 100644 index 000000000000..9c9c6d1f4649 --- /dev/null +++ b/Dockerfile.workflow-controller-dev @@ -0,0 +1,6 @@ +#################################################################################################### +# workflow-controller-dev +#################################################################################################### +FROM scratch as workflow-controller-dev +COPY workflow-controller /bin/ +ENTRYPOINT [ "workflow-controller" ] diff --git a/Makefile b/Makefile index a9dee64bece5..1e0b730763ea 100644 --- a/Makefile +++ b/Makefile @@ -9,13 +9,13 @@ GIT_COMMIT=$(shell git rev-parse HEAD) GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi) GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi) -BUILDER_IMAGE=argo-builder -# NOTE: the volume mount of ${DIST_DIR}/pkg below is optional and serves only -# to speed up subsequent builds by caching ${GOPATH}/pkg between builds. -BUILDER_CMD=docker run --rm \ - -v ${CURRENT_DIR}:/root/go/src/${PACKAGE} \ - -v ${DIST_DIR}/pkg:/root/go/pkg \ - -w /root/go/src/${PACKAGE} ${BUILDER_IMAGE} +# docker image publishing options +DOCKER_PUSH=false +IMAGE_TAG=latest +# perform static compilation +STATIC_BUILD=true +# build development images +DEV_IMAGE=false override LDFLAGS += \ -X ${PACKAGE}.version=${VERSION} \ @@ -23,22 +23,16 @@ override LDFLAGS += \ -X ${PACKAGE}.gitCommit=${GIT_COMMIT} \ -X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE} -# docker image publishing options -DOCKER_PUSH=false -IMAGE_TAG=latest +ifeq (${STATIC_BUILD}, true) +override LDFLAGS += -extldflags "-static" +endif ifneq (${GIT_TAG},) IMAGE_TAG=${GIT_TAG} override LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG} endif -ifneq (${IMAGE_NAMESPACE},) -override LDFLAGS += -X ${PACKAGE}/cmd/argo/commands.imageNamespace=${IMAGE_NAMESPACE} -endif -ifneq (${IMAGE_TAG},) -override LDFLAGS += -X ${PACKAGE}/cmd/argo/commands.imageTag=${IMAGE_TAG} -endif -ifeq (${DOCKER_PUSH},true) +ifeq (${DOCKER_PUSH}, true) ifndef IMAGE_NAMESPACE $(error IMAGE_NAMESPACE must be set to push images (e.g. IMAGE_NAMESPACE=argoproj)) endif @@ -50,99 +44,80 @@ endif # Build the project .PHONY: all -all: cli cli-image controller-image executor-image +all: cli controller-image executor-image -.PHONY: builder -builder: - docker build -t ${BUILDER_IMAGE} -f Dockerfile-builder . +.PHONY: builder-image +builder-image: + docker build -t $(IMAGE_PREFIX)argo-ci-builder:$(IMAGE_TAG) --target builder . 
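A quick usage sketch of the new build options introduced in this Makefile change (the variable names `DOCKER_PUSH`, `IMAGE_TAG`, `STATIC_BUILD`, and `DEV_IMAGE` come from the hunk above; the exact flag combinations shown are illustrative assumptions, not documented targets):

```sh
# build and push a statically linked controller image to a personal registry
make controller-image IMAGE_NAMESPACE=mydockerid IMAGE_TAG=dev DOCKER_PUSH=true

# build a dynamically linked CLI for local use
make cli STATIC_BUILD=false

# build the shared CI builder image from the `builder` stage of the multi-stage Dockerfile
make builder-image IMAGE_TAG=latest
```

Keeping these as plain `make` variables replaces the old docker-run `BUILDER_CMD` indirection with ordinary host-side builds, while `--target` selects the desired stage from the single multi-stage Dockerfile.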
.PHONY: cli cli: - CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${ARGO_CLI_NAME} ./cmd/argo + go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${ARGO_CLI_NAME} ./cmd/argo .PHONY: cli-linux-amd64 -cli-linux-amd64: builder - ${BUILDER_CMD} make cli \ - CGO_ENABLED=0 \ - GOOS=linux \ - GOARCH=amd64 \ - IMAGE_TAG=$(IMAGE_TAG) \ - IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) \ - LDFLAGS='-extldflags "-static"' \ - ARGO_CLI_NAME=argo-linux-amd64 +cli-linux-amd64: + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-linux-amd64 ./cmd/argo .PHONY: cli-linux-ppc64le -cli-linux-ppc64le: builder - ${BUILDER_CMD} make cli \ - CGO_ENABLED=0 \ - GOOS=linux \ - GOARCH=ppc64le \ - IMAGE_TAG=$(IMAGE_TAG) \ - IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) \ - LDFLAGS='-extldflags "-static"' \ - ARGO_CLI_NAME=argo-linux-ppc64le +cli-linux-ppc64le: + CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-linux-ppc64le ./cmd/argo .PHONY: cli-linux-s390x -cli-linux-s390x: builder - ${BUILDER_CMD} make cli \ - CGO_ENABLED=0 \ - GOOS=linux \ - GOARCH=s390x \ - IMAGE_TAG=$(IMAGE_TAG) \ - IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) \ - LDFLAGS='-extldflags "-static"' \ - ARGO_CLI_NAME=argo-linux-s390x +cli-linux-s390x: + CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-linux-s390x ./cmd/argo .PHONY: cli-linux cli-linux: cli-linux-amd64 cli-linux-ppc64le cli-linux-s390x .PHONY: cli-darwin -cli-darwin: builder - ${BUILDER_CMD} make cli \ - GOOS=darwin \ - IMAGE_TAG=$(IMAGE_TAG) \ - IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) \ - ARGO_CLI_NAME=argo-darwin-amd64 +cli-darwin: + CGO_ENABLED=0 GOOS=darwin go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-darwin-amd64 ./cmd/argo .PHONY: cli-windows -cli-windows: builder - ${BUILDER_CMD} make cli \ - GOARCH=amd64 \ - GOOS=windows \ - IMAGE_TAG=$(IMAGE_TAG) \ - IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) \ - LDFLAGS='-extldflags "-static"' \ - ARGO_CLI_NAME=argo-windows-amd64 - -.PHONY: controller -controller: - go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/workflow-controller ./cmd/workflow-controller +cli-windows: + CGO_ENABLED=0 GOARCH=amd64 GOOS=windows go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-windows-amd64 ./cmd/argo .PHONY: cli-image -cli-image: cli-linux - docker build -t $(IMAGE_PREFIX)argocli:$(IMAGE_TAG) -f Dockerfile-cli . +cli-image: + docker build -t $(IMAGE_PREFIX)argocli:$(IMAGE_TAG) --target argocli . @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocli:$(IMAGE_TAG) ; fi -.PHONY: controller-linux -controller-linux: builder - ${BUILDER_CMD} make controller +.PHONY: controller +controller: + CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/workflow-controller ./cmd/workflow-controller .PHONY: controller-image -controller-image: controller-linux - docker build -t $(IMAGE_PREFIX)workflow-controller:$(IMAGE_TAG) -f Dockerfile-workflow-controller . +controller-image: +ifeq ($(DEV_IMAGE), true) + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o workflow-controller ./cmd/workflow-controller + docker build -t $(IMAGE_PREFIX)workflow-controller:$(IMAGE_TAG) -f Dockerfile.workflow-controller-dev --target workflow-controller-dev . + rm -f workflow-controller +else + docker build -t $(IMAGE_PREFIX)workflow-controller:$(IMAGE_TAG) --target workflow-controller . 
+endif @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)workflow-controller:$(IMAGE_TAG) ; fi .PHONY: executor executor: go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argoexec ./cmd/argoexec -.PHONY: executor-linux -executor-linux: builder - ${BUILDER_CMD} make executor - +# The DEV_IMAGE versions of controller-image and executor-image are speed optimized development +# builds of workflow-controller and argoexec images respectively. It allows for faster image builds +# by re-using the golang build cache of the desktop environment. Ideally, we would not need extra +# Dockerfiles for these, and the targets would be defined as new targets in the main Dockerfile, but +# intelligent skipping of docker build stages requires DOCKER_BUILDKIT=1 enabled, which not all +# docker daemons support (including the daemon currently used by minikube). +# TODO: move these targets to the main Dockerfile once DOCKER_BUILDKIT=1 is more pervasive. +# NOTE: have to output ouside of dist directory since dist is under .dockerignore .PHONY: executor-image -executor-image: executor-linux - docker build -t $(IMAGE_PREFIX)argoexec:$(IMAGE_TAG) -f Dockerfile-argoexec . +executor-image: +ifeq ($(DEV_IMAGE), true) + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o argoexec ./cmd/argo + docker build -t $(IMAGE_PREFIX)argoexec:$(IMAGE_TAG) -f Dockerfile.argoexec-dev --target argoexec-dev . + rm -f argoexec +else + docker build -t $(IMAGE_PREFIX)argoexec:$(IMAGE_TAG) --target argoexec . +endif @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argoexec:$(IMAGE_TAG) ; fi .PHONY: lint @@ -153,8 +128,8 @@ lint: test: go test ./... -.PHONY: update-codegen -update-codegen: +.PHONY: codegen +codegen: ./hack/update-codegen.sh ./hack/update-openapigen.sh go run ./hack/gen-openapi-spec/main.go ${VERSION} > ${CURRENT_DIR}/api/openapi-spec/swagger.json @@ -167,8 +142,8 @@ verify-codegen: go run ./hack/gen-openapi-spec/main.go ${VERSION} > ${CURRENT_DIR}/dist/swagger.json diff ${CURRENT_DIR}/dist/swagger.json ${CURRENT_DIR}/api/openapi-spec/swagger.json -.PHONY: update-manifests -update-manifests: +.PHONY: manifests +manifests: ./hack/update-manifests.sh .PHONY: clean @@ -179,10 +154,22 @@ clean: precheckin: test lint verify-codegen .PHONY: release-precheck -release-precheck: precheckin +release-precheck: manifests codegen precheckin @if [ "$(GIT_TREE_STATE)" != "clean" ]; then echo 'git tree state is $(GIT_TREE_STATE)' ; exit 1; fi @if [ -z "$(GIT_TAG)" ]; then echo 'commit must be tagged to perform release' ; exit 1; fi @if [ "$(GIT_TAG)" != "v$(VERSION)" ]; then echo 'git tag ($(GIT_TAG)) does not match VERSION (v$(VERSION))'; exit 1; fi +.PHONY: release-clis +release-clis: cli-image + docker build --iidfile /tmp/argo-cli-build --target argo-build --build-arg MAKE_TARGET="cli-darwin cli-windows" . 
+	docker create --name tmp-cli `cat /tmp/argo-cli-build` + mkdir -p ${DIST_DIR} + docker cp tmp-cli:/go/src/github.com/argoproj/argo/dist/argo-darwin-amd64 ${DIST_DIR}/argo-darwin-amd64 + docker cp tmp-cli:/go/src/github.com/argoproj/argo/dist/argo-windows-amd64 ${DIST_DIR}/argo-windows-amd64 + docker rm tmp-cli + docker create --name tmp-cli $(IMAGE_PREFIX)argocli:$(IMAGE_TAG) + docker cp tmp-cli:/bin/argo ${DIST_DIR}/argo-linux-amd64 + docker rm tmp-cli + .PHONY: release -release: release-precheck controller-image cli-darwin cli-linux cli-windows executor-image cli-image +release: release-precheck controller-image executor-image cli-image release-clis diff --git a/gometalinter.json b/gometalinter.json index 408aa6a9c98a..42b62bf2758f 100644 --- a/gometalinter.json +++ b/gometalinter.json @@ -19,6 +19,7 @@ ], "Exclude": [ "pkg/client", - "vendor/" + "vendor/", + ".*warning.*fmt.Fprint" ] } diff --git a/workflow/executor/resource.go b/workflow/executor/resource.go index 2072eb23e215..fa671798e5ec 100644 --- a/workflow/executor/resource.go +++ b/workflow/executor/resource.go @@ -97,7 +97,7 @@ func (we *WorkflowExecutor) WaitResource(resourceNamespace string, resourceName // Start the condition result reader using PollImmediateInfinite // Poll interval of 5 seconds serves as a backoff interval in case of immediate result reader failure - err := wait.PollImmediateInfinite(time.Duration(time.Second*5), + err := wait.PollImmediateInfinite(time.Second*5, func() (bool, error) { isErrRetry, err := checkResourceState(resourceNamespace, resourceName, successReqs, failReqs) From 01ce5c3bcf0dde5536b596d48bd48a93b3f2eee0 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Thu, 24 Jan 2019 18:08:27 -0800 Subject: [PATCH 063/145] Add Docker Hub build hooks --- hooks/README.md | 16 ++++++++++++++++ hooks/build | 7 +++++++ hooks/push | 7 +++++++ 3 files changed, 30 insertions(+) create mode 100644 hooks/README.md create mode 100755 hooks/build create mode 100755 hooks/push diff --git a/hooks/README.md b/hooks/README.md new file mode 100644 index 000000000000..f9206a550b82 --- /dev/null +++ b/hooks/README.md @@ -0,0 +1,16 @@ +# Docker Hub Automated Build Hooks + +This directory contains Docker Hub [Automated Build](https://docs.docker.com/docker-hub/builds/advanced/) hooks. +This is needed since we publish multiple images as part of a single build: +* argoproj/workflow-controller:latest +* argoproj/argoexec:latest +* argoproj/argocli:latest + +It relies on the DOCKER_REPO and DOCKER_TAG environment variables that are set by Docker Hub during +the build. + +Hooks can be tested using: +``` +DOCKER_REPO=index.docker.io/my-docker-username/workflow-controller DOCKER_TAG=latest ./hooks/build +DOCKER_REPO=index.docker.io/my-docker-username/workflow-controller DOCKER_TAG=latest ./hooks/push +``` diff --git a/hooks/build b/hooks/build new file mode 100755 index 000000000000..3fa6e5ef98d5 --- /dev/null +++ b/hooks/build @@ -0,0 +1,7 @@ +#!/bin/bash -e +docker_org=$(echo $DOCKER_REPO | rev | cut -d / -f 2- | rev) +targets="workflow-controller argoexec argocli" +for target in $targets; do + image_name="${docker_org}/${target}:${DOCKER_TAG}" + docker build --target $target -t $image_name .
+done diff --git a/hooks/push b/hooks/push new file mode 100755 index 000000000000..b5c624c37080 --- /dev/null +++ b/hooks/push @@ -0,0 +1,7 @@ +#!/bin/bash -e +docker_org=$(echo $DOCKER_REPO | rev | cut -d / -f 2- | rev) +targets="workflow-controller argoexec argocli" +for target in $targets; do + image_name="${docker_org}/${target}:${DOCKER_TAG}" + docker push $image_name +done From 17250f3a51d545c49114882d0da6ca29eda7c6f2 Mon Sep 17 00:00:00 2001 From: Marcin Karkocha Date: Fri, 25 Jan 2019 07:11:58 +0100 Subject: [PATCH 064/145] Add documentation how to use parameter-file's (#1191) --- examples/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/examples/README.md b/examples/README.md index 1e2b8eb0066a..e2e87e068ba3 100644 --- a/examples/README.md +++ b/examples/README.md @@ -148,6 +148,16 @@ The argo CLI provides a convenient way to override parameters used to invoke the ```sh argo submit arguments-parameters.yaml -p message="goodbye world" ``` +In case of multiple parameters that can be overridden, the argo CLI also provides a command to load a whole parameter file in YAML or JSON format. Such a file can look like this: + +```yaml +message: goodbye world +``` + +To run, use the following command: +```sh +argo submit arguments-parameters.yaml --parameter-file params.yaml +``` Command line parameters can also be used to override the default entrypoint and invoke any template in the workflow spec. For example, if you add a new version of the `whalesay` template called `whalesay-caps` but you don't want to change the default entrypoint, you can invoke this from the command line as follows. ```sh From 831e2198e22503394acca1cce0dbcf8dcebb2931 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Thu, 24 Jan 2019 22:45:56 -0800 Subject: [PATCH 065/145] Issue #988 - Submit should not print logs to stdout unless output is 'wide' (#1192) --- cmd/argo/commands/submit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/argo/commands/submit.go b/cmd/argo/commands/submit.go index d9cd5c3a58ec..6210bf8d93f2 100644 --- a/cmd/argo/commands/submit.go +++ b/cmd/argo/commands/submit.go @@ -145,7 +145,7 @@ func unmarshalWorkflows(wfBytes []byte, strict bool) []wfv1.Workflow { func waitOrWatch(workflowNames []string, cliSubmitOpts cliSubmitOpts) { if cliSubmitOpts.wait { - WaitWorkflows(workflowNames, false, cliSubmitOpts.output == "json") + WaitWorkflows(workflowNames, false, !(cliSubmitOpts.output == "" || cliSubmitOpts.output == "wide")) } else if cliSubmitOpts.watch { watchWorkflow(workflowNames[0]) } From 311ad86f101c58a1de1cef313a1516b4c79e643f Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Fri, 25 Jan 2019 00:53:35 -0800 Subject: [PATCH 066/145] Fix missing docker binary in argoexec image.
Improve reuse of image layers --- Dockerfile | 22 +++++++++++++++------- Dockerfile.argoexec-dev | 9 +-------- Dockerfile.workflow-controller-dev | 2 +- Makefile | 13 +++++++++---- 4 files changed, 26 insertions(+), 20 deletions(-) diff --git a/Dockerfile b/Dockerfile index 79330882d7db..0c983b5881d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -35,6 +35,21 @@ RUN curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v${G ln -s $GOPATH/bin/gometalinter $GOPATH/bin/gometalinter.v2 +#################################################################################################### +# argoexec-base +# Used as the base for both the release and development version of argoexec +#################################################################################################### +FROM debian:9.6-slim as argoexec-base +# NOTE: keep the version synced with https://storage.googleapis.com/kubernetes-release/release/stable.txt +ENV KUBECTL_VERSION=1.13.1 +RUN apt-get update && \ + apt-get install -y curl jq procps git tar mime-support && \ + rm -rf /var/lib/apt/lists/* && \ + curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \ + chmod +x /usr/local/bin/kubectl +COPY --from=builder /usr/local/bin/docker /usr/local/bin/ + + #################################################################################################### # Argo Build stage which performs the actual build of Argo binaries #################################################################################################### @@ -61,13 +76,6 @@ RUN make $MAKE_TARGET # argoexec #################################################################################################### FROM debian:9.6-slim as argoexec -# NOTE: keep the version synced with https://storage.googleapis.com/kubernetes-release/release/stable.txt -ENV KUBECTL_VERSION=1.13.1 -RUN apt-get update && \ - apt-get install -y curl jq procps git tar mime-support && \ - rm -rf /var/lib/apt/lists/* && \ - curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \ - chmod +x /usr/local/bin/kubectl COPY --from=argo-build /go/src/github.com/argoproj/argo/dist/argoexec /usr/local/bin/ diff --git a/Dockerfile.argoexec-dev b/Dockerfile.argoexec-dev index 06df127ffc8b..e1437f7be80b 100644 --- a/Dockerfile.argoexec-dev +++ b/Dockerfile.argoexec-dev @@ -1,12 +1,5 @@ #################################################################################################### # argoexec-dev #################################################################################################### -FROM debian:9.6-slim as argoexec-dev -# NOTE: keep the version synced with https://storage.googleapis.com/kubernetes-release/release/stable.txt -ENV KUBECTL_VERSION=1.13.1 -RUN apt-get update && \ - apt-get install -y curl jq procps git tar mime-support && \ - rm -rf /var/lib/apt/lists/* && \ - curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \ - chmod +x /usr/local/bin/kubectl +FROM argoexec-base COPY argoexec /usr/local/bin/ diff --git a/Dockerfile.workflow-controller-dev b/Dockerfile.workflow-controller-dev index 9c9c6d1f4649..f2132614c852 100644 --- a/Dockerfile.workflow-controller-dev +++ b/Dockerfile.workflow-controller-dev @@ -1,6 +1,6 @@ 
#################################################################################################### # workflow-controller-dev #################################################################################################### -FROM scratch as workflow-controller-dev +FROM scratch COPY workflow-controller /bin/ ENTRYPOINT [ "workflow-controller" ] diff --git a/Makefile b/Makefile index 1e0b730763ea..d1d3142a4117 100644 --- a/Makefile +++ b/Makefile @@ -90,7 +90,7 @@ controller: controller-image: ifeq ($(DEV_IMAGE), true) CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o workflow-controller ./cmd/workflow-controller - docker build -t $(IMAGE_PREFIX)workflow-controller:$(IMAGE_TAG) -f Dockerfile.workflow-controller-dev --target workflow-controller-dev . + docker build -t $(IMAGE_PREFIX)workflow-controller:$(IMAGE_TAG) -f Dockerfile.workflow-controller-dev . rm -f workflow-controller else docker build -t $(IMAGE_PREFIX)workflow-controller:$(IMAGE_TAG) --target workflow-controller . @@ -101,6 +101,10 @@ endif executor: go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argoexec ./cmd/argoexec +.PHONY: executor-base-image +executor-base-image: + docker build -t argoexec-base --target argoexec-base . + # The DEV_IMAGE versions of controller-image and executor-image are speed optimized development # builds of workflow-controller and argoexec images respectively. It allows for faster image builds # by re-using the golang build cache of the desktop environment. Ideally, we would not need extra @@ -110,12 +114,13 @@ executor: # TODO: move these targets to the main Dockerfile once DOCKER_BUILDKIT=1 is more pervasive. # NOTE: have to output ouside of dist directory since dist is under .dockerignore .PHONY: executor-image -executor-image: ifeq ($(DEV_IMAGE), true) - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o argoexec ./cmd/argo - docker build -t $(IMAGE_PREFIX)argoexec:$(IMAGE_TAG) -f Dockerfile.argoexec-dev --target argoexec-dev . +executor-image: executor-base-image + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o argoexec ./cmd/argoexec + docker build -t $(IMAGE_PREFIX)argoexec:$(IMAGE_TAG) -f Dockerfile.argoexec-dev . rm -f argoexec else +executor-image: docker build -t $(IMAGE_PREFIX)argoexec:$(IMAGE_TAG) --target argoexec . endif @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argoexec:$(IMAGE_TAG) ; fi From 73504a24e885c6df9d1cceb4aa123c79eca7b7cd Mon Sep 17 00:00:00 2001 From: Julian Fischer Date: Mon, 28 Jan 2019 23:50:18 +0100 Subject: [PATCH 067/145] Fischerjulian adds ruby to rest docs (#1196) * Adds link to ruby kubernetes library. * Links to a ruby example on how to start a workflow --- docs/rest-api.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/rest-api.md b/docs/rest-api.md index ba0ae8f615bb..c88a5698de7c 100644 --- a/docs/rest-api.md +++ b/docs/rest-api.md @@ -29,6 +29,11 @@ The python kubernetes client has libraries for interacting with custom objects. The Java kubernetes client has libraries for interacting with custom objects. See: https://github.com/kubernetes-client/java/blob/master/kubernetes/docs/CustomObjectsApi.md +### Ruby +The Ruby kubernetes client has libraries for interacting with custom objects. See: +https://github.com/kubernetes-client/ruby/tree/master/kubernetes +See this [external Ruby example](https://github.com/fischerjulian/argo_workflows_ruby_example) on how to make use of this client. 
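Regardless of the client language, every one of these libraries ultimately talks to the same custom-resource endpoints that the Kubernetes API server exposes for the Workflow CRD. A minimal sketch with `curl` (the endpoint shape follows the standard Kubernetes CRD convention; the API server address, token, and namespace below are placeholders you must substitute for your own cluster):

```sh
APISERVER=https://localhost:6443   # placeholder API server address
TOKEN=mytoken                      # placeholder bearer token with access to workflows

# list workflows in the default namespace
curl -k -H "Authorization: Bearer $TOKEN" \
  "$APISERVER/apis/argoproj.io/v1alpha1/namespaces/default/workflows"

# submit a workflow from a JSON manifest
curl -k -X POST -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  --data @hello-world.json \
  "$APISERVER/apis/argoproj.io/v1alpha1/namespaces/default/workflows"
```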
+ ## OpenAPI An OpenAPI Spec is generated under [argoproj/argo/api/openapi-spec](https://github.com/argoproj/argo/blob/master/api/openapi-spec/swagger.json). This spec may be From eda7e08438d2314bb5eb178a1335a3c28555ab34 Mon Sep 17 00:00:00 2001 From: Ed Lee Date: Tue, 29 Jan 2019 11:47:40 -0800 Subject: [PATCH 068/145] Updated OWNERS (#1198) --- OWNERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/OWNERS b/OWNERS index 244615e3a069..585b9d1aa85c 100644 --- a/OWNERS +++ b/OWNERS @@ -5,3 +5,6 @@ approvers: - alexmt - edlee2121 - jessesuen + +reviewers: +- dtaniwaki From 14a432e75119e37d42715b7e83992789c6dac454 Mon Sep 17 00:00:00 2001 From: Ed Lee Date: Wed, 30 Jan 2019 03:13:03 -0800 Subject: [PATCH 069/145] Update community/README (#1197) --- community/README.md | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/community/README.md b/community/README.md index 49983f21548f..0a2e1d3518ea 100644 --- a/community/README.md +++ b/community/README.md @@ -4,15 +4,6 @@ Welcome to the Argo community! Argo is an open, community driven project to make it easy to use Kubernetes for getting useful work done. This document describes the organizational structure of the Argo Community including the roles, responsibilities and processes that govern Argo projects and community. - -We will have our **first community meeting on May 9th 10 am PST**. - -Map that to your local time with this [timezone table](https://www.google.com/search?q=1800+in+utc) - -See it on the web as [Google Calendar](https://calendar.google.com/calendar/embed?src=argoproj%40gmail.com&ctz=America%2FLos_Angeles) , or paste this [iCal url](https://calendar.google.com/calendar/ical/argoproj%40gmail.com/private-52229421c00ee71c176517df5bf1941e/basic.ics) into any iCal client. - -Meeting notes is available [here](https://docs.google.com/document/d/16aWGQ1Te5IRptFuAIFtg3rONRQqHC1Z3X9rdDHYhYfE/edit?usp=sharing). - ## Projects Argo is organized into a set of projects. Each project has at least one owner. The owner is responsible for publishing a roadmap and organizing community meetings for soliciting feedback, publishing meeting notes, and reporting on the current status of the project. @@ -20,9 +11,9 @@ Argo is organized into a set of projects. Each project has at least one owner. 
T The projects are: * Argo Workflows -* Argo CI * Argo CD * Argo Events +* Argo CI ## Community Roles and Responsibilities From bd249a83e119d6161fa1c593b09fb381db448a0d Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Wed, 30 Jan 2019 10:00:17 -0800 Subject: [PATCH 070/145] Issue #1128 - Use polling instead of fs notify to get annotation changes (#1194) --- Gopkg.lock | 9 -------- workflow/executor/executor.go | 42 +++++++++++++++++++++++++---------- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 08a10cc5dbfd..97d653bdbe02 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -152,14 +152,6 @@ revision = "afac545df32f2287a079e2dfb7ba2745a643747e" version = "v3.0.0" -[[projects]] - digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd" - name = "github.com/fsnotify/fsnotify" - packages = ["."] - pruneopts = "" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - [[projects]] branch = "master" digest = "1:ac2bf6881c6a96d07773dee3b9b2b369bc209c988505bd6cb283a8d549cb8699" @@ -1168,7 +1160,6 @@ "github.com/argoproj/pkg/time", "github.com/colinmarc/hdfs", "github.com/evanphx/json-patch", - "github.com/fsnotify/fsnotify", "github.com/ghodss/yaml", "github.com/go-openapi/spec", "github.com/gorilla/websocket", diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index cc55964e265c..bba57373675c 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -31,7 +31,6 @@ import ( "github.com/argoproj/argo/workflow/artifacts/s3" "github.com/argoproj/argo/workflow/common" argofile "github.com/argoproj/pkg/file" - "github.com/fsnotify/fsnotify" log "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -794,19 +793,38 @@ func (we *WorkflowExecutor) waitMainContainerStart() (string, error) { } } +func watchFileChanges(ctx context.Context, pollInterval time.Duration, filePath string) <-chan struct{} { + res := make(chan struct{}) + go func() { + defer close(res) + + var modTime *time.Time + for { + select { + case <-ctx.Done(): + return + default: + } + + file, err := os.Stat(filePath) + if err != nil { + log.Fatal(err) + } + newModTime := file.ModTime() + if modTime != nil && !modTime.Equal(file.ModTime()) { + res <- struct{}{} + } + modTime = &newModTime + time.Sleep(pollInterval) + } + }() + return res +} + // monitorAnnotations starts a goroutine which monitors for any changes to the pod annotations. // Emits an event on the returned channel upon any updates func (we *WorkflowExecutor) monitorAnnotations(ctx context.Context) <-chan struct{} { log.Infof("Starting annotations monitor") - // Create a fsnotify watcher on the local annotations file to listen for updates from the Downward API - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - err = watcher.Add(we.PodAnnotationsPath) - if err != nil { - log.Fatal(err) - } // Create a channel to listen for a SIGUSR2. Upon receiving of the signal, we force reload our annotations // directly from kubernetes API. 
The controller uses this to fast-track notification of annotations @@ -819,12 +837,12 @@ func (we *WorkflowExecutor) monitorAnnotations(ctx context.Context) <-chan struc // Create a channel which will notify a listener on new updates to the annotations annotationUpdateCh := make(chan struct{}) + annotationChanges := watchFileChanges(ctx, 10*time.Second, we.PodAnnotationsPath) go func() { for { select { case <-ctx.Done(): log.Infof("Annotations monitor stopped") - _ = watcher.Close() signal.Stop(sigs) close(sigs) close(annotationUpdateCh) @@ -833,7 +851,7 @@ func (we *WorkflowExecutor) monitorAnnotations(ctx context.Context) <-chan struc log.Infof("Received update signal. Reloading annotations from API") annotationUpdateCh <- struct{}{} we.setExecutionControl() - case <-watcher.Events: + case <-annotationChanges: log.Infof("%s updated", we.PodAnnotationsPath) err := we.LoadExecutionControl() if err != nil { From f92284d7108ebf92907008d8f12a0696ee467a43 Mon Sep 17 00:00:00 2001 From: Anna Winkler <3526523+annawinkler@users.noreply.github.com> Date: Fri, 1 Feb 2019 01:24:06 -0700 Subject: [PATCH 071/145] Minor spelling, formatting, and style updates. (#1193) --- examples/README.md | 237 +++++++++++++++++++++++++-------------------- 1 file changed, 130 insertions(+), 107 deletions(-) diff --git a/examples/README.md b/examples/README.md index e2e87e068ba3..078383c7655f 100644 --- a/examples/README.md +++ b/examples/README.md @@ -4,13 +4,13 @@ Argo is an open source project that provides container-native workflows for Kubernetes. Each step in an Argo workflow is defined as a container. -Argo is implemented as a Kubernetes CRD (Custom Resource Definition). As a result, Argo workflows can be managed using kubectl and natively integrates with other Kubernetes services such as volumes, secrets, and RBAC. The new Argo software is lightweight and installs in under a minute but provides complete workflow features including parameter substitution, artifacts, fixtures, loops and recursive workflows. +Argo is implemented as a Kubernetes CRD (Custom Resource Definition). As a result, Argo workflows can be managed using `kubectl` and natively integrates with other Kubernetes services such as volumes, secrets, and RBAC. The new Argo software is light-weight and installs in under a minute, and provides complete workflow features including parameter substitution, artifacts, fixtures, loops and recursive workflows. -Many of the Argo examples used in this walkthrough are available at https://github.com/argoproj/argo/tree/master/examples. If you like this project, please give us a star! +Many of the Argo examples used in this walkthrough are available at https://github.com/argoproj/argo/tree/master/examples. If you like this project, please give us a star! For a complete description of the Argo workflow spec, please refer to https://github.com/argoproj/argo/blob/master/pkg/apis/workflow/v1alpha1/types.go -## Table of Content +## Table of Contents - [Argo CLI](#argo-cli) - [Hello World!](#hello-world) @@ -32,12 +32,12 @@ For a complete description of the Argo workflow spec, please refer to https://gi - [Sidecars](#sidecars) - [Hardwired Artifacts](#hardwired-artifacts) - [Kubernetes Resources](#kubernetes-resources) -- [Docker-in-Docker (aka. 
DinD) Using Sidecars](#docker-in-docker-aka-dind-using-sidecars) -- [Continuous integration example](#continuous-integration-example) +- [Docker-in-Docker Using Sidecars](#docker-in-docker-aka-dind-using-sidecars) +- [Continuous Integration Example](#continuous-integration-example) ## Argo CLI -In case you want to follow along with this walkthrough, here's a quick overview of the most useful argo CLI commands. +In case you want to follow along with this walkthrough, here's a quick overview of the most useful argo command line interface (CLI) commands. [Install Argo here](https://github.com/argoproj/argo/blob/master/demo.md) @@ -50,23 +50,24 @@ argo logs hello-world-xxx-yyy # get logs from a specific step in a workflow argo delete hello-world-xxx # delete workflow ``` -You can also run workflow specs directly using kubectl but the argo CLI provides syntax checking, nicer output, and requires less typing. +You can also run workflow specs directly using `kubectl` but the Argo CLI provides syntax checking, nicer output, and requires less typing. + ```sh kubectl create -f hello-world.yaml kubectl get wf kubectl get wf hello-world-xxx -kubectl get po --selector=workflows.argoproj.io/workflow=hello-world-xxx --show-all #similar to argo +kubectl get po --selector=workflows.argoproj.io/workflow=hello-world-xxx --show-all # similar to argo kubectl logs hello-world-xxx-yyy -c main kubectl delete wf hello-world-xxx ``` ## Hello World! -Let's start by creating a very simple workflow template to echo "hello world" using the docker/whalesay container image from DockerHub. +Let's start by creating a very simple workflow template to echo "hello world" using the docker/whalesay container image from DockerHub. -You can run this directly from your shell with a simple docker command. +You can run this directly from your shell with a simple docker command: ``` bash% docker run docker/whalesay cowsay "hello world" _____________ @@ -90,32 +91,33 @@ This message shows that your installation appears to be working correctly. ``` Below, we run the same container on a Kubernetes cluster using an Argo workflow template. -Be sure to read the comments. They provide useful explanations. +Be sure to read the comments as they provide useful explanations. + ```yaml apiVersion: argoproj.io/v1alpha1 -kind: Workflow #new type of k8s spec +kind: Workflow # new type of k8s spec metadata: - generateName: hello-world- #name of workflow spec + generateName: hello-world- # name of the workflow spec spec: - entrypoint: whalesay #invoke the whalesay template + entrypoint: whalesay # invoke the whalesay template templates: - - name: whalesay #name of template + - name: whalesay # name of the template container: image: docker/whalesay command: [cowsay] args: ["hello world"] - resources: #don't use too much resources + resources: # limit the resources limits: memory: 32Mi cpu: 100m ``` -Argo adds a new `kind` of Kubernetes spec called a `Workflow`. -The above spec contains a single `template` called `whalesay` which runs the `docker/whalesay` container and invokes `cowsay "hello world"`. -The `whalesay` template is denoted as the `entrypoint` for the spec. The entrypoint specifies the initial template that should be invoked when the workflow spec is executed by Kubernetes. Being able to specify the entrypoint is more useful when there are more than one template defined in the Kubernetes workflow spec :-) + +Argo adds a new `kind` of Kubernetes spec called a `Workflow`. 
The above spec contains a single `template` called `whalesay` which runs the `docker/whalesay` container and invokes `cowsay "hello world"`. The `whalesay` template is the `entrypoint` for the spec. The entrypoint specifies the initial template that should be invoked when the workflow spec is executed by Kubernetes. Being able to specify the entrypoint is more useful when there is more than one template defined in the Kubernetes workflow spec. :-) ## Parameters Let's look at a slightly more complex workflow spec with parameters. + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -135,20 +137,22 @@ spec: - name: whalesay inputs: parameters: - - name: message #parameter declaration + - name: message # parameter declaration container: # run cowsay with that message input parameter as args image: docker/whalesay command: [cowsay] args: ["{{inputs.parameters.message}}"] ``` -This time, the `whalesay` template takes an input parameter named `message` which is passed as the `args` to the `cowsay` command. In order to reference parameters (e.g. "{{inputs.parameters.message}}"), the parameters must be enclosed in double quotes to escape the curly braces in YAML. + +This time, the `whalesay` template takes an input parameter named `message` that is passed as the `args` to the `cowsay` command. In order to reference parameters (e.g., ``"{{inputs.parameters.message}}"``), the parameters must be enclosed in double quotes to escape the curly braces in YAML. The argo CLI provides a convenient way to override parameters used to invoke the entrypoint. For example, the following command would bind the `message` parameter to "goodbye world" instead of the default "hello world". ```sh argo submit arguments-parameters.yaml -p message="goodbye world" ``` + -In case of multiple parameters that can be overridden, the argo CLI also provides a command to load a whole parameter file in YAML or JSON format. Such a file can look like this: + +In case of multiple parameters that can be overridden, the argo CLI provides a command to load parameter files in YAML or JSON format. Here is an example of that kind of parameter file: ```yaml message: goodbye world ``` To run, use the following command: ```sh argo submit arguments-parameters.yaml --parameter-file params.yaml ``` -Command line parameters can also be used to override the default entrypoint and invoke any template in the workflow spec. For example, if you add a new version of the `whalesay` template called `whalesay-caps` but you don't want to change the default entrypoint, you can invoke this from the command line as follows. +Command-line parameters can also be used to override the default entrypoint and invoke any template in the workflow spec. For example, if you add a new version of the `whalesay` template called `whalesay-caps` but you don't want to change the default entrypoint, you can invoke this from the command line as follows: + ```sh argo submit arguments-parameters.yaml --entrypoint whalesay-caps ``` -By using a combination of the `--entrypoint` and `-p` parameters, you can invoke any template in the workflow spec with any parameter that you like. +By using a combination of the `--entrypoint` and `-p` parameters, you can call any template in the workflow spec with any parameter that you like. -The values set in the `spec.arguments.parameters` are globally scoped and can be accessed via `{{workflow.parameters.parameter_name}}`. This can be useful to pass information to multiple steps in a workflow. For example, if you wanted to run your workflows with different logging levels, set in environment of each container, you could have a set up similar to this: +
For example, if you wanted to run your workflows with different logging levels, set in environment of each container, you could have a set up similar to this: +The values set in the `spec.arguments.parameters` are globally scoped and can be accessed via `{{workflow.parameters.parameter_name}}`. This can be useful to pass information to multiple steps in a workflow. For example, if you wanted to run your workflows with different logging levels that are set in the environment of each container, you could have a YAML file similar to this one: ```yaml apiVersion: argoproj.io/v1alpha1 @@ -197,11 +202,12 @@ spec: command: [runB] ``` -In this workflow, both steps `A` and `B` would have the same log level set to `INFO` and can easily be changed between workflow submissions using the `-p` flag. +In this workflow, both steps `A` and `B` would have the same log-level set to `INFO` and can easily be changed between workflow submissions using the `-p` flag. ## Steps -In this example, we'll see how to create multi-step workflows as well as how to define more than one template in a workflow spec and how to create nested workflows. Be sure to read the comments. They provide useful explanations. +In this example, we'll see how to create multi-step workflows, how to define more than one template in a workflow spec, and how to create nested workflows. Be sure to read the comments as they provide useful explanations. + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -216,19 +222,19 @@ spec: # Instead of just running a container # This template has a sequence of steps steps: - - - name: hello1 #hello1 is run before the following steps + - - name: hello1 # hello1 is run before the following steps template: whalesay arguments: parameters: - name: message value: "hello1" - - - name: hello2a #double dash => run after previous step + - - name: hello2a # double dash => run after previous step template: whalesay arguments: parameters: - name: message value: "hello2a" - - name: hello2b #single dash => run in parallel with previous step + - name: hello2b # single dash => run in parallel with previous step template: whalesay arguments: parameters: @@ -245,10 +251,9 @@ spec: command: [cowsay] args: ["{{inputs.parameters.message}}"] ``` -The above workflow spec prints three different flavors of "hello". -The `hello-hello-hello` template consists of three `steps`. -The first step named `hello1` will be run in sequence whereas the next two steps named `hello2a` and `hello2b` will be run in parallel with each other. -Using the argo CLI command, we can graphically display the execution history of this workflow spec, which shows that the steps named `hello2a` and `hello2b` ran in parallel with each other. + +The above workflow spec prints three different flavors of "hello". The `hello-hello-hello` template consists of three `steps`. The first step named `hello1` will be run in sequence whereas the next two steps named `hello2a` and `hello2b` will be run in parallel with each other. Using the argo CLI command, we can graphically display the execution history of this workflow spec, which shows that the steps named `hello2a` and `hello2b` ran in parallel with each other. + ``` STEP PODNAME ✔ arguments-parameters-rbm92 @@ -259,12 +264,9 @@ STEP PODNAME ## DAG -As an alternative to specifying sequences of steps, you can define the workflow as a graph by specifying the dependencies of each task. -This can be simpler to maintain for complex workflows and allows for maximum parallelism when running tasks. 
+As an alternative to specifying sequences of steps, you can define the workflow as a directed-acyclic graph (DAG) by specifying the dependencies of each task. This can be simpler to maintain for complex workflows and allows for maximum parallelism when running tasks. -In the following workflow, step `A` runs first, as it has no dependencies. -Once `A` has finished, steps `B` and `C` run in parallel. -Finally, once `B` and `C` have completed, step `D` can run. +In the following workflow, step `A` runs first, as it has no dependencies. Once `A` has finished, steps `B` and `C` run in parallel. Finally, once `B` and `C` have completed, step `D` can run. ```yaml apiVersion: argoproj.io/v1alpha1 @@ -305,18 +307,18 @@ spec: parameters: [{name: message, value: D}] ``` -The dependency graph may have [multiple roots](./dag-multiroot.yaml). -The templates called from a dag or steps template can themselves be dag or steps templates. This can allow for complex workflows to be split into manageable pieces. +The dependency graph may have [multiple roots](./dag-multiroot.yaml). The templates called from a DAG or steps template can themselves be DAG or steps templates. This can allow for complex workflows to be split into manageable pieces. ## Artifacts **Note:** -You will need to have configured an artifact repository to run this example. +You will need to configure an artifact repository to run this example. [Configuring an artifact repository here](https://github.com/argoproj/argo/blob/master/ARTIFACT_REPO.md). When running workflows, it is very common to have steps that generate or consume artifacts. Often, the output artifacts of one step may be used as input artifacts to a subsequent step. -The below workflow spec consists of two steps that run in sequence. The first step named `generate-artifact` will generate an artifact using the `whalesay` template which will be consumed by the second step named `print-message` that consumes the generated artifact. +The below workflow spec consists of two steps that run in sequence. The first step named `generate-artifact` will generate an artifact using the `whalesay` template that will be consumed by the second step named `print-message` that then consumes the generated artifact. + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -362,14 +364,13 @@ spec: command: [sh, -c] args: ["cat /tmp/message"] ``` -The `whalesay` template uses the `cowsay` command to generate a file named `/tmp/hello-world.txt`. It then `outputs` this file as an artifact named `hello-art`. In general, the artifact's `path` may be a directory rather than just a file. -The `print-message` template takes an input artifact named `message`, unpacks it at the `path` named `/tmp/message` and then prints the contents of `/tmp/message` using the `cat` command. -The `artifact-example` template passes the `hello-art` artifact generated as an output of the `generate-artifact` step as the `message` input artifact to the `print-message` step. -DAG templates use the tasks prefix to refer to another task, for example `{{tasks.generate-artifact.outputs.artifacts.hello-art}}`. + +The `whalesay` template uses the `cowsay` command to generate a file named `/tmp/hello-world.txt`. It then `outputs` this file as an artifact named `hello-art`. In general, the artifact's `path` may be a directory rather than just a file. 
The `print-message` template takes an input artifact named `message`, unpacks it at the `path` named `/tmp/message` and then prints the contents of `/tmp/message` using the `cat` command. +The `artifact-example` template passes the `hello-art` artifact generated as an output of the `generate-artifact` step as the `message` input artifact to the `print-message` step. DAG templates use the tasks prefix to refer to another task, for example `{{tasks.generate-artifact.outputs.artifacts.hello-art}}`. ## The Structure of Workflow Specs -We now know enough about the basic components of a workflow spec to review its basic structure. +We now know enough about the basic components of a workflow spec to review its basic structure: - Kubernetes header including metadata - Spec body - Entrypoint invocation with optionally arguments @@ -384,11 +385,11 @@ We now know enough about the basic components of a workflow spec to review its b To summarize, workflow specs are composed of a set of Argo templates where each template consists of an optional input section, an optional output section and either a container invocation or a list of steps where each step invokes another template. -Note that the controller section of the workflow spec will accept the same options as the controller section of a pod spec, including but not limited to env vars, secrets, and volume mounts. Similarly, for volume claims and volumes. +Note that the controller section of the workflow spec will accept the same options as the controller section of a pod spec, including but not limited to environment variables, secrets, and volume mounts. Similarly, for volume claims and volumes. ## Secrets -Argo supports the same secrets syntax and mechanisms as Kubernetes Pod specs, which allows access to secrets as environment variables or volume mounts. -- https://kubernetes.io/docs/concepts/configuration/secret/ + +Argo supports the same secrets syntax and mechanisms as Kubernetes Pod specs, which allows access to secrets as environment variables or volume mounts. See the (Kubernetes documentation)[https://kubernetes.io/docs/concepts/configuration/secret/] for more information. ```yaml # To run this example, first create the secret by running: @@ -428,7 +429,8 @@ spec: ``` ## Scripts & Results -Often times, we just want a template that executes a script specified as a here-script (aka. here document) in the workflow spec. +Often, we just want a template that executes a script specified as a here-script (also known as a `here document`) in the workflow spec. This example shows how to do that: + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -481,13 +483,15 @@ spec: command: [sh, -c] args: ["echo result was: {{inputs.parameters.message}}"] ``` -The `script` keyword allows the specification of the script body using the `source` tag. This creates a temporary file containing the script body and then passes the name of the temporary file as the final parameter to `command`, which should be an interpreter that executes the script body.. + +The `script` keyword allows the specification of the script body using the `source` tag. This creates a temporary file containing the script body and then passes the name of the temporary file as the final parameter to `command`, which should be an interpreter that executes the script body. The use of the `script` feature also assigns the standard output of running the script to a special output parameter named `result`. 
This allows you to use the result of running the script itself in the rest of the workflow spec. In this example, the result is simply echoed by the print-message template. ## Output Parameters Output parameters provide a general mechanism to use the result of a step as a parameter rather than as an artifact. This allows you to use the result from any type of step, not just a `script`, for conditional tests, loops, and arguments. Output parameters work similarly to `script result` except that the value of the output parameter is set to the contents of a generated file rather than the contents of `stdout`. + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -512,12 +516,12 @@ spec: container: image: docker/whalesay:latest command: [sh, -c] - args: ["echo -n hello world > /tmp/hello_world.txt"] #generate the content of hello_world.txt + args: ["echo -n hello world > /tmp/hello_world.txt"] # generate the content of hello_world.txt outputs: parameters: - - name: hello-param #name of output parameter + - name: hello-param # name of output parameter valueFrom: - path: /tmp/hello_world.txt #set the value of hello-param to the contents of this hello-world.txt + path: /tmp/hello_world.txt # set the value of hello-param to the contents of this hello-world.txt - name: print-message inputs: @@ -533,7 +537,8 @@ DAG templates use the tasks prefix to refer to another task, for example `{{task ## Loops -When writing workflows, it is often very useful to be able to iterate over a set of inputs. +When writing workflows, it is often very useful to be able to iterate over a set of inputs as shown in this example: + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -550,9 +555,9 @@ spec: parameters: - name: message value: "{{item}}" - withItems: #invoke whalesay once for each item in parallel - - hello world #item 1 - - goodbye world #item 2 + withItems: # invoke whalesay once for each item in parallel + - hello world # item 1 + - goodbye world # item 2 - name: whalesay inputs: @@ -564,7 +569,8 @@ spec: args: ["{{inputs.parameters.message}}"] ``` -We can also iterate over a sets of items. +We can also iterate over sets of items: + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -600,7 +606,8 @@ spec: args: [/etc/os-release] ``` -We can pass lists of items as parameters. +We can pass lists of items as parameters: + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -610,7 +617,7 @@ spec: entrypoint: loop-param-arg-example arguments: parameters: - - name: os-list #a list of items + - name: os-list # a list of items value: | [ { "image": "debian", "tag": "9.1" }, @@ -633,7 +640,7 @@ spec: value: "{{item.image}}" - name: tag value: "{{item.tag}}" - withParam: "{{inputs.parameters.os-list}}" #parameter specifies the list to iterate over + withParam: "{{inputs.parameters.os-list}}" # parameter specifies the list to iterate over # This template is the same as in the previous example - name: cat-os-release @@ -690,7 +697,9 @@ spec: ``` ## Conditionals -We also support conditional execution. 
We also support conditional execution as shown in this example: + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -706,10 +715,10 @@ spec: template: flip-coin # evaluate the result in parallel - - name: heads - template: heads #invoke heads template if "heads" + template: heads # call heads template if "heads" when: "{{steps.flip-coin.outputs.result}} == heads" - name: tails - template: tails #invoke tails template if "tails" + template: tails # call tails template if "tails" when: "{{steps.flip-coin.outputs.result}} == tails" # Return heads or tails based on a random number @@ -736,7 +745,9 @@ spec: ``` ## Recursion + Templates can recursively invoke each other! In this variation of the above coin-flip template, we continue to flip coins until it comes up heads. + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -752,9 +763,9 @@ spec: template: flip-coin # evaluate the result in parallel - - name: heads - template: heads #invoke heads template if "heads" + template: heads # call heads template if "heads" when: "{{steps.flip-coin.outputs.result}} == heads" - - name: tails #keep flipping coins if "tails" + - name: tails # keep flipping coins if "tails" template: coinflip when: "{{steps.flip-coin.outputs.result}} == tails" @@ -775,6 +786,7 @@ spec: ``` Here's the result of a couple of runs of coinflip for comparison. + ``` argo get coinflip-recursive-tzcb5 STEP PODNAME MESSAGE ✔ coinflip-recursive-vhph5 ├---✔ flip-coin coinflip-recursive-vhph5-2123890397 └-·-✔ heads coinflip-recursive-vhph5-128690560 └-○ tails STEP PODNAME MESSAGE ✔ coinflip-recursive-tzcb5 ├---✔ flip-coin coinflip-recursive-tzcb5-322836820 └-·-✔ tails ├---✔ flip-coin coinflip-recursive-tzcb5-1863890320 └-·-✔ tails ├---✔ flip-coin coinflip-recursive-tzcb5-1768147140 └-·-✔ tails ├---✔ flip-coin coinflip-recursive-tzcb5-4080411136 └-·-✔ heads coinflip-recursive-tzcb5-4080323273 └-○ tails ``` -In the first run, the coin immediately comes up heads and we stop. -In the second run, the coin comes up tail three times before it finally comes up heads and we stop. +In the first run, the coin immediately comes up heads and we stop. In the second run, the coin comes up tails three times before it finally comes up heads and we stop. ## Exit handlers -An exit handler is a template that always executes, irrespective of success or failure, at the end of the workflow. +An exit handler is a template that *always* executes, irrespective of success or failure, at the end of the workflow. Some common use cases of exit handlers are: - cleaning up after a workflow runs -- sending notifications of workflow status (e.g. e-mail/slack) -- posting the pass/fail status to a webhook result (e.g. github build result) +- sending notifications of workflow status (e.g., e-mail/Slack) +- posting the pass/fail status to a webhook result (e.g. GitHub build result) - resubmitting or submitting another workflow ```yaml @@ -819,7 +830,7 @@ metadata: generateName: exit-handlers- spec: entrypoint: intentional-fail - onExit: exit-handler #invoke exit-hander template at end of the workflow + onExit: exit-handler # invoke exit-handler template at end of the workflow templates: # primary workflow template - name: intentional-fail @@ -860,7 +871,8 @@ spec: ``` ## Timeouts -To limit the elapsed time for a workflow, you can set `activeDeadlineSeconds`. +To limit the elapsed time for a workflow, you can set the variable `activeDeadlineSeconds`. + ```yaml # To enforce a timeout for a container template, specify a value for activeDeadlineSeconds.
apiVersion: argoproj.io/v1alpha1 @@ -875,10 +887,11 @@ spec: image: alpine:latest command: [sh, -c] args: ["echo sleeping for 1m; sleep 60; echo done"] - activeDeadlineSeconds: 10 #terminate container template after 10 seconds + activeDeadlineSeconds: 10 # terminate container template after 10 seconds ``` ## Volumes + The following example dynamically creates a volume and then uses the volume in a two step workflow. ```yaml apiVersion: argoproj.io/v1alpha1 @@ -887,14 +900,14 @@ metadata: generateName: volumes-pvc- spec: entrypoint: volumes-pvc-example - volumeClaimTemplates: #define volume, same syntax as k8s Pod spec + volumeClaimTemplates: # define volume, same syntax as k8s Pod spec - metadata: - name: workdir #name of volume claim + name: workdir # name of volume claim spec: accessModes: [ "ReadWriteOnce" ] resources: requests: - storage: 1Gi #Gi => 1024 * 1024 * 1024 + storage: 1Gi # Gi => 1024 * 1024 * 1024 templates: - name: volumes-pvc-example @@ -910,7 +923,7 @@ spec: command: [sh, -c] args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"] # Mount workdir volume at /mnt/vol before invoking docker/whalesay - volumeMounts: #same syntax as k8s Pod spec + volumeMounts: # same syntax as k8s Pod spec - name: workdir mountPath: /mnt/vol @@ -920,15 +933,16 @@ spec: command: [sh, -c] args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"] # Mount workdir volume at /mnt/vol before invoking docker/whalesay - volumeMounts: #same syntax as k8s Pod spec + volumeMounts: # same syntax as k8s Pod spec - name: workdir mountPath: /mnt/vol ``` + -Volumes are a very useful way to move large amounts of data from one step in a workflow to another. -Depending on the system, some volumes may be accessible concurrently from multiple steps. +Volumes are a very useful way to move large amounts of data from one step in a workflow to another. Depending on the system, some volumes may be accessible concurrently from multiple steps. In some cases, you want to access an already existing volume rather than creating/destroying one dynamically. + ```yaml # Define Kubernetes PVC kind: PersistentVolumeClaim @@ -983,7 +997,9 @@ spec: ``` ## Daemon Containers -Argo workflows can start containers that run in the background (aka. daemon containers) while the workflow itself continues execution. The daemons will be automatically destroyed when the workflow exits the template scope in which the daemon was invoked. Deamons containers are useful for starting up services to be tested or to be used in testing (aka. fixtures). We also find it very useful when running large simulations to spin up a database as a daemon for collecting and organizing the results. The big advantage of daemons compared with sidecars is that their existence can persist across multiple steps or even the entire workflow. + +Argo workflows can start containers that run in the background (also known as `daemon containers`) while the workflow itself continues execution. Note that the daemons will be *automatically destroyed* when the workflow exits the template scope in which the daemon was invoked. Daemon containers are useful for starting up services to be tested or to be used in testing (e.g., fixtures). We also find it very useful when running large simulations to spin up a database as a daemon for collecting and organizing the results. The big advantage of daemons compared with sidecars is that their existence can persist across multiple steps or even the entire workflow.
+
 ```yaml
 apiVersion: argoproj.io/v1alpha1
 kind: Workflow
@@ -995,35 +1011,35 @@ spec:
   - name: daemon-example
     steps:
     - - name: influx
-        template: influxdb #start an influxdb as a daemon (see the influxdb template spec below)
+        template: influxdb # start an influxdb as a daemon (see the influxdb template spec below)
 
-    - - name: init-database #initialize influxdb
+    - - name: init-database # initialize influxdb
         template: influxdb-client
         arguments:
           parameters:
           - name: cmd
             value: curl -XPOST 'http://{{steps.influx.ip}}:8086/query' --data-urlencode "q=CREATE DATABASE mydb"
 
-    - - name: producer-1 #add entries to influxdb
+    - - name: producer-1 # add entries to influxdb
         template: influxdb-client
         arguments:
           parameters:
           - name: cmd
             value: for i in $(seq 1 20); do curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d "cpu,host=server01,region=uswest load=$i" ; sleep .5 ; done
-      - name: producer-2 #add entries to influxdb
+      - name: producer-2 # add entries to influxdb
         template: influxdb-client
         arguments:
           parameters:
           - name: cmd
             value: for i in $(seq 1 20); do curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d "cpu,host=server02,region=uswest load=$((RANDOM % 100))" ; sleep .5 ; done
-      - name: producer-3 #add entries to influxdb
+      - name: producer-3 # add entries to influxdb
         template: influxdb-client
         arguments:
           parameters:
           - name: cmd
             value: curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d 'cpu,host=server03,region=useast load=15.4'
 
-    - - name: consumer #consume intries from influxdb
+    - - name: consumer # consume entries from influxdb
         template: influxdb-client
         arguments:
           parameters:
@@ -1031,11 +1047,11 @@ spec:
             value: curl --silent -G http://{{steps.influx.ip}}:8086/query?pretty=true --data-urlencode "db=mydb" --data-urlencode "q=SELECT * FROM cpu"
 
   - name: influxdb
-    daemon: true #start influxdb as a daemon
+    daemon: true # start influxdb as a daemon
     container:
       image: influxdb:1.2
-      restartPolicy: Always #restart container if it fails
-      readinessProbe: #wait for readinessProbe to succeed
+      restartPolicy: Always # restart container if it fails
+      readinessProbe: # wait for readinessProbe to succeed
         httpGet:
           path: /ping
           port: 8086
@@ -1057,8 +1073,9 @@ spec:
 DAG templates use the tasks prefix to refer to another task, for example `{{tasks.influx.ip}}`.
 
 ## Sidecars
-A sidecar is another container that executes concurrently in the same pod as the "main" container and is useful
-in creating multi-container pods.
+
+A sidecar is another container that executes concurrently in the same pod as the main container and is useful in creating multi-container pods.
+
 ```yaml
 apiVersion: argoproj.io/v1alpha1
 kind: Workflow
@@ -1078,10 +1095,13 @@ spec:
     - name: nginx
       image: nginx:1.13
 ```
-In the above example, we create a sidecar container that runs nginx as a simple web server. The order in which containers may come up is random. This is why the 'main' container polls the nginx container until it is ready to service requests. This is a good design pattern when designing multi-container systems. Always wait for any services you need to come up before running your main code.
+
+In the above example, we create a sidecar container that runs nginx as a simple web server. The order in which containers come up is random, so in this example the main container polls the nginx container until it is ready to service requests. This is a good design pattern when designing multi-container systems: always wait for any services you need to come up before running your main code.
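To make the polling pattern above concrete, here is a minimal sketch of the main container's wait loop. The template name and images appear in the diff context; the curl loop itself is an illustrative assumption, not taken from the patch:

```yaml
# Sketch: the main container loops until the nginx sidecar on localhost
# answers HTTP requests, then proceeds with its real work.
- name: sidecar-nginx-example
  container:
    image: appropriate/curl
    command: [sh, -c]
    # poll the sidecar until it is ready to serve, then continue
    args: ["until curl -s -o /dev/null http://127.0.0.1; do echo waiting for nginx; sleep 1; done; echo nginx is up"]
  sidecars:
  - name: nginx
    image: nginx:1.13
```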
 ## Hardwired Artifacts
-With Argo, you can use any container image that you like to generate any kind of artifact. In practice, however, we find certain types of artifacts are very common and provide a more convenient way to generate and use these artifacts. In particular, we have "hardwired" support for git, http and s3 artifacts.
+
+With Argo, you can use any container image that you like to generate any kind of artifact. In practice, however, we find certain types of artifacts are very common, so there is built-in support for git, http, and s3 artifacts.
+
 ```yaml
 apiVersion: argoproj.io/v1alpha1
 kind: Workflow
@@ -1129,6 +1149,7 @@ spec:
 
 ## Kubernetes Resources
 In many cases, you will want to manage Kubernetes resources from Argo workflows. The resource template allows you to create, delete, or update any type of Kubernetes resource.
+
 ```yaml
 # in a workflow. The resource template type accepts any k8s manifest
 # (including CRDs) and can perform any kubectl action against it (e.g. create,
@@ -1141,8 +1162,8 @@ spec:
   entrypoint: pi-tmpl
   templates:
   - name: pi-tmpl
-    resource: #indicates that this is a resource template
-      action: create #can be any kubectl action (e.g. create, delete, apply, patch)
+    resource: # indicates that this is a resource template
+      action: create # can be any kubectl action (e.g. create, delete, apply, patch)
       # The successCondition and failureCondition are optional expressions.
       # If failureCondition is true, the step is considered failed.
       # If successCondition is true, the step is considered successful.
@@ -1172,9 +1193,10 @@ spec:
 
 Resources created in this way are independent of the workflow. If you want the resource to be deleted when the workflow is deleted then you can use [Kubernetes garbage collection](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/) with the workflow resource as an owner reference ([example](./k8s-owner-reference.yaml)).
 
-## Docker-in-Docker (aka. DinD) Using Sidecars
-An application of sidecars is to implement DinD (Docker-in-Docker).
-DinD is useful when you want to run Docker commands from inside a container. For example, you may want to build and push a container image from inside your build container. In the following example, we use the docker:dind container to run a Docker daemon in a sidecar and give the main container access to the daemon.
+## Docker-in-Docker Using Sidecars
+
+An application of sidecars is to implement Docker-in-Docker (DinD). DinD is useful when you want to run Docker commands from inside a container. For example, you may want to build and push a container image from inside your build container. In the following example, we use the docker:dind container to run a Docker daemon in a sidecar and give the main container access to the daemon.
+
 ```yaml
 apiVersion: argoproj.io/v1alpha1
 kind: Workflow
@@ -1189,13 +1211,13 @@ spec:
       command: [sh, -c]
       args: ["until docker ps; do sleep 3; done; docker run --rm debian:latest cat /etc/os-release"]
       env:
-      - name: DOCKER_HOST #the docker daemon can be access on the standard port on localhost
+      - name: DOCKER_HOST # the docker daemon can be accessed on the standard port on localhost
         value: 127.0.0.1
     sidecars:
     - name: dind
-      image: docker:17.10-dind #Docker already provides an image for running a Docker daemon
+      image: docker:17.10-dind # Docker already provides an image for running a Docker daemon
       securityContext:
-        privileged: true #the Docker daemon can only run in a privileged container
+        privileged: true # the Docker daemon can only run in a privileged container
       # mirrorVolumeMounts will mount the same volumes specified in the main container
       # to the sidecar (including artifacts), at the same mountPaths. This enables
       # dind daemon to (partially) see the same filesystem as the main container in
@@ -1203,7 +1225,8 @@ spec:
       mirrorVolumeMounts: true
 ```
 
-## Continuous integration example
+## Continuous Integration Example
+
 Continuous integration is a popular application for workflows. Currently, Argo does not provide event triggers for automatically kicking off your CI jobs, but we plan to do so in the near future. Until then, you can easily write a cron job that checks for new commits and kicks off the needed workflow, or use your existing Jenkins server to kick off the workflow.
 
 A good example of a CI workflow spec is provided at https://github.com/argoproj/argo/tree/master/examples/influxdb-ci.yaml. Because it just uses the concepts that we've already covered and is somewhat long, we don't go into details here.

From edcb56296255267a3c8fa639c3ad26a016caab80 Mon Sep 17 00:00:00 2001
From: Ilias K
Date: Fri, 8 Feb 2019 23:57:53 +0200
Subject: [PATCH 072/145] Dockerfile: argoexec base image correction (fixes
 #1209) (#1213)

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index 0c983b5881d1..2aa861454c10 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -75,7 +75,7 @@ RUN make $MAKE_TARGET
 ####################################################################################################
 # argoexec
 ####################################################################################################
-FROM debian:9.6-slim as argoexec
+FROM argoexec-base as argoexec
 
 COPY --from=argo-build /go/src/github.com/argoproj/argo/dist/argoexec /usr/local/bin/

From 2ddae161037f603d2a3c12ba6b495dc422547b58 Mon Sep 17 00:00:00 2001
From: Daisuke Taniwaki
Date: Sun, 10 Feb 2019 20:04:03 +0900
Subject: [PATCH 073/145] Set executor image pull policy for resource template
 (#1174)

---
 workflow/controller/operator.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go
index ba16443e5ed1..e72b82034573 100644
--- a/workflow/controller/operator.go
+++ b/workflow/controller/operator.go
@@ -1478,9 +1478,10 @@ func (woc *wfOperationCtx) executeResource(nodeName string, tmpl *wfv1.Template,
 		return node
 	}
 	mainCtr := apiv1.Container{
-		Image:   woc.controller.executorImage(),
-		Command: []string{"argoexec"},
-		Args:    []string{"resource", tmpl.Resource.Action},
+		Image:           woc.controller.executorImage(),
+		ImagePullPolicy: woc.controller.executorImagePullPolicy(),
+		Command:         []string{"argoexec"},
+		Args:            []string{"resource", tmpl.Resource.Action},
 
 		VolumeMounts: []apiv1.VolumeMount{
volumeMountPodMetadata, }, From f1797f78044504dbf2e1f7285cc9c18ac79f5e81 Mon Sep 17 00:00:00 2001 From: houz Date: Wed, 13 Feb 2019 06:23:03 +0800 Subject: [PATCH 074/145] Add schedulerName to workflow and template spec (#1184) --- api/openapi-spec/swagger.json | 8 ++++++++ pkg/apis/workflow/v1alpha1/openapi_generated.go | 14 ++++++++++++++ pkg/apis/workflow/v1alpha1/types.go | 12 ++++++++++++ workflow/controller/workflowpod.go | 6 ++++++ 4 files changed, 40 insertions(+) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 023aec707924..315fd5854db7 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -955,6 +955,10 @@ "description": "RetryStrategy describes how to retry a template when it fails", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.RetryStrategy" }, + "schedulerName": { + "description": "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.", + "type": "string" + }, "script": { "description": "Script runs a portion of code against an interpreter", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ScriptTemplate" @@ -1123,6 +1127,10 @@ "type": "integer", "format": "int32" }, + "schedulerName": { + "description": "Set scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. Default scheduler will be used if neither specified.", + "type": "string" + }, "serviceAccountName": { "description": "ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.", "type": "string" diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 47392e025d05..3094ace5b6ab 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -1821,6 +1821,13 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, }, }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"name"}, }, @@ -2128,6 +2135,13 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback Format: "int32", }, }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "Set scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. Default scheduler will be used if neither specified.", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"templates", "entrypoint"}, }, diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index 02d9ea9067ab..1874f7211280 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -147,6 +147,12 @@ type WorkflowSpec struct { ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` // Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first. Priority *int32 `json:"priority,omitempty"` + + // Set scheduler name for all pods. + // Will be overridden if container/script template's scheduler name is set. 
+ // Default scheduler will be used if neither specified. + // +optional + SchedulerName string `json:"schedulerName,omitempty"` } // Template is a reusable and composable unit of execution in a workflow @@ -217,6 +223,12 @@ type Template struct { // Tolerations to apply to workflow pods. Tolerations []apiv1.Toleration `json:"tolerations,omitempty"` + + // If specified, the pod will be dispatched by specified scheduler. + // Or it will be dispatched by workflow scope scheduler if specified. + // If neither specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulerName,omitempty"` } // Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index b205c515b168..5b416fa42fb0 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -381,6 +381,12 @@ func addSchedulingConstraints(pod *apiv1.Pod, wfSpec *wfv1.WorkflowSpec, tmpl *w } else if len(wfSpec.Tolerations) > 0 { pod.Spec.Tolerations = wfSpec.Tolerations } + // Set scheduler name (if specified) + if tmpl.SchedulerName != "" { + pod.Spec.SchedulerName = tmpl.SchedulerName + } else if wfSpec.SchedulerName != "" { + pod.Spec.SchedulerName = wfSpec.SchedulerName + } } // addVolumeReferences adds any volumeMounts that a container/sidecar is referencing, to the pod.spec.volumes From 8aae29317a8cfef2edc084a4c74a44c83d845936 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Wed, 13 Feb 2019 08:44:37 -0800 Subject: [PATCH 075/145] Issue #1190 - Fix incorrect retry node handling (#1208) --- workflow/controller/dag.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index 1f5601830751..420cf30b3999 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -106,6 +106,10 @@ func (d *dagContext) assessDAGPhase(targetTasks []string, nodes map[string]wfv1. } func hasMoreRetries(node *wfv1.NodeStatus, wf *wfv1.Workflow) bool { + if node.Phase == wfv1.NodeSucceeded { + return false + } + if len(node.Children) == 0 { return true } From 0bda53c77c54b037e7d91b18554053362b1e4d35 Mon Sep 17 00:00:00 2001 From: houz Date: Sat, 16 Feb 2019 01:58:17 +0800 Subject: [PATCH 076/145] fix dag retries (#1221) --- workflow/controller/dag.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index 420cf30b3999..69af39859cc4 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -79,7 +79,9 @@ func (d *dagContext) assessDAGPhase(targetTasks []string, nodes map[string]wfv1. 
			unsuccessfulPhase = node.Phase
 		}
 		if node.Type == wfv1.NodeTypeRetry {
-			if hasMoreRetries(&node, d.wf) {
+			if node.Successful() {
+				retriesExhausted = false
+			} else if hasMoreRetries(&node, d.wf) {
 				retriesExhausted = false
 			}
 		}

From f6b0c8f285217fd0e6089b0cf03ca4926d1b4758 Mon Sep 17 00:00:00 2001
From: houz
Date: Sat, 16 Feb 2019 06:50:49 +0800
Subject: [PATCH 077/145] Executor can access the k8s apiserver with an
 out-of-cluster config file (#1134)

Executor can access the k8s apiserver with an out-of-cluster config file

---
 docs/workflow-controller-configmap.yaml | 13 +++++++
 workflow/common/common.go               |  3 ++
 workflow/controller/config.go           | 18 +++++++++
 workflow/controller/workflowpod.go      | 49 ++++++++++++++++------
 workflow/controller/workflowpod_test.go | 50 +++++++++++++++++++++++++
 5 files changed, 122 insertions(+), 11 deletions(-)

diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml
index b5d9c7b31471..e57d83c70afa 100644
--- a/docs/workflow-controller-configmap.yaml
+++ b/docs/workflow-controller-configmap.yaml
@@ -22,6 +22,19 @@ data:
     # (available since Argo v2.3)
     parallelism: 10
 
+    # uncomment the following lines if the workflow controller runs in a different k8s cluster
+    # from the workflow workloads, or needs to communicate with the k8s apiserver using an
+    # out-of-cluster kubeconfig secret
+    # kubeConfig:
+    #   # name of the kubeconfig secret, may not be empty when kubeConfig specified
+    #   secretName: kubeconfig-secret
+    #   # key of the kubeconfig secret, may not be empty when kubeConfig specified
+    #   secretKey: kubeconfig
+    #   # mounting path of the kubeconfig secret, default to /kube/config
+    #   mountPath: /kubeconfig/mount/path
+    #   # volume name when mounting the secret, default to kubeconfig
+    #   volumeName: kube-config-volume
+
     # artifactRepository defines the default location to be used as the artifact repository for
     # container artifacts.
    artifactRepository:
diff --git a/workflow/common/common.go b/workflow/common/common.go
index 7b326186464e..d228d9c0f835 100644
--- a/workflow/common/common.go
+++ b/workflow/common/common.go
@@ -110,6 +110,9 @@ const (
 	GlobalVarWorkflowCreationTimestamp = "workflow.creationTimestamp"
 	// LocalVarPodName is a step level variable that references the name of the pod
 	LocalVarPodName = "pod.name"
+
+	KubeConfigDefaultMountPath  = "/kube/config"
+	KubeConfigDefaultVolumeName = "kubeconfig"
 )
 
 // ExecutionControl contains execution control parameters for executor to decide how to execute the container
diff --git a/workflow/controller/config.go b/workflow/controller/config.go
index 31874163b581..7da737d145bf 100644
--- a/workflow/controller/config.go
+++ b/workflow/controller/config.go
@@ -30,6 +30,9 @@ type WorkflowControllerConfig struct {
 	// ExecutorResources specifies the resource requirements that will be used for the executor sidecar
 	ExecutorResources *apiv1.ResourceRequirements `json:"executorResources,omitempty"`
 
+	// KubeConfig specifies a kube config file for the wait & init containers
+	KubeConfig *KubeConfig `json:"kubeConfig,omitempty"`
+
 	// ContainerRuntimeExecutor specifies the container runtime interface to use, default is docker
 	ContainerRuntimeExecutor string `json:"containerRuntimeExecutor,omitempty"`
 
@@ -62,6 +65,21 @@ type WorkflowControllerConfig struct {
 	Parallelism int `json:"parallelism,omitempty"`
 }
 
+// KubeConfig is used for wait & init sidecar containers to communicate with a k8s apiserver by an out-of-cluster method;
+// it is used when the workflow controller is in a different cluster from the workflow workloads
+type KubeConfig struct {
+	// SecretName of the kubeconfig secret
+	// may not be empty if kubeConfig specified
+	SecretName string `json:"secretName"`
+	// SecretKey of the kubeconfig in the secret
+	// may not be empty if kubeConfig specified
+	SecretKey string `json:"secretKey"`
+	// VolumeName of kubeconfig, default to 'kubeconfig'
+	VolumeName string `json:"volumeName,omitempty"`
+	// MountPath of the kubeconfig secret, default to '/kube/config'
+	MountPath string `json:"mountPath,omitempty"`
+}
+
 // ArtifactRepository represents a artifact repository in which a controller will store its artifacts
 type ArtifactRepository struct {
 	// ArchiveLogs enables log archiving
diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index 5b416fa42fb0..9f7081e4daf2 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -247,20 +247,14 @@ func substituteGlobals(pod *apiv1.Pod, globalParams map[string]string) (*apiv1.P
 }
 
 func (woc *wfOperationCtx) newInitContainer(tmpl *wfv1.Template) apiv1.Container {
-	ctr := woc.newExecContainer(common.InitContainerName, false)
-	ctr.Command = []string{"argoexec"}
-	ctr.Args = []string{"init"}
-	ctr.VolumeMounts = []apiv1.VolumeMount{
-		volumeMountPodMetadata,
-	}
+	ctr := woc.newExecContainer(common.InitContainerName, false, "init")
+	ctr.VolumeMounts = append([]apiv1.VolumeMount{volumeMountPodMetadata}, ctr.VolumeMounts...)
 	return *ctr
 }
 
 func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Container, error) {
-	ctr := woc.newExecContainer(common.WaitContainerName, false)
-	ctr.Command = []string{"argoexec"}
-	ctr.Args = []string{"wait"}
-	ctr.VolumeMounts = woc.createVolumeMounts()
+	ctr := woc.newExecContainer(common.WaitContainerName, false, "wait")
+	ctr.VolumeMounts = append(woc.createVolumeMounts(), ctr.VolumeMounts...)
return ctr, nil } @@ -317,6 +311,20 @@ func (woc *wfOperationCtx) createVolumes() []apiv1.Volume { volumes := []apiv1.Volume{ volumePodMetadata, } + if woc.controller.Config.KubeConfig != nil { + name := woc.controller.Config.KubeConfig.VolumeName + if name == "" { + name = common.KubeConfigDefaultVolumeName + } + volumes = append(volumes, apiv1.Volume{ + Name: name, + VolumeSource: apiv1.VolumeSource{ + Secret: &apiv1.SecretVolumeSource{ + SecretName: woc.controller.Config.KubeConfig.SecretName, + }, + }, + }) + } switch woc.controller.Config.ContainerRuntimeExecutor { case common.ContainerRuntimeExecutorKubelet, common.ContainerRuntimeExecutorK8sAPI: return volumes @@ -325,7 +333,7 @@ func (woc *wfOperationCtx) createVolumes() []apiv1.Volume { } } -func (woc *wfOperationCtx) newExecContainer(name string, privileged bool) *apiv1.Container { +func (woc *wfOperationCtx) newExecContainer(name string, privileged bool, subCommand string) *apiv1.Container { exec := apiv1.Container{ Name: name, Image: woc.controller.executorImage(), @@ -334,10 +342,29 @@ func (woc *wfOperationCtx) newExecContainer(name string, privileged bool) *apiv1 SecurityContext: &apiv1.SecurityContext{ Privileged: &privileged, }, + Command: []string{"argoexec"}, + Args: []string{subCommand}, } if woc.controller.Config.ExecutorResources != nil { exec.Resources = *woc.controller.Config.ExecutorResources } + if woc.controller.Config.KubeConfig != nil { + path := woc.controller.Config.KubeConfig.MountPath + if path == "" { + path = common.KubeConfigDefaultMountPath + } + name := woc.controller.Config.KubeConfig.VolumeName + if name == "" { + name = common.KubeConfigDefaultVolumeName + } + exec.VolumeMounts = []apiv1.VolumeMount{{ + Name: name, + MountPath: path, + ReadOnly: true, + SubPath: woc.controller.Config.KubeConfig.SecretKey, + }} + exec.Args = append(exec.Args, "--kubeconfig="+path) + } return &exec } diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go index c5dfe4b8f121..cc7928bccc1b 100644 --- a/workflow/controller/workflowpod_test.go +++ b/workflow/controller/workflowpod_test.go @@ -266,3 +266,53 @@ func TestVolumeAndVolumeMounts(t *testing.T) { assert.Equal(t, "volume-name", pod.Spec.Containers[0].VolumeMounts[0].Name) } } + +func TestOutOfCluster(t *testing.T) { + // default mount path & volume name + { + woc := newWoc() + woc.controller.Config.KubeConfig = &KubeConfig{ + SecretName: "foo", + SecretKey: "bar", + } + + woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "") + podName := getPodName(woc.wf) + pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{}) + + assert.Nil(t, err) + assert.Equal(t, "kubeconfig", pod.Spec.Volumes[1].Name) + assert.Equal(t, "foo", pod.Spec.Volumes[1].VolumeSource.Secret.SecretName) + + // kubeconfig volume is the last one + idx := len(pod.Spec.Containers[1].VolumeMounts) - 1 + assert.Equal(t, "kubeconfig", pod.Spec.Containers[1].VolumeMounts[idx].Name) + assert.Equal(t, "/kube/config", pod.Spec.Containers[1].VolumeMounts[idx].MountPath) + assert.Equal(t, "--kubeconfig=/kube/config", pod.Spec.Containers[1].Args[1]) + } + + // custom mount path & volume name, in case name collision + { + woc := newWoc() + woc.controller.Config.KubeConfig = &KubeConfig{ + SecretName: "foo", + SecretKey: "bar", + MountPath: "/some/path/config", + VolumeName: "kube-config-secret", + } + + woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "") + podName := getPodName(woc.wf) + pod, 
err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{}) + + assert.Nil(t, err) + assert.Equal(t, "kube-config-secret", pod.Spec.Volumes[1].Name) + assert.Equal(t, "foo", pod.Spec.Volumes[1].VolumeSource.Secret.SecretName) + + // kubeconfig volume is the last one + idx := len(pod.Spec.Containers[1].VolumeMounts) - 1 + assert.Equal(t, "kube-config-secret", pod.Spec.Containers[1].VolumeMounts[idx].Name) + assert.Equal(t, "/some/path/config", pod.Spec.Containers[1].VolumeMounts[idx].MountPath) + assert.Equal(t, "--kubeconfig=/some/path/config", pod.Spec.Containers[1].Args[1]) + } +} From baa3e622121e66c9fec7c612c88027b7cacbd1b2 Mon Sep 17 00:00:00 2001 From: jdfalko <43558452+jdfalko@users.noreply.github.com> Date: Fri, 15 Feb 2019 15:09:26 -0800 Subject: [PATCH 078/145] Update README with typo fixes (#1220) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3d7a5972bb07..9e9cd7e31c97 100644 --- a/README.md +++ b/README.md @@ -37,8 +37,8 @@ Argo Workflows is an open source container-native workflow engine for orchestrat ## Why Argo Workflows? * Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments. -* Cloud agnostic and can run on any kubernetes cluster. -* Easily ochestrate highly parallel jobs on Kubernets. +* Cloud agnostic and can run on any Kubernetes cluster. +* Easily orchestrate highly parallel jobs on Kubernetes. * Argo Workflows puts a cloud-scale supercomputer at your fingertips! ## Documentation From 8b67e1bfdc7ed5ea153cb17f9a740afe2bd4efa8 Mon Sep 17 00:00:00 2001 From: Greg Roodt Date: Tue, 26 Feb 2019 04:30:24 +1100 Subject: [PATCH 079/145] Update README.md (#1236) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 9e9cd7e31c97..26161c219be9 100644 --- a/README.md +++ b/README.md @@ -75,6 +75,7 @@ Currently **officially** using Argo: 1. [Adobe](https://www.adobe.com/) 1. [BlackRock](https://www.blackrock.com/) +1. [Canva](https://www.canva.com/) 1. [CoreFiling](https://www.corefiling.com/) 1. [Cratejoy](https://www.cratejoy.com/) 1. [Cyrus Biotechnology](https://cyrusbio.com/) From eeac5a0e11b4a6f4bc28757a3b0684598b8c4974 Mon Sep 17 00:00:00 2001 From: Ilias Katsakioris Date: Mon, 25 Feb 2019 19:57:53 +0200 Subject: [PATCH 080/145] Remove extra quotes around output parameter value (#1232) Ensure we do not insert extra single quotes when using valueFrom: jsonPath to set the value of an output parameter for resource templates. 
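For context, the kind of output parameter this fix affects looks roughly like the following sketch; the parameter name and JSON path are illustrative, not taken from the patch:

```yaml
# Sketch: a resource template saving a field of the created resource as an
# output parameter via kubectl's jsonPath support. Before this fix, the saved
# value carried extra single quotes (e.g. 'my-job' instead of my-job).
outputs:
  parameters:
  - name: job-name
    valueFrom:
      jsonPath: '{.metadata.name}'
```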
Signed-off-by: Ilias Katsakioris --- workflow/executor/resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/workflow/executor/resource.go b/workflow/executor/resource.go index fa671798e5ec..e28886bbdb3b 100644 --- a/workflow/executor/resource.go +++ b/workflow/executor/resource.go @@ -245,7 +245,7 @@ func (we *WorkflowExecutor) SaveResourceParameters(resourceNamespace string, res } var cmd *exec.Cmd if param.ValueFrom.JSONPath != "" { - args := []string{"get", resourceName, "-o", fmt.Sprintf("jsonpath='%s'", param.ValueFrom.JSONPath)} + args := []string{"get", resourceName, "-o", fmt.Sprintf("jsonpath=%s", param.ValueFrom.JSONPath)} if resourceNamespace != "" { args = append(args, "-n", resourceNamespace) } From 2b1d56e7d4e583e2e06b37904714b350faf03d97 Mon Sep 17 00:00:00 2001 From: Naoto Migita Date: Tue, 26 Feb 2019 02:58:58 +0900 Subject: [PATCH 081/145] Update README.md (#1224) --- examples/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/README.md b/examples/README.md index 078383c7655f..4d3473957d05 100644 --- a/examples/README.md +++ b/examples/README.md @@ -998,7 +998,7 @@ spec: ## Daemon Containers -Argo workflows can start containers that run in the background (also known as `daemon containers`) while the workflow itself continues execution. Note that the daemons will be *automatically destroyed* when the workflow exits the template scope in which the daemon was invoked. Deamon containers are useful for starting up services to be tested or to be used in testing (e.g., fixtures). We also find it very useful when running large simulations to spin up a database as a daemon for collecting and organizing the results. The big advantage of daemons compared with sidecars is that their existence can persist across multiple steps or even the entire workflow. +Argo workflows can start containers that run in the background (also known as `daemon containers`) while the workflow itself continues execution. Note that the daemons will be *automatically destroyed* when the workflow exits the template scope in which the daemon was invoked. Daemon containers are useful for starting up services to be tested or to be used in testing (e.g., fixtures). We also find it very useful when running large simulations to spin up a database as a daemon for collecting and organizing the results. The big advantage of daemons compared with sidecars is that their existence can persist across multiple steps or even the entire workflow. 
```yaml apiVersion: argoproj.io/v1alpha1 From cde5cd320fa987ac6dd539a3126f29c73cd7277a Mon Sep 17 00:00:00 2001 From: shahin Date: Tue, 26 Feb 2019 02:07:18 -0800 Subject: [PATCH 082/145] Include stderr when retrieving docker logs (#1225) --- workflow/executor/docker/docker.go | 1 + 1 file changed, 1 insertion(+) diff --git a/workflow/executor/docker/docker.go b/workflow/executor/docker/docker.go index 64ed6c87f734..980527c523b4 100644 --- a/workflow/executor/docker/docker.go +++ b/workflow/executor/docker/docker.go @@ -78,6 +78,7 @@ func (d *DockerExecutor) Logs(containerID string, path string) error { } defer util.Close(outfile) cmd.Stdout = outfile + cmd.Stderr = outfile err = cmd.Start() if err != nil { return errors.InternalWrapError(err) From 3f1fb9d5e61d300c4922e48a748dc17285e07f07 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Tue, 26 Feb 2019 11:10:22 +0100 Subject: [PATCH 083/145] Add Gardener to "Who uses Argo" (#1228) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 26161c219be9..10b2665a5567 100644 --- a/README.md +++ b/README.md @@ -81,6 +81,7 @@ Currently **officially** using Argo: 1. [Cyrus Biotechnology](https://cyrusbio.com/) 1. [Datadog](https://www.datadoghq.com/) 1. [Equinor](https://www.equinor.com/) +1. [Gardener](https://gardener.cloud/) 1. [Gladly](https://gladly.com/) 1. [GitHub](https://github.com/) 1. [Google](https://www.google.com/intl/en/about/our-company/) From 94cda3d53c6a72e3fc225ba08796bfd9420eccd6 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Wed, 27 Feb 2019 08:18:14 +0100 Subject: [PATCH 084/145] Add feature to continue workflow on failed/error steps/tasks (#1205) --- api/openapi-spec/swagger.json | 19 ++++++ examples/dag-continueOn-fail.yaml | 44 ++++++++++++ examples/workflow-continueOn-fail.yaml | 67 +++++++++++++++++++ .../workflow/v1alpha1/openapi_generated.go | 42 +++++++++++- pkg/apis/workflow/v1alpha1/types.go | 40 +++++++++++ .../v1alpha1/zz_generated.deepcopy.go | 26 +++++++ workflow/controller/dag.go | 2 +- workflow/controller/steps.go | 8 ++- 8 files changed, 244 insertions(+), 4 deletions(-) create mode 100644 examples/dag-continueOn-fail.yaml create mode 100644 examples/workflow-continueOn-fail.yaml diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 315fd5854db7..fc912a0d6453 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -163,6 +163,17 @@ } } }, + "io.argoproj.workflow.v1alpha1.ContinueOn": { + "description": "ContinueOn defines if a workflow should continue even if a task or step fails/errors. It can be specified if the workflow should continue when the pod errors, fails or both.", + "properties": { + "error": { + "type": "boolean" + }, + "failed": { + "type": "boolean" + } + } + }, "io.argoproj.workflow.v1alpha1.DAGTask": { "description": "DAGTask represents a node in the graph during DAG execution", "required": [ @@ -174,6 +185,10 @@ "description": "Arguments are the parameter and artifact arguments to the template", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Arguments" }, + "continueOn": { + "description": "ContinueOn makes argo to proceed with the following step even if this step fails. 
Errors and Failed states can be specified",
+          "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ContinueOn"
+        },
         "dependencies": {
           "description": "Dependencies are name of other targets which this depends on",
           "type": "array",
@@ -1181,6 +1196,10 @@
         "arguments": {
           "description": "Arguments hold arguments to the template",
           "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Arguments"
         },
+        "continueOn": {
+          "description": "ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified",
+          "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ContinueOn"
+        },
         "name": {
           "description": "Name of the step",
           "type": "string"
         },
diff --git a/examples/dag-continueOn-fail.yaml b/examples/dag-continueOn-fail.yaml
new file mode 100644
index 000000000000..2bb2f78b893b
--- /dev/null
+++ b/examples/dag-continueOn-fail.yaml
@@ -0,0 +1,44 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: dag-continueOn-fail-
+spec:
+  entrypoint: workflow
+  templates:
+  - name: workflow
+    dag:
+      tasks:
+      - name: A
+        template: whalesay
+      - name: B
+        dependencies: [A]
+        template: intentional-fail
+        continueOn:
+          failed: true
+      - name: C
+        dependencies: [A]
+        template: whalesay
+      - name: D
+        dependencies: [B, C]
+        template: whalesay
+      - name: E
+        dependencies: [A]
+        template: intentional-fail
+      - name: F
+        dependencies: [A]
+        template: whalesay
+      - name: G
+        dependencies: [E, F]
+        template: whalesay
+
+  - name: whalesay
+    container:
+      image: docker/whalesay:latest
+      command: [cowsay]
+      args: ["hello world"]
+
+  - name: intentional-fail
+    container:
+      image: alpine:latest
+      command: [sh, -c]
+      args: ["echo intentional failure; exit 1"]
\ No newline at end of file
diff --git a/examples/workflow-continueOn-fail.yaml b/examples/workflow-continueOn-fail.yaml
new file mode 100644
index 000000000000..83a1442a617d
--- /dev/null
+++ b/examples/workflow-continueOn-fail.yaml
@@ -0,0 +1,67 @@
+# Example of continuing a workflow even when a step fails, using continueOn.
+# The workflow-ignore branch tolerates its intentional failure and keeps
+# running, while the workflow-not-ignore branch fails as soon as its
+# intentional-fail step fails.
+ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-continueOn-fail- +spec: + entrypoint: workflow + templates: + - name: workflow + steps: + - - name: wf-ignore + template: workflow-ignore + - name: wf-not-ignore + template: workflow-not-ignore + + - name: workflow-ignore + steps: + - - name: A + template: whalesay + - - name: B + template: whalesay + - name: C + template: intentional-fail + continueOn: + failed: true + - - name: D + template: whalesay + + - name: workflow-not-ignore + steps: + - - name: E + template: whalesay + - - name: F + template: whalesay + - name: G + template: intentional-fail + - - name: H + template: whalesay + + # - name: B + # inputs: + # parameters: + # - name: seq-id + # steps: + # - - name: jobs + # template: one-job + # arguments: + # parameters: + # - name: seq-id + # value: "{{inputs.parameters.seq-id}}" + # withParam: "[1, 2]" + + - name: whalesay + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["hello world"] + + - name: intentional-fail + container: + image: alpine:latest + command: [sh, -c] + args: ["echo intentional failure; exit 1"] diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 3094ace5b6ab..07a92a1759b2 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -19,6 +19,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation": schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact": schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifact(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactoryAuth": schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ContinueOn": schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTask": schema_pkg_apis_workflow_v1alpha1_DAGTask(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate": schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact": schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref), @@ -331,6 +332,31 @@ func schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref common.ReferenceCallb } } +func schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ContinueOn defines if a workflow should continue even if a task or step fails/errors. 
It can be specified if the workflow should continue when the pod errors, fails or both.", + Properties: map[string]spec.Schema{ + "error": { + SchemaProps: spec.SchemaProps{ + Type: []string{"boolean"}, + Format: "", + }, + }, + "failed": { + SchemaProps: spec.SchemaProps{ + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{}, + } +} + func schema_pkg_apis_workflow_v1alpha1_DAGTask(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -404,12 +430,18 @@ func schema_pkg_apis_workflow_v1alpha1_DAGTask(ref common.ReferenceCallback) com Format: "", }, }, + "continueOn": { + SchemaProps: spec.SchemaProps{ + Description: "ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ContinueOn"), + }, + }, }, Required: []string{"name", "template"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Sequence"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ContinueOn", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Sequence"}, } } @@ -2210,10 +2242,16 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowStep(ref common.ReferenceCallback Format: "", }, }, + "continueOn": { + SchemaProps: spec.SchemaProps{ + Description: "ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ContinueOn"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Sequence"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ContinueOn", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Sequence"}, } } diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index 1874f7211280..783b2594cd79 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -384,6 +384,10 @@ type WorkflowStep struct { // When is an expression in which the step should conditionally execute When string `json:"when,omitempty"` + + // ContinueOn makes argo to proceed with the following step even if this step fails. + // Errors and Failed states can be specified + ContinueOn *ContinueOn `json:"continueOn,omitempty"` } // Item expands a single workflow step into multiple parallel steps @@ -845,6 +849,10 @@ type DAGTask struct { // When is an expression in which the task should conditionally execute When string `json:"when,omitempty"` + + // ContinueOn makes argo to proceed with the following step even if this step fails. 
+ // Errors and Failed states can be specified + ContinueOn *ContinueOn `json:"continueOn,omitempty"` } // SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time @@ -940,3 +948,35 @@ func (wf *Workflow) NodeID(name string) string { _, _ = h.Write([]byte(name)) return fmt.Sprintf("%s-%v", wf.ObjectMeta.Name, h.Sum32()) } + +// ContinueOn defines if a workflow should continue even if a task or step fails/errors. +// It can be specified if the workflow should continue when the pod errors, fails or both. +type ContinueOn struct { + // +optional + Error bool `json:"error,omitempty"` + // +optional + Failed bool `json:"failed,omitempty"` +} + +func continues(c *ContinueOn, phase NodePhase) bool { + if c == nil { + return false + } + if c.Error == true && phase == NodeError { + return true + } + if c.Failed == true && phase == NodeFailed { + return true + } + return false +} + +// ContinuesOn returns whether the DAG should be proceeded if the task fails or errors. +func (t *DAGTask) ContinuesOn(phase NodePhase) bool { + return continues(t.ContinueOn, phase) +} + +// ContinuesOn returns whether the StepGroup should be proceeded if the task fails or errors. +func (s *WorkflowStep) ContinuesOn(phase NodePhase) bool { + return continues(s.ContinueOn, phase) +} diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index 72e00d152e1c..9bce224c205b 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -186,6 +186,22 @@ func (in *ArtifactoryAuth) DeepCopy() *ArtifactoryAuth { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContinueOn) DeepCopyInto(out *ContinueOn) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContinueOn. +func (in *ContinueOn) DeepCopy() *ContinueOn { + if in == nil { + return nil + } + out := new(ContinueOn) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DAGTask) DeepCopyInto(out *DAGTask) { *out = *in @@ -207,6 +223,11 @@ func (in *DAGTask) DeepCopyInto(out *DAGTask) { *out = new(Sequence) **out = **in } + if in.ContinueOn != nil { + in, out := &in.ContinueOn, &out.ContinueOn + *out = new(ContinueOn) + **out = **in + } return } @@ -1070,6 +1091,11 @@ func (in *WorkflowStep) DeepCopyInto(out *WorkflowStep) { *out = new(Sequence) **out = **in } + if in.ContinueOn != nil { + in, out := &in.ContinueOn, &out.ContinueOn + *out = new(ContinueOn) + **out = **in + } return } diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index 69af39859cc4..1a3f5cb40ccc 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -233,7 +233,7 @@ func (woc *wfOperationCtx) executeDAGTask(dagCtx *dagContext, taskName string) { depNode := dagCtx.getTaskNode(depName) if depNode != nil { if depNode.Completed() { - if !depNode.Successful() { + if !depNode.Successful() && !dagCtx.getTask(depName).ContinuesOn(depNode.Phase) { dependenciesSuccessful = false } continue diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index a4b5c8adff63..e54ae2bb852a 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -155,6 +155,7 @@ func (woc *wfOperationCtx) executeStepGroup(stepGroup []wfv1.WorkflowStep, sgNod woc.log.Debugf("Step group node %v already marked completed", node) return node } + // First, resolve any references to outputs from previous steps, and perform substitution stepGroup, err := woc.resolveReferences(stepGroup, stepsCtx.scope) if err != nil { @@ -167,6 +168,9 @@ func (woc *wfOperationCtx) executeStepGroup(stepGroup []wfv1.WorkflowStep, sgNod return woc.markNodeError(sgNodeName, err) } + // Maps nodes to their steps + nodeSteps := make(map[string]wfv1.WorkflowStep) + // Kick off all parallel steps in the group for _, step := range stepGroup { childNodeName := fmt.Sprintf("%s.%s", sgNodeName, step.Name) @@ -202,6 +206,7 @@ func (woc *wfOperationCtx) executeStepGroup(stepGroup []wfv1.WorkflowStep, sgNod } } if childNode != nil { + nodeSteps[childNodeName] = step woc.addChildNode(sgNodeName, childNodeName) } } @@ -216,7 +221,8 @@ func (woc *wfOperationCtx) executeStepGroup(stepGroup []wfv1.WorkflowStep, sgNod // All children completed. 
Determine step group status as a whole
 	for _, childNodeID := range node.Children {
 		childNode := woc.wf.Status.Nodes[childNodeID]
-		if !childNode.Successful() {
+		step := nodeSteps[childNode.Name]
+		if !childNode.Successful() && !step.ContinuesOn(childNode.Phase) {
 			failMessage := fmt.Sprintf("child '%s' failed", childNodeID)
 			woc.log.Infof("Step group node %s deemed failed: %s", node, failMessage)
 			return woc.markNodePhase(node.Name, wfv1.NodeFailed, failMessage)

From 2b0b8f1c3f46aa41e4b4ddaf14ad1fdebccfaf8a Mon Sep 17 00:00:00 2001
From: Matthew Coleman
Date: Wed, 27 Feb 2019 02:50:37 -0500
Subject: [PATCH 085/145] Fix the Prometheus address references (#1237)

---
 workflow/metrics/server.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/workflow/metrics/server.go b/workflow/metrics/server.go
index 65153e23a044..f2ed0bf63d66 100644
--- a/workflow/metrics/server.go
+++ b/workflow/metrics/server.go
@@ -2,6 +2,7 @@ package metrics
 
 import (
 	"context"
+	"fmt"
 	"net/http"
 
 	"github.com/prometheus/client_golang/prometheus"
@@ -20,7 +21,7 @@ type PrometheusConfig struct {
 func RunServer(ctx context.Context, config PrometheusConfig, registry *prometheus.Registry) {
 	mux := http.NewServeMux()
 	mux.Handle(config.Path, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
-	srv := &http.Server{Addr: config.Port, Handler: mux}
+	srv := &http.Server{Addr: fmt.Sprintf(":%s", config.Port), Handler: mux}
 
 	defer func() {
 		if cerr := srv.Close(); cerr != nil {
@@ -28,7 +29,7 @@ func RunServer(ctx context.Context, config PrometheusConfig, registry *prometheu
 		}
 	}()
 
-	log.Infof("Starting prometheus metrics server at 0.0.0.0%s%s", config.Port, config.Path)
+	log.Infof("Starting prometheus metrics server at 0.0.0.0:%s%s", config.Port, config.Path)
 	if err := srv.ListenAndServe(); err != nil {
 		panic(err)
 	}

From 1cb88baee9ded1ede27a9d3f1e31f06f4369443d Mon Sep 17 00:00:00 2001
From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com>
Date: Mon, 4 Mar 2019 14:53:16 -0800
Subject: [PATCH 086/145] Fixed Issue#1223 Kubernetes Resource action: patch is
 not supported (#1245)

* Fixed Issue#1223 Kubernetes Resource action: patch is not supported

This PR fixes Issue#1223, reported by @shanesiebken. Argo kubernetes
resource workflows failed on the patch action. The --patch or -p option
is required for the kubectl patch action, so this PR passes the manifest
yaml as the patch argument to kubectl. This adds support for the patch
action in Argo kubernetes resource workflows. Only the JSON merge patch
strategy is supported.

* updated formatting

---
 examples/README.md            |  3 ++-
 workflow/executor/resource.go | 14 ++++++++++++++
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/examples/README.md b/examples/README.md
index 4d3473957d05..57d8bdcdb119 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1163,7 +1163,8 @@ spec:
   templates:
   - name: pi-tmpl
     resource: # indicates that this is a resource template
-      action: create # can be any kubectl action (e.g. create, delete, apply, patch)
+      action: create # can be any kubectl action (e.g. create, delete, apply, patch)
+      # The patch action supports only the **json merge strategy**
       # The successCondition and failureCondition are optional expressions.
       # If failureCondition is true, the step is considered failed.
       # If successCondition is true, the step is considered successful.
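With this change, a workflow can drive `kubectl patch` from a resource template. A minimal sketch of the usage (the ConfigMap name and data are illustrative assumptions, not taken from the patch):

```yaml
# Sketch: merge-patch an existing ConfigMap from a workflow step. The manifest
# below is what the executor passes to kubectl as the patch payload (-p).
- name: patch-cm
  resource:
    action: patch
    manifest: |
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: my-config
      data:
        foo: bar
```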
diff --git a/workflow/executor/resource.go b/workflow/executor/resource.go
index e28886bbdb3b..9866d858040b 100644
--- a/workflow/executor/resource.go
+++ b/workflow/executor/resource.go
@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"io/ioutil"
 	"os/exec"
 	"strings"
 	"time"
@@ -28,6 +29,19 @@ func (we *WorkflowExecutor) ExecResource(action string, manifestPath string, isD
 		args = append(args, "--ignore-not-found")
 		output = "name"
 	}
+
+	if action == "patch" {
+
+		args = append(args, "-p")
+		buff, err := ioutil.ReadFile(manifestPath)
+
+		if err != nil {
+			return "", "", errors.New(errors.CodeBadRequest, err.Error())
+		}
+
+		args = append(args, string(buff))
+	}
+
 	args = append(args, "-f")
 	args = append(args, manifestPath)
 	args = append(args, "-o")

From fa042aa285947c5fa365ef06a9565d0b4e20da0e Mon Sep 17 00:00:00 2001
From: Nick Stott
Date: Tue, 5 Mar 2019 03:25:04 -0500
Subject: [PATCH 087/145] typo, executo -> executor (#1243)

---
 docs/workflow-controller-configmap.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml
index e57d83c70afa..3e27ea2c1c70 100644
--- a/docs/workflow-controller-configmap.yaml
+++ b/docs/workflow-controller-configmap.yaml
@@ -81,7 +81,7 @@ data:
     # kubelet port when using kubelet executor (default: 10250)
     kubeletPort: 10250
 
-    # disable the TLS verification of the kubelet executo (default: false)
+    # disable the TLS verification of the kubelet executor (default: false)
     kubeletInsecure: false
 
     # executorResources specifies the resource requirements that will be used for the executor

From 3f06385b129c02e23ea283f7c66d347cb8899564 Mon Sep 17 00:00:00 2001
From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com>
Date: Wed, 6 Mar 2019 15:15:17 -0800
Subject: [PATCH 088/145] Issue#1165 fake outputs don't notify and task
 completes successfully (#1247)

* Issue#1165 fake outputs don't notify and task completes successfully

This PR addresses Issue#1165, reported by @alexfrieden.

Issue/Bug: Argo finishes the task successfully even if the artifact/file
does not exist.

Fix: Validate that the created gzip contains the artifact or file. If the
file/artifact doesn't exist, the current step/stage/task will be failed
with a log message.

Sample Log:
'''
INFO[0029] Updating node artifact-passing-lkvj8[0].generate-artifact (artifact-passing-lkvj8-1949982165) status Running -> Error
INFO[0029] Updating node artifact-passing-lkvj8[0].generate-artifact (artifact-passing-lkvj8-1949982165) message: failed to save outputs: File or Artifact does not exist.
/tmp/hello_world.txt INFO[0029] Step group node artifact-passing-lkvj8[0] (artifact-passing-lkvj8-1067333159) deemed failed: child 'artifact-passing-lkvj8-1949982165' failed namespace=default workflow=artifact-passing-lkvj8 INFO[0029] node artifact-passing-lkvj8[0] (artifact-passing-lkvj8-1067333159) phase Running -> Failed namespace=default workflow=artifact-passing-lkvj8 ''' * fixed gometalinter errcheck issue --- workflow/executor/docker/docker.go | 7 ++++ workflow/util/file/fileutil.go | 52 ++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 workflow/util/file/fileutil.go diff --git a/workflow/executor/docker/docker.go b/workflow/executor/docker/docker.go index 980527c523b4..625359435f60 100644 --- a/workflow/executor/docker/docker.go +++ b/workflow/executor/docker/docker.go @@ -7,6 +7,8 @@ import ( "strings" "time" + "github.com/argoproj/argo/workflow/util/file" + "github.com/argoproj/argo/util" "github.com/argoproj/argo/errors" @@ -51,6 +53,11 @@ func (d *DockerExecutor) CopyFile(containerID string, sourcePath string, destPat if err != nil { return err } + if !file.IsFileOrDirExistInGZip(sourcePath, destPath) { + errMsg := fmt.Sprintf("File or Artifact does not exist. %s", sourcePath) + log.Warn(errMsg) + return errors.InternalError(errMsg) + } log.Infof("Archiving completed") return nil } diff --git a/workflow/util/file/fileutil.go b/workflow/util/file/fileutil.go new file mode 100644 index 000000000000..ab3b325adee9 --- /dev/null +++ b/workflow/util/file/fileutil.go @@ -0,0 +1,52 @@ +package file + +import ( + "archive/tar" + "compress/gzip" + "io" + "os" + "strings" + + log "github.com/sirupsen/logrus" +) + +//IsFileOrDirExistInGZip return true if file or directory exists in GZip file +func IsFileOrDirExistInGZip(sourcePath string, gzipFilePath string) bool { + + fi, err := os.Open(gzipFilePath) + + if os.IsNotExist(err) { + return false + } + defer closeFile(fi) + + fz, err := gzip.NewReader(fi) + if err != nil { + return false + } + tr := tar.NewReader(fz) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + + return false + } + if hdr.FileInfo().IsDir() && strings.Contains(strings.Trim(hdr.Name, "/"), strings.Trim(sourcePath, "/")) { + return true + } + if strings.Contains(sourcePath, hdr.Name) && hdr.Size > 0 { + return true + } + } + return false +} + +func closeFile(f *os.File) { + err := f.Close() + if err != nil { + log.Warn("Failed to close the file. 
v%", err) + } +} From b03841297e4b0dab0380b441cf41f5ed34db44bf Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Fri, 15 Mar 2019 02:02:48 -0700 Subject: [PATCH 089/145] Git cloning via SSH was not verifying host public key (#1261) --- Dockerfile | 1 + Gopkg.lock | 1 + api/openapi-spec/swagger.json | 4 +++ hack/ssh_known_hosts | 8 ++++++ hack/update-ssh-known-hosts.sh | 24 ++++++++++++++++++ .../workflow/v1alpha1/openapi_generated.go | 7 ++++++ pkg/apis/workflow/v1alpha1/types.go | 4 +++ workflow/artifacts/git/git.go | 25 +++++++++++-------- workflow/executor/executor.go | 4 ++- 9 files changed, 67 insertions(+), 11 deletions(-) create mode 100644 hack/ssh_known_hosts create mode 100755 hack/update-ssh-known-hosts.sh diff --git a/Dockerfile b/Dockerfile index 2aa861454c10..7408df80352b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -47,6 +47,7 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/* && \ curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \ chmod +x /usr/local/bin/kubectl +COPY hack/ssh_known_hosts /etc/ssh/ssh_known_hosts COPY --from=builder /usr/local/bin/docker /usr/local/bin/ diff --git a/Gopkg.lock b/Gopkg.lock index 97d653bdbe02..ec3e23ccef9e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1162,6 +1162,7 @@ "github.com/evanphx/json-patch", "github.com/ghodss/yaml", "github.com/go-openapi/spec", + "github.com/golang/glog", "github.com/gorilla/websocket", "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index fc912a0d6453..4d21e4f96e7e 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -250,6 +250,10 @@ "repo" ], "properties": { + "insecureIgnoreHostKey": { + "description": "InsecureIgnoreHostKey disables SSH strict host key checking during git clone", + "type": "boolean" + }, "passwordSecret": { "description": "PasswordSecret is the secret selector to the repository password", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" diff --git a/hack/ssh_known_hosts b/hack/ssh_known_hosts new file mode 100644 index 000000000000..31a7bae3fce5 --- /dev/null +++ b/hack/ssh_known_hosts @@ -0,0 +1,8 @@ +# This file was automatically generated. 
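On the workflow side, the new `insecureIgnoreHostKey` field added by this patch is set on a git artifact. A sketch of opting out of the new strict checking (the repo URL and secret names are illustrative assumptions):

```yaml
# Sketch: a git input artifact cloned over SSH. Host keys are now verified
# against /etc/ssh/ssh_known_hosts by default; setting insecureIgnoreHostKey
# to true restores the previous unverified behavior.
inputs:
  artifacts:
  - name: source
    path: /src
    git:
      repo: git@github.com:argoproj/argo.git
      sshPrivateKeySecret:
        name: my-ssh-key-secret
        key: ssh-private-key
      insecureIgnoreHostKey: true
```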
DO NOT EDIT +bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== +github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== +gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= +gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf +gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 +ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H +vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H diff --git a/hack/update-ssh-known-hosts.sh b/hack/update-ssh-known-hosts.sh new file mode 100755 index 000000000000..aa74c6489add --- /dev/null +++ b/hack/update-ssh-known-hosts.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +KNOWN_HOSTS_FILE=$(dirname "$0")/ssh_known_hosts +HEADER="# This file was automatically generated. 
DO NOT EDIT" +echo "$HEADER" > $KNOWN_HOSTS_FILE +ssh-keyscan github.com gitlab.com bitbucket.org ssh.dev.azure.com vs-ssh.visualstudio.com | sort -u >> $KNOWN_HOSTS_FILE +chmod 0644 $KNOWN_HOSTS_FILE + +# Public SSH keys can be verified at the following URLs: +# - github.com: https://help.github.com/articles/github-s-ssh-key-fingerprints/ +# - gitlab.com: https://docs.gitlab.com/ee/user/gitlab_com/#ssh-host-keys-fingerprints +# - bitbucket.org: https://confluence.atlassian.com/bitbucket/ssh-keys-935365775.html +# - ssh.dev.azure.com, vs-ssh.visualstudio.com: https://docs.microsoft.com/en-us/azure/devops/repos/git/use-ssh-keys-to-authenticate?view=azure-devops +diff - <(ssh-keygen -l -f $KNOWN_HOSTS_FILE | sort -k 3) < Date: Fri, 15 Mar 2019 19:48:30 +0200 Subject: [PATCH 090/145] Update versions (#1218) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7408df80352b..34adc2f90cdf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ # Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image # Also used as the image in CI jobs so needs all dependencies #################################################################################################### -FROM golang:1.11.4 as builder +FROM golang:1.11.5 as builder RUN apt-get update && apt-get install -y \ git \ @@ -41,7 +41,7 @@ RUN curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v${G #################################################################################################### FROM debian:9.6-slim as argoexec-base # NOTE: keep the version synced with https://storage.googleapis.com/kubernetes-release/release/stable.txt -ENV KUBECTL_VERSION=1.13.1 +ENV KUBECTL_VERSION=1.13.4 RUN apt-get update && \ apt-get install -y curl jq procps git tar mime-support && \ rm -rf /var/lib/apt/lists/* && \ From b2743f30c411f5ad8f8c8b481a5d6b6ff83c33bd Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Sat, 16 Mar 2019 02:50:59 +0900 Subject: [PATCH 091/145] Proxy Priority and PriorityClassName to pods (#1179) --- api/openapi-spec/swagger.json | 18 ++++++++++++ .../workflow/v1alpha1/openapi_generated.go | 28 +++++++++++++++++++ pkg/apis/workflow/v1alpha1/types.go | 13 +++++++++ .../v1alpha1/zz_generated.deepcopy.go | 10 +++++++ workflow/controller/workflowpod.go | 19 +++++++++++++ workflow/controller/workflowpod_test.go | 25 +++++++++++++++++ 6 files changed, 113 insertions(+) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 4d21e4f96e7e..eb0e362aad5b 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -966,6 +966,15 @@ "type": "integer", "format": "int64" }, + "priority": { + "description": "Priority to apply to workflow pods.", + "type": "integer", + "format": "int32" + }, + "priorityClassName": { + "description": "PriorityClassName to apply to workflow pods.", + "type": "string" + }, "resource": { "description": "Resource template subtype which can run k8s resources", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ResourceTemplate" @@ -1141,6 +1150,15 @@ "type": "integer", "format": "int64" }, + "podPriority": { + "description": "Priority to apply to workflow pods.", + "type": "integer", + "format": "int32" + }, + "podPriorityClassName": { + "description": "PriorityClassName to apply to workflow pods.", + "type": "string" + }, "priority": { "description": "Priority is used if controller is configured to process limited number of workflows in parallel. 
Workflows with higher priority are processed first.", "type": "integer", diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index ddf9df314585..de74605e3487 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -1867,6 +1867,20 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co Format: "", }, }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "PriorityClassName to apply to workflow pods.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "Priority to apply to workflow pods.", + Type: []string{"integer"}, + Format: "int32", + }, + }, }, Required: []string{"name"}, }, @@ -2181,6 +2195,20 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback Format: "", }, }, + "podPriorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "PriorityClassName to apply to workflow pods.", + Type: []string{"string"}, + Format: "", + }, + }, + "podPriority": { + SchemaProps: spec.SchemaProps{ + Description: "Priority to apply to workflow pods.", + Type: []string{"integer"}, + Format: "int32", + }, + }, }, Required: []string{"templates", "entrypoint"}, }, diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index 7a81ff7d0b17..9fb3cbf019d2 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -145,6 +145,7 @@ type WorkflowSpec struct { // allowed to run before the controller terminates the workflow. A value of zero is used to // terminate a Running workflow ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` + // Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first. Priority *int32 `json:"priority,omitempty"` @@ -153,6 +154,12 @@ type WorkflowSpec struct { // Default scheduler will be used if neither specified. // +optional SchedulerName string `json:"schedulerName,omitempty"` + + // PriorityClassName to apply to workflow pods. + PodPriorityClassName string `json:"podPriorityClassName,omitempty"` + + // Priority to apply to workflow pods. + PodPriority *int32 `json:"podPriority,omitempty"` } // Template is a reusable and composable unit of execution in a workflow @@ -229,6 +236,12 @@ type Template struct { // If neither specified, the pod will be dispatched by default scheduler. // +optional SchedulerName string `json:"schedulerName,omitempty"` + + // PriorityClassName to apply to workflow pods. + PriorityClassName string `json:"priorityClassName,omitempty"` + + // Priority to apply to workflow pods. 
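+	// When both are set, this template-level value takes precedence over the workflow-level podPriority (applied in addSchedulingConstraints).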
+ Priority *int32 `json:"priority,omitempty"` } // Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index 9bce224c205b..60db0280d209 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -851,6 +851,11 @@ func (in *Template) DeepCopyInto(out *Template) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(int32) + **out = **in + } return } @@ -1025,6 +1030,11 @@ func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { *out = new(int32) **out = **in } + if in.PodPriority != nil { + in, out := &in.PodPriority, &out.PodPriority + *out = new(int32) + **out = **in + } return } diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 9f7081e4daf2..4efd896ee2f4 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -408,12 +408,31 @@ func addSchedulingConstraints(pod *apiv1.Pod, wfSpec *wfv1.WorkflowSpec, tmpl *w } else if len(wfSpec.Tolerations) > 0 { pod.Spec.Tolerations = wfSpec.Tolerations } + // Set scheduler name (if specified) if tmpl.SchedulerName != "" { pod.Spec.SchedulerName = tmpl.SchedulerName } else if wfSpec.SchedulerName != "" { pod.Spec.SchedulerName = wfSpec.SchedulerName } + // Set priorityClass (if specified) + if tmpl.PriorityClassName != "" { + pod.Spec.PriorityClassName = tmpl.PriorityClassName + } else if wfSpec.PodPriorityClassName != "" { + pod.Spec.PriorityClassName = wfSpec.PodPriorityClassName + } + // Set priority (if specified) + if tmpl.Priority != nil { + pod.Spec.Priority = tmpl.Priority + } else if wfSpec.PodPriority != nil { + pod.Spec.Priority = wfSpec.PodPriority + } + // Set schedulerName (if specified) + if tmpl.SchedulerName != "" { + pod.Spec.SchedulerName = tmpl.SchedulerName + } else if wfSpec.SchedulerName != "" { + pod.Spec.SchedulerName = wfSpec.SchedulerName + } } // addVolumeReferences adds any volumeMounts that a container/sidecar is referencing, to the pod.spec.volumes diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go index cc7928bccc1b..59a8750b573c 100644 --- a/workflow/controller/workflowpod_test.go +++ b/workflow/controller/workflowpod_test.go @@ -316,3 +316,28 @@ func TestOutOfCluster(t *testing.T) { assert.Equal(t, "--kubeconfig=/some/path/config", pod.Spec.Containers[1].Args[1]) } } + +// TestPriority verifies the ability to carry forward priorityClassName and priority. +func TestPriority(t *testing.T) { + priority := int32(15) + woc := newWoc() + woc.wf.Spec.Templates[0].PriorityClassName = "foo" + woc.wf.Spec.Templates[0].Priority = &priority + woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "") + podName := getPodName(woc.wf) + pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, pod.Spec.PriorityClassName, "foo") + assert.Equal(t, pod.Spec.Priority, &priority) +} + +// TestSchedulerName verifies the ability to carry forward schedulerName. 
+func TestSchedulerName(t *testing.T) {
+	woc := newWoc()
+	woc.wf.Spec.Templates[0].SchedulerName = "foo"
+	woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "")
+	podName := getPodName(woc.wf)
+	pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{})
+	assert.Nil(t, err)
+	assert.Equal(t, pod.Spec.SchedulerName, "foo")
+}

From 4bfbb20bc23f8bf4611a6314fb80f8138b17b9b9 Mon Sep 17 00:00:00 2001
From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com>
Date: Mon, 18 Mar 2019 11:26:46 -0700
Subject: [PATCH 092/145] Error running 1000s of tasks: "etcdserver: request is too large" #1186 (#1264)

* Error running 1000s of tasks: "etcdserver: request is too large" #1186

This PR addresses feature request #1186.
Issue: the node status element keeps growing for big workflows, and a workflow fails once its total size reaches 1 MB (the max request size limit in etcd).
Solution: compress the node status once the size reaches 1 MB, which allows 60% to 80% more steps to execute in compressed mode.
Latest: the Argo CLI and Argo UI are able to decode and print the node status from the compressed nodes.
Limitation: kubectl will not decode the compressedNodes element

* added Operator.go
* revert the testing yaml
* Fixed the lint issue
* fixed
* fixed lint
* Fixed Testcase
* incorporated the review comments
* Reverted the change
* incorporated review comments
* fixing gometalinter checks
* incorporated review comments
* Update pod-limits.yaml
* updated few comments
* updated error message format
* reverted unwanted files
---
 cmd/argo/commands/get.go | 24 ++++++-
 cmd/argo/commands/list.go | 4 ++
 cmd/argo/commands/logs.go | 12 +++-
 cmd/argo/commands/watch.go | 2 +
 pkg/apis/workflow/v1alpha1/types.go | 3 +
 util/file/fileutil.go | 97 +++++++++++++++++++++++++++++
 util/file/fileutil_test.go | 21 +++++++
 workflow/controller/controller.go | 10 +++
 workflow/controller/operator.go | 95 +++++++++++++++++++++++++++-
 workflow/executor/docker/docker.go | 2 +-
 workflow/util/file/fileutil.go | 52 ----------------
 11 files changed, 264 insertions(+), 58 deletions(-)
 create mode 100644 util/file/fileutil.go
 create mode 100644 util/file/fileutil_test.go
 delete mode 100644 workflow/util/file/fileutil.go

diff --git a/cmd/argo/commands/get.go b/cmd/argo/commands/get.go
index e57f52bba706..b0badcb001ee 100644
--- a/cmd/argo/commands/get.go
+++ b/cmd/argo/commands/get.go
@@ -8,12 +8,13 @@ import (
 	"strings"
 	"text/tabwriter"
 
+	"github.com/argoproj/argo/errors"
+	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
+	"github.com/argoproj/argo/util/file"
 	"github.com/argoproj/pkg/humanize"
 	"github.com/ghodss/yaml"
 	"github.com/spf13/cobra"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
 )
 
 const onExitSuffix = "onExit"
@@ -36,6 +37,10 @@ func NewGetCommand() *cobra.Command {
 			if err != nil {
 				log.Fatal(err)
 			}
+			err = CheckAndDecompress(wf)
+			if err != nil {
+				log.Fatal(err)
+			}
 			printWorkflow(wf, output)
 		},
 	}
@@ -45,6 +50,21 @@ func NewGetCommand() *cobra.Command {
 	return command
 }
 
+func CheckAndDecompress(wf *wfv1.Workflow) error {
+	if wf.Status.CompressedNodes != "" {
+		nodeContent, err := file.DecodeDecompressString(wf.Status.CompressedNodes)
+		if err != nil {
+			return errors.InternalWrapError(err)
+		}
+		err = json.Unmarshal([]byte(nodeContent), &wf.Status.Nodes)
+		if err != nil {
+			log.Fatal(err)
+		}
+		wf.Status.CompressedNodes = ""
+	}
+	return nil
+}
+
 func printWorkflow(wf *wfv1.Workflow, outFmt string) {
 	switch outFmt {
 	case "name":
diff --git a/cmd/argo/commands/list.go b/cmd/argo/commands/list.go
index 096426f8ecf6..9a0d04e3d6e9 100644
--- a/cmd/argo/commands/list.go
+++ b/cmd/argo/commands/list.go
@@ -134,6 +134,10 @@ func countPendingRunningCompleted(wf *wfv1.Workflow) (int, int, int) {
 	pending := 0
 	running := 0
 	completed := 0
+	err := CheckAndDecompress(wf)
+	if err != nil {
+		log.Fatal(err)
+	}
 	for _, node := range wf.Status.Nodes {
 		tmpl := wf.GetTemplate(node.TemplateName)
 		if tmpl == nil || !tmpl.IsPodType() {
diff --git a/cmd/argo/commands/logs.go b/cmd/argo/commands/logs.go
index a41ba6830b9c..cbc7a86c5db2 100644
--- a/cmd/argo/commands/logs.go
+++ b/cmd/argo/commands/logs.go
@@ -27,7 +27,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 )
 
 type logEntry struct {
@@ -136,6 +136,11 @@ func (p *logPrinter) PrintPodLogs(podName string) error {
 // Prints logs for workflow pod steps and return most recent log timestamp per pod name
 func (p *logPrinter) printRecentWorkflowLogs(wf *v1alpha1.Workflow) map[string]*time.Time {
 	var podNodes []v1alpha1.NodeStatus
+	err := CheckAndDecompress(wf)
+	if err != nil {
+		log.Warn(err)
+		return nil
+	}
 	for _, node := range wf.Status.Nodes {
 		if node.Type == v1alpha1.NodeTypePod && node.Phase != v1alpha1.NodeError {
 			podNodes = append(podNodes, node)
@@ -193,6 +198,11 @@ func (p *logPrinter) printLiveWorkflowLogs(workflowName string, wfClient workflo
 	defer cancel()
 
 	processPods := func(wf *v1alpha1.Workflow) {
+		err := CheckAndDecompress(wf)
+		if err != nil {
+			log.Warn(err)
+			return
+		}
 		for id := range wf.Status.Nodes {
 			node := wf.Status.Nodes[id]
 			if node.Type == v1alpha1.NodeTypePod && node.Phase != v1alpha1.NodeError && streamedPods[node.ID] == false {
diff --git a/cmd/argo/commands/watch.go b/cmd/argo/commands/watch.go
index 133ee033a5ee..abdcaaee5ae1 100644
--- a/cmd/argo/commands/watch.go
+++ b/cmd/argo/commands/watch.go
@@ -45,6 +45,8 @@ func watchWorkflow(name string) {
 		select {
 		case next := <-watchIf.ResultChan():
 			wf, _ = next.Object.(*wfv1.Workflow)
+			err := CheckAndDecompress(wf)
+			errors.CheckError(err)
 		case <-ticker.C:
 		}
 		if wf == nil {
diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go
index 9fb3cbf019d2..ff659353d360 100644
--- a/pkg/apis/workflow/v1alpha1/types.go
+++ b/pkg/apis/workflow/v1alpha1/types.go
@@ -490,6 +490,9 @@ type WorkflowStatus struct {
 	// A human readable message indicating details about why the workflow is in this condition.
 	Message string `json:"message,omitempty"`
 
+	// Compressed and base64 encoded Nodes map
+	CompressedNodes string `json:"compressedNodes,omitempty"`
+
 	// Nodes is a mapping between a node ID and the node's status.
Nodes map[string]NodeStatus `json:"nodes,omitempty"` diff --git a/util/file/fileutil.go b/util/file/fileutil.go new file mode 100644 index 000000000000..f99b2e5dea34 --- /dev/null +++ b/util/file/fileutil.go @@ -0,0 +1,97 @@ +package file + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/base64" + "io" + "io/ioutil" + "os" + "strings" + + log "github.com/sirupsen/logrus" +) + +// IsFileOrDirExistInGZip return true if file or directory exists in GZip file +func IsFileOrDirExistInGZip(sourcePath string, gzipFilePath string) bool { + + fi, err := os.Open(gzipFilePath) + + if os.IsNotExist(err) { + return false + } + defer close(fi) + + fz, err := gzip.NewReader(fi) + if err != nil { + return false + } + tr := tar.NewReader(fz) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + + return false + } + if hdr.FileInfo().IsDir() && strings.Contains(strings.Trim(hdr.Name, "/"), strings.Trim(sourcePath, "/")) { + return true + } + if strings.Contains(sourcePath, hdr.Name) && hdr.Size > 0 { + return true + } + } + return false +} + +//Close the file +func close(f io.Closer) { + err := f.Close() + if err != nil { + log.Warnf("Failed to close the file/writer/reader. %v", err) + } +} + +// CompressEncodeString will return the compressed string with base64 encoded +func CompressEncodeString(content string) string { + return base64.StdEncoding.EncodeToString(CompressContent([]byte(content))) +} + +// DecodeDecompressString will return decode and decompress the +func DecodeDecompressString(content string) (string, error) { + + buf, err := base64.StdEncoding.DecodeString(content) + if err != nil { + return "", err + } + dBuf, err := DecompressContent(buf) + if err != nil { + return "", err + } + return string(dBuf), nil +} + +// CompressContent will compress the byte array using zip writer +func CompressContent(content []byte) []byte { + var buf bytes.Buffer + zipWriter := gzip.NewWriter(&buf) + + _, err := zipWriter.Write(content) + if err != nil { + log.Warnf("Error in compressing: %v", err) + } + close(zipWriter) + return buf.Bytes() +} + +// DecompressContent will return the uncompressed content +func DecompressContent(content []byte) ([]byte, error) { + + buf := bytes.NewReader(content) + gZipReader, _ := gzip.NewReader(buf) + defer close(gZipReader) + return ioutil.ReadAll(gZipReader) +} diff --git a/util/file/fileutil_test.go b/util/file/fileutil_test.go new file mode 100644 index 000000000000..c6a6ecc7b8d2 --- /dev/null +++ b/util/file/fileutil_test.go @@ -0,0 +1,21 @@ +package file + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestResubmitWorkflowWithOnExit ensures we do not carry over the onExit node even if successful +func TestCompressContentString(t *testing.T) { + content := "{\"pod-limits-rrdm8-591645159\":{\"id\":\"pod-limits-rrdm8-591645159\",\"name\":\"pod-limits-rrdm8[0]." 
+ + "run-pod(0:0)\",\"displayName\":\"run-pod(0:0)\",\"type\":\"Pod\",\"templateName\":\"run-pod\",\"phase\":" + + "\"Succeeded\",\"boundaryID\":\"pod-limits-rrdm8\",\"startedAt\":\"2019-03-07T19:14:50Z\",\"finishedAt\":" + + "\"2019-03-07T19:14:55Z\"}}" + + compString := CompressEncodeString(content) + + resultString, _ := DecodeDecompressString(compString) + + assert.Equal(t, content, resultString) +} diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index c58c8d3117b1..d74036a4422d 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -243,6 +243,16 @@ func (wfc *WorkflowController) processNextItem() bool { } woc := newWorkflowOperationCtx(wf, wfc) + //Decompress the node if it is compressed + + err = woc.checkAndDecompress() + if err != nil { + log.Warnf("Failed to decompress '%s' to workflow object: %v", key, err) + woc.markWorkflowFailed(fmt.Sprintf("invalid spec: %s", err.Error())) + woc.persistUpdates() + wfc.throttler.Remove(key) + return true + } woc.operate() if woc.wf.Status.Completed() { wfc.throttler.Remove(key) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index e72b82034573..b584d8da6d37 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -24,6 +24,7 @@ import ( "github.com/argoproj/argo/errors" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1" + "github.com/argoproj/argo/util/file" "github.com/argoproj/argo/util/retry" "github.com/argoproj/argo/workflow/common" "github.com/argoproj/argo/workflow/util" @@ -72,6 +73,9 @@ var ( // for before requeuing the workflow onto the workqueue. const maxOperationTime time.Duration = 10 * time.Second +//maxWorkflowSize is the maximum size for workflow.yaml +const maxWorkflowSize int = 1024 * 1024 + // newWorkflowOperationCtx creates and initializes a new wfOperationCtx object. func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOperationCtx { // NEVER modify objects from the store. It's a read-only, local cache. @@ -275,9 +279,17 @@ func (woc *wfOperationCtx) persistUpdates() { return } wfClient := woc.controller.wfclientset.ArgoprojV1alpha1().Workflows(woc.wf.ObjectMeta.Namespace) - _, err := wfClient.Update(woc.wf) + err := woc.checkAndCompress() if err != nil { - woc.log.Warnf("Error updating workflow: %v", err) + woc.log.Warnf("Error compressing workflow: %v", err) + } + if woc.wf.Status.CompressedNodes != "" { + woc.wf.Status.Nodes = nil + } + + _, err = wfClient.Update(woc.wf) + if err != nil { + woc.log.Warnf("Error updating workflow: %v %s", err, apierr.ReasonForError(err)) if argokubeerr.IsRequestEntityTooLargeErr(err) { woc.persistWorkflowSizeLimitErr(wfClient, err) return @@ -450,11 +462,24 @@ func (woc *wfOperationCtx) podReconciliation() error { } for _, pod := range podList.Items { + origNodeStatus := *woc.wf.Status.DeepCopy() performAssessment(&pod) err = woc.applyExecutionControl(&pod) if err != nil { woc.log.Warnf("Failed to apply execution control to pod %s", pod.Name) } + err = woc.checkAndCompress() + if err != nil { + woc.wf.Status = origNodeStatus + nodeNameForPod := pod.Annotations[common.AnnotationKeyNodeName] + woc.log.Warnf("%v", err) + woc.markNodeErrorClearOuput(nodeNameForPod, err) + err = woc.checkAndCompress() + if err != nil { + woc.markWorkflowError(err, true) + } + } + } // Now check for deleted pods. Iterate our nodes. 
If any one of our nodes does not show up in
@@ -1138,6 +1163,14 @@ func (woc *wfOperationCtx) markNodePhase(nodeName string, phase wfv1.NodePhase,
 	return node
 }
 
+// markNodeErrorClearOuput is a convenience method to mark a node with an error and clear the output
+func (woc *wfOperationCtx) markNodeErrorClearOuput(nodeName string, err error) *wfv1.NodeStatus {
+	nodeStatus := woc.markNodeError(nodeName, err)
+	nodeStatus.Outputs = nil
+	woc.wf.Status.Nodes[nodeStatus.ID] = *nodeStatus
+	return nodeStatus
+}
+
 // markNodeError is a convenience method to mark a node with an error and set the message from the error
 func (woc *wfOperationCtx) markNodeError(nodeName string, err error) *wfv1.NodeStatus {
 	return woc.markNodePhase(nodeName, wfv1.NodeError, err.Error())
@@ -1576,3 +1609,61 @@ func expandSequence(seq *wfv1.Sequence) ([]wfv1.Item, error) {
 	}
 	return items, nil
 }
+
+// getSize returns the entire workflow JSON string size
+func (woc *wfOperationCtx) getSize() int {
+	nodeContent, err := json.Marshal(woc.wf)
+	if err != nil {
+		return -1
+	}
+
+	compressNodeSize := len(woc.wf.Status.CompressedNodes)
+
+	if compressNodeSize > 0 {
+		nodeStatus, err := json.Marshal(woc.wf.Status.Nodes)
+		if err != nil {
+			return -1
+		}
+		return len(nodeContent) - len(nodeStatus)
+	}
+	return len(nodeContent)
+}
+
+// checkAndCompress will check the workflow size and compress the node status if the total workflow size exceeds maxWorkflowSize.
+// The compressed content is assigned to the compressedNodes element and the node status map is cleared.
+func (woc *wfOperationCtx) checkAndCompress() error {
+
+	if woc.wf.Status.CompressedNodes != "" || (woc.wf.Status.CompressedNodes == "" && woc.getSize() >= maxWorkflowSize) {
+
+		nodeContent, err := json.Marshal(woc.wf.Status.Nodes)
+		if err != nil {
+			return errors.InternalWrapError(err)
+		}
+		buff := string(nodeContent)
+		woc.wf.Status.CompressedNodes = file.CompressEncodeString(buff)
+
+	}
+	if woc.wf.Status.CompressedNodes != "" && woc.getSize() >= maxWorkflowSize {
+		return errors.InternalError(fmt.Sprintf("Workflow is larger than maximum allowed size. Size=%d", woc.getSize()))
+	}
+	return nil
+}
+
+// checkAndDecompress will decompress the compressed nodes and assign them to the workflow.status.nodes map.
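+// It is the inverse of checkAndCompress; the controller runs it in processNextItem before operating on a workflow, so that Status.Nodes is populated.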
+func (woc *wfOperationCtx) checkAndDecompress() error { + if woc.wf.Status.CompressedNodes != "" { + nodeContent, err := file.DecodeDecompressString(woc.wf.Status.CompressedNodes) + if err != nil { + return errors.InternalWrapError(err) + } + var tempNodes map[string]wfv1.NodeStatus + + err = json.Unmarshal([]byte(nodeContent), &tempNodes) + if err != nil { + woc.log.Warn(err) + return err + } + woc.wf.Status.Nodes = tempNodes + } + return nil +} diff --git a/workflow/executor/docker/docker.go b/workflow/executor/docker/docker.go index 625359435f60..0d4084062f81 100644 --- a/workflow/executor/docker/docker.go +++ b/workflow/executor/docker/docker.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/argoproj/argo/workflow/util/file" + "github.com/argoproj/argo/util/file" "github.com/argoproj/argo/util" diff --git a/workflow/util/file/fileutil.go b/workflow/util/file/fileutil.go deleted file mode 100644 index ab3b325adee9..000000000000 --- a/workflow/util/file/fileutil.go +++ /dev/null @@ -1,52 +0,0 @@ -package file - -import ( - "archive/tar" - "compress/gzip" - "io" - "os" - "strings" - - log "github.com/sirupsen/logrus" -) - -//IsFileOrDirExistInGZip return true if file or directory exists in GZip file -func IsFileOrDirExistInGZip(sourcePath string, gzipFilePath string) bool { - - fi, err := os.Open(gzipFilePath) - - if os.IsNotExist(err) { - return false - } - defer closeFile(fi) - - fz, err := gzip.NewReader(fi) - if err != nil { - return false - } - tr := tar.NewReader(fz) - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - - return false - } - if hdr.FileInfo().IsDir() && strings.Contains(strings.Trim(hdr.Name, "/"), strings.Trim(sourcePath, "/")) { - return true - } - if strings.Contains(sourcePath, hdr.Name) && hdr.Size > 0 { - return true - } - } - return false -} - -func closeFile(f *os.File) { - err := f.Close() - if err != nil { - log.Warn("Failed to close the file. v%", err) - } -} From e6105243c785d9f53aef6fcfd344e855ad4f7d84 Mon Sep 17 00:00:00 2001 From: Xianlu Bird Date: Thu, 21 Mar 2019 16:22:20 +0800 Subject: [PATCH 093/145] Reduce redundancy pod label action (#1271) --- workflow/controller/operator.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index b584d8da6d37..417a460c7936 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -456,6 +456,11 @@ func (woc *wfOperationCtx) podReconciliation() error { } node := woc.wf.Status.Nodes[pod.ObjectMeta.Name] if node.Completed() && !node.IsDaemoned() { + if tmpVal, tmpOk := pod.Labels[common.LabelKeyCompleted]; tmpOk { + if tmpVal == "true" { + return + } + } woc.completedPods[pod.ObjectMeta.Name] = true } } From 73a37f2b2a12d74ddf6a4b54e04b50fa1a7c68a1 Mon Sep 17 00:00:00 2001 From: Ian Howell Date: Fri, 22 Mar 2019 15:40:19 -0500 Subject: [PATCH 094/145] Add the `mergeStrategy` option to resource patching (#1269) * This adds the ability to pass a mergeStrategy to a patch resource. this is valuable because the default merge strategy for kubernetes is 'strategic', which does not work with Custom Resources. 
* This also updates the resource example to demonstrate how it is used --- api/openapi-spec/swagger.json | 4 +++ examples/README.md | 34 ++++++++++++++++++- .../workflow/v1alpha1/openapi_generated.go | 7 ++++ pkg/apis/workflow/v1alpha1/types.go | 4 +++ workflow/executor/resource.go | 7 ++++ 5 files changed, 55 insertions(+), 1 deletion(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index eb0e362aad5b..f42eaec91ec0 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -538,6 +538,10 @@ "description": "Manifest contains the kubernetes manifest", "type": "string" }, + "mergeStrategy": { + "description": "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\" Must be one of: strategic, merge, json", + "type": "string" + }, "successCondition": { "description": "SuccessCondition is a label selector expression which describes the conditions of the k8s resource in which it is acceptable to proceed to the following step", "type": "string" diff --git a/examples/README.md b/examples/README.md index 57d8bdcdb119..34398e741696 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1164,7 +1164,6 @@ spec: - name: pi-tmpl resource: # indicates that this is a resource template action: create # can be any kubectl action (e.g. create, delete, apply, patch) - # Patch action will support only **json merge strategic** # The successCondition and failureCondition are optional expressions. # If failureCondition is true, the step is considered failed. # If successCondition is true, the step is considered successful. @@ -1194,6 +1193,39 @@ spec: Resources created in this way are independent of the workflow. If you want the resource to be deleted when the workflow is deleted then you can use [Kubernetes garbage collection](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/) with the workflow resource as an owner reference ([example](./k8s-owner-reference.yaml)). +**Note:** +When patching, the resource will accept another attribute, `mergeStrategy`, which can either be `strategic`, `merge`, or `json`. If this attribute is not supplied, it will default to `strategic`. Keep in mind that Custom Resources cannot be patched with `strategic`, so a different strategy must be chosen. For example, suppose you have the [CronTab CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#create-a-customresourcedefinition) defined, and the following instance of a CronTab: + +```yaml +apiVersion: "stable.example.com/v1" +kind: CronTab +spec: + cronSpec: "* * * * */5" + image: my-awesome-cron-image +``` + +This Crontab can be modified using the following Argo Workflow: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: k8s-patch- +spec: + entrypoint: cront-tmpl + templates: + - name: cront-tmpl + resource: + action: patch + mergeStrategy: merge # Must be one of [strategic merge json] + manifest: | + apiVersion: "stable.example.com/v1" + kind: CronTab + spec: + cronSpec: "* * * * */10" + image: my-awesome-cron-image +``` + ## Docker-in-Docker Using Sidecars An application of sidecars is to implement Docker-in-Docker (DinD). DinD is useful when you want to run Docker commands from inside a container. For example, you may want to build and push a container image from inside your build container. 
In the following example, we use the docker:dind container to run a Docker daemon in a sidecar and give the main container access to the daemon. diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index de74605e3487..b203430f3f8f 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -1005,6 +1005,13 @@ func schema_pkg_apis_workflow_v1alpha1_ResourceTemplate(ref common.ReferenceCall Format: "", }, }, + "mergeStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\" Must be one of: strategic, merge, json", + Type: []string{"string"}, + Format: "", + }, + }, "manifest": { SchemaProps: spec.SchemaProps{ Description: "Manifest contains the kubernetes manifest", diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index ff659353d360..852746236326 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -781,6 +781,10 @@ type ResourceTemplate struct { // Must be one of: get, create, apply, delete, replace Action string `json:"action"` + // MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" + // Must be one of: strategic, merge, json + MergeStrategy string `json:"mergeStrategy,omitempty"` + // Manifest contains the kubernetes manifest Manifest string `json:"manifest"` diff --git a/workflow/executor/resource.go b/workflow/executor/resource.go index 9866d858040b..270ab69298dc 100644 --- a/workflow/executor/resource.go +++ b/workflow/executor/resource.go @@ -31,6 +31,13 @@ func (we *WorkflowExecutor) ExecResource(action string, manifestPath string, isD } if action == "patch" { + mergeStrategy := "strategic" + if we.Template.Resource.MergeStrategy != "" { + mergeStrategy = we.Template.Resource.MergeStrategy + } + + args = append(args, "--type") + args = append(args, mergeStrategy) args = append(args, "-p") buff, err := ioutil.ReadFile(manifestPath) From 1e111caa1d2cc672b3b53c202b96a5f660a7e9b2 Mon Sep 17 00:00:00 2001 From: Fred Dubois <169247+duboisf@users.noreply.github.com> Date: Fri, 22 Mar 2019 16:42:38 -0400 Subject: [PATCH 095/145] Fix bug with DockerExecutor's CopyFile (#1275) The check to see if the source path was in the tgz archive was wrong when source path was a folder, the arguments to strings.Contains were inverted. 
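A minimal standalone sketch (not part of the patch) of the inversion described above, using the same trimming as ExistsInTar; the paths are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Trimmed the same way ExistsInTar trims its inputs.
	sourcePath := strings.Trim("/tmp/subfolder", "/") // "tmp/subfolder"
	entryName := strings.Trim("subfolder/", "/")      // tar header name for the folder

	// Buggy argument order: does the short entry name contain the longer source path?
	fmt.Println(strings.Contains(entryName, sourcePath)) // false, so the folder was reported missing

	// Fixed argument order: does the source path contain the entry name?
	fmt.Println(strings.Contains(sourcePath, entryName)) // true
}
```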
--- util/file/fileutil.go | 26 +++---- util/file/fileutil_test.go | 110 +++++++++++++++++++++++++++-- workflow/executor/docker/docker.go | 17 +++-- 3 files changed, 126 insertions(+), 27 deletions(-) diff --git a/util/file/fileutil.go b/util/file/fileutil.go index f99b2e5dea34..37f6a56179c2 100644 --- a/util/file/fileutil.go +++ b/util/file/fileutil.go @@ -7,37 +7,27 @@ import ( "encoding/base64" "io" "io/ioutil" - "os" "strings" log "github.com/sirupsen/logrus" ) -// IsFileOrDirExistInGZip return true if file or directory exists in GZip file -func IsFileOrDirExistInGZip(sourcePath string, gzipFilePath string) bool { - - fi, err := os.Open(gzipFilePath) - - if os.IsNotExist(err) { - return false - } - defer close(fi) +type TarReader interface { + Next() (*tar.Header, error) +} - fz, err := gzip.NewReader(fi) - if err != nil { - return false - } - tr := tar.NewReader(fz) +// ExistsInTar return true if file or directory exists in tar +func ExistsInTar(sourcePath string, tarReader TarReader) bool { + sourcePath = strings.Trim(sourcePath, "/") for { - hdr, err := tr.Next() + hdr, err := tarReader.Next() if err == io.EOF { break } if err != nil { - return false } - if hdr.FileInfo().IsDir() && strings.Contains(strings.Trim(hdr.Name, "/"), strings.Trim(sourcePath, "/")) { + if hdr.FileInfo().IsDir() && strings.Contains(sourcePath, strings.Trim(hdr.Name, "/")) { return true } if strings.Contains(sourcePath, hdr.Name) && hdr.Size > 0 { diff --git a/util/file/fileutil_test.go b/util/file/fileutil_test.go index c6a6ecc7b8d2..32379866afb2 100644 --- a/util/file/fileutil_test.go +++ b/util/file/fileutil_test.go @@ -1,9 +1,12 @@ -package file +package file_test import ( - "testing" - + "archive/tar" + "bytes" + "github.com/argoproj/argo/util/file" "github.com/stretchr/testify/assert" + "os" + "testing" ) // TestResubmitWorkflowWithOnExit ensures we do not carry over the onExit node even if successful @@ -13,9 +16,106 @@ func TestCompressContentString(t *testing.T) { "\"Succeeded\",\"boundaryID\":\"pod-limits-rrdm8\",\"startedAt\":\"2019-03-07T19:14:50Z\",\"finishedAt\":" + "\"2019-03-07T19:14:55Z\"}}" - compString := CompressEncodeString(content) + compString := file.CompressEncodeString(content) - resultString, _ := DecodeDecompressString(compString) + resultString, _ := file.DecodeDecompressString(compString) assert.Equal(t, content, resultString) } + +func TestExistsInTar(t *testing.T) { + type fakeFile struct { + name, body string + isDir bool + } + + newTarReader := func(t *testing.T, files []fakeFile) *tar.Reader { + var buf bytes.Buffer + writer := tar.NewWriter(&buf) + for _, f := range files { + mode := os.FileMode(0600) + if f.isDir { + mode |= os.ModeDir + } + hdr := tar.Header{Name: f.name, Mode: int64(mode), Size: int64(len(f.body))} + err := writer.WriteHeader(&hdr) + assert.Nil(t, err) + _, err = writer.Write([]byte(f.body)) + assert.Nil(t, err) + } + err := writer.Close() + assert.Nil(t, err) + return tar.NewReader(&buf) + } + + type TestCase struct { + sourcePath string + expected bool + files []fakeFile + } + + tests := []TestCase{ + { + sourcePath: "/root.txt", expected: true, + files: []fakeFile{{name: "root.txt", body: "file in the root"}}, + }, + { + sourcePath: "/tmp/file/in/subfolder.txt", expected: true, + files: []fakeFile{{name: "subfolder.txt", body: "a file in a subfolder"}}, + }, + { + sourcePath: "/root", expected: true, + files: []fakeFile{ + {name: "root/", isDir: true}, + {name: "root/a.txt", body: "a"}, + {name: "root/b.txt", body: "b"}, + }, + }, + { + sourcePath: 
"/tmp/subfolder", expected: true, + files: []fakeFile{ + {name: "subfolder/", isDir: true}, + {name: "subfolder/a.txt", body: "a"}, + {name: "subfolder/b.txt", body: "b"}, + }, + }, + { + // should an empty tar return true?? + sourcePath: "/tmp/empty", expected: true, + files: []fakeFile{ + {name: "empty/", isDir: true}, + }, + }, + { + sourcePath: "/tmp/folder/that", expected: false, + files: []fakeFile{ + {name: "this/", isDir: true}, + {name: "this/a.txt", body: "a"}, + {name: "this/b.txt", body: "b"}, + }, + }, + { + sourcePath: "/empty.txt", expected: false, + files: []fakeFile{ + // fails because empty.txt is empty + {name: "empty.txt", body: ""}, + }, + }, + { + sourcePath: "/tmp/empty.txt", expected: false, + files: []fakeFile{ + // fails because empty.txt is empty + {name: "empty.txt", body: ""}, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run("source path "+tc.sourcePath, func(t *testing.T) { + t.Parallel() + tarReader := newTarReader(t, tc.files) + actual := file.ExistsInTar(tc.sourcePath, tarReader) + assert.Equalf(t, tc.expected, actual, "sourcePath %s not found", tc.sourcePath) + }) + } +} diff --git a/workflow/executor/docker/docker.go b/workflow/executor/docker/docker.go index 0d4084062f81..020ae938d1ea 100644 --- a/workflow/executor/docker/docker.go +++ b/workflow/executor/docker/docker.go @@ -1,6 +1,8 @@ package docker import ( + "archive/tar" + "compress/gzip" "fmt" "os" "os/exec" @@ -53,10 +55,17 @@ func (d *DockerExecutor) CopyFile(containerID string, sourcePath string, destPat if err != nil { return err } - if !file.IsFileOrDirExistInGZip(sourcePath, destPath) { - errMsg := fmt.Sprintf("File or Artifact does not exist. %s", sourcePath) - log.Warn(errMsg) - return errors.InternalError(errMsg) + copiedFile, err := os.Open(destPath) + if err != nil { + return err + } + defer util.Close(copiedFile) + gzipReader, err := gzip.NewReader(copiedFile) + if err != nil { + return err + } + if !file.ExistsInTar(sourcePath, tar.NewReader(gzipReader)) { + return errors.InternalErrorf("path %s does not exist (or %s is empty) in archive %s", sourcePath, sourcePath, destPath) } log.Infof("Archiving completed") return nil From 59fcc5cc33ce67c057064dc37a463707501615e1 Mon Sep 17 00:00:00 2001 From: Johannes 'fish' Ziemke Date: Mon, 25 Mar 2019 18:50:20 +0100 Subject: [PATCH 096/145] Add workflow labels and annotations global vars (#1280) --- docs/variables.md | 2 + workflow/controller/operator.go | 6 +++ workflow/controller/operator_test.go | 61 ++++++++++++++++++++++++++++ workflow/validate/validate.go | 9 ++++ 4 files changed, 78 insertions(+) diff --git a/docs/variables.md b/docs/variables.md index cabf49084437..98dc34c067c4 100644 --- a/docs/variables.md +++ b/docs/variables.md @@ -43,6 +43,8 @@ The following variables are made available to reference various metadata of a wo | `workflow.uid` | Workflow UID. Useful for setting ownership reference to a resource, or a unique artifact location | | `workflow.parameters.` | Input parameter to the workflow | | `workflow.outputs.parameters.` | Input artifact to the workflow | +| `workflow.annotations.` | Workflow annotations | +| `workflow.labels.` | Workflow labels | | `workflow.creationTimestamp` | Workflow creation timestamp formatted in RFC 3339 (e.g. 
`2018-08-23T05:42:49Z`) | | `workflow.creationTimestamp.` | Creation timestamp formatted with a [strftime](http://strftime.org) format character | diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 417a460c7936..f2cb6e4fcaa8 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -254,6 +254,12 @@ func (woc *wfOperationCtx) setGlobalParameters() { for _, param := range woc.wf.Spec.Arguments.Parameters { woc.globalParams["workflow.parameters."+param.Name] = *param.Value } + for k, v := range woc.wf.ObjectMeta.Annotations { + woc.globalParams["workflow.annotations."+k] = v + } + for k, v := range woc.wf.ObjectMeta.Labels { + woc.globalParams["workflow.labels."+k] = v + } if woc.wf.Status.Outputs != nil { for _, param := range woc.wf.Status.Outputs.Parameters { woc.globalParams["workflow.outputs.parameters."+param.Name] = *param.Value diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index 42e43e22fa1e..b8b5b3f543e9 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -886,3 +886,64 @@ func TestExpandWithSequence(t *testing.T) { assert.Equal(t, "testuser01", items[0].Value.(string)) assert.Equal(t, "testuser0A", items[9].Value.(string)) } + +var metadataTemplate = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: metadata-template + labels: + image: foo:bar + annotations: + k8s-webhook-handler.io/repo: "git@github.com:argoproj/argo.git" + k8s-webhook-handler.io/revision: 1e111caa1d2cc672b3b53c202b96a5f660a7e9b2 +spec: + entrypoint: foo + templates: + - name: foo + container: + image: "{{workflow.labels.image}}" + env: + - name: REPO + value: "{{workflow.annotations.k8s-webhook-handler.io/repo}}" + - name: REVISION + value: "{{workflow.annotations.k8s-webhook-handler.io/revision}}" + command: [sh, -c] + args: ["echo hello world"] +` + +func TestMetadataPassing(t *testing.T) { + controller := newController() + wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("") + wf := unmarshalWF(metadataTemplate) + wf, err := wfcset.Create(wf) + assert.Nil(t, err) + wf, err = wfcset.Get(wf.ObjectMeta.Name, metav1.GetOptions{}) + assert.Nil(t, err) + woc := newWorkflowOperationCtx(wf, controller) + woc.operate() + assert.Equal(t, wfv1.NodeRunning, woc.wf.Status.Phase) + pods, err := controller.kubeclientset.CoreV1().Pods(wf.ObjectMeta.Namespace).List(metav1.ListOptions{}) + assert.Nil(t, err) + assert.True(t, len(pods.Items) > 0, "pod was not created successfully") + + var ( + pod = pods.Items[0] + container = pod.Spec.Containers[0] + foundRepo = false + foundRev = false + ) + for _, ev := range container.Env { + switch ev.Name { + case "REPO": + assert.Equal(t, "git@github.com:argoproj/argo.git", ev.Value) + foundRepo = true + case "REVISION": + assert.Equal(t, "1e111caa1d2cc672b3b53c202b96a5f660a7e9b2", ev.Value) + foundRev = true + } + } + assert.True(t, foundRepo) + assert.True(t, foundRev) + assert.Equal(t, "foo:bar", container.Image) +} diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index 95524577d8ea..8f5085fb466f 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -66,6 +66,14 @@ func ValidateWorkflow(wf *wfv1.Workflow, lint ...bool) error { for _, param := range ctx.wf.Spec.Arguments.Parameters { ctx.globalParams["workflow.parameters."+param.Name] = placeholderValue } + + for k := range ctx.wf.ObjectMeta.Annotations { + ctx.globalParams["workflow.annotations."+k] = 
placeholderValue
+	}
+	for k := range ctx.wf.ObjectMeta.Labels {
+		ctx.globalParams["workflow.labels."+k] = placeholderValue
+	}
+
 	if ctx.wf.Spec.Entrypoint == "" {
 		return errors.New(errors.CodeBadRequest, "spec.entrypoint is required")
 	}
@@ -111,6 +119,7 @@ func (ctx *wfValidationCtx) validateTemplate(tmpl *wfv1.Template, args wfv1.Argu
 		localParams[common.LocalVarPodName] = placeholderValue
 		scope[common.LocalVarPodName] = placeholderValue
 	}
+
 	_, err = common.ProcessArgs(tmpl, args, ctx.globalParams, localParams, true)
 	if err != nil {
 		return errors.Errorf(errors.CodeBadRequest, "templates.%s %s", tmpl.Name, err)

From adab9ed6bc4f8f337105182c56abad39bccb9676 Mon Sep 17 00:00:00 2001
From: Ed Lee
Date: Mon, 25 Mar 2019 21:42:51 -0700
Subject: [PATCH 097/145] Argo CI is currently inactive (#1285)

---
 README.md | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 10b2665a5567..6558b49a28de 100644
--- a/README.md
+++ b/README.md
@@ -24,8 +24,6 @@ Argoproj is a collection of tools for getting work done with Kubernetes.
 * [Argo Workflows](https://github.com/argoproj/argo) - Container-native Workflow Engine
 * [Argo CD](https://github.com/argoproj/argo-cd) - Declarative GitOps Continuous Delivery
 * [Argo Events](https://github.com/argoproj/argo-events) - Event-based Dependency Manager
-* [Argo CI](https://github.com/argoproj/argo-ci) - Simple CI based on GitHUb and Argo Workflows
-
 ## What is Argo Workflows?
 Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
@@ -91,9 +89,9 @@ Currently **officially** using Argo:
 1. [Localytics](https://www.localytics.com/)
 1. [NVIDIA](https://www.nvidia.com/)
 1. [Preferred Networks](https://www.preferred-networks.jp/en/)
+1. [Quantibio](http://quantibio.com/us/en/)
 1. [SAP Hybris](https://cx.sap.com/)
 1. [Styra](https://www.styra.com/)
-1. [Quantibio](http://quantibio.com/us/en/)
 
 ## Community Blogs and Presentations
 * [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/)

From 9b555cdb30f6092d5f53891f318fb74b8371c039 Mon Sep 17 00:00:00 2001
From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com>
Date: Wed, 27 Mar 2019 07:34:29 -0700
Subject: [PATCH 098/145] Issue#896 Workflow steps with non-existent output artifact path will succeed (#1277)

* Issue#896 Workflow steps with non-existent output artifact path will succeed

Issue: https://github.com/argoproj/argo/issues/897

Solution: Added a new element "optional" to Artifact. The default is false. This flag makes an artifact optional, so the existence check is skipped when an input/output artifact has optional=true.
Output Artifact ( optional=true ): Artifact existence check will be ignored during the save artifact in destination and continued workflow Input Artifact ( optional=true ): Artifact exist check will be ignored during load artifact from source and continued workflow * added end of line * removed unwanted whitespace * Deleted test code * go formatted * added formatting directives * updated Codegen * Fixed format on merge conflict * format fix * updated comments * improved error case --- api/openapi-spec/swagger.json | 4 ++ errors/errors.go | 2 +- .../workflow/v1alpha1/openapi_generated.go | 7 ++++ pkg/apis/workflow/v1alpha1/types.go | 3 ++ .../input-artifact-not-optional.yaml | 22 ++++++++++ .../output-artifact-not-optional.yaml | 24 +++++++++++ .../functional/input-artifact-optional.yaml | 22 ++++++++++ .../functional/output-artifact-optional.yaml | 24 +++++++++++ .../output-input-artifact-optional.yaml | 40 +++++++++++++++++++ workflow/common/util.go | 4 +- workflow/executor/docker/docker.go | 4 +- workflow/executor/executor.go | 15 +++++++ 12 files changed, 167 insertions(+), 4 deletions(-) create mode 100644 test/e2e/expectedfailures/input-artifact-not-optional.yaml create mode 100644 test/e2e/expectedfailures/output-artifact-not-optional.yaml create mode 100644 test/e2e/functional/input-artifact-optional.yaml create mode 100644 test/e2e/functional/output-artifact-optional.yaml create mode 100644 test/e2e/functional/output-input-artifact-optional.yaml diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index f42eaec91ec0..17bf45108f47 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -83,6 +83,10 @@ "description": "name of the artifact. must be unique within a template's inputs/outputs.", "type": "string" }, + "optional": { + "description": "Make Artifacts optional, if Artifacts doesn't generate or exist", + "type": "boolean" + }, "path": { "description": "Path is the container path to the artifact", "type": "string" diff --git a/errors/errors.go b/errors/errors.go index 22177ccaa4f1..1fbe662557a7 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -14,7 +14,7 @@ const ( CodeBadRequest = "ERR_BAD_REQUEST" CodeForbidden = "ERR_FORBIDDEN" CodeNotFound = "ERR_NOT_FOUND" - CodeNotImplemented = "ERR_NOT_INPLEMENTED" + CodeNotImplemented = "ERR_NOT_IMPLEMENTED" CodeTimeout = "ERR_TIMEOUT" CodeInternal = "ERR_INTERNAL" ) diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index b203430f3f8f..ade406b7685e 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -206,6 +206,13 @@ func schema_pkg_apis_workflow_v1alpha1_Artifact(ref common.ReferenceCallback) co Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArchiveStrategy"), }, }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Make Artifacts optional, if Artifacts doesn't generate or exist", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"name"}, }, diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index 852746236326..3380be49fbc6 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -319,6 +319,9 @@ type Artifact struct { // Archive controls how the artifact will be saved to the artifact repository. 
Archive *ArchiveStrategy `json:"archive,omitempty"` + + // Make Artifacts optional, if Artifacts doesn't generate or exist + Optional bool `json:"optional,omitempty"` } // ArchiveStrategy describes how to archive files/directory when saving artifacts diff --git a/test/e2e/expectedfailures/input-artifact-not-optional.yaml b/test/e2e/expectedfailures/input-artifact-not-optional.yaml new file mode 100644 index 000000000000..e1a3615c71ad --- /dev/null +++ b/test/e2e/expectedfailures/input-artifact-not-optional.yaml @@ -0,0 +1,22 @@ +# This example demonstrates the input artifacts not optionals +# from one step to the next. +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: input-artifact-not-optional- +spec: + entrypoint: http-artifact-example + templates: + - name: http-artifact-example + inputs: + artifacts: + - name: kubectl + path: /bin/kubectl + mode: 0755 + optional: false + http: + url: "" + container: + image: debian:9.4 + command: [sh, -c] + args: ["echo NoKubectl"] diff --git a/test/e2e/expectedfailures/output-artifact-not-optional.yaml b/test/e2e/expectedfailures/output-artifact-not-optional.yaml new file mode 100644 index 000000000000..d6fe97da86b6 --- /dev/null +++ b/test/e2e/expectedfailures/output-artifact-not-optional.yaml @@ -0,0 +1,24 @@ +# This example demonstrates the output artifacts not optionals +# from one step to the next. +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: output-artifact-not-optional- +spec: + entrypoint: artifact-example + templates: + - name: artifact-example + steps: + - - name: generate-artifact + template: whalesay + + - name: whalesay + container: + image: docker/whalesay:latest + command: [sh, -c] + args: ["cowsay hello world | tee /tmp/hello_world12.txt"] + outputs: + artifacts: + - name: hello-art + optional: false + path: /tmp/hello_world.txt diff --git a/test/e2e/functional/input-artifact-optional.yaml b/test/e2e/functional/input-artifact-optional.yaml new file mode 100644 index 000000000000..9b7a8a051b19 --- /dev/null +++ b/test/e2e/functional/input-artifact-optional.yaml @@ -0,0 +1,22 @@ +# This example demonstrates the input artifacts optionals +# from one step to the next. +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: input-artifact-optional- +spec: + entrypoint: http-artifact-example + templates: + - name: http-artifact-example + inputs: + artifacts: + - name: kubectl + path: /bin/kubectl + mode: 0755 + optional: true + http: + url: "" + container: + image: debian:9.4 + command: [sh, -c] + args: ["echo NoKubectl"] diff --git a/test/e2e/functional/output-artifact-optional.yaml b/test/e2e/functional/output-artifact-optional.yaml new file mode 100644 index 000000000000..3713b45450de --- /dev/null +++ b/test/e2e/functional/output-artifact-optional.yaml @@ -0,0 +1,24 @@ +# This example demonstrates the output artifacts optionals +# from one step to the next. 
+apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: output-artifact-optional- +spec: + entrypoint: artifact-example + templates: + - name: artifact-example + steps: + - - name: generate-artifact + template: whalesay + + - name: whalesay + container: + image: docker/whalesay:latest + command: [sh, -c] + args: ["cowsay hello world | tee /tmp/hello_world12.txt"] + outputs: + artifacts: + - name: hello-art + optional: true + path: /tmp/hello_world.txt diff --git a/test/e2e/functional/output-input-artifact-optional.yaml b/test/e2e/functional/output-input-artifact-optional.yaml new file mode 100644 index 000000000000..a29959fed04d --- /dev/null +++ b/test/e2e/functional/output-input-artifact-optional.yaml @@ -0,0 +1,40 @@ +# This example demonstrates the output and input artifacts are optionals +# from one step to the next. +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: output-input-artifact-optional- +spec: + entrypoint: artifact-example + templates: + - name: artifact-example + steps: + - - name: generate-artifact + template: whalesay + - - name: consume-artifact + template: print-message + arguments: + artifacts: + - name: message + from: "{{steps.generate-artifact.outputs.artifacts.hello-art}}" + - name: whalesay + container: + image: docker/whalesay:latest + command: [sh, -c] + args: ["cowsay hello world | tee /tmp/hello_world123.txt"] + outputs: + artifacts: + - name: hello-art + optional: true + path: /tmp/hello_world.txt + + - name: print-message + inputs: + artifacts: + - name: message + path: /tmp/message + optional: true + container: + image: alpine:latest + command: [sh, -c] + args: ["echo /tmp/message"] diff --git a/workflow/common/util.go b/workflow/common/util.go index 88f3431ecd0e..860c6b47370d 100644 --- a/workflow/common/util.go +++ b/workflow/common/util.go @@ -148,10 +148,10 @@ func ProcessArgs(tmpl *wfv1.Template, args wfv1.Arguments, globalParams, localPa } // artifact must be supplied argArt := args.GetArtifactByName(inArt.Name) - if argArt == nil { + if !inArt.Optional && argArt == nil { return nil, errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s was not supplied", inArt.Name) } - if !argArt.HasLocation() && !validateOnly { + if !inArt.Optional && !argArt.HasLocation() && !validateOnly { return nil, errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s missing location information", inArt.Name) } argArt.Path = inArt.Path diff --git a/workflow/executor/docker/docker.go b/workflow/executor/docker/docker.go index 020ae938d1ea..9b0c7d9266bb 100644 --- a/workflow/executor/docker/docker.go +++ b/workflow/executor/docker/docker.go @@ -65,7 +65,9 @@ func (d *DockerExecutor) CopyFile(containerID string, sourcePath string, destPat return err } if !file.ExistsInTar(sourcePath, tar.NewReader(gzipReader)) { - return errors.InternalErrorf("path %s does not exist (or %s is empty) in archive %s", sourcePath, sourcePath, destPath) + errMsg := fmt.Sprintf("path %s does not exist (or %s is empty) in archive %s", sourcePath, sourcePath, destPath) + log.Warn(errMsg) + return errors.Errorf(errors.CodeNotFound, errMsg) } log.Infof("Archiving completed") return nil diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index 955c4e7927b3..c361f3f3abc4 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -114,7 +114,17 @@ func (we *WorkflowExecutor) LoadArtifacts() error { log.Infof("Start loading input artifacts...") for _, art := range we.Template.Inputs.Artifacts { + 
log.Infof("Downloading artifact: %s", art.Name) + + if !art.HasLocation() { + if art.Optional { + log.Warnf("Artifact %s was not supplied; it is configured as optional, so it will be ignored", art.Name) + continue + } else { + return errors.Errorf(errors.CodeBadRequest, "required artifact %s not supplied", art.Name) + } + } artDriver, err := we.InitDriver(art) if err != nil { return err } @@ -232,6 +242,10 @@ func (we *WorkflowExecutor) saveArtifact(tempOutArtDir string, mainCtrID string, localArtPath := path.Join(tempOutArtDir, fileName) err := we.RuntimeExecutor.CopyFile(mainCtrID, art.Path, localArtPath) if err != nil { + if art.Optional && errors.IsCode(errors.CodeNotFound, err) { + log.Warnf("Error saving artifact; the artifact is configured as optional, so the error will be ignored: %v", err) + return nil + } return err } fileName, localArtPath, err = stageArchiveFile(fileName, localArtPath, art) @@ -502,6 +516,7 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv if art.Raw != nil { return &raw.RawArtifactDriver{}, nil } + return nil, errors.Errorf(errors.CodeBadRequest, "Unsupported artifact driver for %s", art.Name) } From d5f4b428ce02de34a37d5cb2fdba4dfa9fd16e75 Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com> Date: Wed, 27 Mar 2019 16:41:43 -0700 Subject: [PATCH 099/145] Fix for Resource creation where template has same parameter templating (#1283) * Fix for Resource creation where template has same parameter templating This PR enables support for custom template variable references. Solution: workflow variable reference resolution now checks for an allowed workflow variable prefix. * added test * fixed gofmt issue * fixed format * fixed gofmt on common.go * fixed testcase * fixed gofmt * Added unit testcase and documented * fixed Gofmt format * updated comments --- examples/README.md | 42 ++++++++++++++++++- .../functional/custom_template_variable.yaml | 32 ++++++++++++++ workflow/common/common.go | 3 ++ workflow/validate/validate.go | 15 +++++++ workflow/validate/validate_test.go | 20 +++++++++ 5 files changed, 111 insertions(+), 1 deletion(-) create mode 100644 test/e2e/functional/custom_template_variable.yaml diff --git a/examples/README.md b/examples/README.md index 34398e741696..c65aacc103b7 100644 --- a/examples/README.md +++ b/examples/README.md @@ -34,7 +34,7 @@ For a complete description of the Argo workflow spec, please refer to https://gi - [Kubernetes Resources](#kubernetes-resources) - [Docker-in-Docker Using Sidecars](#docker-in-docker-aka-dind-using-sidecars) - [Continuous Integration Example](#continuous-integration-example) - +- [Custom Template Variable Reference](#custom-template-variable-reference) ## Argo CLI In case you want to follow along with this walkthrough, here's a quick overview of the most useful argo command line interface (CLI) commands. @@ -1258,6 +1258,46 @@ spec: mirrorVolumeMounts: true ``` +## Custom Template Variable Reference +This example shows how variable references from another template language (e.g. Jinja) can be used in an Argo workflow template.
+Argo validates and resolves only variables that start with one of the allowed prefixes +{***"item", "steps", "inputs", "outputs", "workflow", "tasks"***} + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: custom-template-variable- +spec: + entrypoint: hello-hello-hello + + templates: + - name: hello-hello-hello + steps: + - - name: hello1 + template: whalesay + arguments: + parameters: [{name: message, value: "hello1"}] + - - name: hello2a + template: whalesay + arguments: + parameters: [{name: message, value: "hello2a"}] + - name: hello2b + template: whalesay + arguments: + parameters: [{name: message, value: "hello2b"}] + + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay + command: [cowsay] + args: ["{{user.username}}"] + +``` + ## Continuous Integration Example Continuous integration is a popular application for workflows. Currently, Argo does not provide event triggers for automatically kicking off your CI jobs, but we plan to do so in the near future. Until then, you can easily write a cron job that checks for new commits and kicks off the needed workflow, or use your existing Jenkins server to kick off the workflow. diff --git a/test/e2e/functional/custom_template_variable.yaml b/test/e2e/functional/custom_template_variable.yaml new file mode 100644 index 000000000000..f9ee8fca8df2 --- /dev/null +++ b/test/e2e/functional/custom_template_variable.yaml @@ -0,0 +1,32 @@ +# This template demonstrates custom template variable support. +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: custom-template-variable- +spec: + entrypoint: hello-hello-hello + + templates: + - name: hello-hello-hello + steps: + - - name: hello1 + template: whalesay + arguments: + parameters: [{name: message, value: "hello1"}] + - - name: hello2a + template: whalesay + arguments: + parameters: [{name: message, value: "hello2a"}] + - name: hello2b + template: whalesay + arguments: + parameters: [{name: message, value: "hello2b"}] + + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay + command: [cowsay] + args: ["{{custom.variable}}"] diff --git a/workflow/common/common.go b/workflow/common/common.go index d228d9c0f835..66275638cd55 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -115,6 +115,9 @@ const ( KubeConfigDefaultVolumeName = "kubeconfig" ) +// GlobalVarValidWorkflowVariablePrefix is a list of root prefixes in a workflow that may be used in variable references +var GlobalVarValidWorkflowVariablePrefix = []string{"item.", "steps.", "inputs.", "pod.", "workflow.", "tasks."} + // ExecutionControl contains execution control parameters for executor to decide how to execute the container type ExecutionControl struct { // Deadline is a max timestamp in which an executor can run the container before terminating it diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index 8f5085fb466f..4bdaf20d2d24 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -230,6 +230,11 @@ func resolveAllVariables(scope map[string]interface{}, tmplStr string) error { fstTmpl := fasttemplate.New(tmplStr, "{{", "}}") fstTmpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { + + // Skip the custom variable references + if !checkValidWorkflowVariablePrefix(tag) { + return 0, nil + } _, ok := scope[tag] if !ok && unresolvedErr == nil { if (tag == "item" || strings.HasPrefix(tag, "item.")) && allowAllItemRefs { @@ 
-245,6 +250,16 @@ func resolveAllVariables(scope map[string]interface{}, tmplStr string) error { return unresolvedErr } +// checkValidWorkflowVariablePrefix is a helper method that checks whether a variable starts with one of the workflow root prefixes +func checkValidWorkflowVariablePrefix(tag string) bool { + for _, rootTag := range common.GlobalVarValidWorkflowVariablePrefix { + if strings.HasPrefix(tag, rootTag) { + return true + } + } + return false +} + func validateNonLeaf(tmpl *wfv1.Template) error { if tmpl.ActiveDeadlineSeconds != nil { return errors.Errorf(errors.CodeBadRequest, "templates.%s.activeDeadlineSeconds is only valid for leaf templates", tmpl.Name) diff --git a/workflow/validate/validate_test.go b/workflow/validate/validate_test.go index 886dfd126bbf..252b1db5191c 100644 --- a/workflow/validate/validate_test.go +++ b/workflow/validate/validate_test.go @@ -1142,3 +1142,23 @@ func TestSpecBadSequenceCountAndEnd(t *testing.T) { err := ValidateWorkflow(wf, true) assert.Error(t, err) } + +var customVariableInput = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: hello-world- +spec: + entrypoint: whalesay + templates: + - name: whalesay + container: + image: docker/whalesay:{{user.username}} +` + +// TestCustomTemplateVariable verifies custom template variable resolution +func TestCustomTemplateVariable(t *testing.T) { + wf := unmarshalWf(customVariableInput) + err := ValidateWorkflow(wf, true) + assert.NoError(t, err) +} From 850f3f15dd1965e99cd636711a5e3306bc4bd0c0 Mon Sep 17 00:00:00 2001 From: Adrien Trouillaud Date: Wed, 27 Mar 2019 16:47:26 -0700 Subject: [PATCH 100/145] Admiralty: add link to blog post, add user (#1295) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 6558b49a28de..f86700b383ab 100644 --- a/README.md +++ b/README.md @@ -71,6 +71,7 @@ As the Argo Community grows, we'd like to keep track of our users. Please send a Currently **officially** using Argo: +1. [Admiralty](https://admiralty.io/) 1. [Adobe](https://www.adobe.com/) 1. [BlackRock](https://www.blackrock.com/) 1. [Canva](https://www.canva.com/) @@ -94,6 +95,7 @@ Currently **officially** using Argo: 1. 
[Styra](https://www.styra.com/) ## Community Blogs and Presentations +* [Running Argo Workflows Across Multiple Kubernetes Clusters](https://admiralty.io/blog/running-argo-workflows-across-multiple-kubernetes-clusters/) * [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/) * [Producing 200 OpenStreetMap extracts in 35 minutes using a scalable data workflow](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/) * [Argo integration review](http://dev.matt.hillsdon.net/2018/03/24/argo-integration-review.html) From 934511192e4045b87be1675ff7e9dfa79faa9fcb Mon Sep 17 00:00:00 2001 From: Xianlu Bird Date: Wed, 3 Apr 2019 04:26:27 +0800 Subject: [PATCH 101/145] Add dns config support (#1301) --- api/openapi-spec/swagger.json | 4 ++++ examples/dns-config.yaml | 22 +++++++++++++++++++ .../workflow/v1alpha1/openapi_generated.go | 8 ++++++- pkg/apis/workflow/v1alpha1/types.go | 4 ++++ .../v1alpha1/zz_generated.deepcopy.go | 5 +++++ workflow/controller/workflowpod.go | 4 ++++ 6 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 examples/dns-config.yaml diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 17bf45108f47..aa91f7fa3a3f 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -1123,6 +1123,10 @@ "description": "Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{workflow.parameters.myparam}}", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Arguments" }, + "dnsConfig": { + "description": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfig" + }, "dnsPolicy": { "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", "type": "string" diff --git a/examples/dns-config.yaml b/examples/dns-config.yaml new file mode 100644 index 000000000000..35a621864827 --- /dev/null +++ b/examples/dns-config.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow # new type of k8s spec +metadata: + generateName: test-dns-config- # name of the workflow spec +spec: + entrypoint: whalesay # invoke the whalesay template + templates: + - name: whalesay # name of the template + container: + image: docker/whalesay + command: [cowsay] + args: ["hello world"] + resources: # limit the resources + limits: + memory: 32Mi + cpu: 100m + dnsConfig: + nameservers: + - 1.2.3.4 + options: + - name: ndots + value: "2" \ No newline at end of file diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index ade406b7685e..3392a7543836 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -2174,6 +2174,12 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback Format: "", }, }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, "onExit": { SchemaProps: spec.SchemaProps{ Description: "OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary workflow.", @@ -2228,7 +2234,7 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Template", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Template", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index 3380be49fbc6..c8548ff07a6e 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -129,6 +129,10 @@ type WorkflowSpec struct { // explicitly to 'ClusterFirstWithHostNet'. DNSPolicy *apiv1.DNSPolicy `json:"dnsPolicy,omitempty"` + // PodDNSConfig defines the DNS parameters of a pod in addition to + // those generated from DNSPolicy. + DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty"` + // OnExit is a template reference which is invoked at the end of the // workflow, irrespective of the success, failure, or error of the // primary workflow. 
diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index 60db0280d209..d04a30b8f7a5 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -1015,6 +1015,11 @@ func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { *out = new(v1.DNSPolicy) **out = **in } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(v1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } if in.TTLSecondsAfterFinished != nil { in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished *out = new(int32) diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 4efd896ee2f4..286e78388bfd 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -124,6 +124,10 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont pod.Spec.DNSPolicy = *woc.wf.Spec.DNSPolicy } + if woc.wf.Spec.DNSConfig != nil { + pod.Spec.DNSConfig = woc.wf.Spec.DNSConfig + } + if woc.controller.Config.InstanceID != "" { pod.ObjectMeta.Labels[common.LabelKeyControllerInstanceID] = woc.controller.Config.InstanceID } From a8a55579131605d4dc769cb599bc99c06350dfb7 Mon Sep 17 00:00:00 2001 From: Xianlu Bird Date: Wed, 3 Apr 2019 04:55:14 +0800 Subject: [PATCH 102/145] Speed up podReconciliation using parallel goroutine (#1286) * Speed up podReconciliation using parallel goroutine * Fix make lint issue * put checkandcompress back --- workflow/controller/exec_control.go | 8 ++++- workflow/controller/operator.go | 50 ++++++++++++++++++++--------- 2 files changed, 42 insertions(+), 16 deletions(-) diff --git a/workflow/controller/exec_control.go b/workflow/controller/exec_control.go index bc438c713e06..b1ad7f12ddf0 100644 --- a/workflow/controller/exec_control.go +++ b/workflow/controller/exec_control.go @@ -3,6 +3,7 @@ package controller import ( "encoding/json" "fmt" + "sync" "time" apiv1 "k8s.io/api/core/v1" @@ -15,7 +16,10 @@ import ( // applyExecutionControl will ensure a pod's execution control annotation is up-to-date // kills any pending pods when workflow has reached its deadline -func (woc *wfOperationCtx) applyExecutionControl(pod *apiv1.Pod) error { +func (woc *wfOperationCtx) applyExecutionControl(pod *apiv1.Pod, wfNodesLock *sync.RWMutex) error { + if pod == nil { + return nil + } switch pod.Status.Phase { case apiv1.PodSucceeded, apiv1.PodFailed: // Skip any pod which is already completed @@ -27,6 +31,8 @@ woc.log.Infof("Deleting Pending pod %s/%s which has exceeded workflow deadline %s", pod.Namespace, pod.Name, woc.workflowDeadline) err := woc.controller.kubeclientset.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}) if err == nil { + wfNodesLock.Lock() + defer wfNodesLock.Unlock() node := woc.wf.Status.Nodes[pod.Name] var message string if woc.workflowDeadline.IsZero() { diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index f2cb6e4fcaa8..bfc1ed6b52df 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -9,6 +9,7 @@ import ( "sort" "strconv" "strings" + "sync" "time" argokubeerr "github.com/argoproj/pkg/kube/errors" @@ -449,11 +450,21 @@ func (woc *wfOperationCtx) podReconciliation() error { return err } seenPods := make(map[string]bool) + seenPodLock := &sync.Mutex{} + wfNodesLock := &sync.RWMutex{} performAssessment := 
func(pod *apiv1.Pod) { + if pod == nil { + return + } nodeNameForPod := pod.Annotations[common.AnnotationKeyNodeName] nodeID := woc.wf.NodeID(nodeNameForPod) + seenPodLock.Lock() seenPods[nodeID] = true + seenPodLock.Unlock() + + wfNodesLock.Lock() + defer wfNodesLock.Unlock() if node, ok := woc.wf.Status.Nodes[nodeID]; ok { if newState := assessNodeStatus(pod, &node); newState != nil { woc.wf.Status.Nodes[nodeID] = *newState @@ -472,27 +483,36 @@ func (woc *wfOperationCtx) podReconciliation() error { } } + parallelPodNum := make(chan string, 500) + var wg sync.WaitGroup for _, pod := range podList.Items { - origNodeStatus := *woc.wf.Status.DeepCopy() - performAssessment(&pod) - err = woc.applyExecutionControl(&pod) - if err != nil { - woc.log.Warnf("Failed to apply execution control to pod %s", pod.Name) - } - err = woc.checkAndCompress() - if err != nil { - woc.wf.Status = origNodeStatus - nodeNameForPod := pod.Annotations[common.AnnotationKeyNodeName] - woc.log.Warnf("%v", err) - woc.markNodeErrorClearOuput(nodeNameForPod, err) + parallelPodNum <- pod.Name + wg.Add(1) + go func(tmpPod apiv1.Pod) { + defer wg.Done() + wfNodesLock.Lock() + origNodeStatus := *woc.wf.Status.DeepCopy() + wfNodesLock.Unlock() + performAssessment(&tmpPod) + err = woc.applyExecutionControl(&tmpPod, wfNodesLock) + if err != nil { + woc.log.Warnf("Failed to apply execution control to pod %s", tmpPod.Name) + } + wfNodesLock.Lock() + defer wfNodesLock.Unlock() err = woc.checkAndCompress() if err != nil { - woc.markWorkflowError(err, true) + woc.wf.Status = origNodeStatus + nodeNameForPod := tmpPod.Annotations[common.AnnotationKeyNodeName] + woc.log.Warnf("%v", err) + woc.markNodeErrorClearOuput(nodeNameForPod, err) + err = woc.checkAndCompress() } - } - + <-parallelPodNum + }(pod) } + wg.Wait() // Now check for deleted pods. Iterate our nodes. If any one of our nodes does not show up in // the seen list it implies that the pod was deleted without the controller seeing the event. // It is now impossible to infer pod status. The only thing we can do at this point is to mark From de779f36122205790915622f5ee91c9a9d5b9086 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Wed, 3 Apr 2019 11:30:16 -0700 Subject: [PATCH 103/145] Add community meeting notes link (#1304) --- community/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/community/README.md b/community/README.md index 0a2e1d3518ea..b051276b1a2d 100644 --- a/community/README.md +++ b/community/README.md @@ -4,6 +4,8 @@ Welcome to the Argo community! Argo is an open, community driven project to make it easy to use Kubernetes for getting useful work done. This document describes the organizational structure of the Argo Community including the roles, responsibilities and processes that govern Argo projects and community. +Community meeting notes are available [here](https://docs.google.com/document/d/16aWGQ1Te5IRptFuAIFtg3rONRQqHC1Z3X9rdDHYhYfE/edit?usp=sharing). + ## Projects Argo is organized into a set of projects. Each project has at least one owner. The owner is responsible for publishing a roadmap and organizing community meetings for soliciting feedback, publishing meeting notes, and reporting on the current status of the project. 
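The podReconciliation change in PATCH 102 above bounds goroutine fan-out with a buffered channel used as a semaphore (capacity 500 in the patch), a WaitGroup to await completion, and locks around shared workflow state. A minimal, self-contained sketch of that pattern follows; it is illustrative only, not Argo code, and the names (`reconcileAll`, `sem`, `seen`) are hypothetical:

```go
package main

import (
	"fmt"
	"sync"
)

// reconcileAll visits items with at most `limit` goroutines in flight.
// A buffered channel acts as the semaphore (a slot is acquired before
// spawning and released when the goroutine finishes), a WaitGroup waits
// for all workers, and a mutex guards the shared map.
func reconcileAll(items []string, limit int) map[string]bool {
	seen := make(map[string]bool)
	var seenLock sync.Mutex
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for _, item := range items {
		sem <- struct{}{} // blocks once `limit` workers are running
		wg.Add(1)
		go func(it string) {
			defer wg.Done()
			defer func() { <-sem }() // free the slot for the next item
			seenLock.Lock()
			seen[it] = true // stands in for per-pod node-status updates
			seenLock.Unlock()
		}(item)
	}
	wg.Wait()
	return seen
}

func main() {
	fmt.Println(len(reconcileAll([]string{"pod-a", "pod-b", "pod-c"}, 2)))
}
```

Acquiring the slot in the submitting loop, rather than inside the goroutine, is what enforces the concurrency cap: the loop itself blocks once the channel buffer is full, exactly as `parallelPodNum <- pod.Name` does in the patch.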
From 928e4df81c4b33f0c0750f01b3aa3c4fc7ff256c Mon Sep 17 00:00:00 2001 From: xubofei1983 <39540637+xubofei1983@users.noreply.github.com> Date: Thu, 4 Apr 2019 02:42:48 -0700 Subject: [PATCH 104/145] Add Karius to users in README.md (#1305) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f86700b383ab..ec0cc9cca489 100644 --- a/README.md +++ b/README.md @@ -86,6 +86,7 @@ Currently **officially** using Argo: 1. [Google](https://www.google.com/intl/en/about/our-company/) 1. [Interline Technologies](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/) 1. [Intuit](https://www.intuit.com/) +1. [Karius](https://www.kariusdx.com/) 1. [KintoHub](https://www.kintohub.com/) 1. [Localytics](https://www.localytics.com/) 1. [NVIDIA](https://www.nvidia.com/) From 4591e44fe0e4de543f4c4339de0808346e0807e3 Mon Sep 17 00:00:00 2001 From: Alexey Volkov Date: Fri, 5 Apr 2019 02:04:47 -0700 Subject: [PATCH 105/145] Added support for artifact path references (#1300) * Added support for artifact path references Adds new `{{inputs.artifacts.<NAME>.path}}` and `{{outputs.artifacts.<NAME>.path}}` placeholders. --- docs/variables.md | 3 + examples/artifact-path-placeholders.yaml | 40 ++++++++++++++ workflow/common/common.go | 2 +- workflow/common/util.go | 16 ++++++ workflow/controller/operator_test.go | 54 ++++++++++++++++++ workflow/validate/validate.go | 13 +++++ workflow/validate/validate_test.go | 70 ++++++++++++++++++++++++ 7 files changed, 197 insertions(+), 1 deletion(-) create mode 100644 examples/artifact-path-placeholders.yaml diff --git a/docs/variables.md b/docs/variables.md index 98dc34c067c4..95e078135e17 100644 --- a/docs/variables.md +++ b/docs/variables.md @@ -28,6 +28,9 @@ The following variables are made available to reference various metadata of a wo | Variable | Description| |----------|------------| | `pod.name` | Pod name of the container/script | +| `inputs.artifacts.<NAME>.path` | Local path of the input artifact | +| `outputs.artifacts.<NAME>.path` | Local path of the output artifact | +| `outputs.parameters.<NAME>.path` | Local path of the output parameter | ## Loops (withItems / withParam) | Variable | Description| diff --git a/examples/artifact-path-placeholders.yaml b/examples/artifact-path-placeholders.yaml new file mode 100644 index 000000000000..3371b5e893c5 --- /dev/null +++ b/examples/artifact-path-placeholders.yaml @@ -0,0 +1,40 @@ +# This example demonstrates how to refer to input and output artifact paths. +# Referring to the path instead of copy/pasting it prevents errors when paths change. 
+apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: artifact-path-placeholders- +spec: + entrypoint: head-lines + arguments: + parameters: + - name: lines-count + value: 3 + artifacts: + - name: text + raw: + data: | + 1 + 2 + 3 + 4 + 5 + templates: + - name: head-lines + inputs: + parameters: + - name: lines-count + artifacts: + - name: text + path: /inputs/text/data + outputs: + parameters: + - name: actual-lines-count + valueFrom: + path: /outputs/actual-lines-count/data + artifacts: + - name: text + path: /outputs/text/data + container: + image: busybox + command: [sh, -c, 'head -n {{inputs.parameters.lines-count}} <"{{inputs.artifacts.text.path}}" | tee "{{outputs.artifacts.text.path}}" | wc -l > "{{outputs.parameters.actual-lines-count.path}}"'] diff --git a/workflow/common/common.go b/workflow/common/common.go index 66275638cd55..6a55552ebf05 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -116,7 +116,7 @@ const ( ) // GlobalVarValidWorkflowVariablePrefix is a list of root prefixes in a workflow that may be used in variable references -var GlobalVarValidWorkflowVariablePrefix = []string{"item.", "steps.", "inputs.", "pod.", "workflow.", "tasks."} +var GlobalVarValidWorkflowVariablePrefix = []string{"item.", "steps.", "inputs.", "outputs.", "pod.", "workflow.", "tasks."} // ExecutionControl contains execution control parameters for executor to decide how to execute the container type ExecutionControl struct { diff --git a/workflow/common/util.go b/workflow/common/util.go index 860c6b47370d..72432016c08f 100644 --- a/workflow/common/util.go +++ b/workflow/common/util.go @@ -195,6 +195,22 @@ func substituteParams(tmpl *wfv1.Template, globalParams, localParams map[string] } replaceMap["inputs.parameters."+inParam.Name] = *inParam.Value } + for _, inArt := range globalReplacedTmpl.Inputs.Artifacts { + if inArt.Path != "" { + replaceMap["inputs.artifacts."+inArt.Name+".path"] = inArt.Path + } + } + for _, outArt := range globalReplacedTmpl.Outputs.Artifacts { + if outArt.Path != "" { + replaceMap["outputs.artifacts."+outArt.Name+".path"] = outArt.Path + } + } + for _, param := range globalReplacedTmpl.Outputs.Parameters { + if param.ValueFrom != nil && param.ValueFrom.Path != "" { + replaceMap["outputs.parameters."+param.Name+".path"] = param.ValueFrom.Path + } + } + fstTmpl = fasttemplate.New(globalReplacedTmplStr, "{{", "}}") s, err := Replace(fstTmpl, replaceMap, true) if err != nil { diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index b8b5b3f543e9..cf5d95b348b0 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -947,3 +947,57 @@ func TestMetadataPassing(t *testing.T) { assert.True(t, foundRev) assert.Equal(t, "foo:bar", container.Image) } + +var ioPathPlaceholders = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: artifact-path-placeholders- +spec: + entrypoint: head-lines + arguments: + parameters: + - name: lines-count + value: 3 + artifacts: + - name: text + raw: + data: | + 1 + 2 + 3 + 4 + 5 + templates: + - name: head-lines + inputs: + parameters: + - name: lines-count + artifacts: + - name: text + path: /inputs/text/data + outputs: + parameters: + - name: actual-lines-count + valueFrom: + path: /outputs/actual-lines-count/data + artifacts: + - name: text + path: /outputs/text/data + container: + image: busybox + command: [sh, -c, 'head -n {{inputs.parameters.lines-count}} <"{{inputs.artifacts.text.path}}" | tee 
"{{outputs.artifacts.text.path}}" | wc -l > "{{outputs.parameters.actual-lines-count.path}}"'] +` + +func TestResolveIOPathPlaceholders(t *testing.T) { + wf := unmarshalWF(ioPathPlaceholders) + woc := newWoc(*wf) + woc.controller.Config.ArtifactRepository.S3 = new(S3ArtifactRepository) + woc.operate() + assert.Equal(t, wfv1.NodeRunning, woc.wf.Status.Phase) + pods, err := woc.controller.kubeclientset.CoreV1().Pods(wf.ObjectMeta.Namespace).List(metav1.ListOptions{}) + assert.Nil(t, err) + assert.True(t, len(pods.Items) > 0, "pod was not created successfully") + + assert.Equal(t, []string{"sh", "-c", "head -n 3 <\"/inputs/text/data\" | tee \"/outputs/text/data\" | wc -l > \"/outputs/actual-lines-count/data\""}, pods.Items[0].Spec.Containers[0].Command) +} diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index 4bdaf20d2d24..acaabc593eae 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -119,6 +119,18 @@ func (ctx *wfValidationCtx) validateTemplate(tmpl *wfv1.Template, args wfv1.Argu localParams[common.LocalVarPodName] = placeholderValue scope[common.LocalVarPodName] = placeholderValue } + if tmpl.IsLeaf() { + for _, art := range tmpl.Outputs.Artifacts { + if art.Path != "" { + scope[fmt.Sprintf("outputs.artifacts.%s.path", art.Name)] = true + } + } + for _, param := range tmpl.Outputs.Parameters { + if param.ValueFrom != nil && param.ValueFrom.Path != "" { + scope[fmt.Sprintf("outputs.parameters.%s.path", param.Name)] = true + } + } + } _, err = common.ProcessArgs(tmpl, args, ctx.globalParams, localParams, true) if err != nil { @@ -190,6 +202,7 @@ func validateInputs(tmpl *wfv1.Template) (map[string]interface{}, error) { if art.Path == "" { return nil, errors.Errorf(errors.CodeBadRequest, "templates.%s.%s.path not specified", tmpl.Name, artRef) } + scope[fmt.Sprintf("inputs.artifacts.%s.path", art.Name)] = true } else { if art.Path != "" { return nil, errors.Errorf(errors.CodeBadRequest, "templates.%s.%s.path only valid in container/script templates", tmpl.Name, artRef) diff --git a/workflow/validate/validate_test.go b/workflow/validate/validate_test.go index 252b1db5191c..fd9eee10bc72 100644 --- a/workflow/validate/validate_test.go +++ b/workflow/validate/validate_test.go @@ -163,6 +163,76 @@ func TestUnresolved(t *testing.T) { } } +var ioArtifactPaths = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: artifact-path-placeholders- +spec: + entrypoint: head-lines + arguments: + parameters: + - name: lines-count + value: 3 + artifacts: + - name: text + raw: + data: | + 1 + 2 + 3 + 4 + 5 + templates: + - name: head-lines + inputs: + parameters: + - name: lines-count + artifacts: + - name: text + path: /inputs/text/data + outputs: + parameters: + - name: actual-lines-count + valueFrom: + path: /outputs/actual-lines-count/data + artifacts: + - name: text + path: /outputs/text/data + container: + image: busybox + command: [sh, -c, 'head -n {{inputs.parameters.lines-count}} <"{{inputs.artifacts.text.path}}" | tee "{{outputs.artifacts.text.path}}" | wc -l > "{{outputs.parameters.actual-lines-count.path}}"'] +` + +func TestResolveIOArtifactPathPlaceholders(t *testing.T) { + err := validate(ioArtifactPaths) + assert.Nil(t, err) +} + +var outputParameterPath = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: get-current-date- +spec: + entrypoint: get-current-date + templates: + - name: get-current-date + outputs: + parameters: + - name: current-date + valueFrom: + path: /tmp/current-date 
+ container: + image: busybox + command: [sh, -c, 'date > {{outputs.parameters.current-date.path}}'] +` + +func TestResolveOutputParameterPathPlaceholder(t *testing.T) { + err := validate(outputParameterPath) + assert.Nil(t, err) +} + var stepOutputReferences = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow From e34024a3ca285d1af3b5ba3b3235dc7adc0472b7 Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Fri, 5 Apr 2019 18:25:35 +0900 Subject: [PATCH 106/145] Add support for init containers (#1183) --- api/openapi-spec/swagger.json | 260 +++++----- examples/init-container.yaml | 22 + .../workflow/v1alpha1/openapi_generated.go | 462 +++++++++--------- pkg/apis/workflow/v1alpha1/types.go | 14 +- .../v1alpha1/zz_generated.deepcopy.go | 60 ++- test/e2e/functional/init-container.yaml | 1 + workflow/controller/workflowpod.go | 68 ++- workflow/controller/workflowpod_test.go | 78 +++ 8 files changed, 583 insertions(+), 382 deletions(-) create mode 100644 examples/init-container.yaml create mode 120000 test/e2e/functional/init-container.yaml diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index aa91f7fa3a3f..c30116a25eda 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -784,8 +784,142 @@ } } }, - "io.argoproj.workflow.v1alpha1.Sidecar": { - "description": "Sidecar is a container which runs alongside the main container", + "io.argoproj.workflow.v1alpha1.SuspendTemplate": { + "description": "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time" + }, + "io.argoproj.workflow.v1alpha1.TarStrategy": { + "description": "TarStrategy will tar and gzip the file or directory when saving" + }, + "io.argoproj.workflow.v1alpha1.Template": { + "description": "Template is a reusable and composable unit of execution in a workflow", + "required": [ + "name" + ], + "properties": { + "activeDeadlineSeconds": { + "description": "Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.", + "type": "integer", + "format": "int64" + }, + "affinity": { + "description": "Affinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)", + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity" + }, + "archiveLocation": { + "description": "Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. 
If omitted, will use the default artifact repository location configured in the controller, appended with the \u003cworkflowname\u003e/\u003cnodename\u003e in the key.", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactLocation" + }, + "container": { + "description": "Container is the main container image to run in the pod", + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + }, + "daemon": { + "description": "Deamon will allow a workflow to proceed to the next step so long as the container reaches readiness", + "type": "boolean" + }, + "dag": { + "description": "DAG template subtype which runs a DAG", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.DAGTemplate" + }, + "initContainers": { + "description": "InitContainers is a list of containers which run before the main container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.UserContainer" + } + }, + "inputs": { + "description": "Inputs describe what inputs parameters and artifacts are supplied to this template", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Inputs" + }, + "metadata": { + "description": "Metdata sets the pods's metadata, i.e. annotations and labels", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Metadata" + }, + "name": { + "description": "Name is the name of the template", + "type": "string" + }, + "nodeSelector": { + "description": "NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "outputs": { + "description": "Outputs describe the parameters and artifacts that this template produces", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Outputs" + }, + "parallelism": { + "description": "Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.", + "type": "integer", + "format": "int64" + }, + "priority": { + "description": "Priority to apply to workflow pods.", + "type": "integer", + "format": "int32" + }, + "priorityClassName": { + "description": "PriorityClassName to apply to workflow pods.", + "type": "string" + }, + "resource": { + "description": "Resource template subtype which can run k8s resources", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ResourceTemplate" + }, + "retryStrategy": { + "description": "RetryStrategy describes how to retry a template when it fails", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.RetryStrategy" + }, + "schedulerName": { + "description": "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. 
If neither specified, the pod will be dispatched by default scheduler.", + "type": "string" + }, + "script": { + "description": "Script runs a portion of code against an interpreter", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ScriptTemplate" + }, + "sidecars": { + "description": "Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.UserContainer" + } + }, + "steps": { + "description": "Steps define a series of sequential/parallel workflow steps", + "type": "array", + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStep" + } + } + }, + "suspend": { + "description": "Suspend template subtype which can suspend a workflow when reaching the step", + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.SuspendTemplate" + }, + "tolerations": { + "description": "Tolerations to apply to workflow pods.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + } + }, + "volumes": { + "description": "Volumes is a list of volumes that can be mounted by containers in a template.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + } + } + } + }, + "io.argoproj.workflow.v1alpha1.UserContainer": { + "description": "UserContainer is a container specified by a user.", "required": [ "name" ], @@ -837,7 +971,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Probe" }, "mirrorVolumeMounts": { - "description": "MirrorVolumeMounts will mount the same volumes specified in the main container to the sidecar (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding", + "description": "MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding", "type": "boolean" }, "name": { @@ -909,126 +1043,6 @@ } } }, - "io.argoproj.workflow.v1alpha1.SuspendTemplate": { - "description": "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time" - }, - "io.argoproj.workflow.v1alpha1.TarStrategy": { - "description": "TarStrategy will tar and gzip the file or directory when saving" - }, - "io.argoproj.workflow.v1alpha1.Template": { - "description": "Template is a reusable and composable unit of execution in a workflow", - "required": [ - "name" - ], - "properties": { - "activeDeadlineSeconds": { - "description": "Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.", - "type": "integer", - "format": "int64" - }, - "affinity": { - "description": "Affinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)", - "$ref": "#/definitions/io.k8s.api.core.v1.Affinity" - }, - "archiveLocation": { - "description": "Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. 
If omitted, will use the default artifact repository location configured in the controller, appended with the \u003cworkflowname\u003e/\u003cnodename\u003e in the key.", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactLocation" - }, - "container": { - "description": "Container is the main container image to run in the pod", - "$ref": "#/definitions/io.k8s.api.core.v1.Container" - }, - "daemon": { - "description": "Deamon will allow a workflow to proceed to the next step so long as the container reaches readiness", - "type": "boolean" - }, - "dag": { - "description": "DAG template subtype which runs a DAG", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.DAGTemplate" - }, - "inputs": { - "description": "Inputs describe what inputs parameters and artifacts are supplied to this template", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Inputs" - }, - "metadata": { - "description": "Metdata sets the pods's metadata, i.e. annotations and labels", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Metadata" - }, - "name": { - "description": "Name is the name of the template", - "type": "string" - }, - "nodeSelector": { - "description": "NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "outputs": { - "description": "Outputs describe the parameters and artifacts that this template produces", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Outputs" - }, - "parallelism": { - "description": "Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.", - "type": "integer", - "format": "int64" - }, - "priority": { - "description": "Priority to apply to workflow pods.", - "type": "integer", - "format": "int32" - }, - "priorityClassName": { - "description": "PriorityClassName to apply to workflow pods.", - "type": "string" - }, - "resource": { - "description": "Resource template subtype which can run k8s resources", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ResourceTemplate" - }, - "retryStrategy": { - "description": "RetryStrategy describes how to retry a template when it fails", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.RetryStrategy" - }, - "schedulerName": { - "description": "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. 
If neither specified, the pod will be dispatched by default scheduler.", - "type": "string" - }, - "script": { - "description": "Script runs a portion of code against an interpreter", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ScriptTemplate" - }, - "sidecars": { - "description": "Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes", - "type": "array", - "items": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Sidecar" - } - }, - "steps": { - "description": "Steps define a series of sequential/parallel workflow steps", - "type": "array", - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStep" - } - } - }, - "suspend": { - "description": "Suspend template subtype which can suspend a workflow when reaching the step", - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.SuspendTemplate" - }, - "tolerations": { - "description": "Tolerations to apply to workflow pods.", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" - } - } - } - }, "io.argoproj.workflow.v1alpha1.ValueFrom": { "description": "ValueFrom describes a location in which to obtain the value to a parameter", "properties": { diff --git a/examples/init-container.yaml b/examples/init-container.yaml new file mode 100644 index 000000000000..a113fce55f18 --- /dev/null +++ b/examples/init-container.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: init-container- +spec: + entrypoint: init-container-example + templates: + - name: init-container-example + container: + image: alpine:latest + command: ["echo", "bye"] + volumeMounts: + - name: foo + mountPath: /foo + initContainers: + - name: hello + image: alpine:latest + command: ["echo", "hello"] + mirrorVolumeMounts: true + volumes: + - name: foo + emptyDir: diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 3392a7543836..51974196de83 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -40,10 +40,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.S3Bucket": schema_pkg_apis_workflow_v1alpha1_S3Bucket(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate": schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Sequence": schema_pkg_apis_workflow_v1alpha1_Sequence(ref), - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Sidecar": schema_pkg_apis_workflow_v1alpha1_Sidecar(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate": schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.TarStrategy": schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Template": schema_pkg_apis_workflow_v1alpha1_Template(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.UserContainer": schema_pkg_apis_workflow_v1alpha1_UserContainer(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ValueFrom": schema_pkg_apis_workflow_v1alpha1_ValueFrom(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Workflow": schema_pkg_apis_workflow_v1alpha1_Workflow(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowList": 
schema_pkg_apis_workflow_v1alpha1_WorkflowList(ref), @@ -1458,11 +1458,251 @@ func schema_pkg_apis_workflow_v1alpha1_Sequence(ref common.ReferenceCallback) co } } -func schema_pkg_apis_workflow_v1alpha1_Sidecar(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time", + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TarStrategy will tar and gzip the file or directory when saving", + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Template is a reusable and composable unit of execution in a workflow", + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the template", + Type: []string{"string"}, + Format: "", + }, + }, + "inputs": { + SchemaProps: spec.SchemaProps{ + Description: "Inputs describe what inputs parameters and artifacts are supplied to this template", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs"), + }, + }, + "outputs": { + SchemaProps: spec.SchemaProps{ + Description: "Outputs describe the parameters and artifacts that this template produces", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs"), + }, + }, + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "Affinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metdata sets the pods's metadata, i.e. 
annotations and labels", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata"), + }, + }, + "daemon": { + SchemaProps: spec.SchemaProps{ + Description: "Deamon will allow a workflow to proceed to the next step so long as the container reaches readiness", + Type: []string{"boolean"}, + Format: "", + }, + }, + "steps": { + SchemaProps: spec.SchemaProps{ + Description: "Steps define a series of sequential/parallel workflow steps", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep"), + }, + }, + }, + }, + }, + }, + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Description: "Container is the main container image to run in the pod", + Ref: ref("k8s.io/api/core/v1.Container"), + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script runs a portion of code against an interpreter", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate"), + }, + }, + "resource": { + SchemaProps: spec.SchemaProps{ + Description: "Resource template subtype which can run k8s resources", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ResourceTemplate"), + }, + }, + "dag": { + SchemaProps: spec.SchemaProps{ + Description: "DAG template subtype which runs a DAG", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate"), + }, + }, + "suspend": { + SchemaProps: spec.SchemaProps{ + Description: "Suspend template subtype which can suspend a workflow when reaching the step", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate"), + }, + }, + "volumes": { + SchemaProps: spec.SchemaProps{ + Description: "Volumes is a list of volumes that can be mounted by containers in a template.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "initContainers": { + SchemaProps: spec.SchemaProps{ + Description: "InitContainers is a list of containers which run before the main container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.UserContainer"), + }, + }, + }, + }, + }, + "sidecars": { + SchemaProps: spec.SchemaProps{ + Description: "Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.UserContainer"), + }, + }, + }, + }, + }, + "archiveLocation": { + SchemaProps: spec.SchemaProps{ + Description: "Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. 
If omitted, will use the default artifact repository location configured in the controller, appended with the / in the key.", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation"), + }, + }, + "activeDeadlineSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "retryStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "RetryStrategy describes how to retry a template when it fails", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RetryStrategy"), + }, + }, + "parallelism": { + SchemaProps: spec.SchemaProps{ + Description: "Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "tolerations": { + SchemaProps: spec.SchemaProps{ + Description: "Tolerations to apply to workflow pods.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", + }, + }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "PriorityClassName to apply to workflow pods.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "Priority to apply to workflow pods.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.UserContainer", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_UserContainer(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Sidecar is a container which runs alongside the main container", + Description: "UserContainer is a container specified by a user.", Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ @@ -1676,7 
+1916,7 @@ func schema_pkg_apis_workflow_v1alpha1_Sidecar(ref common.ReferenceCallback) com }, "mirrorVolumeMounts": { SchemaProps: spec.SchemaProps{ - Description: "MirrorVolumeMounts will mount the same volumes specified in the main container to the sidecar (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding", + Description: "MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding", Type: []string{"boolean"}, Format: "", }, @@ -1690,220 +1930,6 @@ func schema_pkg_apis_workflow_v1alpha1_Sidecar(ref common.ReferenceCallback) com } } -func schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time", - Properties: map[string]spec.Schema{}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TarStrategy will tar and gzip the file or directory when saving", - Properties: map[string]spec.Schema{}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Template is a reusable and composable unit of execution in a workflow", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the template", - Type: []string{"string"}, - Format: "", - }, - }, - "inputs": { - SchemaProps: spec.SchemaProps{ - Description: "Inputs describe what inputs parameters and artifacts are supplied to this template", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs"), - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Description: "Outputs describe the parameters and artifacts that this template produces", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "Affinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Metdata sets the pods's metadata, i.e. 
annotations and labels", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata"), - }, - }, - "daemon": { - SchemaProps: spec.SchemaProps{ - Description: "Deamon will allow a workflow to proceed to the next step so long as the container reaches readiness", - Type: []string{"boolean"}, - Format: "", - }, - }, - "steps": { - SchemaProps: spec.SchemaProps{ - Description: "Steps define a series of sequential/parallel workflow steps", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep"), - }, - }, - }, - }, - }, - }, - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the main container image to run in the pod", - Ref: ref("k8s.io/api/core/v1.Container"), - }, - }, - "script": { - SchemaProps: spec.SchemaProps{ - Description: "Script runs a portion of code against an interpreter", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate"), - }, - }, - "resource": { - SchemaProps: spec.SchemaProps{ - Description: "Resource template subtype which can run k8s resources", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ResourceTemplate"), - }, - }, - "dag": { - SchemaProps: spec.SchemaProps{ - Description: "DAG template subtype which runs a DAG", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate"), - }, - }, - "suspend": { - SchemaProps: spec.SchemaProps{ - Description: "Suspend template subtype which can suspend a workflow when reaching the step", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate"), - }, - }, - "sidecars": { - SchemaProps: spec.SchemaProps{ - Description: "Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Sidecar"), - }, - }, - }, - }, - }, - "archiveLocation": { - SchemaProps: spec.SchemaProps{ - Description: "Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. If omitted, will use the default artifact repository location configured in the controller, appended with the / in the key.", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation"), - }, - }, - "activeDeadlineSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "retryStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy describes how to retry a template when it fails", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RetryStrategy"), - }, - }, - "parallelism": { - SchemaProps: spec.SchemaProps{ - Description: "Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. 
If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "tolerations": { - SchemaProps: spec.SchemaProps{ - Description: "Tolerations to apply to workflow pods.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Toleration"), - }, - }, - }, - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", - }, - }, - "priorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "PriorityClassName to apply to workflow pods.", - Type: []string{"string"}, - Format: "", - }, - }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority to apply to workflow pods.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Sidecar", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.Toleration"}, - } -} - func schema_pkg_apis_workflow_v1alpha1_ValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index c8548ff07a6e..3bfc57f8eb80 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -209,9 +209,15 @@ type Template struct { // Suspend template subtype which can suspend a workflow when reaching the step Suspend *SuspendTemplate `json:"suspend,omitempty"` + // Volumes is a list of volumes that can be mounted by containers in a template. + Volumes []apiv1.Volume `json:"volumes,omitempty"` + + // InitContainers is a list of containers which run before the main container. + InitContainers []UserContainer `json:"initContainers,omitempty"` + // Sidecars is a list of containers which run alongside the main container // Sidecars are automatically killed when the main container completes - Sidecars []Sidecar `json:"sidecars,omitempty"` + Sidecars []UserContainer `json:"sidecars,omitempty"` // Location in which all files related to the step will be stored (logs, artifacts, etc...). // Can be overridden by individual items in Outputs. 
If omitted, will use the default @@ -471,12 +477,12 @@ type Arguments struct { Artifacts []Artifact `json:"artifacts,omitempty"` } -// Sidecar is a container which runs alongside the main container -type Sidecar struct { +// UserContainer is a container specified by a user. +type UserContainer struct { apiv1.Container `json:",inline"` // MirrorVolumeMounts will mount the same volumes specified in the main container - // to the sidecar (including artifacts), at the same mountPaths. This enables + // to the container (including artifacts), at the same mountPaths. This enables // dind daemon to partially see the same filesystem as the main container in // order to use features such as docker volume binding MirrorVolumeMounts *bool `json:"mirrorVolumeMounts,omitempty"` diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index d04a30b8f7a5..9bd20892eb6b 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -702,28 +702,6 @@ func (in *Sequence) DeepCopy() *Sequence { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sidecar) DeepCopyInto(out *Sidecar) { - *out = *in - in.Container.DeepCopyInto(&out.Container) - if in.MirrorVolumeMounts != nil { - in, out := &in.MirrorVolumeMounts, &out.MirrorVolumeMounts - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar. -func (in *Sidecar) DeepCopy() *Sidecar { - if in == nil { - return nil - } - out := new(Sidecar) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SuspendTemplate) DeepCopyInto(out *SuspendTemplate) { *out = *in @@ -817,9 +795,23 @@ func (in *Template) DeepCopyInto(out *Template) { *out = new(SuspendTemplate) **out = **in } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]UserContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Sidecars != nil { in, out := &in.Sidecars, &out.Sidecars - *out = make([]Sidecar, len(*in)) + *out = make([]UserContainer, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -869,6 +861,28 @@ func (in *Template) DeepCopy() *Template { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserContainer) DeepCopyInto(out *UserContainer) { + *out = *in + in.Container.DeepCopyInto(&out.Container) + if in.MirrorVolumeMounts != nil { + in, out := &in.MirrorVolumeMounts, &out.MirrorVolumeMounts + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserContainer. +func (in *UserContainer) DeepCopy() *UserContainer { + if in == nil { + return nil + } + out := new(UserContainer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValueFrom) DeepCopyInto(out *ValueFrom) { *out = *in diff --git a/test/e2e/functional/init-container.yaml b/test/e2e/functional/init-container.yaml new file mode 120000 index 000000000000..fe78772b05ed --- /dev/null +++ b/test/e2e/functional/init-container.yaml @@ -0,0 +1 @@ +../../../examples/init-container.yaml \ No newline at end of file diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 286e78388bfd..2cd2b7fe3b50 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -172,6 +172,13 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont addExecutorStagingVolume(pod) } + // addInitContainers should be called after all volumes have been manipulated + // in the main container (in case sidecar requires volume mount mirroring) + err = addInitContainers(pod, tmpl) + if err != nil { + return nil, err + } + // addSidecars should be called after all volumes have been manipulated // in the main container (in case sidecar requires volume mount mirroring) err = addSidecars(pod, tmpl) @@ -706,31 +713,40 @@ func addExecutorStagingVolume(pod *apiv1.Pod) { } } +// addInitContainers adds all init containers to the pod spec of the step +// Optionally volume mounts from the main container to the init containers +func addInitContainers(pod *apiv1.Pod, tmpl *wfv1.Template) error { + if len(tmpl.InitContainers) == 0 { + return nil + } + mainCtr := findMainContainer(pod) + if mainCtr == nil { + panic("Unable to locate main container") + } + for _, ctr := range tmpl.InitContainers { + log.Debugf("Adding init container %s", ctr.Name) + if ctr.MirrorVolumeMounts != nil && *ctr.MirrorVolumeMounts { + mirrorVolumeMounts(mainCtr, &ctr.Container) + } + pod.Spec.InitContainers = append(pod.Spec.InitContainers, ctr.Container) + } + return nil +} + // addSidecars adds all sidecars to the pod spec of the step. 
// Optionally volume mounts from the main container to the sidecar func addSidecars(pod *apiv1.Pod, tmpl *wfv1.Template) error { if len(tmpl.Sidecars) == 0 { return nil } - var mainCtr *apiv1.Container - for _, ctr := range pod.Spec.Containers { - if ctr.Name != common.MainContainerName { - continue - } - mainCtr = &ctr - break - } + mainCtr := findMainContainer(pod) if mainCtr == nil { panic("Unable to locate main container") } for _, sidecar := range tmpl.Sidecars { + log.Debugf("Adding sidecar container %s", sidecar.Name) if sidecar.MirrorVolumeMounts != nil && *sidecar.MirrorVolumeMounts { - for _, volMnt := range mainCtr.VolumeMounts { - if sidecar.VolumeMounts == nil { - sidecar.VolumeMounts = make([]apiv1.VolumeMount, 0) - } - sidecar.VolumeMounts = append(sidecar.VolumeMounts, volMnt) - } + mirrorVolumeMounts(mainCtr, &sidecar.Container) } pod.Spec.Containers = append(pod.Spec.Containers, sidecar.Container) } @@ -751,3 +767,27 @@ func verifyResolvedVariables(obj interface{}) error { }) return unresolvedErr } + +// findMainContainer finds main container +func findMainContainer(pod *apiv1.Pod) *apiv1.Container { + var mainCtr *apiv1.Container + for _, ctr := range pod.Spec.Containers { + if ctr.Name != common.MainContainerName { + continue + } + mainCtr = &ctr + break + } + return mainCtr +} + +// mirrorVolumeMounts mirrors volumeMounts of source container to target container +func mirrorVolumeMounts(sourceContainer, targetContainer *apiv1.Container) { + for _, volMnt := range sourceContainer.VolumeMounts { + if targetContainer.VolumeMounts == nil { + targetContainer.VolumeMounts = make([]apiv1.VolumeMount, 0) + } + log.Debugf("Adding volume mount %v to container %v", volMnt.Name, targetContainer.Name) + targetContainer.VolumeMounts = append(targetContainer.VolumeMounts, volMnt) + } +} diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go index 59a8750b573c..f7c031e11d36 100644 --- a/workflow/controller/workflowpod_test.go +++ b/workflow/controller/workflowpod_test.go @@ -341,3 +341,81 @@ func TestSchedulerName(t *testing.T) { assert.Nil(t, err) assert.Equal(t, pod.Spec.SchedulerName, "foo") } + +// TestInitContainers verifies the ability to set up initContainers +func TestInitContainers(t *testing.T) { + volumes := []apiv1.Volume{ + { + Name: "volume-name", + VolumeSource: apiv1.VolumeSource{ + EmptyDir: &apiv1.EmptyDirVolumeSource{}, + }, + }, + } + volumeMounts := []apiv1.VolumeMount{ + { + Name: "volume-name", + MountPath: "/test", + }, + } + mirrorVolumeMounts := true + + woc := newWoc() + woc.wf.Spec.Volumes = volumes + woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts + woc.wf.Spec.Templates[0].InitContainers = []wfv1.UserContainer{ + { + MirrorVolumeMounts: &mirrorVolumeMounts, + Container: apiv1.Container{ + Name: "init-foo", + }, + }, + } + + woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "") + podName := getPodName(woc.wf) + pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, 1, len(pod.Spec.InitContainers)) + assert.Equal(t, "init-foo", pod.Spec.InitContainers[0].Name) +} + +// TestSidecars verifies the ability to set up sidecars +func TestSidecars(t *testing.T) { + volumes := []apiv1.Volume{ + { + Name: "volume-name", + VolumeSource: apiv1.VolumeSource{ + EmptyDir: &apiv1.EmptyDirVolumeSource{}, + }, + }, + } + volumeMounts := []apiv1.VolumeMount{ + { + Name: "volume-name", + MountPath: "/test", + }, + } + 
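+	// Aside (not part of the original patch): MirrorVolumeMounts, set just
+	// below, copies the main container's volumeMounts onto the sidecar at the
+	// same mountPaths, so "side-foo" sees /test without declaring the mount
+	// itself; that is the behavior this test exercises.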
mirrorVolumeMounts := true + + woc := newWoc() + woc.wf.Spec.Volumes = volumes + woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts + woc.wf.Spec.Templates[0].Sidecars = []wfv1.UserContainer{ + { + MirrorVolumeMounts: &mirrorVolumeMounts, + Container: apiv1.Container{ + Name: "side-foo", + }, + }, + } + + woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "") + podName := getPodName(woc.wf) + pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, 3, len(pod.Spec.Containers)) + assert.Equal(t, "main", pod.Spec.Containers[0].Name) + assert.Equal(t, "wait", pod.Spec.Containers[1].Name) + assert.Equal(t, "side-foo", pod.Spec.Containers[2].Name) +} From 1a028d5458ffef240f8af31caeecda91f057c3ba Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com> Date: Fri, 5 Apr 2019 13:52:01 -0700 Subject: [PATCH 107/145] Secrets should be passed to pods using volumes instead of API calls (#1302) * Secrets should be passed to pods using downward API instead of API calls * Fixed Gogfmt format * fixed file close Gofmt * updated review comments * fixed gofmt * updated review comments --- workflow/artifacts/hdfs/hdfs.go | 4 +- workflow/common/common.go | 2 + workflow/controller/workflowpod.go | 130 +++++++++++++++++++++++++++-- workflow/executor/executor.go | 21 +++-- 4 files changed, 141 insertions(+), 16 deletions(-) diff --git a/workflow/artifacts/hdfs/hdfs.go b/workflow/artifacts/hdfs/hdfs.go index 2209fc0367bb..2e505b199d96 100644 --- a/workflow/artifacts/hdfs/hdfs.go +++ b/workflow/artifacts/hdfs/hdfs.go @@ -86,7 +86,7 @@ func CreateDriver(ci common.ResourceInterface, art *wfv1.HDFSArtifact) (*Artifac } } if art.KrbCCacheSecret != nil && art.KrbCCacheSecret.Name != "" { - bytes, err := ci.GetSecrets(namespace, art.KrbCCacheSecret.Name, art.KrbCCacheSecret.Key) + bytes, err := ci.GetSecretFromVolMount(art.KrbCCacheSecret.Name, art.KrbCCacheSecret.Key) if err != nil { return nil, err } @@ -103,7 +103,7 @@ func CreateDriver(ci common.ResourceInterface, art *wfv1.HDFSArtifact) (*Artifac } } if art.KrbKeytabSecret != nil && art.KrbKeytabSecret.Name != "" { - bytes, err := ci.GetSecrets(namespace, art.KrbKeytabSecret.Name, art.KrbKeytabSecret.Key) + bytes, err := ci.GetSecretFromVolMount(art.KrbKeytabSecret.Name, art.KrbKeytabSecret.Key) if err != nil { return nil, err } diff --git a/workflow/common/common.go b/workflow/common/common.go index 6a55552ebf05..96b25bd5a7c5 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -113,6 +113,7 @@ const ( KubeConfigDefaultMountPath = "/kube/config" KubeConfigDefaultVolumeName = "kubeconfig" + SecretVolMountPath = "/argo/secret" ) // GlobalVarWorkflowRootTags is a list of root tags in workflow which could be used for variable reference @@ -129,5 +130,6 @@ type ExecutionControl struct { type ResourceInterface interface { GetNamespace() string GetSecrets(namespace, name, key string) ([]byte, error) + GetSecretFromVolMount(name, key string) ([]byte, error) GetConfigMapKey(namespace, name, key string) (string, error) } diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 2cd2b7fe3b50..add6018956fa 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -132,6 +132,11 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont pod.ObjectMeta.Labels[common.LabelKeyControllerInstanceID] = 
woc.controller.Config.InstanceID } + err := woc.addArchiveLocation(pod, tmpl) + if err != nil { + return nil, err + } + if tmpl.GetType() != wfv1.TemplateTypeResource { // we do not need the wait container for resource templates because // argoexec runs as the main container and will perform the job of @@ -153,7 +158,7 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont addSchedulingConstraints(pod, wfSpec, tmpl) woc.addMetadata(pod, tmpl) - err := addVolumeReferences(pod, wfSpec, tmpl, woc.wf.Status.PersistentVolumeClaims) + err = addVolumeReferences(pod, wfSpec, tmpl, woc.wf.Status.PersistentVolumeClaims) if err != nil { return nil, err } @@ -163,11 +168,6 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont return nil, err } - err = woc.addArchiveLocation(pod, tmpl) - if err != nil { - return nil, err - } - if tmpl.GetType() == wfv1.TemplateTypeScript { addExecutorStagingVolume(pod) } @@ -373,7 +373,8 @@ func (woc *wfOperationCtx) newExecContainer(name string, privileged bool, subCom MountPath: path, ReadOnly: true, SubPath: woc.controller.Config.KubeConfig.SecretKey, - }} + }, + } exec.Args = append(exec.Args, "--kubeconfig="+path) } return &exec @@ -492,6 +493,7 @@ func addVolumeReferences(pod *apiv1.Pod, wfSpec *wfv1.WorkflowSpec, tmpl *wfv1.T } return nil } + if tmpl.Container != nil { err := addVolumeRef(tmpl.Container.VolumeMounts) if err != nil { @@ -504,12 +506,30 @@ func addVolumeReferences(pod *apiv1.Pod, wfSpec *wfv1.WorkflowSpec, tmpl *wfv1.T return err } } + for _, sidecar := range tmpl.Sidecars { err := addVolumeRef(sidecar.VolumeMounts) if err != nil { return err } } + + volumes, volumeMounts := createSecretVolumes(tmpl) + pod.Spec.Volumes = append(pod.Spec.Volumes, volumes...) + + for idx, container := range pod.Spec.Containers { + if container.Name == common.WaitContainerName { + pod.Spec.Containers[idx].VolumeMounts = append(pod.Spec.Containers[idx].VolumeMounts, volumeMounts...) + break + } + } + for idx, container := range pod.Spec.InitContainers { + if container.Name == common.InitContainerName { + pod.Spec.InitContainers[idx].VolumeMounts = append(pod.Spec.InitContainers[idx].VolumeMounts, volumeMounts...) 
+ break + } + } + return nil } @@ -768,6 +788,101 @@ func verifyResolvedVariables(obj interface{}) error { return unresolvedErr } +// createSecretVolumes will retrieve and create Volumes and Volumemount object for Pod +func createSecretVolumes(tmpl *wfv1.Template) ([]apiv1.Volume, []apiv1.VolumeMount) { + var allVolumesMap = make(map[string]apiv1.Volume) + var uniqueKeyMap = make(map[string]bool) + var secretVolumes []apiv1.Volume + var secretVolMounts []apiv1.VolumeMount + + createArgoArtifactsRepoSecret(tmpl, allVolumesMap, uniqueKeyMap) + + for _, art := range tmpl.Outputs.Artifacts { + createSecretVolume(allVolumesMap, art, uniqueKeyMap) + } + for _, art := range tmpl.Inputs.Artifacts { + createSecretVolume(allVolumesMap, art, uniqueKeyMap) + } + + for volMountName, val := range allVolumesMap { + secretVolumes = append(secretVolumes, val) + secretVolMounts = append(secretVolMounts, apiv1.VolumeMount{ + Name: volMountName, + MountPath: common.SecretVolMountPath, + ReadOnly: true, + }) + } + + return secretVolumes, secretVolMounts +} + +func createArgoArtifactsRepoSecret(tmpl *wfv1.Template, volMap map[string]apiv1.Volume, uniqueKeyMap map[string]bool) { + if s3ArtRepo := tmpl.ArchiveLocation.S3; s3ArtRepo != nil { + createSecretVal(volMap, s3ArtRepo.AccessKeySecret, uniqueKeyMap) + createSecretVal(volMap, s3ArtRepo.SecretKeySecret, uniqueKeyMap) + } else if hdfsArtRepo := tmpl.ArchiveLocation.HDFS; hdfsArtRepo != nil { + createSecretVal(volMap, *hdfsArtRepo.KrbKeytabSecret, uniqueKeyMap) + createSecretVal(volMap, *hdfsArtRepo.KrbCCacheSecret, uniqueKeyMap) + } else if artRepo := tmpl.ArchiveLocation.Artifactory; artRepo != nil { + createSecretVal(volMap, *artRepo.UsernameSecret, uniqueKeyMap) + createSecretVal(volMap, *artRepo.PasswordSecret, uniqueKeyMap) + } else if gitRepo := tmpl.ArchiveLocation.Git; gitRepo != nil { + createSecretVal(volMap, *gitRepo.UsernameSecret, uniqueKeyMap) + createSecretVal(volMap, *gitRepo.PasswordSecret, uniqueKeyMap) + createSecretVal(volMap, *gitRepo.SSHPrivateKeySecret, uniqueKeyMap) + } + +} + +func createSecretVolume(volMap map[string]apiv1.Volume, art wfv1.Artifact, keyMap map[string]bool) { + + if art.S3 != nil { + createSecretVal(volMap, art.S3.AccessKeySecret, keyMap) + createSecretVal(volMap, art.S3.SecretKeySecret, keyMap) + } else if art.Git != nil { + createSecretVal(volMap, *art.Git.UsernameSecret, keyMap) + createSecretVal(volMap, *art.Git.PasswordSecret, keyMap) + createSecretVal(volMap, *art.Git.SSHPrivateKeySecret, keyMap) + } else if art.Artifactory != nil { + createSecretVal(volMap, *art.Artifactory.UsernameSecret, keyMap) + createSecretVal(volMap, *art.Artifactory.PasswordSecret, keyMap) + } else if art.HDFS != nil { + createSecretVal(volMap, *art.HDFS.KrbCCacheSecret, keyMap) + createSecretVal(volMap, *art.HDFS.KrbKeytabSecret, keyMap) + + } +} + +func createSecretVal(volMap map[string]apiv1.Volume, secret apiv1.SecretKeySelector, keyMap map[string]bool) { + if vol, ok := volMap[secret.Name]; ok { + key := apiv1.KeyToPath{ + Key: secret.Key, + Path: secret.Name + "/" + secret.Key, + } + if val, _ := keyMap[secret.Name+"-"+secret.Key]; !val { + keyMap[secret.Name+"-"+secret.Key] = true + vol.Secret.Items = append(vol.Secret.Items, key) + } + } else { + volume := apiv1.Volume{ + Name: secret.Name, + VolumeSource: apiv1.VolumeSource{ + Secret: &apiv1.SecretVolumeSource{ + SecretName: secret.Name, + Items: []apiv1.KeyToPath{ + { + Key: secret.Key, + Path: secret.Name + "/" + secret.Key, + }, + }, + }, + }, + } + 
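+		// Aside (not part of the original patch): the KeyToPath entries above
+		// project each secret key at "<secretName>/<key>" beneath the shared
+		// common.SecretVolMountPath mount. That layout is what lets the
+		// executor's GetSecretFromVolMount read credentials back with a plain
+		// file read instead of a Kubernetes API call.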
keyMap[secret.Name+"-"+secret.Key] = true + volMap[secret.Name] = volume + } +} + // findMainContainer finds main container func findMainContainer(pod *apiv1.Pod) *apiv1.Container { var mainCtr *apiv1.Container @@ -789,5 +904,6 @@ func mirrorVolumeMounts(sourceContainer, targetContainer *apiv1.Container) { } log.Debugf("Adding volume mount %v to container %v", volMnt.Name, targetContainer.Name) targetContainer.VolumeMounts = append(targetContainer.VolumeMounts, volMnt) + } } diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index c361f3f3abc4..17561a2b0600 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -435,6 +435,13 @@ func (we *WorkflowExecutor) SaveLogs() (*wfv1.Artifact, error) { return &art, nil } +// GetSecretFromVolMount will retrive the Secrets from VolumeMount +func (we *WorkflowExecutor) GetSecretFromVolMount(accessKeyName string, accessKey string) ([]byte, error) { + + return ioutil.ReadFile(filepath.Join(common.SecretVolMountPath, accessKeyName, accessKey)) + +} + // InitDriver initializes an instance of an artifact driver func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriver, error) { if art.S3 != nil { @@ -442,12 +449,12 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv var secretKey string if art.S3.AccessKeySecret.Name != "" { - accessKeyBytes, err := we.GetSecrets(we.Namespace, art.S3.AccessKeySecret.Name, art.S3.AccessKeySecret.Key) + accessKeyBytes, err := we.GetSecretFromVolMount(art.S3.AccessKeySecret.Name, art.S3.AccessKeySecret.Key) if err != nil { return nil, err } accessKey = string(accessKeyBytes) - secretKeyBytes, err := we.GetSecrets(we.Namespace, art.S3.SecretKeySecret.Name, art.S3.SecretKeySecret.Key) + secretKeyBytes, err := we.GetSecretFromVolMount(art.S3.SecretKeySecret.Name, art.S3.SecretKeySecret.Key) if err != nil { return nil, err } @@ -471,21 +478,21 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv InsecureIgnoreHostKey: art.Git.InsecureIgnoreHostKey, } if art.Git.UsernameSecret != nil { - usernameBytes, err := we.GetSecrets(we.Namespace, art.Git.UsernameSecret.Name, art.Git.UsernameSecret.Key) + usernameBytes, err := we.GetSecretFromVolMount(art.Git.UsernameSecret.Name, art.Git.UsernameSecret.Key) if err != nil { return nil, err } gitDriver.Username = string(usernameBytes) } if art.Git.PasswordSecret != nil { - passwordBytes, err := we.GetSecrets(we.Namespace, art.Git.PasswordSecret.Name, art.Git.PasswordSecret.Key) + passwordBytes, err := we.GetSecretFromVolMount(art.Git.PasswordSecret.Name, art.Git.PasswordSecret.Key) if err != nil { return nil, err } gitDriver.Password = string(passwordBytes) } if art.Git.SSHPrivateKeySecret != nil { - sshPrivateKeyBytes, err := we.GetSecrets(we.Namespace, art.Git.SSHPrivateKeySecret.Name, art.Git.SSHPrivateKeySecret.Key) + sshPrivateKeyBytes, err := we.GetSecretFromVolMount(art.Git.SSHPrivateKeySecret.Name, art.Git.SSHPrivateKeySecret.Key) if err != nil { return nil, err } @@ -495,11 +502,11 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv return &gitDriver, nil } if art.Artifactory != nil { - usernameBytes, err := we.GetSecrets(we.Namespace, art.Artifactory.UsernameSecret.Name, art.Artifactory.UsernameSecret.Key) + usernameBytes, err := we.GetSecretFromVolMount(art.Artifactory.UsernameSecret.Name, art.Artifactory.UsernameSecret.Key) if err != nil { return nil, err } - passwordBytes, err := we.GetSecrets(we.Namespace, 
art.Artifactory.PasswordSecret.Name, art.Artifactory.PasswordSecret.Key) + passwordBytes, err := we.GetSecretFromVolMount(art.Artifactory.PasswordSecret.Name, art.Artifactory.PasswordSecret.Key) if err != nil { return nil, err } From abb77062fc06ae964ce7ccd1a534ec8bbdf3747c Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com> Date: Fri, 5 Apr 2019 15:46:44 -0700 Subject: [PATCH 108/145] CheckandEstimate implementation to optimize podReconciliation (#1308) * CheckandEstimate implementation * fixed variable rename * fixed gofmt * fixed feedbacks * Update operator.go * Update operator.go --- workflow/controller/operator.go | 62 ++++++++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 9 deletions(-) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index bfc1ed6b52df..acc4b335bb15 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -61,6 +61,15 @@ type wfOperationCtx struct { // workflowDeadline is the deadline which the workflow is expected to complete before we // terminate the workflow. workflowDeadline *time.Time + + // currentWFSize is current Workflow size + currentWFSize int + + // unSavedNodeStatusSize is unsaved workflow size + unSavedNodeStatusSize int + + // isWFCompressionFailed is workflow compression failed status + isWFCompressionFailed bool } var ( @@ -124,7 +133,9 @@ func (woc *wfOperationCtx) operate() { woc.log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack()) } }() + woc.log.Infof("Processing workflow") + // Perform one-time workflow validation if woc.wf.Status.Phase == "" { woc.markWorkflowRunning() @@ -453,9 +464,9 @@ func (woc *wfOperationCtx) podReconciliation() error { seenPodLock := &sync.Mutex{} wfNodesLock := &sync.RWMutex{} - performAssessment := func(pod *apiv1.Pod) { + performAssessment := func(pod *apiv1.Pod) string { if pod == nil { - return + return "" } nodeNameForPod := pod.Annotations[common.AnnotationKeyNodeName] nodeID := woc.wf.NodeID(nodeNameForPod) @@ -475,16 +486,20 @@ func (woc *wfOperationCtx) podReconciliation() error { if node.Completed() && !node.IsDaemoned() { if tmpVal, tmpOk := pod.Labels[common.LabelKeyCompleted]; tmpOk { if tmpVal == "true" { - return + return nodeID } } woc.completedPods[pod.ObjectMeta.Name] = true } } + return nodeID } parallelPodNum := make(chan string, 500) var wg sync.WaitGroup + + woc.currentWFSize = woc.getSize() + for _, pod := range podList.Items { parallelPodNum <- pod.Name wg.Add(1) @@ -493,20 +508,23 @@ func (woc *wfOperationCtx) podReconciliation() error { wfNodesLock.Lock() origNodeStatus := *woc.wf.Status.DeepCopy() wfNodesLock.Unlock() - performAssessment(&tmpPod) + nodeID := performAssessment(&tmpPod) err = woc.applyExecutionControl(&tmpPod, wfNodesLock) if err != nil { woc.log.Warnf("Failed to apply execution control to pod %s", tmpPod.Name) } wfNodesLock.Lock() defer wfNodesLock.Unlock() - err = woc.checkAndCompress() + err = woc.checkAndEstimate(nodeID) if err != nil { woc.wf.Status = origNodeStatus nodeNameForPod := tmpPod.Annotations[common.AnnotationKeyNodeName] woc.log.Warnf("%v", err) woc.markNodeErrorClearOuput(nodeNameForPod, err) err = woc.checkAndCompress() + if err != nil { + woc.isWFCompressionFailed = true + } } <-parallelPodNum }(pod) @@ -1664,17 +1682,17 @@ func (woc *wfOperationCtx) getSize() int { // The compressed content will be assign to compressedNodes element and clear the nodestatus map. 
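An aside on the helpers referenced in the comment above: `file.CompressEncodeString` and its inverse `file.DecodeDecompressString` are, presumably, a gzip plus base64 round trip, which is what makes the compressed node status safe to store as a string field on the Workflow object. A minimal sketch of such helpers, assuming only the Go standard library (the real `util/file` package may differ in details):

```go
package file

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"io/ioutil"
)

// CompressEncodeString gzips the input and returns it base64-encoded,
// suitable for storing in workflow.status.compressedNodes.
func CompressEncodeString(content string) string {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	_, _ = gz.Write([]byte(content))
	_ = gz.Close()
	return base64.StdEncoding.EncodeToString(buf.Bytes())
}

// DecodeDecompressString reverses CompressEncodeString.
func DecodeDecompressString(encoded string) (string, error) {
	data, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return "", err
	}
	gz, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return "", err
	}
	defer gz.Close()
	out, err := ioutil.ReadAll(gz)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
```

The `maxWorkflowSize` guard exists, presumably, because even the compressed status lives inside the Workflow resource itself, which must stay under the cluster's object size limits.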
func (woc *wfOperationCtx) checkAndCompress() error { - if woc.wf.Status.CompressedNodes != "" || (woc.wf.Status.CompressedNodes == "" && woc.getSize() >= maxWorkflowSize) { - + if !woc.isWFCompressionFailed && (woc.wf.Status.CompressedNodes != "" || (woc.wf.Status.CompressedNodes == "" && woc.getSize() >= maxWorkflowSize)) { nodeContent, err := json.Marshal(woc.wf.Status.Nodes) if err != nil { return errors.InternalWrapError(err) } buff := string(nodeContent) woc.wf.Status.CompressedNodes = file.CompressEncodeString(buff) - } - if woc.wf.Status.CompressedNodes != "" && woc.getSize() >= maxWorkflowSize { + + if woc.isWFCompressionFailed || (woc.wf.Status.CompressedNodes != "" && woc.getSize() >= maxWorkflowSize) { + woc.isWFCompressionFailed = true return errors.InternalError(fmt.Sprintf("Workflow is longer than maximum allowed size. Size=%d", woc.getSize())) } return nil @@ -1698,3 +1716,29 @@ func (woc *wfOperationCtx) checkAndDecompress() error { } return nil } + +// checkAndEstimate will check and estimate the workflow size with current nodestatus +func (woc *wfOperationCtx) checkAndEstimate(nodeID string) error { + if nodeID == "" { + return nil + } + + if woc.isWFCompressionFailed { + return errors.InternalErrorf("Workflow is longer than maximum allowed size. Size=%d", woc.currentWFSize+woc.unSavedNodeStatusSize) + } + + if woc.wf.Status.CompressedNodes != "" { + if node, ok := woc.wf.Status.Nodes[nodeID]; ok { + content, err := json.Marshal(node) + if err != nil { + return errors.InternalWrapError(err) + } + nodeSize := len(file.CompressEncodeString(string(content))) + if (nodeSize + woc.unSavedNodeStatusSize + woc.currentWFSize) >= maxWorkflowSize { + return errors.InternalErrorf("Workflow is longer than maximum allowed size. Size=%d", woc.currentWFSize+nodeSize+woc.unSavedNodeStatusSize) + } + woc.unSavedNodeStatusSize += nodeSize + } + } + return nil +} From bbdf2e2c8f1b5a8dc83e88fedba9b1899f6bc78b Mon Sep 17 00:00:00 2001 From: Xianlu Bird Date: Tue, 9 Apr 2019 12:01:56 +0800 Subject: [PATCH 109/145] Add alibaba cloud to officially using argo list (#1313) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index ec0cc9cca489..a1b0734ed420 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,7 @@ Currently **officially** using Argo: 1. [Admiralty](https://admiralty.io/) 1. [Adobe](https://www.adobe.com/) +1. [Alibaba Cloud](https://www.alibabacloud.com/about) 1. [BlackRock](https://www.blackrock.com/) 1. [Canva](https://www.canva.com/) 1. [CoreFiling](https://www.corefiling.com/) From 0d400f2ce6db9478b4eaa6fe24849a686c9d1d44 Mon Sep 17 00:00:00 2001 From: Xianlu Bird Date: Wed, 10 Apr 2019 00:21:50 +0800 Subject: [PATCH 110/145] Refactor checkandEstimate to optimize podReconciliation (#1311) * Refactor checkandEstimate to optimize podReconciliation * Move compress function to persistUpdates --- workflow/controller/operator.go | 71 +++++---------------------------- 1 file changed, 10 insertions(+), 61 deletions(-) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index acc4b335bb15..44e72be6340c 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -61,15 +61,6 @@ type wfOperationCtx struct { // workflowDeadline is the deadline which the workflow is expected to complete before we // terminate the workflow. 
workflowDeadline *time.Time - - // currentWFSize is current Workflow size - currentWFSize int - - // unSavedNodeStatusSize is unsaved workflow size - unSavedNodeStatusSize int - - // isWFCompressionFailed is workflow compression failed status - isWFCompressionFailed bool } var ( @@ -300,6 +291,7 @@ func (woc *wfOperationCtx) persistUpdates() { err := woc.checkAndCompress() if err != nil { woc.log.Warnf("Error compressing workflow: %v", err) + woc.markWorkflowFailed(err.Error()) } if woc.wf.Status.CompressedNodes != "" { woc.wf.Status.Nodes = nil @@ -464,9 +456,9 @@ func (woc *wfOperationCtx) podReconciliation() error { seenPodLock := &sync.Mutex{} wfNodesLock := &sync.RWMutex{} - performAssessment := func(pod *apiv1.Pod) string { + performAssessment := func(pod *apiv1.Pod) { if pod == nil { - return "" + return } nodeNameForPod := pod.Annotations[common.AnnotationKeyNodeName] nodeID := woc.wf.NodeID(nodeNameForPod) @@ -486,51 +478,34 @@ func (woc *wfOperationCtx) podReconciliation() error { if node.Completed() && !node.IsDaemoned() { if tmpVal, tmpOk := pod.Labels[common.LabelKeyCompleted]; tmpOk { if tmpVal == "true" { - return nodeID + return } } woc.completedPods[pod.ObjectMeta.Name] = true } } - return nodeID + return } parallelPodNum := make(chan string, 500) var wg sync.WaitGroup - woc.currentWFSize = woc.getSize() - for _, pod := range podList.Items { parallelPodNum <- pod.Name wg.Add(1) go func(tmpPod apiv1.Pod) { defer wg.Done() - wfNodesLock.Lock() - origNodeStatus := *woc.wf.Status.DeepCopy() - wfNodesLock.Unlock() - nodeID := performAssessment(&tmpPod) + performAssessment(&tmpPod) err = woc.applyExecutionControl(&tmpPod, wfNodesLock) if err != nil { woc.log.Warnf("Failed to apply execution control to pod %s", tmpPod.Name) } - wfNodesLock.Lock() - defer wfNodesLock.Unlock() - err = woc.checkAndEstimate(nodeID) - if err != nil { - woc.wf.Status = origNodeStatus - nodeNameForPod := tmpPod.Annotations[common.AnnotationKeyNodeName] - woc.log.Warnf("%v", err) - woc.markNodeErrorClearOuput(nodeNameForPod, err) - err = woc.checkAndCompress() - if err != nil { - woc.isWFCompressionFailed = true - } - } <-parallelPodNum }(pod) } wg.Wait() + // Now check for deleted pods. Iterate our nodes. If any one of our nodes does not show up in // the seen list it implies that the pod was deleted without the controller seeing the event. // It is now impossible to infer pod status. The only thing we can do at this point is to mark @@ -1682,7 +1657,7 @@ func (woc *wfOperationCtx) getSize() int { // The compressed content will be assign to compressedNodes element and clear the nodestatus map. func (woc *wfOperationCtx) checkAndCompress() error { - if !woc.isWFCompressionFailed && (woc.wf.Status.CompressedNodes != "" || (woc.wf.Status.CompressedNodes == "" && woc.getSize() >= maxWorkflowSize)) { + if woc.wf.Status.CompressedNodes != "" || (woc.wf.Status.CompressedNodes == "" && woc.getSize() >= maxWorkflowSize) { nodeContent, err := json.Marshal(woc.wf.Status.Nodes) if err != nil { return errors.InternalWrapError(err) @@ -1691,10 +1666,10 @@ func (woc *wfOperationCtx) checkAndCompress() error { woc.wf.Status.CompressedNodes = file.CompressEncodeString(buff) } - if woc.isWFCompressionFailed || (woc.wf.Status.CompressedNodes != "" && woc.getSize() >= maxWorkflowSize) { - woc.isWFCompressionFailed = true + if woc.wf.Status.CompressedNodes != "" && woc.getSize() >= maxWorkflowSize { return errors.InternalError(fmt.Sprintf("Workflow is longer than maximum allowed size. 
Size=%d", woc.getSize())) } + return nil } @@ -1716,29 +1691,3 @@ func (woc *wfOperationCtx) checkAndDecompress() error { } return nil } - -// checkAndEstimate will check and estimate the workflow size with current nodestatus -func (woc *wfOperationCtx) checkAndEstimate(nodeID string) error { - if nodeID == "" { - return nil - } - - if woc.isWFCompressionFailed { - return errors.InternalErrorf("Workflow is longer than maximum allowed size. Size=%d", woc.currentWFSize+woc.unSavedNodeStatusSize) - } - - if woc.wf.Status.CompressedNodes != "" { - if node, ok := woc.wf.Status.Nodes[nodeID]; ok { - content, err := json.Marshal(node) - if err != nil { - return errors.InternalWrapError(err) - } - nodeSize := len(file.CompressEncodeString(string(content))) - if (nodeSize + woc.unSavedNodeStatusSize + woc.currentWFSize) >= maxWorkflowSize { - return errors.InternalErrorf("Workflow is longer than maximum allowed size. Size=%d", woc.currentWFSize+nodeSize+woc.unSavedNodeStatusSize) - } - woc.unSavedNodeStatusSize += nodeSize - } - } - return nil -} From db89c477d65a29fc0a95ca55f68e1bd23d0170e0 Mon Sep 17 00:00:00 2001 From: Clemens Lange Date: Tue, 9 Apr 2019 23:10:09 +0200 Subject: [PATCH 111/145] Fix formatting issues in examples documentation (#1310) --- examples/README.md | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/examples/README.md b/examples/README.md index c65aacc103b7..bd1211a6d7b3 100644 --- a/examples/README.md +++ b/examples/README.md @@ -32,9 +32,10 @@ For a complete description of the Argo workflow spec, please refer to https://gi - [Sidecars](#sidecars) - [Hardwired Artifacts](#hardwired-artifacts) - [Kubernetes Resources](#kubernetes-resources) -- [Docker-in-Docker Using Sidecars](#docker-in-docker-aka-dind-using-sidecars) +- [Docker-in-Docker Using Sidecars](#docker-in-docker-using-sidecars) +- [Custom Template Variable Reference](#custom-template-variable-reference) - [Continuous Integration Example](#continuous-integration-example) -- [Custom Template Variable Referrence](#Custom Template Variable Referrence) + ## Argo CLI In case you want to follow along with this walkthrough, here's a quick overview of the most useful argo command line interface (CLI) commands. @@ -68,7 +69,8 @@ Let's start by creating a very simple workflow template to echo "hello world" us You can run this directly from your shell with a simple docker command: -``` + +```sh bash% docker run docker/whalesay cowsay "hello world" _____________ < hello world > @@ -148,6 +150,7 @@ spec: This time, the `whalesay` template takes an input parameter named `message` that is passed as the `args` to the `cowsay` command. In order to reference parameters (e.g., ``"{{inputs.parameters.message}}"``), the parameters must be enclosed in double quotes to escape the curly braces in YAML. The argo CLI provides a convenient way to override parameters used to invoke the entrypoint. For example, the following command would bind the `message` parameter to "goodbye world" instead of the default "hello world". + ```sh argo submit arguments-parameters.yaml -p message="goodbye world" ``` @@ -159,6 +162,7 @@ message: goodbye world ``` To run use following command: + ```sh argo submit arguments-parameters.yaml --parameter-file params.yaml ``` @@ -254,7 +258,7 @@ spec: The above workflow spec prints three different flavors of "hello". The `hello-hello-hello` template consists of three `steps`. 
The first step named `hello1` will be run in sequence whereas the next two steps named `hello2a` and `hello2b` will be run in parallel with each other. Using the argo CLI command, we can graphically display the execution history of this workflow spec, which shows that the steps named `hello2a` and `hello2b` ran in parallel with each other. -``` +```sh STEP PODNAME ✔ arguments-parameters-rbm92 ├---✔ hello1 steps-rbm92-2023062412 @@ -371,6 +375,7 @@ The `artifact-example` template passes the `hello-art` artifact generated as an ## The Structure of Workflow Specs We now know enough about the basic components of a workflow spec to review its basic structure: + - Kubernetes header including metadata - Spec body - Entrypoint invocation with optionally arguments @@ -389,7 +394,7 @@ Note that the controller section of the workflow spec will accept the same optio ## Secrets -Argo supports the same secrets syntax and mechanisms as Kubernetes Pod specs, which allows access to secrets as environment variables or volume mounts. See the (Kubernetes documentation)[https://kubernetes.io/docs/concepts/configuration/secret/] for more information. +Argo supports the same secrets syntax and mechanisms as Kubernetes Pod specs, which allows access to secrets as environment variables or volume mounts. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/) for more information. ```yaml # To run this example, first create the secret by running: @@ -429,6 +434,7 @@ spec: ``` ## Scripts & Results + Often, we just want a template that executes a script specified as a here-script (also known as a `here document`) in the workflow spec. This example shows how to do that: ```yaml @@ -655,6 +661,7 @@ spec: ``` We can even dynamically generate the list of items to iterate over! + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -787,7 +794,7 @@ spec: Here's the result of a couple of runs of coinflip for comparison. -``` +```sh argo get coinflip-recursive-tzcb5 STEP PODNAME MESSAGE @@ -811,6 +818,7 @@ STEP PODNAME MESSAGE └-·-✔ heads coinflip-recursive-tzcb5-4080323273 └-○ tails ``` + In the first run, the coin immediately comes up heads and we stop. In the second run, the coin comes up tail three times before it finally comes up heads and we stop. ## Exit handlers @@ -818,6 +826,7 @@ In the first run, the coin immediately comes up heads and we stop. In the second An exit handler is a template that *always* executes, irrespective of success or failure, at the end of the workflow. Some common use cases of exit handlers are: + - cleaning up after a workflow runs - sending notifications of workflow status (e.g., e-mail/Slack) - posting the pass/fail status to a webhook result (e.g. GitHub build result) @@ -871,6 +880,7 @@ spec: ``` ## Timeouts + To limit the elapsed time for a workflow, you can set the variable `activeDeadlineSeconds`. ```yaml @@ -893,6 +903,7 @@ spec: ## Volumes The following example dynamically creates a volume and then uses the volume in a two step workflow. + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -1145,7 +1156,6 @@ spec: args: ["ls -l /src /bin/kubectl /s3"] ``` - ## Kubernetes Resources In many cases, you will want to manage Kubernetes resources from Argo workflows. The resource template allows you to create, delete or updated any type of Kubernetes resource. @@ -1163,7 +1173,7 @@ spec: templates: - name: pi-tmpl resource: # indicates that this is a resource template - action: create # can be any kubectl action (e.g. 
create, delete, apply, patch) + action: create # can be any kubectl action (e.g. create, delete, apply, patch) # The successCondition and failureCondition are optional expressions. # If failureCondition is true, the step is considered failed. # If successCondition is true, the step is considered successful. @@ -1258,7 +1268,8 @@ spec: mirrorVolumeMounts: true ``` -## Custom Template Variable Referrence +## Custom Template Variable Reference + In this example, we can see how we can use the other template language variable reference (E.g: Jinja) in Argo workflow template. Argo will validate and resolve only the variable that starts with Argo allowed prefix {***"item", "steps", "inputs", "outputs", "workflow", "tasks"***} @@ -1295,7 +1306,7 @@ spec: image: docker/whalesay command: [cowsay] args: ["{{user.username}}"] - + ``` ## Continuous Integration Example From c60010da29bd36c10c6e627802df6d6a06c1a59a Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Tue, 9 Apr 2019 14:53:23 -0700 Subject: [PATCH 112/145] Fix nil pointer dereference with secret volumes (#1314) --- workflow/controller/workflowpod.go | 43 +++++++++++++++--------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index add6018956fa..229973c2e7cc 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -818,42 +818,43 @@ func createSecretVolumes(tmpl *wfv1.Template) ([]apiv1.Volume, []apiv1.VolumeMou func createArgoArtifactsRepoSecret(tmpl *wfv1.Template, volMap map[string]apiv1.Volume, uniqueKeyMap map[string]bool) { if s3ArtRepo := tmpl.ArchiveLocation.S3; s3ArtRepo != nil { - createSecretVal(volMap, s3ArtRepo.AccessKeySecret, uniqueKeyMap) - createSecretVal(volMap, s3ArtRepo.SecretKeySecret, uniqueKeyMap) + createSecretVal(volMap, &s3ArtRepo.AccessKeySecret, uniqueKeyMap) + createSecretVal(volMap, &s3ArtRepo.SecretKeySecret, uniqueKeyMap) } else if hdfsArtRepo := tmpl.ArchiveLocation.HDFS; hdfsArtRepo != nil { - createSecretVal(volMap, *hdfsArtRepo.KrbKeytabSecret, uniqueKeyMap) - createSecretVal(volMap, *hdfsArtRepo.KrbCCacheSecret, uniqueKeyMap) + createSecretVal(volMap, hdfsArtRepo.KrbKeytabSecret, uniqueKeyMap) + createSecretVal(volMap, hdfsArtRepo.KrbCCacheSecret, uniqueKeyMap) } else if artRepo := tmpl.ArchiveLocation.Artifactory; artRepo != nil { - createSecretVal(volMap, *artRepo.UsernameSecret, uniqueKeyMap) - createSecretVal(volMap, *artRepo.PasswordSecret, uniqueKeyMap) + createSecretVal(volMap, artRepo.UsernameSecret, uniqueKeyMap) + createSecretVal(volMap, artRepo.PasswordSecret, uniqueKeyMap) } else if gitRepo := tmpl.ArchiveLocation.Git; gitRepo != nil { - createSecretVal(volMap, *gitRepo.UsernameSecret, uniqueKeyMap) - createSecretVal(volMap, *gitRepo.PasswordSecret, uniqueKeyMap) - createSecretVal(volMap, *gitRepo.SSHPrivateKeySecret, uniqueKeyMap) + createSecretVal(volMap, gitRepo.UsernameSecret, uniqueKeyMap) + createSecretVal(volMap, gitRepo.PasswordSecret, uniqueKeyMap) + createSecretVal(volMap, gitRepo.SSHPrivateKeySecret, uniqueKeyMap) } } func createSecretVolume(volMap map[string]apiv1.Volume, art wfv1.Artifact, keyMap map[string]bool) { - if art.S3 != nil { - createSecretVal(volMap, art.S3.AccessKeySecret, keyMap) - createSecretVal(volMap, art.S3.SecretKeySecret, keyMap) + createSecretVal(volMap, &art.S3.AccessKeySecret, keyMap) + createSecretVal(volMap, &art.S3.SecretKeySecret, keyMap) } else if art.Git != nil { - createSecretVal(volMap, *art.Git.UsernameSecret, keyMap) - 
createSecretVal(volMap, *art.Git.PasswordSecret, keyMap) - createSecretVal(volMap, *art.Git.SSHPrivateKeySecret, keyMap) + createSecretVal(volMap, art.Git.UsernameSecret, keyMap) + createSecretVal(volMap, art.Git.PasswordSecret, keyMap) + createSecretVal(volMap, art.Git.SSHPrivateKeySecret, keyMap) } else if art.Artifactory != nil { - createSecretVal(volMap, *art.Artifactory.UsernameSecret, keyMap) - createSecretVal(volMap, *art.Artifactory.PasswordSecret, keyMap) + createSecretVal(volMap, art.Artifactory.UsernameSecret, keyMap) + createSecretVal(volMap, art.Artifactory.PasswordSecret, keyMap) } else if art.HDFS != nil { - createSecretVal(volMap, *art.HDFS.KrbCCacheSecret, keyMap) - createSecretVal(volMap, *art.HDFS.KrbKeytabSecret, keyMap) - + createSecretVal(volMap, art.HDFS.KrbCCacheSecret, keyMap) + createSecretVal(volMap, art.HDFS.KrbKeytabSecret, keyMap) } } -func createSecretVal(volMap map[string]apiv1.Volume, secret apiv1.SecretKeySelector, keyMap map[string]bool) { +func createSecretVal(volMap map[string]apiv1.Volume, secret *apiv1.SecretKeySelector, keyMap map[string]bool) { + if secret == nil { + return + } if vol, ok := volMap[secret.Name]; ok { key := apiv1.KeyToPath{ Key: secret.Key, From 02550be31e53da79f1f4dbebda3ede7dc1052086 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Tue, 9 Apr 2019 18:12:28 -0700 Subject: [PATCH 113/145] Archive location should conditionally be added to template only when needed --- workflow/controller/workflowpod.go | 63 +++++++++++++------------ workflow/controller/workflowpod_test.go | 59 ++++++++++++++++++++++- 2 files changed, 92 insertions(+), 30 deletions(-) diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 229973c2e7cc..46f6f13543e0 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -622,31 +622,42 @@ func (woc *wfOperationCtx) addInputArtifactsVolumes(pod *apiv1.Pod, tmpl *wfv1.T return nil } -// addArchiveLocation updates the template with the default artifact repository information -// configured in the controller. This is skipped for templates which have explicitly set an archive -// location in the template. +// addArchiveLocation conditionally updates the template with the default artifact repository +// information configured in the controller, for the purposes of archiving outputs. This is skipped +// for templates which do not need to archive anything, or have explicitly set an archive location +// in the template. func (woc *wfOperationCtx) addArchiveLocation(pod *apiv1.Pod, tmpl *wfv1.Template) error { - if tmpl.ArchiveLocation == nil { - tmpl.ArchiveLocation = &wfv1.ArtifactLocation{ - ArchiveLogs: woc.controller.Config.ArtifactRepository.ArchiveLogs, + // needLocation keeps track if the workflow needs to have an archive location set. + // If so, and one was not supplied (or defaulted), we will return error + var needLocation bool + + if tmpl.ArchiveLocation != nil { + if tmpl.ArchiveLocation.S3 != nil || tmpl.ArchiveLocation.Artifactory != nil || tmpl.ArchiveLocation.HDFS != nil { + // User explicitly set the location. nothing else to do. + return nil + } + if tmpl.ArchiveLocation.ArchiveLogs != nil && *tmpl.ArchiveLocation.ArchiveLogs { + needLocation = true } } - if tmpl.ArchiveLocation.S3 != nil || tmpl.ArchiveLocation.Artifactory != nil || tmpl.ArchiveLocation.HDFS != nil { - // User explicitly set the location. nothing else to do. 
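The nil-pointer fix in the previous patch (`Fix nil pointer dereference with secret volumes`) deserves a note: expressions like `createSecretVal(volMap, *art.Git.UsernameSecret, keyMap)` dereference at the call site, so a nil optional secret panics before any check inside the callee can run. Changing the parameter to a pointer and guarding for nil inside, as the patch does, makes unset secrets a no-op. A minimal, self-contained illustration with hypothetical names (not Argo code):

```go
package main

import "fmt"

// secretRef stands in for apiv1.SecretKeySelector; optional fields such as
// art.Git.UsernameSecret are pointers to it and may be nil.
type secretRef struct{ Name, Key string }

// use mirrors the patched createSecretVal signature: it takes the pointer
// and decides for itself what a nil (unset) secret means.
func use(s *secretRef) {
	if s == nil {
		return // optional secret not configured; nothing to mount
	}
	fmt.Println("would mount", s.Name+"/"+s.Key)
}

func main() {
	var unset *secretRef // e.g. a Git artifact with no username secret

	// With the old by-value signature, use(*unset) would panic right here,
	// because the dereference happens before the callee's nil check.

	use(unset) // safe: prints nothing and returns
}
```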
+ for _, art := range tmpl.Outputs.Artifacts { + if !art.HasLocation() { + needLocation = true + break + } + } + if !needLocation { + woc.log.Debugf("archive location unecessary") return nil } - // needLocation keeps track if the workflow needs to have an archive location set. - // If so, and one was not supplied (or defaulted), we will return error - var needLocation bool - if tmpl.ArchiveLocation.ArchiveLogs != nil && *tmpl.ArchiveLocation.ArchiveLogs { - needLocation = true + tmpl.ArchiveLocation = &wfv1.ArtifactLocation{ + ArchiveLogs: woc.controller.Config.ArtifactRepository.ArchiveLogs, } - // artifact location is defaulted using the following formula: // //.tgz // (e.g. myworkflowartifacts/argo-wf-fhljp/argo-wf-fhljp-123291312382/src.tgz) if s3Location := woc.controller.Config.ArtifactRepository.S3; s3Location != nil { - log.Debugf("Setting s3 artifact repository information") + woc.log.Debugf("Setting s3 artifact repository information") artLocationKey := s3Location.KeyFormat // NOTE: we use unresolved variables, will get substituted later if artLocationKey == "" { @@ -657,7 +668,7 @@ func (woc *wfOperationCtx) addArchiveLocation(pod *apiv1.Pod, tmpl *wfv1.Templat Key: artLocationKey, } } else if woc.controller.Config.ArtifactRepository.Artifactory != nil { - log.Debugf("Setting artifactory artifact repository information") + woc.log.Debugf("Setting artifactory artifact repository information") repoURL := "" if woc.controller.Config.ArtifactRepository.Artifactory.RepoURL != "" { repoURL = woc.controller.Config.ArtifactRepository.Artifactory.RepoURL + "/" @@ -668,22 +679,14 @@ func (woc *wfOperationCtx) addArchiveLocation(pod *apiv1.Pod, tmpl *wfv1.Templat URL: artURL, } } else if hdfsLocation := woc.controller.Config.ArtifactRepository.HDFS; hdfsLocation != nil { - log.Debugf("Setting HDFS artifact repository information") + woc.log.Debugf("Setting HDFS artifact repository information") tmpl.ArchiveLocation.HDFS = &wfv1.HDFSArtifact{ HDFSConfig: hdfsLocation.HDFSConfig, Path: hdfsLocation.PathFormat, Force: hdfsLocation.Force, } } else { - for _, art := range tmpl.Outputs.Artifacts { - if !art.HasLocation() { - needLocation = true - break - } - } - if needLocation { - return errors.Errorf(errors.CodeBadRequest, "controller is not configured with a default archive location") - } + return errors.Errorf(errors.CodeBadRequest, "controller is not configured with a default archive location") } return nil } @@ -795,7 +798,7 @@ func createSecretVolumes(tmpl *wfv1.Template) ([]apiv1.Volume, []apiv1.VolumeMou var secretVolumes []apiv1.Volume var secretVolMounts []apiv1.VolumeMount - createArgoArtifactsRepoSecret(tmpl, allVolumesMap, uniqueKeyMap) + createArchiveLocationSecret(tmpl, allVolumesMap, uniqueKeyMap) for _, art := range tmpl.Outputs.Artifacts { createSecretVolume(allVolumesMap, art, uniqueKeyMap) @@ -816,7 +819,10 @@ func createSecretVolumes(tmpl *wfv1.Template) ([]apiv1.Volume, []apiv1.VolumeMou return secretVolumes, secretVolMounts } -func createArgoArtifactsRepoSecret(tmpl *wfv1.Template, volMap map[string]apiv1.Volume, uniqueKeyMap map[string]bool) { +func createArchiveLocationSecret(tmpl *wfv1.Template, volMap map[string]apiv1.Volume, uniqueKeyMap map[string]bool) { + if tmpl.ArchiveLocation == nil { + return + } if s3ArtRepo := tmpl.ArchiveLocation.S3; s3ArtRepo != nil { createSecretVal(volMap, &s3ArtRepo.AccessKeySecret, uniqueKeyMap) createSecretVal(volMap, &s3ArtRepo.SecretKeySecret, uniqueKeyMap) @@ -831,7 +837,6 @@ func createArgoArtifactsRepoSecret(tmpl *wfv1.Template, 
volMap map[string]apiv1. createSecretVal(volMap, gitRepo.PasswordSecret, uniqueKeyMap) createSecretVal(volMap, gitRepo.SSHPrivateKeySecret, uniqueKeyMap) } - } func createSecretVolume(volMap map[string]apiv1.Volume, art wfv1.Artifact, keyMap map[string]bool) { diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go index f7c031e11d36..41f5bd12d495 100644 --- a/workflow/controller/workflowpod_test.go +++ b/workflow/controller/workflowpod_test.go @@ -1,6 +1,7 @@ package controller import ( + "encoding/json" "testing" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" @@ -181,7 +182,16 @@ func TestWorkflowControllerArchiveConfig(t *testing.T) { // TestWorkflowControllerArchiveConfigUnresolvable verifies workflow fails when archive location has // unresolvable variables func TestWorkflowControllerArchiveConfigUnresolvable(t *testing.T) { - woc := newWoc() + wf := unmarshalWF(helloWorldWf) + wf.Spec.Templates[0].Outputs = wfv1.Outputs{ + Artifacts: []wfv1.Artifact{ + { + Name: "foo", + Path: "/tmp/file", + }, + }, + } + woc := newWoc(*wf) woc.controller.Config.ArtifactRepository.S3 = &S3ArtifactRepository{ S3Bucket: wfv1.S3Bucket{ Bucket: "foo", @@ -194,6 +204,53 @@ func TestWorkflowControllerArchiveConfigUnresolvable(t *testing.T) { assert.Error(t, err) } +// TestConditionalNoAddArchiveLocation verifies we do not add archive location if it is not needed +func TestConditionalNoAddArchiveLocation(t *testing.T) { + woc := newWoc() + woc.controller.Config.ArtifactRepository.S3 = &S3ArtifactRepository{ + S3Bucket: wfv1.S3Bucket{ + Bucket: "foo", + }, + KeyFormat: "path/in/bucket", + } + woc.operate() + podName := getPodName(woc.wf) + pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{}) + assert.NoError(t, err) + var tmpl wfv1.Template + err = json.Unmarshal([]byte(pod.Annotations[common.AnnotationKeyTemplate]), &tmpl) + assert.NoError(t, err) + assert.Nil(t, tmpl.ArchiveLocation) +} + +// TestConditionalNoAddArchiveLocation verifies we add archive location when it is needed +func TestConditionalArchiveLocation(t *testing.T) { + wf := unmarshalWF(helloWorldWf) + wf.Spec.Templates[0].Outputs = wfv1.Outputs{ + Artifacts: []wfv1.Artifact{ + { + Name: "foo", + Path: "/tmp/file", + }, + }, + } + woc := newWoc() + woc.controller.Config.ArtifactRepository.S3 = &S3ArtifactRepository{ + S3Bucket: wfv1.S3Bucket{ + Bucket: "foo", + }, + KeyFormat: "path/in/bucket", + } + woc.operate() + podName := getPodName(woc.wf) + pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{}) + assert.NoError(t, err) + var tmpl wfv1.Template + err = json.Unmarshal([]byte(pod.Annotations[common.AnnotationKeyTemplate]), &tmpl) + assert.NoError(t, err) + assert.Nil(t, tmpl.ArchiveLocation) +} + // TestVolumeAndVolumeMounts verifies the ability to carry forward volumes and volumeMounts from workflow.spec func TestVolumeAndVolumeMounts(t *testing.T) { volumes := []apiv1.Volume{ From b4edfd30b0e3034d98e938b491cf5bd054b36525 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Tue, 9 Apr 2019 22:17:11 -0700 Subject: [PATCH 114/145] Fix SIGSEGV in watch/CheckAndDecompress. 
Consolidate duplicate code (resolves #1315) --- cmd/argo/commands/get.go | 20 ++------------------ cmd/argo/commands/list.go | 2 +- cmd/argo/commands/logs.go | 13 ++++++------- cmd/argo/commands/watch.go | 5 +++-- workflow/controller/controller.go | 8 ++++---- workflow/controller/operator.go | 19 ------------------- workflow/util/util.go | 17 +++++++++++++++++ 7 files changed, 33 insertions(+), 51 deletions(-) diff --git a/cmd/argo/commands/get.go b/cmd/argo/commands/get.go index b0badcb001ee..ef7b399887f0 100644 --- a/cmd/argo/commands/get.go +++ b/cmd/argo/commands/get.go @@ -8,9 +8,8 @@ import ( "strings" "text/tabwriter" - "github.com/argoproj/argo/errors" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo/util/file" + "github.com/argoproj/argo/workflow/util" "github.com/argoproj/pkg/humanize" "github.com/ghodss/yaml" "github.com/spf13/cobra" @@ -37,7 +36,7 @@ func NewGetCommand() *cobra.Command { if err != nil { log.Fatal(err) } - err = CheckAndDecompress(wf) + err = util.DecompressWorkflow(wf) if err != nil { log.Fatal(err) } @@ -50,21 +49,6 @@ func NewGetCommand() *cobra.Command { return command } -func CheckAndDecompress(wf *wfv1.Workflow) error { - if wf.Status.CompressedNodes != "" { - nodeContent, err := file.DecodeDecompressString(wf.Status.CompressedNodes) - if err != nil { - return errors.InternalWrapError(err) - } - err = json.Unmarshal([]byte(nodeContent), &wf.Status.Nodes) - if err != nil { - log.Fatal(err) - } - wf.Status.CompressedNodes = "" - } - return nil -} - func printWorkflow(wf *wfv1.Workflow, outFmt string) { switch outFmt { case "name": diff --git a/cmd/argo/commands/list.go b/cmd/argo/commands/list.go index 9a0d04e3d6e9..081a04a37f31 100644 --- a/cmd/argo/commands/list.go +++ b/cmd/argo/commands/list.go @@ -134,7 +134,7 @@ func countPendingRunningCompleted(wf *wfv1.Workflow) (int, int, int) { pending := 0 running := 0 completed := 0 - err := CheckAndDecompress(wf) + err := util.DecompressWorkflow(wf) if err != nil { log.Fatal(err) } diff --git a/cmd/argo/commands/logs.go b/cmd/argo/commands/logs.go index cbc7a86c5db2..026ec7c8b221 100644 --- a/cmd/argo/commands/logs.go +++ b/cmd/argo/commands/logs.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "hash/fnv" - "math" "os" "strconv" @@ -13,6 +12,9 @@ import ( "sync" "time" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" @@ -23,11 +25,8 @@ import ( "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" workflowv1 "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1" + "github.com/argoproj/argo/workflow/util" "github.com/argoproj/pkg/errors" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - v1 "k8s.io/api/core/v1" ) type logEntry struct { @@ -136,7 +135,7 @@ func (p *logPrinter) PrintPodLogs(podName string) error { // Prints logs for workflow pod steps and return most recent log timestamp per pod name func (p *logPrinter) printRecentWorkflowLogs(wf *v1alpha1.Workflow) map[string]*time.Time { var podNodes []v1alpha1.NodeStatus - err := CheckAndDecompress(wf) + err := util.DecompressWorkflow(wf) if err != nil { log.Warn(err) return nil @@ -198,7 +197,7 @@ func (p *logPrinter) printLiveWorkflowLogs(workflowName string, wfClient workflo defer cancel() processPods := func(wf *v1alpha1.Workflow) { - err := CheckAndDecompress(wf) + err := util.DecompressWorkflow(wf) if err != nil { log.Warn(err) 
return diff --git a/cmd/argo/commands/watch.go b/cmd/argo/commands/watch.go index abdcaaee5ae1..c1ca07f4396a 100644 --- a/cmd/argo/commands/watch.go +++ b/cmd/argo/commands/watch.go @@ -11,6 +11,7 @@ import ( "k8s.io/apimachinery/pkg/fields" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/workflow/util" ) func NewWatchCommand() *cobra.Command { @@ -45,8 +46,6 @@ func watchWorkflow(name string) { select { case next := <-watchIf.ResultChan(): wf, _ = next.Object.(*wfv1.Workflow) - err := CheckAndDecompress(wf) - errors.CheckError(err) case <-ticker.C: } if wf == nil { @@ -55,6 +54,8 @@ func watchWorkflow(name string) { errors.CheckError(err) continue } + err := util.DecompressWorkflow(wf) + errors.CheckError(err) print("\033[H\033[2J") print("\033[0;0H") printWorkflowHelper(wf, "") diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index d74036a4422d..adc98f6af220 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -243,12 +243,12 @@ func (wfc *WorkflowController) processNextItem() bool { } woc := newWorkflowOperationCtx(wf, wfc) - //Decompress the node if it is compressed - err = woc.checkAndDecompress() + // Decompress the node if it is compressed + err = util.DecompressWorkflow(woc.wf) if err != nil { - log.Warnf("Failed to decompress '%s' to workflow object: %v", key, err) - woc.markWorkflowFailed(fmt.Sprintf("invalid spec: %s", err.Error())) + woc.log.Warnf("workflow decompression failed: %v", err) + woc.markWorkflowFailed(fmt.Sprintf("workflow decompression failed: %s", err.Error())) woc.persistUpdates() wfc.throttler.Remove(key) return true diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 44e72be6340c..8cc5429fb069 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -1672,22 +1672,3 @@ func (woc *wfOperationCtx) checkAndCompress() error { return nil } - -// checkAndDecompress will decompress the compressednode and assign to workflow.status.nodes map. 
-func (woc *wfOperationCtx) checkAndDecompress() error { - if woc.wf.Status.CompressedNodes != "" { - nodeContent, err := file.DecodeDecompressString(woc.wf.Status.CompressedNodes) - if err != nil { - return errors.InternalWrapError(err) - } - var tempNodes map[string]wfv1.NodeStatus - - err = json.Unmarshal([]byte(nodeContent), &tempNodes) - if err != nil { - woc.log.Warn(err) - return err - } - woc.wf.Status.Nodes = tempNodes - } - return nil -} diff --git a/workflow/util/util.go b/workflow/util/util.go index 156f32a365d8..fc25a6198662 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -32,6 +32,7 @@ import ( wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1" cmdutil "github.com/argoproj/argo/util/cmd" + "github.com/argoproj/argo/util/file" "github.com/argoproj/argo/util/retry" unstructutil "github.com/argoproj/argo/util/unstructured" "github.com/argoproj/argo/workflow/common" @@ -525,3 +526,19 @@ func TerminateWorkflow(wfClient v1alpha1.WorkflowInterface, name string) error { } return err } + +// DecompressWorkflow decompresses the compressed status of a workflow (if compressed) +func DecompressWorkflow(wf *wfv1.Workflow) error { + if wf.Status.CompressedNodes != "" { + nodeContent, err := file.DecodeDecompressString(wf.Status.CompressedNodes) + if err != nil { + return errors.InternalWrapError(err) + } + err = json.Unmarshal([]byte(nodeContent), &wf.Status.Nodes) + if err != nil { + return err + } + wf.Status.CompressedNodes = "" + } + return nil +} From 75b28a37b923e278fc89fd647f78a42e7a3bf029 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Wed, 10 Apr 2019 00:53:12 -0700 Subject: [PATCH 115/145] Implement support for PNS (Process Namespace Sharing) executor (#1214) * Implements PNS (Process Namespace Sharing) executor * Adds limited support for Kubelet/K8s API artifact collection by mirroring volume mounts to wait sidecar * Adds validation to detect when output artifacts are not supported by the executor * Adds ability to customize executor from workflow-controller-configmap (e.g. 
add environment variables, append command line args such as loglevel) * Fixes an issue where daemon steps were not getting terminated properly --- CHANGELOG.md | 8 + Gopkg.lock | 27 +- cmd/argoexec/commands/init.go | 25 +- cmd/argoexec/commands/resource.go | 33 +- cmd/argoexec/commands/root.go | 109 ++--- cmd/argoexec/commands/wait.go | 34 +- cmd/argoexec/main.go | 2 +- cmd/workflow-controller/main.go | 16 +- demo.md | 9 +- docs/workflow-controller-configmap.yaml | 37 +- examples/artifact-disable-archive.yaml | 51 +++ examples/artifact-passing.yaml | 2 +- examples/ci-output-artifact.yaml | 7 +- examples/continue-on-fail.yaml | 36 ++ ...On-fail.yaml => dag-continue-on-fail.yaml} | 4 +- examples/global-outputs.yaml | 2 +- examples/output-parameter.yaml | 2 +- examples/parameter-aggregation-dag.yaml | 1 + examples/parameter-aggregation.yaml | 1 + examples/sidecar-dind.yaml | 2 +- examples/workflow-continueOn-fail.yaml | 67 --- pkg/apis/workflow/v1alpha1/types.go | 31 +- .../expectedfailures/disallow-unknown.json | 25 -- .../pns/pns-output-artifacts.yaml | 39 ++ .../pns/pns-quick-exit-output-art.yaml | 30 ++ .../functional/artifact-disable-archive.yaml | 50 +-- test/e2e/functional/continue-on-fail.yaml | 1 + test/e2e/functional/dag-argument-passing.yaml | 4 +- test/e2e/functional/global-outputs-dag.yaml | 2 +- .../functional/global-outputs-variable.yaml | 2 +- ...g-outputs.yaml => nested-dag-outputs.yaml} | 1 + .../functional/output-artifact-optional.yaml | 2 +- .../output-input-artifact-optional.yaml | 2 +- .../output-param-different-uid.yaml | 27 ++ test/e2e/functional/pns-output-params.yaml | 71 ++++ test/e2e/functional/retry-with-artifacts.yaml | 2 +- test/e2e/lintfail/disallow-unknown.yaml | 15 + .../invalid-spec.yaml | 0 .../malformed-spec.yaml} | 0 test/e2e/ui/ui-nested-steps.yaml | 12 +- util/archive/archive.go | 131 ++++++ util/archive/archive_test.go | 60 +++ workflow/artifacts/s3/s3.go | 24 +- workflow/common/common.go | 15 +- workflow/common/util.go | 162 +++++++- workflow/controller/config.go | 14 +- workflow/controller/exec_control.go | 8 +- workflow/controller/operator.go | 29 +- workflow/controller/operator_test.go | 22 +- workflow/controller/workflowpod.go | 187 +++++---- workflow/controller/workflowpod_test.go | 44 +- workflow/executor/common/common.go | 6 +- workflow/executor/docker/docker.go | 57 ++- workflow/executor/executor.go | 318 ++++++++++----- workflow/executor/k8sapi/k8sapi.go | 20 +- workflow/executor/kubelet/client.go | 86 +--- workflow/executor/kubelet/kubelet.go | 19 +- .../mocks/ContainerRuntimeExecutor.go | 44 +- workflow/executor/pns/pns.go | 385 ++++++++++++++++++ workflow/metrics/collector.go | 16 +- workflow/util/util.go | 6 +- workflow/validate/lint.go | 5 +- workflow/validate/validate.go | 78 +++- workflow/validate/validate_test.go | 148 ++++++- 64 files changed, 1946 insertions(+), 729 deletions(-) create mode 100644 examples/artifact-disable-archive.yaml create mode 100644 examples/continue-on-fail.yaml rename examples/{dag-continueOn-fail.yaml => dag-continue-on-fail.yaml} (93%) delete mode 100644 examples/workflow-continueOn-fail.yaml delete mode 100644 test/e2e/expectedfailures/disallow-unknown.json create mode 100644 test/e2e/expectedfailures/pns/pns-output-artifacts.yaml create mode 100644 test/e2e/expectedfailures/pns/pns-quick-exit-output-art.yaml mode change 100644 => 120000 test/e2e/functional/artifact-disable-archive.yaml create mode 120000 test/e2e/functional/continue-on-fail.yaml rename test/e2e/functional/{dag-outputs.yaml => 
nested-dag-outputs.yaml} (99%) create mode 100644 test/e2e/functional/output-param-different-uid.yaml create mode 100644 test/e2e/functional/pns-output-params.yaml create mode 100644 test/e2e/lintfail/disallow-unknown.yaml rename test/e2e/{expectedfailures => lintfail}/invalid-spec.yaml (100%) rename test/e2e/{expectedfailures/maformed-spec.yaml => lintfail/malformed-spec.yaml} (100%) create mode 100644 util/archive/archive.go create mode 100644 util/archive/archive_test.go create mode 100644 workflow/executor/pns/pns.go diff --git a/CHANGELOG.md b/CHANGELOG.md index db4a421e05e1..6ecd34f6c15b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 2.3.0 (Not Yet Released) + +### Deprecation Notice +The workflow-controller-configmap introduces a new config field, `executor`, which is a container +spec and provides controls over the executor sidecar container (i.e. `init`/`wait`). The fields +`executorImage`, `executorResources`, and `executorImagePullPolicy` are deprecated and will be +removed in a future release. + ## 2.2.1 (2018-10-18) ### Changelog since v2.2.0 diff --git a/Gopkg.lock b/Gopkg.lock index ec3e23ccef9e..8c9207807811 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -46,10 +46,12 @@ [[projects]] branch = "master" - digest = "1:c3b7ed058146643b16d3a9827550fba317dbff9f55249dfafac7eb6c3652ad23" + digest = "1:72347a6143ccb58245c6f8055662ae6cb2d5dd655699f0fc479c25cc610fc582" name = "github.com/argoproj/pkg" packages = [ + "cli", "errors", + "exec", "file", "humanize", "json", @@ -61,7 +63,7 @@ "time", ] pruneopts = "" - revision = "a581a48d63014312c4f2762787f669e46bdb1fd9" + revision = "7e3ef65c8d44303738c7e815bd9b1b297b39f5c8" [[projects]] branch = "master" @@ -392,6 +394,14 @@ pruneopts = "" revision = "58046073cbffe2f25d425fe1331102f55cf719de" +[[projects]] + branch = "master" + digest = "1:1dee6133ab829c8559a39031ad1e0e3538e4a7b34d3e0509d1fc247737e928c1" + name = "github.com/mitchellh/go-ps" + packages = ["."] + pruneopts = "" + revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062" + [[projects]] digest = "1:0c0ff2a89c1bb0d01887e1dac043ad7efbf3ec77482ef058ac423d13497e16fd" name = "github.com/modern-go/concurrent" @@ -1143,12 +1153,22 @@ pruneopts = "" revision = "e3762e86a74c878ffed47484592986685639c2cd" +[[projects]] + branch = "master" + digest = "1:f6c19347011ba9a072aa55f5c7fa630c0b88303ac4ca83008454aef95b0c2078" + name = "k8s.io/utils" + packages = ["pointer"] + pruneopts = "" + revision = "21c4ce38f2a793ec01e925ddc31216500183b773" + [solve-meta] analyzer-name = "dep" analyzer-version = 1 input-imports = [ "github.com/Knetic/govaluate", + "github.com/argoproj/pkg/cli", "github.com/argoproj/pkg/errors", + "github.com/argoproj/pkg/exec", "github.com/argoproj/pkg/file", "github.com/argoproj/pkg/humanize", "github.com/argoproj/pkg/json", @@ -1162,8 +1182,8 @@ "github.com/evanphx/json-patch", "github.com/ghodss/yaml", "github.com/go-openapi/spec", - "github.com/golang/glog", "github.com/gorilla/websocket", + "github.com/mitchellh/go-ps", "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", @@ -1222,6 +1242,7 @@ "k8s.io/code-generator/cmd/informer-gen", "k8s.io/code-generator/cmd/lister-gen", "k8s.io/kube-openapi/pkg/common", + "k8s.io/utils/pointer", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/cmd/argoexec/commands/init.go b/cmd/argoexec/commands/init.go index c581a240496c..270cada7e52f 100644 --- a/cmd/argoexec/commands/init.go +++ b/cmd/argoexec/commands/init.go 
@@ -6,19 +6,18 @@ import ( "github.com/spf13/cobra" ) -func init() { - RootCmd.AddCommand(initCmd) -} - -var initCmd = &cobra.Command{ - Use: "init", - Short: "Load artifacts", - Run: func(cmd *cobra.Command, args []string) { - err := loadArtifacts() - if err != nil { - log.Fatalf("%+v", err) - } - }, +func NewInitCommand() *cobra.Command { + var command = cobra.Command{ + Use: "init", + Short: "Load artifacts", + Run: func(cmd *cobra.Command, args []string) { + err := loadArtifacts() + if err != nil { + log.Fatalf("%+v", err) + } + }, + } + return &command } func loadArtifacts() error { diff --git a/cmd/argoexec/commands/resource.go b/cmd/argoexec/commands/resource.go index d83c767ab18a..d024e1683ae9 100644 --- a/cmd/argoexec/commands/resource.go +++ b/cmd/argoexec/commands/resource.go @@ -9,23 +9,22 @@ import ( "github.com/spf13/cobra" ) -func init() { - RootCmd.AddCommand(resourceCmd) -} - -var resourceCmd = &cobra.Command{ - Use: "resource (get|create|apply|delete) MANIFEST", - Short: "update a resource and wait for resource conditions", - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cmd.HelpFunc()(cmd, args) - os.Exit(1) - } - err := execResource(args[0]) - if err != nil { - log.Fatalf("%+v", err) - } - }, +func NewResourceCommand() *cobra.Command { + var command = cobra.Command{ + Use: "resource (get|create|apply|delete) MANIFEST", + Short: "update a resource and wait for resource conditions", + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cmd.HelpFunc()(cmd, args) + os.Exit(1) + } + err := execResource(args[0]) + if err != nil { + log.Fatalf("%+v", err) + } + }, + } + return &command } func execResource(action string) error { diff --git a/cmd/argoexec/commands/root.go b/cmd/argoexec/commands/root.go index c53dadcd2e1d..60405a8eb312 100644 --- a/cmd/argoexec/commands/root.go +++ b/cmd/argoexec/commands/root.go @@ -1,10 +1,11 @@ package commands import ( + "encoding/json" "os" - "github.com/argoproj/pkg/kube/cli" - "github.com/ghodss/yaml" + "github.com/argoproj/pkg/cli" + kubecli "github.com/argoproj/pkg/kube/cli" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" @@ -17,6 +18,7 @@ import ( "github.com/argoproj/argo/workflow/executor/docker" "github.com/argoproj/argo/workflow/executor/k8sapi" "github.com/argoproj/argo/workflow/executor/kubelet" + "github.com/argoproj/argo/workflow/executor/pns" ) const ( @@ -25,83 +27,82 @@ const ( ) var ( - // GlobalArgs hold global CLI flags - GlobalArgs globalFlags - - clientConfig clientcmd.ClientConfig -) - -type globalFlags struct { + clientConfig clientcmd.ClientConfig + logLevel string // --loglevel + glogLevel int // --gloglevel podAnnotationsPath string // --pod-annotations -} +) func init() { - clientConfig = cli.AddKubectlFlagsToCmd(RootCmd) - RootCmd.PersistentFlags().StringVar(&GlobalArgs.podAnnotationsPath, "pod-annotations", common.PodMetadataAnnotationsPath, "Pod annotations file from k8s downward API") - RootCmd.AddCommand(cmd.NewVersionCmd(CLIName)) + cobra.OnInitialize(initConfig) } -// RootCmd is the argo root level command -var RootCmd = &cobra.Command{ - Use: CLIName, - Short: "argoexec is the executor sidecar to workflow containers", - Run: func(cmd *cobra.Command, args []string) { - cmd.HelpFunc()(cmd, args) - }, +func initConfig() { + cli.SetLogLevel(logLevel) + cli.SetGLogLevel(glogLevel) } -func initExecutor() *executor.WorkflowExecutor { - podAnnotationsPath := common.PodMetadataAnnotationsPath - - // Use the path specified from the 
flag - if GlobalArgs.podAnnotationsPath != "" { - podAnnotationsPath = GlobalArgs.podAnnotationsPath +func NewRootCommand() *cobra.Command { + var command = cobra.Command{ + Use: CLIName, + Short: "argoexec is the executor sidecar to workflow containers", + Run: func(cmd *cobra.Command, args []string) { + cmd.HelpFunc()(cmd, args) + }, } + command.AddCommand(NewInitCommand()) + command.AddCommand(NewResourceCommand()) + command.AddCommand(NewWaitCommand()) + command.AddCommand(cmd.NewVersionCmd(CLIName)) + + clientConfig = kubecli.AddKubectlFlagsToCmd(&command) + command.PersistentFlags().StringVar(&podAnnotationsPath, "pod-annotations", common.PodMetadataAnnotationsPath, "Pod annotations file from k8s downward API") + command.PersistentFlags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error") + command.PersistentFlags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level") + + return &command +} + +func initExecutor() *executor.WorkflowExecutor { config, err := clientConfig.ClientConfig() - if err != nil { - panic(err.Error()) - } + checkErr(err) + namespace, _, err := clientConfig.Namespace() - if err != nil { - panic(err.Error()) - } + checkErr(err) clientset, err := kubernetes.NewForConfig(config) - if err != nil { - panic(err.Error()) - } - podName, ok := os.LookupEnv(common.EnvVarPodName) - if !ok { - log.Fatalf("Unable to determine pod name from environment variable %s", common.EnvVarPodName) - } + checkErr(err) + + podName, err := os.Hostname() + checkErr(err) + + tmpl, err := executor.LoadTemplate(podAnnotationsPath) + checkErr(err) var cre executor.ContainerRuntimeExecutor switch os.Getenv(common.EnvVarContainerRuntimeExecutor) { case common.ContainerRuntimeExecutorK8sAPI: cre, err = k8sapi.NewK8sAPIExecutor(clientset, config, podName, namespace) - if err != nil { - panic(err.Error()) - } case common.ContainerRuntimeExecutorKubelet: cre, err = kubelet.NewKubeletExecutor() - if err != nil { - panic(err.Error()) - } + case common.ContainerRuntimeExecutorPNS: + cre, err = pns.NewPNSExecutor(clientset, podName, namespace, tmpl.Outputs.HasOutputs()) default: cre, err = docker.NewDockerExecutor() - if err != nil { - panic(err.Error()) - } - } - wfExecutor := executor.NewExecutor(clientset, podName, namespace, podAnnotationsPath, cre) - err = wfExecutor.LoadTemplate() - if err != nil { - panic(err.Error()) } + checkErr(err) - yamlBytes, _ := yaml.Marshal(&wfExecutor.Template) + wfExecutor := executor.NewExecutor(clientset, podName, namespace, podAnnotationsPath, cre, *tmpl) + yamlBytes, _ := json.Marshal(&wfExecutor.Template) vers := argo.GetVersion() log.Infof("Executor (version: %s, build_date: %s) initialized with template:\n%s", vers, vers.BuildDate, string(yamlBytes)) return &wfExecutor } + +// checkErr is a convenience function to panic upon error +func checkErr(err error) { + if err != nil { + panic(err.Error()) + } +} diff --git a/cmd/argoexec/commands/wait.go b/cmd/argoexec/commands/wait.go index 32d9b1114579..1e933f29fdcf 100644 --- a/cmd/argoexec/commands/wait.go +++ b/cmd/argoexec/commands/wait.go @@ -8,19 +8,18 @@ import ( "github.com/spf13/cobra" ) -func init() { - RootCmd.AddCommand(waitCmd) -} - -var waitCmd = &cobra.Command{ - Use: "wait", - Short: "wait for main container to finish and save artifacts", - Run: func(cmd *cobra.Command, args []string) { - err := waitContainer() - if err != nil { - log.Fatalf("%+v", err) - } - }, +func NewWaitCommand() *cobra.Command { + var command = cobra.Command{ + Use: "wait", + 
Short: "wait for main container to finish and save artifacts", + Run: func(cmd *cobra.Command, args []string) { + err := waitContainer() + if err != nil { + log.Fatalf("%+v", err) + } + }, + } + return &command } func waitContainer() error { @@ -29,11 +28,16 @@ func waitContainer() error { defer stats.LogStats() stats.StartStatsTicker(5 * time.Minute) - // Wait for main container to complete and kill sidecars + // Wait for main container to complete err := wfExecutor.Wait() if err != nil { wfExecutor.AddError(err) - // do not return here so we can still try to save outputs + // do not return here so we can still try to kill sidecars & save outputs + } + err = wfExecutor.KillSidecars() + if err != nil { + wfExecutor.AddError(err) + // do not return here so we can still try save outputs } logArt, err := wfExecutor.SaveLogs() if err != nil { diff --git a/cmd/argoexec/main.go b/cmd/argoexec/main.go index 629e1b0806fd..99c6b4d30694 100644 --- a/cmd/argoexec/main.go +++ b/cmd/argoexec/main.go @@ -14,7 +14,7 @@ import ( ) func main() { - if err := commands.RootCmd.Execute(); err != nil { + if err := commands.NewRootCommand().Execute(); err != nil { fmt.Println(err) os.Exit(1) } diff --git a/cmd/workflow-controller/main.go b/cmd/workflow-controller/main.go index f881739dbf23..141615c9fcd5 100644 --- a/cmd/workflow-controller/main.go +++ b/cmd/workflow-controller/main.go @@ -2,13 +2,12 @@ package main import ( "context" - "flag" "fmt" "os" - "strconv" "time" - "github.com/argoproj/pkg/kube/cli" + "github.com/argoproj/pkg/cli" + kubecli "github.com/argoproj/pkg/kube/cli" "github.com/argoproj/pkg/stats" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" @@ -44,16 +43,11 @@ func NewRootCommand() *cobra.Command { Use: CLIName, Short: "workflow-controller is the controller to operate on workflows", RunE: func(c *cobra.Command, args []string) error { - - cmdutil.SetLogLevel(logLevel) + cli.SetLogLevel(logLevel) + cli.SetGLogLevel(glogLevel) stats.RegisterStackDumper() stats.StartStatsTicker(5 * time.Minute) - // Set the glog level for the k8s go-client - _ = flag.CommandLine.Parse([]string{}) - _ = flag.Lookup("logtostderr").Value.Set("true") - _ = flag.Lookup("v").Value.Set(strconv.Itoa(glogLevel)) - config, err := clientConfig.ClientConfig() if err != nil { return err @@ -89,7 +83,7 @@ func NewRootCommand() *cobra.Command { }, } - clientConfig = cli.AddKubectlFlagsToCmd(&command) + clientConfig = kubecli.AddKubectlFlagsToCmd(&command) command.AddCommand(cmdutil.NewVersionCmd(CLIName)) command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration") command.Flags().StringVar(&executorImage, "executor-image", "", "Executor image to use (overrides value in configmap)") diff --git a/demo.md b/demo.md index 63555f63f7ae..20d614797336 100644 --- a/demo.md +++ b/demo.md @@ -75,9 +75,12 @@ Argo supports S3 (AWS, GCS, Minio) as well as Artifactory as artifact repositori uses Minio for the sake of portability. Instructions on how to configure other artifact repositories are [here](https://github.com/argoproj/argo/blob/master/ARTIFACT_REPO.md). 
``` -brew install kubernetes-helm # mac -helm init -helm install stable/minio --name argo-artifacts --set service.type=LoadBalancer --set persistence.enabled=false +helm install stable/minio \ + --name argo-artifacts \ + --set service.type=LoadBalancer \ + --set defaultBucket.enabled=true \ + --set defaultBucket.name=my-bucket \ + --set persistence.enabled=false ``` Login to the Minio UI using a web browser (port 9000) after exposing obtaining the external IP using `kubectl`. diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml index 3e27ea2c1c70..d82b8c42a62a 100644 --- a/docs/workflow-controller-configmap.yaml +++ b/docs/workflow-controller-configmap.yaml @@ -49,6 +49,8 @@ data: endpoint: s3.amazonaws.com bucket: my-bucket region: us-west-2 + # insecure will disable TLS. Primarily used for minio installs not configured with TLS + insecure: false # keyFormat is a format pattern to define how artifacts will be organized in a bucket. # It can reference workflow metadata variables such as workflow.namespace, workflow.name, # pod.name. Can also use strftime formating of workflow.creationTimestamp so that workflow @@ -63,10 +65,8 @@ data: /{{workflow.creationTimestamp.d}}\ /{{workflow.name}}\ /{{pod.name}}" - # insecure will disable TLS. used for minio installs not configured with TLS - insecure: false # The actual secret object (in this example my-s3-credentials), should be created in every - # namespace which a workflow wants which wants to store its artifacts to S3. If omitted, + # namespace where a workflow needs to store its artifacts to S3. If omitted, # attempts to use IAM role to access the bucket (instead of accessKey/secretKey). accessKeySecret: name: my-s3-credentials @@ -84,16 +84,27 @@ data: # disable the TLS verification of the kubelet executor (default: false) kubeletInsecure: false - # executorResources specifies the resource requirements that will be used for the executor - # sidecar/init container. This is useful in clusters which require resources to be specified as - # part of admission control. - executorResources: - requests: - cpu: 0.1 - memory: 64Mi - limits: - cpu: 0.5 - memory: 512Mi + # executor controls how the init and wait container should be customized + # (available since Argo v2.3) + executor: + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 0.1 + memory: 64Mi + limits: + cpu: 0.5 + memory: 512Mi + # args & env allows command line arguments and environment variables to be appended to the + # executor container and is mainly used for development/debugging purposes. + args: + - --loglevel + - debug + - --gloglevel + - "6" + env: + - name: DEBUG_FLAG + value: "1" # metricsConfig controls the path and port for prometheus metrics metricsConfig: diff --git a/examples/artifact-disable-archive.yaml b/examples/artifact-disable-archive.yaml new file mode 100644 index 000000000000..444b01ac5b53 --- /dev/null +++ b/examples/artifact-disable-archive.yaml @@ -0,0 +1,51 @@ +# This example demonstrates the ability to disable the default behavior of archiving (tar.gz) +# when saving output artifacts. For directories, when archive is set to none, files in directory +# will be copied recursively in the case of S3. 
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: artifact-disable-archive-
+spec:
+  entrypoint: artifact-disable-archive
+  templates:
+  - name: artifact-disable-archive
+    steps:
+    - - name: generate-artifact
+        template: whalesay
+    - - name: consume-artifact
+        template: print-message
+        arguments:
+          artifacts:
+          - name: etc
+            from: "{{steps.generate-artifact.outputs.artifacts.etc}}"
+          - name: hello-txt
+            from: "{{steps.generate-artifact.outputs.artifacts.hello-txt}}"
+
+  - name: whalesay
+    container:
+      image: docker/whalesay:latest
+      command: [sh, -c]
+      args: ["cowsay hello world | tee /tmp/hello_world.txt ; sleep 1"]
+    outputs:
+      artifacts:
+      - name: etc
+        path: /etc
+        archive:
+          none: {}
+      - name: hello-txt
+        path: /tmp/hello_world.txt
+        archive:
+          none: {}
+
+  - name: print-message
+    inputs:
+      artifacts:
+      - name: etc
+        path: /tmp/etc
+      - name: hello-txt
+        path: /tmp/hello.txt
+    container:
+      image: alpine:latest
+      command: [sh, -c]
+      args:
+      - cat /tmp/hello.txt && cd /tmp/etc && find .
diff --git a/examples/artifact-passing.yaml b/examples/artifact-passing.yaml
index dd301b9ac116..90fdeacd3728 100644
--- a/examples/artifact-passing.yaml
+++ b/examples/artifact-passing.yaml
@@ -22,7 +22,7 @@ spec:
     container:
       image: docker/whalesay:latest
       command: [sh, -c]
-      args: ["cowsay hello world | tee /tmp/hello_world.txt"]
+      args: ["sleep 1; cowsay hello world | tee /tmp/hello_world.txt"]
     outputs:
       artifacts:
       - name: hello-art
diff --git a/examples/ci-output-artifact.yaml b/examples/ci-output-artifact.yaml
index fababef1bb13..591fec3cea39 100644
--- a/examples/ci-output-artifact.yaml
+++ b/examples/ci-output-artifact.yaml
@@ -1,7 +1,7 @@
 apiVersion: argoproj.io/v1alpha1
 kind: Workflow
 metadata:
-  generateName: ci-example-
+  generateName: ci-output-artifact-
 spec:
   entrypoint: ci-example
   # a temporary volume, named workdir, will be used as a working
@@ -75,7 +75,10 @@ spec:

   - name: release-artifact
     container:
-      image: debian:9.4
+      image: alpine:3.8
+      volumeMounts:
+      - name: workdir
+        mountPath: /go
     outputs:
       artifacts:
       - name: release
diff --git a/examples/continue-on-fail.yaml b/examples/continue-on-fail.yaml
new file mode 100644
index 000000000000..7681e99c597f
--- /dev/null
+++ b/examples/continue-on-fail.yaml
@@ -0,0 +1,36 @@
+# Example of a workflow that continues past a failed step.
+#
+# Step C fails intentionally, but because it sets continueOn.failed, the
+# workflow proceeds to run step D instead of failing at C.
+
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: continue-on-fail-
+spec:
+  entrypoint: workflow-ignore
+  templates:
+  - name: workflow-ignore
+    steps:
+    - - name: A
+        template: whalesay
+    - - name: B
+        template: whalesay
+      - name: C
+        template: intentional-fail
+        continueOn:
+          failed: true
+    - - name: D
+        template: whalesay
+
+  - name: whalesay
+    container:
+      image: docker/whalesay:latest
+      command: [cowsay]
+      args: ["hello world"]
+
+  - name: intentional-fail
+    container:
+      image: alpine:latest
+      command: [sh, -c]
+      args: ["echo intentional failure; exit 1"]
diff --git a/examples/dag-continueOn-fail.yaml b/examples/dag-continue-on-fail.yaml
similarity index 93%
rename from examples/dag-continueOn-fail.yaml
rename to examples/dag-continue-on-fail.yaml
index 2bb2f78b893b..dc9600babb52 100644
--- a/examples/dag-continueOn-fail.yaml
+++ b/examples/dag-continue-on-fail.yaml
@@ -1,7 +1,7 @@
 apiVersion: argoproj.io/v1alpha1
 kind: Workflow
 metadata:
-  generateName: dag-contiueOn-fail-
+  generateName: dag-continue-on-fail-
 spec:
   entrypoint: workflow
   templates:
@@ -14,7 +14,7 @@ spec:
       dependencies: [A]
       template: intentional-fail
       continueOn:
-        failed: true
+        failed: true
     - name: C
       dependencies: [A]
       template: whalesay
diff --git a/examples/global-outputs.yaml b/examples/global-outputs.yaml
index e621b7c1fb5f..f2a270aa6141 100644
--- a/examples/global-outputs.yaml
+++ b/examples/global-outputs.yaml
@@ -19,7 +19,7 @@ spec:
     container:
       image: alpine:3.7
       command: [sh, -c]
-      args: ["echo -n hello world > /tmp/hello_world.txt"]
+      args: ["sleep 1; echo -n hello world > /tmp/hello_world.txt"]
     outputs:
       parameters:
       # export a global parameter. The parameter will be programatically available in the completed
diff --git a/examples/output-parameter.yaml b/examples/output-parameter.yaml
index c9ccf686955f..d15f30f466ce 100644
--- a/examples/output-parameter.yaml
+++ b/examples/output-parameter.yaml
@@ -32,7 +32,7 @@ spec:
     container:
       image: docker/whalesay:latest
       command: [sh, -c]
-      args: ["echo -n hello world > /tmp/hello_world.txt"]
+      args: ["sleep 1; echo -n hello world > /tmp/hello_world.txt"]
     outputs:
       parameters:
       - name: hello-param
diff --git a/examples/parameter-aggregation-dag.yaml b/examples/parameter-aggregation-dag.yaml
index 49bc3bc6a24f..3a534e153bf0 100644
--- a/examples/parameter-aggregation-dag.yaml
+++ b/examples/parameter-aggregation-dag.yaml
@@ -49,6 +49,7 @@ spec:
       command: [sh, -xc]
       args:
       - |
+        sleep 1 &&
         echo {{inputs.parameters.num}} > /tmp/num &&
         if [ $(({{inputs.parameters.num}}%2)) -eq 0 ]; then
           echo "even" > /tmp/even;
diff --git a/examples/parameter-aggregation.yaml b/examples/parameter-aggregation.yaml
index f7df1f7f053a..4baec52e8af1 100644
--- a/examples/parameter-aggregation.yaml
+++ b/examples/parameter-aggregation.yaml
@@ -46,6 +46,7 @@ spec:
       command: [sh, -xc]
       args:
       - |
+        sleep 1 &&
         echo {{inputs.parameters.num}} > /tmp/num &&
         if [ $(({{inputs.parameters.num}}%2)) -eq 0 ]; then
           echo "even" > /tmp/even;
diff --git a/examples/sidecar-dind.yaml b/examples/sidecar-dind.yaml
index 467bb101dade..7bf8b67998c6 100644
--- a/examples/sidecar-dind.yaml
+++ b/examples/sidecar-dind.yaml
@@ -19,7 +19,7 @@ spec:
         value: 127.0.0.1
     sidecars:
     - name: dind
-      image: docker:17.10-dind
+      image: docker:18.09.4-dind
       securityContext:
         privileged: true
       # mirrorVolumeMounts will mount the same volumes specified in the main container
diff --git a/examples/workflow-continueOn-fail.yaml b/examples/workflow-continueOn-fail.yaml
deleted file mode 100644
index 83a1442a617d..000000000000
--- a/examples/workflow-continueOn-fail.yaml +++ /dev/null @@ -1,67 +0,0 @@ -# Example on specifying parallelism on the outer workflow and limiting the number of its -# children workflowss to be run at the same time. -# -# As the parallelism of A is 1, the four steps of seq-step will run sequentially. - -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-continueOn-fail- -spec: - entrypoint: workflow - templates: - - name: workflow - steps: - - - name: wf-ignore - template: workflow-ignore - - name: wf-not-ignore - template: workflow-not-ignore - - - name: workflow-ignore - steps: - - - name: A - template: whalesay - - - name: B - template: whalesay - - name: C - template: intentional-fail - continueOn: - failed: true - - - name: D - template: whalesay - - - name: workflow-not-ignore - steps: - - - name: E - template: whalesay - - - name: F - template: whalesay - - name: G - template: intentional-fail - - - name: H - template: whalesay - - # - name: B - # inputs: - # parameters: - # - name: seq-id - # steps: - # - - name: jobs - # template: one-job - # arguments: - # parameters: - # - name: seq-id - # value: "{{inputs.parameters.seq-id}}" - # withParam: "[1, 2]" - - - name: whalesay - container: - image: docker/whalesay:latest - command: [cowsay] - args: ["hello world"] - - - name: intentional-fail - container: - image: alpine:latest - command: [sh, -c] - args: ["echo intentional failure; exit 1"] diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index 3bfc57f8eb80..855b3ad51473 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -665,6 +665,10 @@ func (s *S3Artifact) String() string { return fmt.Sprintf("%s://%s/%s/%s", protocol, s.Endpoint, s.Bucket, s.Key) } +func (s *S3Artifact) HasLocation() bool { + return s != nil && s.Bucket != "" +} + // GitArtifact is the location of an git artifact type GitArtifact struct { // Repo is the git repository @@ -686,6 +690,10 @@ type GitArtifact struct { InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty"` } +func (g *GitArtifact) HasLocation() bool { + return g != nil && g.Repo != "" +} + // ArtifactoryAuth describes the secret selectors required for authenticating to artifactory type ArtifactoryAuth struct { // UsernameSecret is the secret selector to the repository username @@ -706,6 +714,10 @@ func (a *ArtifactoryArtifact) String() string { return a.URL } +func (a *ArtifactoryArtifact) HasLocation() bool { + return a != nil && a.URL != "" +} + // HDFSArtifact is the location of an HDFS artifact type HDFSArtifact struct { HDFSConfig `json:",inline"` @@ -717,6 +729,10 @@ type HDFSArtifact struct { Force bool `json:"force,omitempty"` } +func (h *HDFSArtifact) HasLocation() bool { + return h != nil && len(h.Addresses) > 0 +} + // HDFSConfig is configurations for HDFS type HDFSConfig struct { HDFSKrbConfig `json:",inline"` @@ -774,12 +790,20 @@ type RawArtifact struct { Data string `json:"data"` } +func (r *RawArtifact) HasLocation() bool { + return r != nil +} + // HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container type HTTPArtifact struct { // URL of the artifact URL string `json:"url"` } +func (h *HTTPArtifact) HasLocation() bool { + return h != nil && h.URL != "" +} + // ScriptTemplate is a template subtype to enable scripting through code steps type ScriptTemplate struct { apiv1.Container `json:",inline"` @@ -963,7 +987,12 @@ func (args *Arguments) GetParameterByName(name string) 
*Parameter { // HasLocation whether or not an artifact has a location defined func (a *Artifact) HasLocation() bool { - return a.S3 != nil || a.Git != nil || a.HTTP != nil || a.Artifactory != nil || a.Raw != nil || a.HDFS != nil + return a.S3.HasLocation() || + a.Git.HasLocation() || + a.HTTP.HasLocation() || + a.Artifactory.HasLocation() || + a.Raw.HasLocation() || + a.HDFS.HasLocation() } // GetTemplate retrieves a defined template by its name diff --git a/test/e2e/expectedfailures/disallow-unknown.json b/test/e2e/expectedfailures/disallow-unknown.json deleted file mode 100644 index 659d97d24dec..000000000000 --- a/test/e2e/expectedfailures/disallow-unknown.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "apiVersion": "argoproj.io/v1alpha1", - "kind": "Workflow", - "metadata": { - "generateName": "hello-world-" - }, - "spec": { - "entrypoint": "whalesay", - "templates": [ - { - "name": "whalesay", - "container": { - "image": "docker/whalesay:latest", - "command": [ - "cowsay" - ], - "args": [ - "hello world" - ], - "someExtraField": "foo" - } - } - ] - } -} diff --git a/test/e2e/expectedfailures/pns/pns-output-artifacts.yaml b/test/e2e/expectedfailures/pns/pns-output-artifacts.yaml new file mode 100644 index 000000000000..9680ef096507 --- /dev/null +++ b/test/e2e/expectedfailures/pns/pns-output-artifacts.yaml @@ -0,0 +1,39 @@ +# Workflow specifically designed for testing process namespace sharing with output artifacts +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: pns-output-artifacts- +spec: + entrypoint: pns-output-artifacts + templates: + - name: pns-output-artifacts + archiveLocation: + archiveLogs: true + container: + image: debian:9.2 + command: [sh, -c] + args: [" + echo hello world > /mnt/workdir/foo && + echo stdout && + echo '' && + echo stderr >&2 && + sleep 1 + "] + volumeMounts: + - name: workdir + mountPath: /mnt/workdir + outputs: + artifacts: + - name: etc + path: /etc + - name: mnt + path: /mnt + - name: workdir + path: /mnt/workdir + sidecars: + - name: nginx + image: nginx:latest + + volumes: + - name: workdir + emptyDir: {} diff --git a/test/e2e/expectedfailures/pns/pns-quick-exit-output-art.yaml b/test/e2e/expectedfailures/pns/pns-quick-exit-output-art.yaml new file mode 100644 index 000000000000..286a82846e26 --- /dev/null +++ b/test/e2e/expectedfailures/pns/pns-quick-exit-output-art.yaml @@ -0,0 +1,30 @@ +# Workflow specifically designed for testing process namespace sharing with output artifacts +# This fails because the main container exits before the wait sidecar is able to establish the file +# handle of the main container's root filesystem. 
+apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: pns-quick-exit-output-art- +spec: + entrypoint: pns-quick-exit-output-art + templates: + - name: pns-quick-exit-output-art + archiveLocation: + archiveLogs: true + container: + image: debian:9.2 + command: [sh, -x, -c] + args: [" + touch /mnt/workdir/foo + "] + volumeMounts: + - name: workdir + mountPath: /mnt/workdir + outputs: + artifacts: + - name: mnt + path: /mnt + + volumes: + - name: workdir + emptyDir: {} diff --git a/test/e2e/functional/artifact-disable-archive.yaml b/test/e2e/functional/artifact-disable-archive.yaml deleted file mode 100644 index f1f41e1bad39..000000000000 --- a/test/e2e/functional/artifact-disable-archive.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# This tests the disabling of archive, and ability to recursively copy a directory -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: artifact-disable-archive- -spec: - entrypoint: artifact-example - templates: - - name: artifact-example - steps: - - - name: generate-artifact - template: whalesay - - - name: consume-artifact - template: print-message - arguments: - artifacts: - - name: etc - from: "{{steps.generate-artifact.outputs.artifacts.etc}}" - - name: hello-txt - from: "{{steps.generate-artifact.outputs.artifacts.hello-txt}}" - - - name: whalesay - container: - image: docker/whalesay:latest - command: [sh, -c] - args: ["cowsay hello world | tee /tmp/hello_world.txt"] - outputs: - artifacts: - - name: etc - path: /etc - archive: - none: {} - - name: hello-txt - path: /tmp/hello_world.txt - archive: - none: {} - - - name: print-message - inputs: - artifacts: - - name: etc - path: /tmp/etc - - name: hello-txt - path: /tmp/hello.txt - container: - image: alpine:latest - command: [sh, -c] - args: - - cat /tmp/hello.txt && cd /tmp/etc && find . 
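The PNS workflows added under test/e2e/expectedfailures above both come down to the same race: the wait sidecar must obtain a handle on the main container's root filesystem before the main process exits. Below is a minimal sketch of that idea, assuming the sidecar shares the pod's process namespace and matches the main process by executable name; the helper and its matching heuristic are hypothetical illustrations, not the actual workflow/executor/pns/pns.go implementation.

```go
package main

import (
	"fmt"
	"os"

	ps "github.com/mitchellh/go-ps" // dependency vendored by this PR (see Gopkg.lock)
)

// pinRootFS is a hypothetical helper: it scans the shared process namespace
// for a process whose executable matches the main container's command, then
// opens /proc/<pid>/root. Holding the open *os.File keeps the container's
// root filesystem reachable even after the process exits, which is what
// allows output artifacts in base image layers to be copied out afterwards.
func pinRootFS(mainExecutable string) (*os.File, error) {
	procs, err := ps.Processes()
	if err != nil {
		return nil, err
	}
	for _, p := range procs {
		if p.Executable() == mainExecutable {
			// Requires enough privilege to traverse another process's /proc entry.
			return os.Open(fmt.Sprintf("/proc/%d/root", p.Pid()))
		}
	}
	return nil, fmt.Errorf("process %q not visible yet", mainExecutable)
}

func main() {
	// "cowsay" is an assumed executable name, purely for the demo.
	rootFS, err := pinRootFS("cowsay")
	if err != nil {
		fmt.Fprintln(os.Stderr, err) // a fast-exiting main container loses this race
		os.Exit(1)
	}
	defer rootFS.Close()
	fmt.Println("pinned main container root at", rootFS.Name())
}
```

If the main container finishes before the scan succeeds, the handle is never established and artifacts that live only in base image layers cannot be recovered; that is exactly the failure pns-quick-exit-output-art.yaml exercises, and also why so many examples in this PR gained a `sleep 1` before writing their outputs.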
diff --git a/test/e2e/functional/artifact-disable-archive.yaml b/test/e2e/functional/artifact-disable-archive.yaml new file mode 120000 index 000000000000..109a8c619867 --- /dev/null +++ b/test/e2e/functional/artifact-disable-archive.yaml @@ -0,0 +1 @@ +../../../examples/artifact-disable-archive.yaml \ No newline at end of file diff --git a/test/e2e/functional/continue-on-fail.yaml b/test/e2e/functional/continue-on-fail.yaml new file mode 120000 index 000000000000..3bb5bfc75322 --- /dev/null +++ b/test/e2e/functional/continue-on-fail.yaml @@ -0,0 +1 @@ +../../../examples/continue-on-fail.yaml \ No newline at end of file diff --git a/test/e2e/functional/dag-argument-passing.yaml b/test/e2e/functional/dag-argument-passing.yaml index c1e51a6bb61c..24f5c7aa8c8b 100644 --- a/test/e2e/functional/dag-argument-passing.yaml +++ b/test/e2e/functional/dag-argument-passing.yaml @@ -2,7 +2,7 @@ apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: dag-arg-passing- + generateName: dag-argument-passing- spec: entrypoint: dag-arg-passing templates: @@ -16,7 +16,7 @@ spec: container: image: alpine:3.7 command: [sh, -c, -x] - args: ['echo "{{inputs.parameters.message}}"; cat /tmp/passthrough'] + args: ['sleep 1; echo "{{inputs.parameters.message}}"; cat /tmp/passthrough'] outputs: parameters: - name: hosts diff --git a/test/e2e/functional/global-outputs-dag.yaml b/test/e2e/functional/global-outputs-dag.yaml index fa7eeb449847..cea147513f7b 100644 --- a/test/e2e/functional/global-outputs-dag.yaml +++ b/test/e2e/functional/global-outputs-dag.yaml @@ -21,7 +21,7 @@ spec: container: image: alpine:3.7 command: [sh, -c] - args: ["echo -n hello world > /tmp/hello_world.txt"] + args: ["sleep 1; echo -n hello world > /tmp/hello_world.txt"] outputs: parameters: # export a global parameter. 
The parameter will be programatically available in the completed
diff --git a/test/e2e/functional/global-outputs-variable.yaml b/test/e2e/functional/global-outputs-variable.yaml
index eed27afd1cc0..ca2222e6f61a 100644
--- a/test/e2e/functional/global-outputs-variable.yaml
+++ b/test/e2e/functional/global-outputs-variable.yaml
@@ -23,7 +23,7 @@ spec:
     container:
       image: alpine:3.7
       command: [sh, -c]
-      args: ["echo -n hello world > /tmp/hello_world.txt"]
+      args: ["sleep 1; echo -n hello world > /tmp/hello_world.txt"]
     outputs:
       parameters:
       - name: hello-param
diff --git a/test/e2e/functional/dag-outputs.yaml b/test/e2e/functional/nested-dag-outputs.yaml
similarity index 99%
rename from test/e2e/functional/dag-outputs.yaml
rename to test/e2e/functional/nested-dag-outputs.yaml
index 89ecc41130cc..8cc92c5003da 100644
--- a/test/e2e/functional/dag-outputs.yaml
+++ b/test/e2e/functional/nested-dag-outputs.yaml
@@ -38,6 +38,7 @@ spec:
       image: docker/whalesay:latest
       command: [sh, -c]
       args: ["
+        sleep 1;
         cowsay hello world | tee /tmp/my-output-artifact.txt &&
         echo 'my-output-parameter' > /tmp/my-output-parameter.txt
       "]
diff --git a/test/e2e/functional/output-artifact-optional.yaml b/test/e2e/functional/output-artifact-optional.yaml
index 3713b45450de..803289d6ca85 100644
--- a/test/e2e/functional/output-artifact-optional.yaml
+++ b/test/e2e/functional/output-artifact-optional.yaml
@@ -16,7 +16,7 @@ spec:
     container:
       image: docker/whalesay:latest
       command: [sh, -c]
-      args: ["cowsay hello world | tee /tmp/hello_world12.txt"]
+      args: ["sleep 1; cowsay hello world | tee /tmp/hello_world12.txt"]
     outputs:
       artifacts:
       - name: hello-art
diff --git a/test/e2e/functional/output-input-artifact-optional.yaml b/test/e2e/functional/output-input-artifact-optional.yaml
index a29959fed04d..f1519df74d4e 100644
--- a/test/e2e/functional/output-input-artifact-optional.yaml
+++ b/test/e2e/functional/output-input-artifact-optional.yaml
@@ -21,7 +21,7 @@ spec:
     container:
       image: docker/whalesay:latest
       command: [sh, -c]
-      args: ["cowsay hello world | tee /tmp/hello_world123.txt"]
+      args: ["sleep 1; cowsay hello world | tee /tmp/hello_world123.txt"]
     outputs:
       artifacts:
       - name: hello-art
diff --git a/test/e2e/functional/output-param-different-uid.yaml b/test/e2e/functional/output-param-different-uid.yaml
new file mode 100644
index 000000000000..dbb7942fc945
--- /dev/null
+++ b/test/e2e/functional/output-param-different-uid.yaml
@@ -0,0 +1,27 @@
+# Tests PNS ability to capture output parameters when the user id is different
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: pns-output-parameter-different-user-
+spec:
+  entrypoint: multi-whalesay
+  templates:
+  - name: multi-whalesay
+    steps:
+    - - name: whalesay
+        template: whalesay
+        withSequence:
+          count: "10"
+
+  - name: whalesay
+    container:
+      image: docker/whalesay:latest
+      command: [sh, -c]
+      args: ["sleep 1; cowsay hello world | tee /tmp/hello_world.txt"]
+      securityContext:
+        runAsUser: 1234
+    outputs:
+      parameters:
+      - name: hello-art
+        valueFrom:
+          path: /tmp/hello_world.txt
\ No newline at end of file
diff --git a/test/e2e/functional/pns-output-params.yaml b/test/e2e/functional/pns-output-params.yaml
new file mode 100644
index 000000000000..fe0001d38322
--- /dev/null
+++ b/test/e2e/functional/pns-output-params.yaml
@@ -0,0 +1,71 @@
+# Workflow specifically designed for testing process namespace sharing with output parameters
+# This exercises copying out regular files from volume mounted paths, or base image layer paths,
+# including overlaps between the
two. +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: pns-outputs-params- +spec: + entrypoint: output-parameter + templates: + - name: output-parameter + steps: + - - name: generate-parameter + template: whalesay + - - name: consume-parameter + template: print-message + arguments: + parameters: + - { name: A, value: "{{steps.generate-parameter.outputs.parameters.A}}" } + - { name: B, value: "{{steps.generate-parameter.outputs.parameters.B}}" } + - { name: C, value: "{{steps.generate-parameter.outputs.parameters.C}}" } + - { name: D, value: "{{steps.generate-parameter.outputs.parameters.D}}" } + + - name: whalesay + container: + image: docker/whalesay:latest + command: [sh, -x, -c] + args: [" + sleep 1; + echo -n A > /tmp/A && + echo -n B > /mnt/outer/inner/B && + echo -n C > /tmp/C && + echo -n D > /mnt/outer/D + "] + volumeMounts: + - name: outer + mountPath: /mnt/outer + - name: inner + mountPath: /mnt/outer/inner + outputs: + parameters: + - name: A + valueFrom: + path: /tmp/A + - name: B + valueFrom: + path: /mnt/outer/inner/B + - name: C + valueFrom: + path: /tmp/C + - name: D + valueFrom: + path: /mnt/outer/D + + - name: print-message + inputs: + parameters: + - name: A + - name: B + - name: C + - name: D + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.A}} {{inputs.parameters.B}} {{inputs.parameters.C}} {{inputs.parameters.D}}"] + + volumes: + - name: outer + emptyDir: {} + - name: inner + emptyDir: {} diff --git a/test/e2e/functional/retry-with-artifacts.yaml b/test/e2e/functional/retry-with-artifacts.yaml index 7aa5dcd37421..4a509d568504 100644 --- a/test/e2e/functional/retry-with-artifacts.yaml +++ b/test/e2e/functional/retry-with-artifacts.yaml @@ -23,7 +23,7 @@ spec: container: image: docker/whalesay:latest command: [sh, -c] - args: ["cowsay hello world | tee /tmp/hello_world.txt"] + args: ["sleep 1; cowsay hello world | tee /tmp/hello_world.txt"] outputs: artifacts: - name: hello-art diff --git a/test/e2e/lintfail/disallow-unknown.yaml b/test/e2e/lintfail/disallow-unknown.yaml new file mode 100644 index 000000000000..4d7c349cbf7c --- /dev/null +++ b/test/e2e/lintfail/disallow-unknown.yaml @@ -0,0 +1,15 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: disallow-unknown- +spec: + entrypoint: whalesay + templates: + - name: whalesay + container: + image: docker/whalesay:latest + command: + - cowsay + args: + - hello world + someExtraField: foo diff --git a/test/e2e/expectedfailures/invalid-spec.yaml b/test/e2e/lintfail/invalid-spec.yaml similarity index 100% rename from test/e2e/expectedfailures/invalid-spec.yaml rename to test/e2e/lintfail/invalid-spec.yaml diff --git a/test/e2e/expectedfailures/maformed-spec.yaml b/test/e2e/lintfail/malformed-spec.yaml similarity index 100% rename from test/e2e/expectedfailures/maformed-spec.yaml rename to test/e2e/lintfail/malformed-spec.yaml diff --git a/test/e2e/ui/ui-nested-steps.yaml b/test/e2e/ui/ui-nested-steps.yaml index aeb03da41e1f..c091c6827a24 100644 --- a/test/e2e/ui/ui-nested-steps.yaml +++ b/test/e2e/ui/ui-nested-steps.yaml @@ -5,6 +5,9 @@ metadata: generateName: ui-nested-steps- spec: entrypoint: ui-nested-steps + volumes: + - name: workdir + emptyDir: {} templates: - name: ui-nested-steps steps: @@ -24,14 +27,17 @@ spec: - name: locate-faces container: image: alpine:latest - command: ["sh", "-c"] + command: [sh, -c] args: - - echo '[1, 2, 3]' > /result.json + - echo '[1, 2, 3]' > /workdir/result.json + volumeMounts: + - name: 
workdir + mountPath: /workdir outputs: parameters: - name: imagemagick-commands valueFrom: - path: /result.json + path: /workdir/result.json - name: handle-individual-faces steps: diff --git a/util/archive/archive.go b/util/archive/archive.go new file mode 100644 index 000000000000..400b12667fb8 --- /dev/null +++ b/util/archive/archive.go @@ -0,0 +1,131 @@ +package archive + +import ( + "archive/tar" + "compress/gzip" + "io" + "os" + "path/filepath" + + "github.com/argoproj/argo/errors" + "github.com/argoproj/argo/util" + log "github.com/sirupsen/logrus" +) + +type flusher interface { + Flush() error +} + +// TarGzToWriter tar.gz's the source path to the supplied writer +func TarGzToWriter(sourcePath string, w io.Writer) error { + sourcePath, err := filepath.Abs(sourcePath) + if err != nil { + return errors.InternalErrorf("getting absolute path: %v", err) + } + log.Infof("Taring %s", sourcePath) + sourceFi, err := os.Stat(sourcePath) + if err != nil { + if os.IsNotExist(err) { + return errors.New(errors.CodeNotFound, err.Error()) + } + return errors.InternalWrapError(err) + } + if !sourceFi.Mode().IsRegular() && !sourceFi.IsDir() { + return errors.InternalErrorf("%s is not a regular file or directory", sourcePath) + } + if flush, ok := w.(flusher); ok { + defer func() { _ = flush.Flush() }() + } + gzw := gzip.NewWriter(w) + defer util.Close(gzw) + tw := tar.NewWriter(gzw) + defer util.Close(tw) + + if sourceFi.IsDir() { + return tarDir(sourcePath, tw) + } + return tarFile(sourcePath, tw) +} + +func tarDir(sourcePath string, tw *tar.Writer) error { + baseName := filepath.Base(sourcePath) + return filepath.Walk(sourcePath, func(fpath string, info os.FileInfo, err error) error { + if err != nil { + return errors.InternalWrapError(err) + } + // build the name to be used in the archive + nameInArchive, err := filepath.Rel(sourcePath, fpath) + if err != nil { + return errors.InternalWrapError(err) + } + nameInArchive = filepath.Join(baseName, nameInArchive) + log.Infof("writing %s", nameInArchive) + + var header *tar.Header + if (info.Mode() & os.ModeSymlink) != 0 { + linkTarget, err := os.Readlink(fpath) + if err != nil { + return errors.InternalWrapError(err) + } + header, err = tar.FileInfoHeader(info, filepath.ToSlash(linkTarget)) + if err != nil { + return errors.InternalWrapError(err) + } + } else { + header, err = tar.FileInfoHeader(info, info.Name()) + if err != nil { + return errors.InternalWrapError(err) + } + } + header.Name = nameInArchive + + err = tw.WriteHeader(header) + if err != nil { + return errors.InternalWrapError(err) + } + if !info.Mode().IsRegular() { + return nil + } + f, err := os.Open(fpath) + if err != nil { + return errors.InternalWrapError(err) + } + + // copy file data into tar writer + _, err = io.Copy(tw, f) + closeErr := f.Close() + if err != nil { + return err + } + if closeErr != nil { + return closeErr + } + return nil + }) +} + +func tarFile(sourcePath string, tw *tar.Writer) error { + f, err := os.Open(sourcePath) + if err != nil { + return errors.InternalWrapError(err) + } + defer util.Close(f) + info, err := f.Stat() + if err != nil { + return errors.InternalWrapError(err) + } + header, err := tar.FileInfoHeader(info, f.Name()) + if err != nil { + return errors.InternalWrapError(err) + } + header.Name = filepath.Base(sourcePath) + err = tw.WriteHeader(header) + if err != nil { + return errors.InternalWrapError(err) + } + _, err = io.Copy(tw, f) + if err != nil { + return err + } + return nil +} diff --git a/util/archive/archive_test.go 
b/util/archive/archive_test.go new file mode 100644 index 000000000000..2b4766b01fe0 --- /dev/null +++ b/util/archive/archive_test.go @@ -0,0 +1,60 @@ +package archive + +import ( + "bufio" + "crypto/rand" + "encoding/hex" + "os" + "path/filepath" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func tempFile(dir, prefix, suffix string) (*os.File, error) { + if dir == "" { + dir = os.TempDir() + } else { + os.MkdirAll(dir, 0700) + } + randBytes := make([]byte, 16) + rand.Read(randBytes) + filePath := filepath.Join(dir, prefix+hex.EncodeToString(randBytes)+suffix) + return os.Create(filePath) +} + +func TestTarDirectory(t *testing.T) { + f, err := tempFile(os.TempDir()+"/argo-test", "dir-", ".tgz") + assert.Nil(t, err) + log.Infof("Taring to %s", f.Name()) + w := bufio.NewWriter(f) + + err = TarGzToWriter("../../test/e2e", w) + assert.Nil(t, err) + + err = f.Close() + assert.Nil(t, err) +} + +func TestTarFile(t *testing.T) { + data, err := tempFile(os.TempDir()+"/argo-test", "file-", "") + assert.Nil(t, err) + _, err = data.WriteString("hello world") + assert.Nil(t, err) + data.Close() + + dataTarPath := data.Name() + ".tgz" + f, err := os.Create(dataTarPath) + assert.Nil(t, err) + log.Infof("Taring to %s", f.Name()) + w := bufio.NewWriter(f) + + err = TarGzToWriter(data.Name(), w) + assert.Nil(t, err) + err = os.Remove(data.Name()) + assert.Nil(t, err) + + err = f.Close() + assert.Nil(t, err) +} diff --git a/workflow/artifacts/s3/s3.go b/workflow/artifacts/s3/s3.go index e58f2f404e16..38f22d5a9d14 100644 --- a/workflow/artifacts/s3/s3.go +++ b/workflow/artifacts/s3/s3.go @@ -1,13 +1,14 @@ package s3 import ( - "github.com/argoproj/pkg/file" - argos3 "github.com/argoproj/pkg/s3" + "time" + log "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/util/wait" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "k8s.io/apimachinery/pkg/util/wait" - "time" + "github.com/argoproj/pkg/file" + argos3 "github.com/argoproj/pkg/s3" ) // S3ArtifactDriver is a driver for AWS S3 @@ -35,7 +36,7 @@ func (s3Driver *S3ArtifactDriver) newS3Client() (argos3.S3Client, error) { func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string) error { err := wait.ExponentialBackoff(wait.Backoff{Duration: time.Second * 2, Factor: 2.0, Steps: 5, Jitter: 0.1}, func() (bool, error) { - log.Infof("ExponentialBackoff in S3 Load for path: %s", path) + log.Infof("S3 Load path: %s, key: %s", path, inputArtifact.S3.Key) s3cli, err := s3Driver.newS3Client() if err != nil { log.Warnf("Failed to create new S3 client: %v", err) @@ -46,6 +47,7 @@ func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string return true, nil } if !argos3.IsS3ErrCode(origErr, "NoSuchKey") { + log.Warnf("Failed get file: %v", origErr) return false, nil } // If we get here, the error was a NoSuchKey. 
The key might be an s3 "directory" @@ -60,6 +62,7 @@ func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string } if err = s3cli.GetDirectory(inputArtifact.S3.Bucket, inputArtifact.S3.Key, path); err != nil { + log.Warnf("Failed to get directory: %v", err) return false, nil } return true, nil @@ -72,7 +75,7 @@ func (s3Driver *S3ArtifactDriver) Load(inputArtifact *wfv1.Artifact, path string func (s3Driver *S3ArtifactDriver) Save(path string, outputArtifact *wfv1.Artifact) error { err := wait.ExponentialBackoff(wait.Backoff{Duration: time.Second * 2, Factor: 2.0, Steps: 5, Jitter: 0.1}, func() (bool, error) { - log.Infof("ExponentialBackoff in S3 Save for path: %s", path) + log.Infof("S3 Save path: %s, key: %s", path, outputArtifact.S3.Key) s3cli, err := s3Driver.newS3Client() if err != nil { log.Warnf("Failed to create new S3 client: %v", err) @@ -85,11 +88,14 @@ } if isDir { if err = s3cli.PutDirectory(outputArtifact.S3.Bucket, outputArtifact.S3.Key, path); err != nil { + log.Warnf("Failed to put directory: %v", err) + return false, nil + } + } else { + if err = s3cli.PutFile(outputArtifact.S3.Bucket, outputArtifact.S3.Key, path); err != nil { + log.Warnf("Failed to put file: %v", err) return false, nil } - } - if err = s3cli.PutFile(outputArtifact.S3.Bucket, outputArtifact.S3.Key, path); err != nil { - return false, nil } return true, nil }) diff --git a/workflow/common/common.go b/workflow/common/common.go index 96b25bd5a7c5..5b68158d5d4b 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -61,10 +61,11 @@ const ( // Each artifact will be named according to its input name (e.g: /argo/inputs/artifacts/CODE) ExecutorArtifactBaseDir = "/argo/inputs/artifacts" - // InitContainerMainFilesystemDir is a path made available to the init container such that the init container - // can access the same volume mounts used in the main container. This is used for the purposes of artifact loading - // (when there is overlapping paths between artifacts and volume mounts) - InitContainerMainFilesystemDir = "/mainctrfs" + // ExecutorMainFilesystemDir is a path made available to the init/wait containers such that they + // can access the same volume mounts used in the main container. This is used for the purposes + // of artifact loading (when there are overlapping paths between artifacts and volume mounts), + // as well as artifact collection by the wait container.
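As an aside on the retry changes to the S3 driver above: wait.ExponentialBackoff retries a condition function until it reports success, returns an error, or exhausts its steps. A minimal, self-contained sketch of the same wrapper, using the driver's backoff parameters; the condition body here is a stand-in, not the driver's real logic:

```
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Same parameters as the S3 driver above: 2s, then 4s, 8s, ... up to 5 attempts.
	backoff := wait.Backoff{Duration: time.Second * 2, Factor: 2.0, Steps: 5, Jitter: 0.1}
	attempts := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// (false, nil) means "retry after the next interval"; (true, nil) means done;
		// a non-nil error aborts the retry loop immediately.
		return attempts >= 3, nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```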
+ ExecutorMainFilesystemDir = "/mainctrfs" // ExecutorStagingEmptyDir is the path of the emptydir which is used as a staging area to transfer a file between init/main container for script/resource templates ExecutorStagingEmptyDir = "/argo/staging" @@ -75,9 +76,6 @@ const ( // Various environment variables containing pod information exposed to the executor container(s) - // EnvVarPodName contains the name of the pod (currently unused) - EnvVarPodName = "ARGO_POD_NAME" - // EnvVarContainerRuntimeExecutor contains the name of the container runtime executor to use, empty is equal to "docker" EnvVarContainerRuntimeExecutor = "ARGO_CONTAINER_RUNTIME_EXECUTOR" // EnvVarDownwardAPINodeIP is the envvar used to get the `status.hostIP` @@ -96,6 +94,9 @@ const ( // ContainerRuntimeExecutorK8sAPI to use the Kubernetes API server as container runtime executor ContainerRuntimeExecutorK8sAPI = "k8sapi" + // ContainerRuntimeExecutorPNS indicates to use process namespace sharing as the container runtime executor + ContainerRuntimeExecutorPNS = "pns" + // Variables that are added to the scope during template execution and can be referenced using {{}} syntax // GlobalVarWorkflowName is a global workflow variable referencing the workflow's metadata.name field diff --git a/workflow/common/util.go b/workflow/common/util.go index 72432016c08f..4c1dd2514a50 100644 --- a/workflow/common/util.go +++ b/workflow/common/util.go @@ -5,16 +5,16 @@ import ( "encoding/json" "fmt" "io" + "net/http" "os/exec" "regexp" "strconv" "strings" "time" - "github.com/argoproj/argo/errors" "github.com/argoproj/argo/pkg/apis/workflow" - wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/ghodss/yaml" + "github.com/gorilla/websocket" log "github.com/sirupsen/logrus" "github.com/valyala/fasttemplate" apiv1 "k8s.io/api/core/v1" @@ -23,11 +23,15 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" + + "github.com/argoproj/argo/errors" + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/util" ) // FindOverlappingVolume looks at an artifact path, checks if it overlaps with any // user specified volumeMounts in the template, and returns the deepest volumeMount -// (if any). +// (if any). A return value of nil indicates the path is not under any volumeMount. func FindOverlappingVolume(tmpl *wfv1.Template, path string) *apiv1.VolumeMount { if tmpl.Container == nil { return nil @@ -66,6 +70,126 @@ func KillPodContainer(restConfig *rest.Config, namespace string, pod string, con return nil } +// ContainerLogStream returns an io.ReadCloser for a container's log stream using the websocket +// interface. This was implemented in the hopes that we could selectively choose stdout from stderr, +// but due to https://github.com/kubernetes/kubernetes/issues/28167, it is not possible to discern +// stdout from stderr using the K8s API server, so this function is unused, instead preferring the +// pod logs interface from client-go. It's left as a reference for when issue #28167 is eventually +// resolved. +func ContainerLogStream(config *rest.Config, namespace string, pod string, container string) (io.ReadCloser, error) { + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, errors.InternalWrapError(err) + } + logRequest := clientset.CoreV1().RESTClient().Get(). + Resource("pods"). + Name(pod). + Namespace(namespace). + SubResource("log").
+ Param("container", container) + u := logRequest.URL() + switch u.Scheme { + case "https": + u.Scheme = "wss" + case "http": + u.Scheme = "ws" + default: + return nil, errors.Errorf("Malformed URL %s", u.String()) + } + + log.Info(u.String()) + wsrc := websocketReadCloser{ + &bytes.Buffer{}, + } + + wrappedRoundTripper, err := roundTripperFromConfig(config, wsrc.WebsocketCallback) + if err != nil { + return nil, errors.InternalWrapError(err) + } + + // Send the request and let the callback do its work + req := &http.Request{ + Method: http.MethodGet, + URL: u, + } + _, err = wrappedRoundTripper.RoundTrip(req) + if err != nil && !websocket.IsCloseError(err, websocket.CloseNormalClosure) { + return nil, errors.InternalWrapError(err) + } + return &wsrc, nil +} + +type RoundTripCallback func(conn *websocket.Conn, resp *http.Response, err error) error + +type WebsocketRoundTripper struct { + Dialer *websocket.Dialer + Do RoundTripCallback +} + +func (d *WebsocketRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + conn, resp, err := d.Dialer.Dial(r.URL.String(), r.Header) + if err == nil { + defer util.Close(conn) + } + return resp, d.Do(conn, resp, err) +} + +func (w *websocketReadCloser) WebsocketCallback(ws *websocket.Conn, resp *http.Response, err error) error { + if err != nil { + if resp != nil && resp.StatusCode != http.StatusOK { + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(resp.Body) + return errors.InternalErrorf("Can't connect to log endpoint (%d): %s", resp.StatusCode, buf.String()) + } + return errors.InternalErrorf("Can't connect to log endpoint: %s", err.Error()) + } + + for { + _, body, err := ws.ReadMessage() + if len(body) > 0 { + //log.Debugf("%d: %s", msgType, string(body)) + _, writeErr := w.Write(body) + if writeErr != nil { + return writeErr + } + } + if err != nil { + if err == io.EOF { + log.Infof("websocket closed: %v", err) + return nil + } + log.Warnf("websocket error: %v", err) + return err + } + } +} + +func roundTripperFromConfig(config *rest.Config, callback RoundTripCallback) (http.RoundTripper, error) { + tlsConfig, err := rest.TLSConfigFor(config) + if err != nil { + return nil, err + } + // Create a roundtripper which will pass in the final underlying websocket connection to a callback + wsrt := &WebsocketRoundTripper{ + Do: callback, + Dialer: &websocket.Dialer{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsConfig, + }, + } + // Make sure we inherit all relevant security headers + return rest.HTTPWrappersForConfig(config, wsrt) +} + +type websocketReadCloser struct { + *bytes.Buffer +} + +func (w *websocketReadCloser) Close() error { + //return w.conn.Close() + return nil +} + // ExecPodContainer runs a command in a container in a pod and returns the remotecommand.Executor func ExecPodContainer(restConfig *rest.Config, namespace string, pod string, container string, stdout bool, stderr bool, command ...string) (remotecommand.Executor, error) { clientset, err := kubernetes.NewForConfig(restConfig) @@ -146,17 +270,23 @@ func ProcessArgs(tmpl *wfv1.Template, args wfv1.Arguments, globalParams, localPa newInputArtifacts[i] = inArt continue } - // artifact must be supplied argArt := args.GetArtifactByName(inArt.Name) - if !inArt.Optional && argArt == nil { - return nil, errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s was not supplied", inArt.Name) + if !inArt.Optional { + // artifact must be supplied + if argArt == nil { + return nil, errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s was not supplied", inArt.Name) 
+ } + if !argArt.HasLocation() && !validateOnly { + return nil, errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s missing location information", inArt.Name) + } } - if !inArt.Optional && !argArt.HasLocation() && !validateOnly { - return nil, errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s missing location information", inArt.Name) + if argArt != nil { + argArt.Path = inArt.Path + argArt.Mode = inArt.Mode + newInputArtifacts[i] = *argArt + } else { + newInputArtifacts[i] = inArt } - argArt.Path = inArt.Path - argArt.Mode = inArt.Mode - newInputArtifacts[i] = *argArt } tmpl.Inputs.Artifacts = newInputArtifacts @@ -259,10 +389,12 @@ func RunCommand(name string, arg ...string) error { log.Info(cmdStr) _, err := cmd.Output() if err != nil { - exErr := err.(*exec.ExitError) - errOutput := string(exErr.Stderr) - log.Errorf("`%s` failed: %s", cmdStr, errOutput) - return errors.InternalError(strings.TrimSpace(errOutput)) + if exErr, ok := err.(*exec.ExitError); ok { + errOutput := string(exErr.Stderr) + log.Errorf("`%s` failed: %s", cmdStr, errOutput) + return errors.InternalError(strings.TrimSpace(errOutput)) + } + return errors.InternalWrapError(err) } return nil } diff --git a/workflow/controller/config.go b/workflow/controller/config.go index 7da737d145bf..80077087034d 100644 --- a/workflow/controller/config.go +++ b/workflow/controller/config.go @@ -22,12 +22,18 @@ import ( // WorkflowControllerConfig contain the configuration settings for the workflow controller type WorkflowControllerConfig struct { // ExecutorImage is the image name of the executor to use when running pods + // DEPRECATED: use --executor-image flag to workflow-controller instead ExecutorImage string `json:"executorImage,omitempty"` // ExecutorImagePullPolicy is the imagePullPolicy of the executor to use when running pods + // DEPRECATED: use `executor.imagePullPolicy` in configmap instead ExecutorImagePullPolicy string `json:"executorImagePullPolicy,omitempty"` + // Executor holds container customizations for the executor to use when running pods + Executor *apiv1.Container `json:"executor,omitempty"` + // ExecutorResources specifies the resource requirements that will be used for the executor sidecar + // DEPRECATED: use `executor.resources` in configmap instead ExecutorResources *apiv1.ResourceRequirements `json:"executorResources,omitempty"` // KubeConfig specifies a kube config file for the wait & init containers @@ -162,13 +168,13 @@ func (wfc *WorkflowController) executorImage() string { // executorImagePullPolicy returns the imagePullPolicy to use for the workflow executor func (wfc *WorkflowController) executorImagePullPolicy() apiv1.PullPolicy { - var policy string if wfc.cliExecutorImagePullPolicy != "" { - policy = wfc.cliExecutorImagePullPolicy + return apiv1.PullPolicy(wfc.cliExecutorImagePullPolicy) + } else if wfc.Config.Executor != nil && wfc.Config.Executor.ImagePullPolicy != "" { + return wfc.Config.Executor.ImagePullPolicy } else { - policy = wfc.Config.ExecutorImagePullPolicy + return apiv1.PullPolicy(wfc.Config.ExecutorImagePullPolicy) } - return apiv1.PullPolicy(policy) } func (wfc *WorkflowController) watchControllerConfigMap(ctx context.Context) (cache.Controller, error) { diff --git a/workflow/controller/exec_control.go b/workflow/controller/exec_control.go index b1ad7f12ddf0..b9ab95e4603f 100644 --- a/workflow/controller/exec_control.go +++ b/workflow/controller/exec_control.go @@ -66,6 +66,12 @@ func (woc *wfOperationCtx) applyExecutionControl(pod *apiv1.Pod, wfNodesLock *sy 
return nil } } + if podExecCtl.Deadline != nil && podExecCtl.Deadline.IsZero() { + // If the pod has already been explicitly signaled to terminate, then do nothing. + // This can happen when daemon steps are terminated. + woc.log.Infof("Skipping sync of execution control of pod %s. pod has been signaled to terminate", pod.Name) + return nil + } woc.log.Infof("Execution control for pod %s out-of-sync desired: %v, actual: %v", pod.Name, desiredExecCtl.Deadline, podExecCtl.Deadline) return woc.updateExecutionControl(pod.Name, desiredExecCtl) } @@ -122,7 +128,7 @@ func (woc *wfOperationCtx) updateExecutionControl(podName string, execCtl common woc.log.Infof("Signalling %s of updates", podName) exec, err := common.ExecPodContainer( woc.controller.restConfig, woc.wf.ObjectMeta.Namespace, podName, - common.WaitContainerName, true, true, "sh", "-c", "kill -s USR2 1", + common.WaitContainerName, true, true, "sh", "-c", "kill -s USR2 $(pidof argoexec)", ) if err != nil { return err diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 8cc5429fb069..60ba3c4379c4 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -21,6 +21,7 @@ import ( apierr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" + "k8s.io/utils/pointer" "github.com/argoproj/argo/errors" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" @@ -130,7 +131,8 @@ func (woc *wfOperationCtx) operate() { // Perform one-time workflow validation if woc.wf.Status.Phase == "" { woc.markWorkflowRunning() - err := validate.ValidateWorkflow(woc.wf) + validateOpts := validate.ValidateOpts{ContainerRuntimeExecutor: woc.controller.Config.ContainerRuntimeExecutor} + err := validate.ValidateWorkflow(woc.wf, validateOpts) if err != nil { woc.markWorkflowFailed(fmt.Sprintf("invalid spec: %s", err.Error())) return @@ -596,15 +598,14 @@ func assessNodeStatus(pod *apiv1.Pod, node *wfv1.NodeStatus) *wfv1.NodeStatus { var newDaemonStatus *bool var message string updated := false - f := false switch pod.Status.Phase { case apiv1.PodPending: newPhase = wfv1.NodePending - newDaemonStatus = &f + newDaemonStatus = pointer.BoolPtr(false) message = getPendingReason(pod) case apiv1.PodSucceeded: newPhase = wfv1.NodeSucceeded - newDaemonStatus = &f + newDaemonStatus = pointer.BoolPtr(false) case apiv1.PodFailed: // ignore pod failure for daemoned steps if node.IsDaemoned() { @@ -612,7 +613,7 @@ func assessNodeStatus(pod *apiv1.Pod, node *wfv1.NodeStatus) *wfv1.NodeStatus { } else { newPhase, message = inferFailedReason(pod) } - newDaemonStatus = &f + newDaemonStatus = pointer.BoolPtr(false) case apiv1.PodRunning: newPhase = wfv1.NodeRunning tmplStr, ok := pod.Annotations[common.AnnotationKeyTemplate] @@ -635,8 +636,7 @@ func assessNodeStatus(pod *apiv1.Pod, node *wfv1.NodeStatus) *wfv1.NodeStatus { } // proceed to mark node status as running (and daemoned) newPhase = wfv1.NodeRunning - t := true - newDaemonStatus = &t + newDaemonStatus = pointer.BoolPtr(true) log.Infof("Processing ready daemon pod: %v", pod.ObjectMeta.SelfLink) } default: @@ -1534,17 +1534,12 @@ func (woc *wfOperationCtx) executeResource(nodeName string, tmpl *wfv1.Template, if node != nil { return node } - mainCtr := apiv1.Container{ - Image: woc.controller.executorImage(), - ImagePullPolicy: woc.controller.executorImagePullPolicy(), - Command: []string{"argoexec"}, - Args: []string{"resource", tmpl.Resource.Action}, - VolumeMounts: []apiv1.VolumeMount{ - 
volumeMountPodMetadata, - }, - Env: execEnvVars, + mainCtr := woc.newExecContainer(common.MainContainerName) + mainCtr.Command = []string{"argoexec", "resource", tmpl.Resource.Action} + mainCtr.VolumeMounts = []apiv1.VolumeMount{ + volumeMountPodMetadata, } - _, err := woc.createWorkflowPod(nodeName, mainCtr, tmpl) + _, err := woc.createWorkflowPod(nodeName, *mainCtr, tmpl) if err != nil { return woc.initializeNode(nodeName, wfv1.NodeTypePod, tmpl.Name, boundaryID, wfv1.NodeError, err.Error()) } diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index cf5d95b348b0..b30b88fd2d9c 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -435,14 +435,16 @@ func TestNestedTemplateParallelismLimit(t *testing.T) { // TestSidecarResourceLimits verifies resource limits on the sidecar can be set in the controller config func TestSidecarResourceLimits(t *testing.T) { controller := newController() - controller.Config.ExecutorResources = &apiv1.ResourceRequirements{ - Limits: apiv1.ResourceList{ - apiv1.ResourceCPU: resource.MustParse("0.5"), - apiv1.ResourceMemory: resource.MustParse("512Mi"), - }, - Requests: apiv1.ResourceList{ - apiv1.ResourceCPU: resource.MustParse("0.1"), - apiv1.ResourceMemory: resource.MustParse("64Mi"), + controller.Config.Executor = &apiv1.Container{ + Resources: apiv1.ResourceRequirements{ + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("0.5"), + apiv1.ResourceMemory: resource.MustParse("512Mi"), + }, + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("0.1"), + apiv1.ResourceMemory: resource.MustParse("64Mi"), + }, }, } wf := unmarshalWF(helloWorldWf) @@ -929,7 +931,7 @@ func TestMetadataPassing(t *testing.T) { var ( pod = pods.Items[0] - container = pod.Spec.Containers[0] + container = pod.Spec.Containers[1] foundRepo = false foundRev = false ) @@ -999,5 +1001,5 @@ func TestResolveIOPathPlaceholders(t *testing.T) { assert.Nil(t, err) assert.True(t, len(pods.Items) > 0, "pod was not created successfully") - assert.Equal(t, []string{"sh", "-c", "head -n 3 <\"/inputs/text/data\" | tee \"/outputs/text/data\" | wc -l > \"/outputs/actual-lines-count/data\""}, pods.Items[0].Spec.Containers[0].Command) + assert.Equal(t, []string{"sh", "-c", "head -n 3 <\"/inputs/text/data\" | tee \"/outputs/text/data\" | wc -l > \"/outputs/actual-lines-count/data\""}, pods.Items[0].Spec.Containers[1].Command) } diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 46f6f13543e0..e582a281d2c5 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -5,16 +5,19 @@ import ( "fmt" "io" "path" + "path/filepath" "strconv" - "github.com/argoproj/argo/errors" - wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo/workflow/common" log "github.com/sirupsen/logrus" "github.com/valyala/fasttemplate" apiv1 "k8s.io/api/core/v1" apierr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + + "github.com/argoproj/argo/errors" + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/workflow/common" ) // Reusable k8s pod spec portions used in workflow pods @@ -63,26 +66,8 @@ var ( MountPath: "/var/run/docker.sock", ReadOnly: true, } - - // execEnvVars exposes various pod information as environment variables to the exec container - execEnvVars = []apiv1.EnvVar{ - envFromField(common.EnvVarPodName, 
"metadata.name"), - } ) -// envFromField is a helper to return a EnvVar with the name and field -func envFromField(envVarName, fieldPath string) apiv1.EnvVar { - return apiv1.EnvVar{ - Name: envVarName, - ValueFrom: &apiv1.EnvVarSource{ - FieldRef: &apiv1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: fieldPath, - }, - }, - } -} - func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Container, tmpl *wfv1.Template) (*apiv1.Pod, error) { nodeID := woc.wf.NodeID(nodeName) woc.log.Debugf("Creating Pod: %s (%s)", nodeName, nodeID) @@ -105,10 +90,7 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont }, }, Spec: apiv1.PodSpec{ - RestartPolicy: apiv1.RestartPolicyNever, - Containers: []apiv1.Container{ - mainCtr, - }, + RestartPolicy: apiv1.RestartPolicyNever, Volumes: woc.createVolumes(), ActiveDeadlineSeconds: tmpl.ActiveDeadlineSeconds, ServiceAccountName: woc.wf.Spec.ServiceAccountName, @@ -131,6 +113,9 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont if woc.controller.Config.InstanceID != "" { pod.ObjectMeta.Labels[common.LabelKeyControllerInstanceID] = woc.controller.Config.InstanceID } + if woc.controller.Config.ContainerRuntimeExecutor == common.ContainerRuntimeExecutorPNS { + pod.Spec.ShareProcessNamespace = pointer.BoolPtr(true) + } err := woc.addArchiveLocation(pod, tmpl) if err != nil { @@ -147,6 +132,11 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont } pod.Spec.Containers = append(pod.Spec.Containers, *waitCtr) } + // NOTE: the order of the container list is significant. kubelet will pull, create, and start + // each container sequentially in the order that they appear in this list. For PNS we want the + // wait container to start before the main, so that it always has the chance to see the main + // container's PID and root filesystem. + pod.Spec.Containers = append(pod.Spec.Containers, mainCtr) // Add init container only if it needs input artifacts. This is also true for // script templates (which needs to populate the script) @@ -169,22 +159,20 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont } if tmpl.GetType() == wfv1.TemplateTypeScript { - addExecutorStagingVolume(pod) + addScriptStagingVolume(pod) } - // addInitContainers should be called after all volumes have been manipulated - // in the main container (in case sidecar requires volume mount mirroring) + // addInitContainers, addSidecars and addOutputArtifactsVolumes should be called after all + // volumes have been manipulated in the main container since volumeMounts are mirrored err = addInitContainers(pod, tmpl) if err != nil { return nil, err } - - // addSidecars should be called after all volumes have been manipulated - // in the main container (in case sidecar requires volume mount mirroring) err = addSidecars(pod, tmpl) if err != nil { return nil, err } + addOutputArtifactsVolumes(pod, tmpl) // Set the container template JSON in pod annotations, which executor examines for things like // artifact location/path. @@ -258,28 +246,45 @@ func substituteGlobals(pod *apiv1.Pod, globalParams map[string]string) (*apiv1.P } func (woc *wfOperationCtx) newInitContainer(tmpl *wfv1.Template) apiv1.Container { - ctr := woc.newExecContainer(common.InitContainerName, false, "init") - ctr.VolumeMounts = append([]apiv1.VolumeMount{volumeMountPodMetadata}, ctr.VolumeMounts...) 
+ ctr := woc.newExecContainer(common.InitContainerName) + ctr.Command = []string{"argoexec", "init"} return *ctr } func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Container, error) { - ctr := woc.newExecContainer(common.WaitContainerName, false, "wait") - ctr.VolumeMounts = append(woc.createVolumeMounts(), ctr.VolumeMounts...) + ctr := woc.newExecContainer(common.WaitContainerName) + ctr.Command = []string{"argoexec", "wait"} + switch woc.controller.Config.ContainerRuntimeExecutor { + case common.ContainerRuntimeExecutorPNS: + ctr.SecurityContext = &apiv1.SecurityContext{ + Capabilities: &apiv1.Capabilities{ + Add: []apiv1.Capability{ + // necessary to access main's root filesystem when run with a different user id + apiv1.Capability("SYS_PTRACE"), + }, + }, + } + case "", common.ContainerRuntimeExecutorDocker: + ctr.VolumeMounts = append(ctr.VolumeMounts, volumeMountDockerSock) + } return ctr, nil } func (woc *wfOperationCtx) createEnvVars() []apiv1.EnvVar { + var execEnvVars []apiv1.EnvVar + if woc.controller.Config.Executor != nil { + execEnvVars = woc.controller.Config.Executor.Env + } switch woc.controller.Config.ContainerRuntimeExecutor { case common.ContainerRuntimeExecutorK8sAPI: - return append(execEnvVars, + execEnvVars = append(execEnvVars, apiv1.EnvVar{ Name: common.EnvVarContainerRuntimeExecutor, Value: woc.controller.Config.ContainerRuntimeExecutor, }, ) case common.ContainerRuntimeExecutorKubelet: - return append(execEnvVars, + execEnvVars = append(execEnvVars, apiv1.EnvVar{ Name: common.EnvVarContainerRuntimeExecutor, Value: woc.controller.Config.ContainerRuntimeExecutor, @@ -301,21 +306,15 @@ func (woc *wfOperationCtx) createEnvVars() []apiv1.EnvVar { Value: strconv.FormatBool(woc.controller.Config.KubeletInsecure), }, ) - default: - return execEnvVars - } -} - -func (woc *wfOperationCtx) createVolumeMounts() []apiv1.VolumeMount { - volumeMounts := []apiv1.VolumeMount{ - volumeMountPodMetadata, - } - switch woc.controller.Config.ContainerRuntimeExecutor { - case common.ContainerRuntimeExecutorKubelet, common.ContainerRuntimeExecutorK8sAPI: - return volumeMounts - default: - return append(volumeMounts, volumeMountDockerSock) + case common.ContainerRuntimeExecutorPNS: + execEnvVars = append(execEnvVars, + apiv1.EnvVar{ + Name: common.EnvVarContainerRuntimeExecutor, + Value: woc.controller.Config.ContainerRuntimeExecutor, + }, + ) } + return execEnvVars } func (woc *wfOperationCtx) createVolumes() []apiv1.Volume { @@ -337,26 +336,29 @@ func (woc *wfOperationCtx) createVolumes() []apiv1.Volume { }) } switch woc.controller.Config.ContainerRuntimeExecutor { - case common.ContainerRuntimeExecutorKubelet, common.ContainerRuntimeExecutorK8sAPI: + case common.ContainerRuntimeExecutorKubelet, common.ContainerRuntimeExecutorK8sAPI, common.ContainerRuntimeExecutorPNS: return volumes default: return append(volumes, volumeDockerSock) } } -func (woc *wfOperationCtx) newExecContainer(name string, privileged bool, subCommand string) *apiv1.Container { +func (woc *wfOperationCtx) newExecContainer(name string) *apiv1.Container { exec := apiv1.Container{ Name: name, Image: woc.controller.executorImage(), ImagePullPolicy: woc.controller.executorImagePullPolicy(), Env: woc.createEnvVars(), - SecurityContext: &apiv1.SecurityContext{ - Privileged: &privileged, + VolumeMounts: []apiv1.VolumeMount{ + volumeMountPodMetadata, }, - Command: []string{"argoexec"}, - Args: []string{subCommand}, } - if woc.controller.Config.ExecutorResources != nil { + if 
woc.controller.Config.Executor != nil { + exec.Args = woc.controller.Config.Executor.Args + } + if isResourcesSpecified(woc.controller.Config.Executor) { + exec.Resources = woc.controller.Config.Executor.Resources + } else if woc.controller.Config.ExecutorResources != nil { exec.Resources = *woc.controller.Config.ExecutorResources } if woc.controller.Config.KubeConfig != nil { @@ -380,6 +382,10 @@ func (woc *wfOperationCtx) newExecContainer(name string, privileged bool, subCom return &exec } +func isResourcesSpecified(ctr *apiv1.Container) bool { + return ctr != nil && (ctr.Resources.Limits.Cpu() != nil || ctr.Resources.Limits.Memory() != nil) +} + // addMetadata applies metadata specified in the template func (woc *wfOperationCtx) addMetadata(pod *apiv1.Pod, tmpl *wfv1.Template) { for k, v := range tmpl.Metadata.Annotations { @@ -573,7 +579,7 @@ func (woc *wfOperationCtx) addInputArtifactsVolumes(pod *apiv1.Pod, tmpl *wfv1.T // instead of the artifacts volume if tmpl.Container != nil { for _, mnt := range tmpl.Container.VolumeMounts { - mnt.MountPath = path.Join(common.InitContainerMainFilesystemDir, mnt.MountPath) + mnt.MountPath = filepath.Join(common.ExecutorMainFilesystemDir, mnt.MountPath) initCtr.VolumeMounts = append(initCtr.VolumeMounts, mnt) } } @@ -582,19 +588,19 @@ func (woc *wfOperationCtx) addInputArtifactsVolumes(pod *apiv1.Pod, tmpl *wfv1.T } } - mainCtrIndex := 0 - var mainCtr *apiv1.Container + mainCtrIndex := -1 for i, ctr := range pod.Spec.Containers { - if ctr.Name == common.MainContainerName { + switch ctr.Name { + case common.MainContainerName: mainCtrIndex = i - mainCtr = &pod.Spec.Containers[i] + break } } - if mainCtr == nil { - panic("Could not find main container in pod spec") + if mainCtrIndex == -1 { + panic("Could not find main or wait container in pod spec") } - // TODO: the order in which we construct the volume mounts may matter, - // especially if they are overlapping. + mainCtr := &pod.Spec.Containers[mainCtrIndex] + for _, art := range tmpl.Inputs.Artifacts { if art.Path == "" { return errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s did not specify a path", art.Name) @@ -622,6 +628,41 @@ func (woc *wfOperationCtx) addInputArtifactsVolumes(pod *apiv1.Pod, tmpl *wfv1.T return nil } +// addOutputArtifactsVolumes mirrors any volume mounts in the main container to the wait sidecar. +// For any output artifacts that were produced in mounted volumes (e.g. PVCs, emptyDirs), the +// wait container will collect the artifacts directly from volumeMount instead of `docker cp`-ing +// them to the wait sidecar. In order for this to work, we mirror all volume mounts in the main +// container under a well-known path. 
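A sketch of what the mirroring described above buys the wait sidecar at runtime: an output artifact written to a shared volume can be collected with a plain file read instead of a `docker cp`. This is an editorial illustration only; the artifact path below is a made-up example, and only the constant value is taken from the patch:

```
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

const executorMainFilesystemDir = "/mainctrfs" // value from workflow/common/common.go above

func main() {
	// Suppose the main container wrote an output artifact to /workdir/result.json
	// on a mounted volume. The wait sidecar sees the same file here:
	mirrored := filepath.Join(executorMainFilesystemDir, "/workdir/result.json")
	data, err := ioutil.ReadFile(mirrored)
	if err != nil {
		fmt.Println("not on a shared volume; fall back to copying out of the container")
		return
	}
	fmt.Printf("collected %d bytes directly from the volume mount\n", len(data))
}
```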
+func addOutputArtifactsVolumes(pod *apiv1.Pod, tmpl *wfv1.Template) { + if tmpl.GetType() == wfv1.TemplateTypeResource { + return + } + mainCtrIndex := -1 + waitCtrIndex := -1 + var mainCtr *apiv1.Container + for i, ctr := range pod.Spec.Containers { + switch ctr.Name { + case common.MainContainerName: + mainCtrIndex = i + case common.WaitContainerName: + waitCtrIndex = i + } + } + if mainCtrIndex == -1 || waitCtrIndex == -1 { + panic("Could not find main or wait container in pod spec") + } + mainCtr = &pod.Spec.Containers[mainCtrIndex] + waitCtr := &pod.Spec.Containers[waitCtrIndex] + + for _, mnt := range mainCtr.VolumeMounts { + mnt.MountPath = filepath.Join(common.ExecutorMainFilesystemDir, mnt.MountPath) + // ReadOnly is needed to be false for overlapping volume mounts + mnt.ReadOnly = false + waitCtr.VolumeMounts = append(waitCtr.VolumeMounts, mnt) + } + pod.Spec.Containers[waitCtrIndex] = *waitCtr +} + // addArchiveLocation conditionally updates the template with the default artifact repository // information configured in the controller, for the purposes of archiving outputs. This is skipped // for templates which do not need to archive anything, or have explicitly set an archive location @@ -647,7 +688,7 @@ func (woc *wfOperationCtx) addArchiveLocation(pod *apiv1.Pod, tmpl *wfv1.Templat } } if !needLocation { - woc.log.Debugf("archive location unecessary") + woc.log.Debugf("archive location unnecessary") return nil } tmpl.ArchiveLocation = &wfv1.ArtifactLocation{ @@ -691,9 +732,9 @@ func (woc *wfOperationCtx) addArchiveLocation(pod *apiv1.Pod, tmpl *wfv1.Templat return nil } -// addExecutorStagingVolume sets up a shared staging volume between the init container +// addScriptStagingVolume sets up a shared staging volume between the init container // and main container for the purpose of holding the script source code for script templates -func addExecutorStagingVolume(pod *apiv1.Pod) { +func addScriptStagingVolume(pod *apiv1.Pod) { volName := "argo-staging" stagingVol := apiv1.Volume{ Name: volName, @@ -721,11 +762,7 @@ func addExecutorStagingVolume(pod *apiv1.Pod) { Name: volName, MountPath: common.ExecutorStagingEmptyDir, } - if ctr.VolumeMounts == nil { - ctr.VolumeMounts = []apiv1.VolumeMount{volMount} - } else { - ctr.VolumeMounts = append(ctr.VolumeMounts, volMount) - } + ctr.VolumeMounts = append(ctr.VolumeMounts, volMount) pod.Spec.Containers[i] = ctr found = true break diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go index 41f5bd12d495..190c48d42c1c 100644 --- a/workflow/controller/workflowpod_test.go +++ b/workflow/controller/workflowpod_test.go @@ -2,6 +2,7 @@ package controller import ( "encoding/json" + "fmt" "testing" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" @@ -283,8 +284,8 @@ func TestVolumeAndVolumeMounts(t *testing.T) { assert.Equal(t, "podmetadata", pod.Spec.Volumes[0].Name) assert.Equal(t, "docker-sock", pod.Spec.Volumes[1].Name) assert.Equal(t, "volume-name", pod.Spec.Volumes[2].Name) - assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts)) - assert.Equal(t, "volume-name", pod.Spec.Containers[0].VolumeMounts[0].Name) + assert.Equal(t, 1, len(pod.Spec.Containers[1].VolumeMounts)) + assert.Equal(t, "volume-name", pod.Spec.Containers[1].VolumeMounts[0].Name) } // For Kubelet executor @@ -301,8 +302,8 @@ func TestVolumeAndVolumeMounts(t *testing.T) { assert.Equal(t, 2, len(pod.Spec.Volumes)) assert.Equal(t, "podmetadata", pod.Spec.Volumes[0].Name) assert.Equal(t, "volume-name", 
pod.Spec.Volumes[1].Name) - assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts)) - assert.Equal(t, "volume-name", pod.Spec.Containers[0].VolumeMounts[0].Name) + assert.Equal(t, 1, len(pod.Spec.Containers[1].VolumeMounts)) + assert.Equal(t, "volume-name", pod.Spec.Containers[1].VolumeMounts[0].Name) } // For K8sAPI executor @@ -319,12 +320,26 @@ func TestVolumeAndVolumeMounts(t *testing.T) { assert.Equal(t, 2, len(pod.Spec.Volumes)) assert.Equal(t, "podmetadata", pod.Spec.Volumes[0].Name) assert.Equal(t, "volume-name", pod.Spec.Volumes[1].Name) - assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts)) - assert.Equal(t, "volume-name", pod.Spec.Containers[0].VolumeMounts[0].Name) + assert.Equal(t, 1, len(pod.Spec.Containers[1].VolumeMounts)) + assert.Equal(t, "volume-name", pod.Spec.Containers[1].VolumeMounts[0].Name) } } func TestOutOfCluster(t *testing.T) { + + verifyKubeConfigVolume := func(ctr apiv1.Container, volName, mountPath string) { + for _, vol := range ctr.VolumeMounts { + if vol.Name == volName && vol.MountPath == mountPath { + for _, arg := range ctr.Args { + if arg == fmt.Sprintf("--kubeconfig=%s", mountPath) { + return + } + } + } + } + t.Fatalf("%v does not have kubeconfig mounted properly (name: %s, mountPath: %s)", ctr, volName, mountPath) + } + // default mount path & volume name { woc := newWoc() @@ -341,11 +356,8 @@ func TestOutOfCluster(t *testing.T) { assert.Equal(t, "kubeconfig", pod.Spec.Volumes[1].Name) assert.Equal(t, "foo", pod.Spec.Volumes[1].VolumeSource.Secret.SecretName) - // kubeconfig volume is the last one - idx := len(pod.Spec.Containers[1].VolumeMounts) - 1 - assert.Equal(t, "kubeconfig", pod.Spec.Containers[1].VolumeMounts[idx].Name) - assert.Equal(t, "/kube/config", pod.Spec.Containers[1].VolumeMounts[idx].MountPath) - assert.Equal(t, "--kubeconfig=/kube/config", pod.Spec.Containers[1].Args[1]) + waitCtr := pod.Spec.Containers[0] + verifyKubeConfigVolume(waitCtr, "kubeconfig", "/kube/config") } // custom mount path & volume name, in case name collision @@ -367,10 +379,8 @@ func TestOutOfCluster(t *testing.T) { assert.Equal(t, "foo", pod.Spec.Volumes[1].VolumeSource.Secret.SecretName) // kubeconfig volume is the last one - idx := len(pod.Spec.Containers[1].VolumeMounts) - 1 - assert.Equal(t, "kube-config-secret", pod.Spec.Containers[1].VolumeMounts[idx].Name) - assert.Equal(t, "/some/path/config", pod.Spec.Containers[1].VolumeMounts[idx].MountPath) - assert.Equal(t, "--kubeconfig=/some/path/config", pod.Spec.Containers[1].Args[1]) + waitCtr := pod.Spec.Containers[0] + verifyKubeConfigVolume(waitCtr, "kube-config-secret", "/some/path/config") } } @@ -472,7 +482,7 @@ func TestSidecars(t *testing.T) { pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{}) assert.Nil(t, err) assert.Equal(t, 3, len(pod.Spec.Containers)) - assert.Equal(t, "main", pod.Spec.Containers[0].Name) - assert.Equal(t, "wait", pod.Spec.Containers[1].Name) + assert.Equal(t, "wait", pod.Spec.Containers[0].Name) + assert.Equal(t, "main", pod.Spec.Containers[1].Name) assert.Equal(t, "side-foo", pod.Spec.Containers[2].Name) } diff --git a/workflow/executor/common/common.go b/workflow/executor/common/common.go index e5b94cc38f4b..0ce8d251532f 100644 --- a/workflow/executor/common/common.go +++ b/workflow/executor/common/common.go @@ -19,7 +19,7 @@ const ( // killGracePeriod is the time in seconds after sending SIGTERM before // forcefully killing the sidecar with SIGKILL (value matches k8s) -const killGracePeriod = 10 +const KillGracePeriod = 
10 // GetContainerID returns container ID of a ContainerStatus resource func GetContainerID(container *v1.ContainerStatus) string { @@ -94,7 +94,7 @@ func KillGracefully(c KubernetesClientInterface, containerID string) error { if err != nil { return err } - err = WaitForTermination(c, containerID, time.Second*killGracePeriod) + err = WaitForTermination(c, containerID, time.Second*KillGracePeriod) if err == nil { log.Infof("ContainerID %q successfully killed", containerID) return nil @@ -104,7 +104,7 @@ func KillGracefully(c KubernetesClientInterface, containerID string) error { if err != nil { return err } - err = WaitForTermination(c, containerID, time.Second*killGracePeriod) + err = WaitForTermination(c, containerID, time.Second*KillGracePeriod) if err != nil { return err } diff --git a/workflow/executor/docker/docker.go b/workflow/executor/docker/docker.go index 9b0c7d9266bb..2c6b3893c7a7 100644 --- a/workflow/executor/docker/docker.go +++ b/workflow/executor/docker/docker.go @@ -4,24 +4,20 @@ import ( "archive/tar" "compress/gzip" "fmt" + "io" "os" "os/exec" - "strings" "time" - "github.com/argoproj/argo/util/file" - - "github.com/argoproj/argo/util" + log "github.com/sirupsen/logrus" "github.com/argoproj/argo/errors" + "github.com/argoproj/argo/util" + "github.com/argoproj/argo/util/file" "github.com/argoproj/argo/workflow/common" - log "github.com/sirupsen/logrus" + execcommon "github.com/argoproj/argo/workflow/executor/common" ) -// killGracePeriod is the time in seconds after sending SIGTERM before -// forcefully killing the sidecar with SIGKILL (value matches k8s) -const killGracePeriod = 10 - type DockerExecutor struct{} func NewDockerExecutor() (*DockerExecutor, error) { @@ -73,35 +69,30 @@ func (d *DockerExecutor) CopyFile(containerID string, sourcePath string, destPat return nil } -// GetOutput returns the entirety of the container output as a string -// Used to capturing script results as an output parameter -func (d *DockerExecutor) GetOutput(containerID string) (string, error) { +func (d *DockerExecutor) GetOutputStream(containerID string, combinedOutput bool) (io.ReadCloser, error) { cmd := exec.Command("docker", "logs", containerID) log.Info(cmd.Args) - outBytes, _ := cmd.Output() - return strings.TrimSpace(string(outBytes)), nil -} - -// Wait for the container to complete -func (d *DockerExecutor) Wait(containerID string) error { - return common.RunCommand("docker", "wait", containerID) -} - -// Logs captures the logs of a container to a file -func (d *DockerExecutor) Logs(containerID string, path string) error { - cmd := exec.Command("docker", "logs", containerID) - outfile, err := os.Create(path) + if combinedOutput { + cmd.Stderr = cmd.Stdout + } + reader, err := cmd.StdoutPipe() if err != nil { - return errors.InternalWrapError(err) + return nil, errors.InternalWrapError(err) } - defer util.Close(outfile) - cmd.Stdout = outfile - cmd.Stderr = outfile err = cmd.Start() if err != nil { - return errors.InternalWrapError(err) + return nil, errors.InternalWrapError(err) } - return cmd.Wait() + return reader, nil +} + +func (d *DockerExecutor) WaitInit() error { + return nil +} + +// Wait for the container to complete +func (d *DockerExecutor) Wait(containerID string) error { + return common.RunCommand("docker", "wait", containerID) } // killContainers kills a list of containerIDs first with a SIGTERM then with a SIGKILL after a grace period @@ -120,8 +111,8 @@ func (d *DockerExecutor) Kill(containerIDs []string) error { // waitCmd.Wait() might return error "signal: 
killed" when we SIGKILL the process // We ignore errors in this case //ignoreWaitError := false - timer := time.AfterFunc(killGracePeriod*time.Second, func() { - log.Infof("Timed out (%ds) for containers to terminate gracefully. Killing forcefully", killGracePeriod) + timer := time.AfterFunc(execcommon.KillGracePeriod*time.Second, func() { + log.Infof("Timed out (%ds) for containers to terminate gracefully. Killing forcefully", execcommon.KillGracePeriod) forceKillArgs := append([]string{"kill", "--signal", "KILL"}, containerIDs...) forceKillCmd := exec.Command("docker", forceKillArgs...) log.Info(forceKillCmd.Args) diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index 17561a2b0600..ad15c5439a86 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -19,8 +19,18 @@ import ( "syscall" "time" + argofile "github.com/argoproj/pkg/file" + log "github.com/sirupsen/logrus" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "github.com/argoproj/argo/errors" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/util/archive" "github.com/argoproj/argo/util/retry" artifact "github.com/argoproj/argo/workflow/artifacts" "github.com/argoproj/argo/workflow/artifacts/artifactory" @@ -30,12 +40,11 @@ import ( "github.com/argoproj/argo/workflow/artifacts/raw" "github.com/argoproj/argo/workflow/artifacts/s3" "github.com/argoproj/argo/workflow/common" - argofile "github.com/argoproj/pkg/file" - log "github.com/sirupsen/logrus" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" +) + +const ( + // This directory temporarily stores the tarballs of the artifacts before uploading + tempOutArtDir = "/argo/outputs/artifacts" ) // WorkflowExecutor is program which runs as the init/wait container @@ -69,28 +78,30 @@ type ContainerRuntimeExecutor interface { // CopyFile copies a source file in a container to a local path CopyFile(containerID string, sourcePath string, destPath string) error - // GetOutput returns the entirety of the container output as a string - // Used to capturing script results as an output parameter - GetOutput(containerID string) (string, error) + // GetOutputStream returns the entirety of the container output as a io.Reader + // Used to capture script results as an output parameter, and to archive container logs + GetOutputStream(containerID string, combinedOutput bool) (io.ReadCloser, error) - // Wait for the container to complete - Wait(containerID string) error + // WaitInit is called before Wait() to signal the executor about an impending Wait call. 
+ // For most executors this is a noop, and is only used by the PNS executor + WaitInit() error - // Copy logs to a given path - Logs(containerID string, path string) error + // Wait waits for the container to complete + Wait(containerID string) error // Kill a list of containerIDs first with a SIGTERM then with a SIGKILL after a grace period Kill(containerIDs []string) error } // NewExecutor instantiates a new workflow executor -func NewExecutor(clientset kubernetes.Interface, podName, namespace, podAnnotationsPath string, cre ContainerRuntimeExecutor) WorkflowExecutor { +func NewExecutor(clientset kubernetes.Interface, podName, namespace, podAnnotationsPath string, cre ContainerRuntimeExecutor, template wfv1.Template) WorkflowExecutor { return WorkflowExecutor{ PodName: podName, ClientSet: clientset, Namespace: namespace, PodAnnotationsPath: podAnnotationsPath, RuntimeExecutor: cre, + Template: template, memoizedConfigMaps: map[string]string{}, memoizedSecrets: map[string][]byte{}, errors: []error{}, @@ -109,7 +120,7 @@ func (we *WorkflowExecutor) HandleError() { } } -// LoadArtifacts loads aftifacts from location to a container path +// LoadArtifacts loads artifacts from location to a container path func (we *WorkflowExecutor) LoadArtifacts() error { log.Infof("Start loading input artifacts...") @@ -119,7 +130,7 @@ func (we *WorkflowExecutor) LoadArtifacts() error { if !art.HasLocation() { if art.Optional { - log.Warnf("Artifact %s is not supplied. Artifact configured as an optional so, Artifact will be ignored", art.Name) + log.Warnf("Ignoring optional artifact '%s' which was not supplied", art.Name) continue } else { return errors.New("required artifact %s not supplied", art.Name) @@ -144,7 +155,7 @@ func (we *WorkflowExecutor) LoadArtifacts() error { // as opposed to the `input-artifacts` volume that is an implementation detail // unbeknownst to the user. log.Infof("Specified artifact path %s overlaps with volume mount at %s.
Extracting to volume mount", art.Path, mnt.MountPath) - artPath = path.Join(common.InitContainerMainFilesystemDir, art.Path) + artPath = path.Join(common.ExecutorMainFilesystemDir, art.Path) } // The artifact is downloaded to a temporary location, after which we determine if @@ -211,15 +222,13 @@ func (we *WorkflowExecutor) SaveArtifacts() error { return err } - // This directory temporarily stores the tarballs of the artifacts before uploading - tempOutArtDir := "/argo/outputs/artifacts" err = os.MkdirAll(tempOutArtDir, os.ModePerm) if err != nil { return errors.InternalWrapError(err) } for i, art := range we.Template.Outputs.Artifacts { - err := we.saveArtifact(tempOutArtDir, mainCtrID, &art) + err := we.saveArtifact(mainCtrID, &art) if err != nil { return err } @@ -228,31 +237,19 @@ func (we *WorkflowExecutor) SaveArtifacts() error { return nil } -func (we *WorkflowExecutor) saveArtifact(tempOutArtDir string, mainCtrID string, art *wfv1.Artifact) error { - log.Infof("Saving artifact: %s", art.Name) +func (we *WorkflowExecutor) saveArtifact(mainCtrID string, art *wfv1.Artifact) error { // Determine the file path of where to find the artifact if art.Path == "" { return errors.InternalErrorf("Artifact %s did not specify a path", art.Name) } - - // fileName is incorporated into the final path when uploading it to the artifact repo - fileName := fmt.Sprintf("%s.tgz", art.Name) - // localArtPath is the final staging location of the file (or directory) which we will pass - // to the SaveArtifacts call - localArtPath := path.Join(tempOutArtDir, fileName) - err := we.RuntimeExecutor.CopyFile(mainCtrID, art.Path, localArtPath) + fileName, localArtPath, err := we.stageArchiveFile(mainCtrID, art) if err != nil { if art.Optional && errors.IsCode(errors.CodeNotFound, err) { - log.Warnf("Error in saving Artifact. Artifact configured as an optional so, Error will be ignored. Error= %v", err) + log.Warnf("Ignoring optional artifact '%s' which does not exist in path '%s': %v", art.Name, art.Path, err) return nil } return err } - fileName, localArtPath, err = stageArchiveFile(fileName, localArtPath, art) - if err != nil { - return err - } - if !art.HasLocation() { // If user did not explicitly set an artifact destination location in the template, // use the default archive location (appended with the filename). @@ -299,7 +296,13 @@ func (we *WorkflowExecutor) saveArtifact(tempOutArtDir string, mainCtrID string, return nil } -func stageArchiveFile(fileName, localArtPath string, art *wfv1.Artifact) (string, string, error) { +// stageArchiveFile stages a path in a container for archiving from the wait sidecar. +// Returns a filename and a local path for the upload. +// The filename is incorporated into the final path when uploading it to the artifact repo. +// The local path is the final staging location of the file (or directory) which we will pass +// to the SaveArtifacts call and may be a directory or file. 
+func (we *WorkflowExecutor) stageArchiveFile(mainCtrID string, art *wfv1.Artifact) (string, string, error) { + log.Infof("Staging artifact: %s", art.Name) strategy := art.Archive if strategy == nil { // If no strategy is specified, default to the tar strategy @@ -307,44 +310,83 @@ func stageArchiveFile(fileName, localArtPath string, art *wfv1.Artifact) (string Tar: &wfv1.TarStrategy{}, } } - tempOutArtDir := filepath.Dir(localArtPath) - if strategy.None != nil { - log.Info("Disabling archive before upload") - unarchivedArtPath := path.Join(tempOutArtDir, art.Name) - err := untar(localArtPath, unarchivedArtPath) - if err != nil { - return "", "", err + + if !we.isBaseImagePath(art.Path) { + // If we get here, we are uploading an artifact from a mirrored volume mount which the wait + // sidecar has direct access to. We can upload directly from the shared volume mount, + // instead of copying it from the container. + mountedArtPath := filepath.Join(common.ExecutorMainFilesystemDir, art.Path) + log.Infof("Staging %s from mirrored volume mount %s", art.Path, mountedArtPath) + if strategy.None != nil { + fileName := filepath.Base(art.Path) + log.Infof("No compression strategy needed. Staging skipped") + return fileName, mountedArtPath, nil } - // Delete the tarball - err = os.Remove(localArtPath) + fileName := fmt.Sprintf("%s.tgz", art.Name) + localArtPath := filepath.Join(tempOutArtDir, fileName) + f, err := os.Create(localArtPath) if err != nil { return "", "", errors.InternalWrapError(err) } - isDir, err := argofile.IsDirectory(unarchivedArtPath) + w := bufio.NewWriter(f) + err = archive.TarGzToWriter(mountedArtPath, w) if err != nil { - return "", "", errors.InternalWrapError(err) + return "", "", err } - fileName = filepath.Base(art.Path) - if isDir { - localArtPath = unarchivedArtPath - } else { - // If we are uploading a single file, we need to preserve original filename so that - // 1. minio client can infer its mime-type, based on file extension - // 2. the original filename is incorporated into the final path - localArtPath = path.Join(tempOutArtDir, fileName) - err = os.Rename(unarchivedArtPath, localArtPath) - if err != nil { - return "", "", errors.InternalWrapError(err) - } + log.Infof("Successfully staged %s from mirrored volume mount %s", art.Path, mountedArtPath) + return fileName, localArtPath, nil + } + + fileName := fmt.Sprintf("%s.tgz", art.Name) + localArtPath := filepath.Join(tempOutArtDir, fileName) + log.Infof("Copying %s from container base image layer to %s", art.Path, localArtPath) + + err := we.RuntimeExecutor.CopyFile(mainCtrID, art.Path, localArtPath) + if err != nil { + return "", "", err + } + if strategy.Tar != nil { + // NOTE we already tar gzip the file in the executor. So this is a noop. + return fileName, localArtPath, nil + } + // localArtPath now points to a .tgz file, and the archive strategy is *not* tar. 
We need to untar it + log.Infof("Untarring %s archive before upload", localArtPath) + unarchivedArtPath := path.Join(filepath.Dir(localArtPath), art.Name) + err = untar(localArtPath, unarchivedArtPath) + if err != nil { + return "", "", err + } + // Delete the tarball + err = os.Remove(localArtPath) + if err != nil { + return "", "", errors.InternalWrapError(err) + } + isDir, err := argofile.IsDirectory(unarchivedArtPath) + if err != nil { + return "", "", errors.InternalWrapError(err) + } + fileName = filepath.Base(art.Path) + if isDir { + localArtPath = unarchivedArtPath + } else { + // If we are uploading a single file, we need to preserve original filename so that + // 1. minio client can infer its mime-type, based on file extension + // 2. the original filename is incorporated into the final path + localArtPath = path.Join(tempOutArtDir, fileName) + err = os.Rename(unarchivedArtPath, localArtPath) + if err != nil { + return "", "", errors.InternalWrapError(err) } - } else if strategy.Tar != nil { - // NOTE we already tar gzip the file in the executor. So this is a noop. In the future, if - // we were to support other compression formats (e.g. bzip2) or options, the logic would go - // here, and compression would be moved out of the executors. } + // In the future, if we were to support other compression formats (e.g. bzip2) or options, + // the logic would go here, and compression would be moved out of the executors return fileName, localArtPath, nil } +func (we *WorkflowExecutor) isBaseImagePath(path string) bool { + return common.FindOverlappingVolume(&we.Template, path) == nil +} + // SaveParameters will save the content in the specified file path as output parameter value func (we *WorkflowExecutor) SaveParameters() error { if len(we.Template.Outputs.Parameters) == 0 { @@ -363,10 +405,24 @@ func (we *WorkflowExecutor) SaveParameters() error { if param.ValueFrom == nil || param.ValueFrom.Path == "" { continue } - output, err := we.RuntimeExecutor.GetFileContents(mainCtrID, param.ValueFrom.Path) - if err != nil { - return err + + var output string + if we.isBaseImagePath(param.ValueFrom.Path) { + log.Infof("Copying %s from base image layer", param.ValueFrom.Path) + output, err = we.RuntimeExecutor.GetFileContents(mainCtrID, param.ValueFrom.Path) + if err != nil { + return err + } + } else { + log.Infof("Copying %s from volume mount", param.ValueFrom.Path) + mountedPath := filepath.Join(common.ExecutorMainFilesystemDir, param.ValueFrom.Path) + out, err := ioutil.ReadFile(mountedPath) + if err != nil { + return err + } + output = string(out) } + outputLen := len(output) // Trims off a single newline for user convenience if outputLen > 0 && output[outputLen-1] == '\n' { @@ -395,7 +451,7 @@ func (we *WorkflowExecutor) SaveLogs() (*wfv1.Artifact, error) { } fileName := "main.log" mainLog := path.Join(tempLogsDir, fileName) - err = we.RuntimeExecutor.Logs(mainCtrID, mainLog) + err = we.saveLogToFile(mainCtrID, mainLog) if err != nil { return nil, err } @@ -437,9 +493,26 @@ // GetSecretFromVolMount will retrieve the Secrets from VolumeMount func (we *WorkflowExecutor) GetSecretFromVolMount(accessKeyName string, accessKey string) ([]byte, error) { - return ioutil.ReadFile(filepath.Join(common.SecretVolMountPath, accessKeyName, accessKey)) +} +// saveLogToFile saves the entire log output of a container to a local file +func (we *WorkflowExecutor) saveLogToFile(mainCtrID, path string) error { + outFile, err := os.Create(path)
+ if err != nil { + return errors.InternalWrapError(err) + } + defer func() { _ = outFile.Close() }() + reader, err := we.RuntimeExecutor.GetOutputStream(mainCtrID, true) + if err != nil { + return err + } + defer func() { _ = reader.Close() }() + _, err = io.Copy(outFile, reader) + if err != nil { + return errors.InternalWrapError(err) + } + return nil } // InitDriver initializes an instance of an artifact driver @@ -664,10 +737,21 @@ func (we *WorkflowExecutor) CaptureScriptResult() error { if err != nil { return err } - out, err := we.RuntimeExecutor.GetOutput(mainContainerID) + reader, err := we.RuntimeExecutor.GetOutputStream(mainContainerID, false) if err != nil { return err } + defer func() { _ = reader.Close() }() + bytes, err := ioutil.ReadAll(reader) + if err != nil { + return errors.InternalWrapError(err) + } + out := string(bytes) + // Trims off a single newline for user convenience + outputLen := len(out) + if outputLen > 0 && out[outputLen-1] == '\n' { + out = out[0 : outputLen-1] + } we.Template.Outputs.Result = &out return nil } @@ -692,6 +776,7 @@ func (we *WorkflowExecutor) AnnotateOutputs(logArt *wfv1.Artifact) error { // AddError adds an error to the list of encountered errors during execution func (we *WorkflowExecutor) AddError(err error) { + log.Errorf("executor error: %+v", err) we.errors = append(we.errors, err) } @@ -762,20 +847,13 @@ func containerID(ctrID string) string { // Wait is the sidecar container logic which waits for the main container to complete. // Also monitors for updates in the pod annotations which may change (e.g. terminate) // Upon completion, kills any sidecars after it finishes. -func (we *WorkflowExecutor) Wait() (err error) { - defer func() { - killSidecarsErr := we.killSidecars() - if killSidecarsErr != nil { - log.Errorf("Failed to kill sidecars: %v", killSidecarsErr) - if err == nil { - // set error only if not already set - err = killSidecarsErr - } - } - }() +func (we *WorkflowExecutor) Wait() error { + err := we.RuntimeExecutor.WaitInit() + if err != nil { + return err + } log.Infof("Waiting on main container") - var mainContainerID string - mainContainerID, err = we.waitMainContainerStart() + mainContainerID, err := we.waitMainContainerStart() if err != nil { return err } @@ -787,33 +865,52 @@ go we.monitorDeadline(ctx, annotationUpdatesCh) err = we.RuntimeExecutor.Wait(mainContainerID) + if err != nil { + return err + } log.Infof("Main container completed") - return + return nil } // waitMainContainerStart waits for the main container to start and returns its container ID.
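The doc comment above introduces the new watch-based wait. A condensed sketch of the underlying pattern follows, assuming the 2018-era client-go used in this patch (where Watch takes no context argument); the function and parameter names are placeholders, not part of the patch:

```
package example

import (
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// watchPodByName scopes a watch to a single pod via a field selector, the same
// pattern waitMainContainerStart uses below to observe container statuses.
func watchPodByName(clientset kubernetes.Interface, namespace, podName string) error {
	fieldSelector := fields.ParseSelectorOrDie("metadata.name=" + podName)
	watchIf, err := clientset.CoreV1().Pods(namespace).Watch(metav1.ListOptions{
		FieldSelector: fieldSelector.String(),
	})
	if err != nil {
		return err
	}
	defer watchIf.Stop()
	for ev := range watchIf.ResultChan() {
		pod, ok := ev.Object.(*apiv1.Pod)
		if !ok {
			continue // e.g. a watch.Error status object
		}
		_ = pod.Status.ContainerStatuses // inspect the main container's status here
	}
	return nil // channel closed; callers typically re-establish the watch
}
```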
// waitMainContainerStart waits for the main container to start and returns its container ID.
 func (we *WorkflowExecutor) waitMainContainerStart() (string, error) {
 	for {
-		ctrStatus, err := we.GetMainContainerStatus()
+		podsIf := we.ClientSet.CoreV1().Pods(we.Namespace)
+		fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", we.PodName))
+		opts := metav1.ListOptions{
+			FieldSelector: fieldSelector.String(),
+		}
+		watchIf, err := podsIf.Watch(opts)
 		if err != nil {
-			return "", err
+			return "", errors.InternalWrapErrorf(err, "Failed to establish pod watch: %v", err)
 		}
-		if ctrStatus != nil {
-			log.Debug(ctrStatus)
-			if ctrStatus.ContainerID != "" {
-				we.mainContainerID = containerID(ctrStatus.ContainerID)
-				return containerID(ctrStatus.ContainerID), nil
-			} else if ctrStatus.State.Waiting == nil && ctrStatus.State.Running == nil && ctrStatus.State.Terminated == nil {
-				// status still not ready, wait
-				time.Sleep(1 * time.Second)
-			} else if ctrStatus.State.Waiting != nil {
-				// main container is still in waiting status
-				time.Sleep(1 * time.Second)
-			} else {
-				// main container in running or terminated state but missing container ID
-				return "", errors.InternalError("Main container ID cannot be found")
+		for watchEv := range watchIf.ResultChan() {
+			if watchEv.Type == watch.Error {
+				return "", errors.InternalErrorf("Pod watch error waiting for main to start: %v", watchEv.Object)
+			}
+			pod, ok := watchEv.Object.(*apiv1.Pod)
+			if !ok {
+				log.Warnf("Pod watch returned non pod object: %v", watchEv.Object)
+				continue
+			}
+			for _, ctrStatus := range pod.Status.ContainerStatuses {
+				if ctrStatus.Name == common.MainContainerName {
+					log.Debug(ctrStatus)
+					if ctrStatus.ContainerID != "" {
+						we.mainContainerID = containerID(ctrStatus.ContainerID)
+						return containerID(ctrStatus.ContainerID), nil
+					} else if ctrStatus.State.Waiting == nil && ctrStatus.State.Running == nil && ctrStatus.State.Terminated == nil {
+						// status still not ready, wait
+					} else if ctrStatus.State.Waiting != nil {
+						// main container is still in waiting status
+					} else {
+						// main container in running or terminated state but missing container ID
+						return "", errors.InternalError("Main container ID cannot be found")
+					}
+				}
 			}
 		}
+		log.Warnf("Pod watch closed unexpectedly")
 	}
 }

@@ -954,8 +1051,8 @@ func (we *WorkflowExecutor) monitorDeadline(ctx context.Context, annotationsUpda
 	}
 }

-// killSidecars kills any sidecars to the main container
-func (we *WorkflowExecutor) killSidecars() error {
+// KillSidecars kills any sidecars to the main container
+func (we *WorkflowExecutor) KillSidecars() error {
 	if len(we.Template.Sidecars) == 0 {
 		log.Infof("No sidecars")
 		return nil
@@ -983,15 +1080,6 @@
 	return we.RuntimeExecutor.Kill(sidecarIDs)
 }

-// LoadTemplate reads the template definition from the the Kubernetes downward api annotations volume file
-func (we *WorkflowExecutor) LoadTemplate() error {
-	err := unmarshalAnnotationField(we.PodAnnotationsPath, common.AnnotationKeyTemplate, &we.Template)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
 // LoadExecutionControl reads the execution control definition from the Kubernetes downward api annotations volume file
 func (we *WorkflowExecutor) LoadExecutionControl() error {
 	err := unmarshalAnnotationField(we.PodAnnotationsPath, common.AnnotationKeyExecutionControl, &we.ExecutionControl)
@@ -1004,6 +1092,16 @@
 	return nil
 }

+// LoadTemplate reads the template definition from the Kubernetes downward api annotations volume file
+func
LoadTemplate(path string) (*wfv1.Template, error) { + var tmpl wfv1.Template + err := unmarshalAnnotationField(path, common.AnnotationKeyTemplate, &tmpl) + if err != nil { + return nil, err + } + return &tmpl, nil +} + // unmarshalAnnotationField unmarshals the value of an annotation key into the supplied interface // from the downward api annotation volume file func unmarshalAnnotationField(filePath string, key string, into interface{}) error { diff --git a/workflow/executor/k8sapi/k8sapi.go b/workflow/executor/k8sapi/k8sapi.go index 6f3fd932f705..8c44ef06e548 100644 --- a/workflow/executor/k8sapi/k8sapi.go +++ b/workflow/executor/k8sapi/k8sapi.go @@ -1,10 +1,13 @@ package k8sapi import ( - "github.com/argoproj/argo/errors" + "io" + log "github.com/sirupsen/logrus" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + + "github.com/argoproj/argo/errors" ) type K8sAPIExecutor struct { @@ -30,17 +33,16 @@ func (k *K8sAPIExecutor) CopyFile(containerID string, sourcePath string, destPat return errors.Errorf(errors.CodeNotImplemented, "CopyFile() is not implemented in the k8sapi executor.") } -// GetOutput returns the entirety of the container output as a string -// Used to capturing script results as an output parameter -func (k *K8sAPIExecutor) GetOutput(containerID string) (string, error) { +func (k *K8sAPIExecutor) GetOutputStream(containerID string, combinedOutput bool) (io.ReadCloser, error) { log.Infof("Getting output of %s", containerID) - return k.client.getLogs(containerID) + if !combinedOutput { + log.Warn("non combined output unsupported") + } + return k.client.getLogsAsStream(containerID) } -// Logs copies logs to a given path -func (k *K8sAPIExecutor) Logs(containerID, path string) error { - log.Infof("Saving output of %s to %s", containerID, path) - return k.client.saveLogs(containerID, path) +func (k *K8sAPIExecutor) WaitInit() error { + return nil } // Wait for the container to complete diff --git a/workflow/executor/kubelet/client.go b/workflow/executor/kubelet/client.go index 49730c187535..4af47c6836b6 100644 --- a/workflow/executor/kubelet/client.go +++ b/workflow/executor/kubelet/client.go @@ -15,8 +15,6 @@ import ( "syscall" "time" - "github.com/argoproj/argo/util" - "github.com/argoproj/argo/errors" "github.com/argoproj/argo/workflow/common" execcommon "github.com/argoproj/argo/workflow/executor/common" @@ -127,6 +125,26 @@ func (k *kubeletClient) getPodList() (*v1.PodList, error) { return podList, resp.Body.Close() } +func (k *kubeletClient) GetLogStream(containerID string) (io.ReadCloser, error) { + podList, err := k.getPodList() + if err != nil { + return nil, err + } + for _, pod := range podList.Items { + for _, container := range pod.Status.ContainerStatuses { + if execcommon.GetContainerID(&container) != containerID { + continue + } + resp, err := k.doRequestLogs(pod.Namespace, pod.Name, container.Name) + if err != nil { + return nil, err + } + return resp.Body, nil + } + } + return nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) +} + func (k *kubeletClient) doRequestLogs(namespace, podName, containerName string) (*http.Response, error) { u, err := url.ParseRequestURI(fmt.Sprintf("https://%s/containerLogs/%s/%s/%s", k.kubeletEndpoint, namespace, podName, containerName)) if err != nil { @@ -147,38 +165,6 @@ func (k *kubeletClient) doRequestLogs(namespace, podName, containerName string) return resp, nil } -func (k *kubeletClient) getLogs(namespace, podName, containerName string) (string, 
error) { - resp, err := k.doRequestLogs(namespace, podName, containerName) - if resp != nil { - defer func() { _ = resp.Body.Close() }() - } - if err != nil { - return "", err - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", errors.InternalWrapError(err) - } - return string(b), resp.Body.Close() -} - -func (k *kubeletClient) saveLogsToFile(namespace, podName, containerName, path string) error { - resp, err := k.doRequestLogs(namespace, podName, containerName) - if resp != nil { - defer func() { _ = resp.Body.Close() }() - } - if err != nil { - return err - } - outFile, err := os.Create(path) - if err != nil { - return errors.InternalWrapError(err) - } - defer util.Close(outFile) - _, err = io.Copy(outFile, resp.Body) - return err -} - func (k *kubeletClient) getContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error) { podList, err := k.getPodList() if err != nil { @@ -195,38 +181,6 @@ func (k *kubeletClient) getContainerStatus(containerID string) (*v1.Pod, *v1.Con return nil, nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) } -func (k *kubeletClient) GetContainerLogs(containerID string) (string, error) { - podList, err := k.getPodList() - if err != nil { - return "", errors.InternalWrapError(err) - } - for _, pod := range podList.Items { - for _, container := range pod.Status.ContainerStatuses { - if execcommon.GetContainerID(&container) != containerID { - continue - } - return k.getLogs(pod.Namespace, pod.Name, container.Name) - } - } - return "", errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) -} - -func (k *kubeletClient) SaveLogsToFile(containerID, path string) error { - podList, err := k.getPodList() - if err != nil { - return errors.InternalWrapError(err) - } - for _, pod := range podList.Items { - for _, container := range pod.Status.ContainerStatuses { - if execcommon.GetContainerID(&container) != containerID { - continue - } - return k.saveLogsToFile(pod.Namespace, pod.Name, container.Name, path) - } - } - return errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) -} - func (k *kubeletClient) exec(u *url.URL) (*url.URL, error) { _, resp, err := k.websocketDialer.Dial(u.String(), k.httpHeader) if resp == nil { diff --git a/workflow/executor/kubelet/kubelet.go b/workflow/executor/kubelet/kubelet.go index 6cd8f9a482f0..cf5115e1b274 100644 --- a/workflow/executor/kubelet/kubelet.go +++ b/workflow/executor/kubelet/kubelet.go @@ -1,8 +1,11 @@ package kubelet import ( - "github.com/argoproj/argo/errors" + "io" + log "github.com/sirupsen/logrus" + + "github.com/argoproj/argo/errors" ) type KubeletExecutor struct { @@ -28,15 +31,15 @@ func (k *KubeletExecutor) CopyFile(containerID string, sourcePath string, destPa return errors.Errorf(errors.CodeNotImplemented, "CopyFile() is not implemented in the kubelet executor.") } -// GetOutput returns the entirety of the container output as a string -// Used to capturing script results as an output parameter -func (k *KubeletExecutor) GetOutput(containerID string) (string, error) { - return k.cli.GetContainerLogs(containerID) +func (k *KubeletExecutor) GetOutputStream(containerID string, combinedOutput bool) (io.ReadCloser, error) { + if !combinedOutput { + log.Warn("non combined output unsupported") + } + return k.cli.GetLogStream(containerID) } -// Logs copies logs to a given path -func (k *KubeletExecutor) Logs(containerID, path string) 
error { - return k.cli.SaveLogsToFile(containerID, path) +func (k *KubeletExecutor) WaitInit() error { + return nil } // Wait for the container to complete diff --git a/workflow/executor/mocks/ContainerRuntimeExecutor.go b/workflow/executor/mocks/ContainerRuntimeExecutor.go index df574d2da817..55046f8fe877 100644 --- a/workflow/executor/mocks/ContainerRuntimeExecutor.go +++ b/workflow/executor/mocks/ContainerRuntimeExecutor.go @@ -1,6 +1,8 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v1.0.0. DO NOT EDIT. + package mocks +import io "io" import mock "github.com/stretchr/testify/mock" // ContainerRuntimeExecutor is an autogenerated mock type for the ContainerRuntimeExecutor type @@ -43,20 +45,22 @@ func (_m *ContainerRuntimeExecutor) GetFileContents(containerID string, sourcePa return r0, r1 } -// GetOutput provides a mock function with given fields: containerID -func (_m *ContainerRuntimeExecutor) GetOutput(containerID string) (string, error) { - ret := _m.Called(containerID) +// GetOutputStream provides a mock function with given fields: containerID, combinedOutput +func (_m *ContainerRuntimeExecutor) GetOutputStream(containerID string, combinedOutput bool) (io.ReadCloser, error) { + ret := _m.Called(containerID, combinedOutput) - var r0 string - if rf, ok := ret.Get(0).(func(string) string); ok { - r0 = rf(containerID) + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(string, bool) io.ReadCloser); ok { + r0 = rf(containerID, combinedOutput) } else { - r0 = ret.Get(0).(string) + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(containerID) + if rf, ok := ret.Get(1).(func(string, bool) error); ok { + r1 = rf(containerID, combinedOutput) } else { r1 = ret.Error(1) } @@ -78,13 +82,13 @@ func (_m *ContainerRuntimeExecutor) Kill(containerIDs []string) error { return r0 } -// Logs provides a mock function with given fields: containerID, path -func (_m *ContainerRuntimeExecutor) Logs(containerID string, path string) error { - ret := _m.Called(containerID, path) +// Wait provides a mock function with given fields: containerID +func (_m *ContainerRuntimeExecutor) Wait(containerID string) error { + ret := _m.Called(containerID) var r0 error - if rf, ok := ret.Get(0).(func(string, string) error); ok { - r0 = rf(containerID, path) + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(containerID) } else { r0 = ret.Error(0) } @@ -92,13 +96,13 @@ func (_m *ContainerRuntimeExecutor) Logs(containerID string, path string) error return r0 } -// Wait provides a mock function with given fields: containerID -func (_m *ContainerRuntimeExecutor) Wait(containerID string) error { - ret := _m.Called(containerID) +// WaitInit provides a mock function with given fields: +func (_m *ContainerRuntimeExecutor) WaitInit() error { + ret := _m.Called() var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { - r0 = rf(containerID) + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() } else { r0 = ret.Error(0) } diff --git a/workflow/executor/pns/pns.go b/workflow/executor/pns/pns.go new file mode 100644 index 000000000000..3b6412ddf248 --- /dev/null +++ b/workflow/executor/pns/pns.go @@ -0,0 +1,385 @@ +package pns + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + "syscall" + "time" + + executil "github.com/argoproj/pkg/exec" + gops "github.com/mitchellh/go-ps" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + 
+ "github.com/argoproj/argo/errors" + "github.com/argoproj/argo/util/archive" + "github.com/argoproj/argo/workflow/common" + execcommon "github.com/argoproj/argo/workflow/executor/common" +) + +type PNSExecutor struct { + clientset *kubernetes.Clientset + podName string + namespace string + + // ctrIDToPid maps a containerID to a process ID + ctrIDToPid map[string]int + // pidToCtrID maps a process ID to a container ID + pidToCtrID map[int]string + + // pidFileHandles holds file handles to all root containers + pidFileHandles map[int]*fileInfo + + // thisPID is the pid of this process + thisPID int + // mainPID holds the main container's pid + mainPID int + // mainFS holds a file descriptor to the main filesystem, allowing the executor to access the + // filesystem after the main process exited + mainFS *os.File + // rootFS holds a file descriptor to the root filesystem, allowing the executor to exit out of a chroot + rootFS *os.File + // debug enables additional debugging + debug bool + // hasOutputs indicates if the template has outputs. determines if we need to + hasOutputs bool +} + +type fileInfo struct { + file os.File + info os.FileInfo +} + +func NewPNSExecutor(clientset *kubernetes.Clientset, podName, namespace string, hasOutputs bool) (*PNSExecutor, error) { + thisPID := os.Getpid() + log.Infof("Creating PNS executor (namespace: %s, pod: %s, pid: %d, hasOutputs: %v)", namespace, podName, thisPID, hasOutputs) + if thisPID == 1 { + return nil, errors.New(errors.CodeBadRequest, "process namespace sharing is not enabled on pod") + } + return &PNSExecutor{ + clientset: clientset, + podName: podName, + namespace: namespace, + ctrIDToPid: make(map[string]int), + pidToCtrID: make(map[int]string), + pidFileHandles: make(map[int]*fileInfo), + thisPID: thisPID, + debug: log.GetLevel() == log.DebugLevel, + hasOutputs: hasOutputs, + }, nil +} + +func (p *PNSExecutor) GetFileContents(containerID string, sourcePath string) (string, error) { + err := p.enterChroot() + if err != nil { + return "", err + } + defer func() { _ = p.exitChroot() }() + out, err := ioutil.ReadFile(sourcePath) + if err != nil { + return "", err + } + return string(out), nil +} + +// enterChroot enters chroot of the main container +func (p *PNSExecutor) enterChroot() error { + if p.mainFS == nil { + return errors.InternalErrorf("could not chroot into main for artifact collection: container may have exited too quickly") + } + if err := p.mainFS.Chdir(); err != nil { + return errors.InternalWrapErrorf(err, "failed to chdir to main filesystem: %v", err) + } + err := syscall.Chroot(".") + if err != nil { + return errors.InternalWrapErrorf(err, "failed to chroot to main filesystem: %v", err) + } + return nil +} + +// exitChroot exits chroot +func (p *PNSExecutor) exitChroot() error { + if err := p.rootFS.Chdir(); err != nil { + return errors.InternalWrapError(err) + } + err := syscall.Chroot(".") + if err != nil { + return errors.InternalWrapError(err) + } + return nil +} + +// CopyFile copies a source file in a container to a local path +func (p *PNSExecutor) CopyFile(containerID string, sourcePath string, destPath string) (err error) { + destFile, err := os.Create(destPath) + if err != nil { + return err + } + defer func() { + // exit chroot and close the file. 
+// CopyFile copies a source file in a container to a local path
+func (p *PNSExecutor) CopyFile(containerID string, sourcePath string, destPath string) (err error) {
+	destFile, err := os.Create(destPath)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		// exit chroot and close the file, preserving the original error
+		deferErr := p.exitChroot()
+		if err == nil && deferErr != nil {
+			err = errors.InternalWrapError(deferErr)
+		}
+		deferErr = destFile.Close()
+		if err == nil && deferErr != nil {
+			err = errors.InternalWrapError(deferErr)
+		}
+	}()
+	w := bufio.NewWriter(destFile)
+	err = p.enterChroot()
+	if err != nil {
+		return err
+	}
+
+	err = archive.TarGzToWriter(sourcePath, w)
+	if err != nil {
+		return err
+	}
+
+	// flush buffered data to the file before the deferred Close
+	return w.Flush()
+}
+
+func (p *PNSExecutor) WaitInit() error {
+	if !p.hasOutputs {
+		return nil
+	}
+	go p.pollRootProcesses(time.Minute)
+	// Secure a filehandle on our own root. This is because we will chroot back and forth from
+	// the main container's filesystem, to our own.
+	rootFS, err := os.Open("/")
+	if err != nil {
+		return errors.InternalWrapError(err)
+	}
+	p.rootFS = rootFS
+	return nil
+}
+
+// Wait for the container to complete
+func (p *PNSExecutor) Wait(containerID string) error {
+	mainPID, err := p.getContainerPID(containerID)
+	if err != nil {
+		if !p.hasOutputs {
+			log.Warnf("Ignoring wait failure: %v. Process assumed to have completed", err)
+			return nil
+		}
+		return err
+	}
+	log.Infof("Main pid identified as %d", mainPID)
+	p.mainPID = mainPID
+	for pid, f := range p.pidFileHandles {
+		if pid == p.mainPID {
+			log.Info("Successfully secured file handle on main container root filesystem")
+			p.mainFS = &f.file
+		} else {
+			log.Infof("Closing root filehandle for non-main pid %d", pid)
+			_ = f.file.Close()
+		}
+	}
+	if p.mainFS == nil {
+		log.Warn("Failed to secure file handle on main container's root filesystem. Output artifacts from base image layer will fail")
+	}
+
+	// wait for pid to complete
+	log.Infof("Waiting for main pid %d to complete", mainPID)
+	err = executil.WaitPID(mainPID)
+	if err != nil {
+		return err
+	}
+	log.Infof("Main pid %d completed", mainPID)
+	return nil
+}
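Wait leans on executil.WaitPID from github.com/argoproj/pkg/exec because the main container's process is not a child of the executor, so the usual os/exec wait is unavailable. A simplified, Linux-only stand-in for that polling loop, compilable with this file's imports (signal 0 probes for existence without delivering a signal; EPERM handling is omitted for brevity):

    // waitNonChildPID polls for the termination of a process we did not spawn.
    // kill(pid, 0) starts failing with ESRCH once the pid is gone.
    func waitNonChildPID(pid int, interval time.Duration) {
    	for {
    		if err := syscall.Kill(pid, syscall.Signal(0)); err == syscall.ESRCH {
    			return
    		}
    		time.Sleep(interval)
    	}
    }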
+// pollRootProcesses will poll /proc for root pids (pids without parents) in a tight loop, for the
+// purpose of securing an open file handle against /proc/<pid>/root as soon as possible.
+// It opens file handles on all root pids because at this point, we do not yet know which pid is the
+// "main" container.
+// Polling is necessary because it is not possible to use something like fsnotify against procfs.
+func (p *PNSExecutor) pollRootProcesses(timeout time.Duration) {
+	log.Warnf("Polling root processes (%v)", timeout)
+	deadline := time.Now().Add(timeout)
+	for {
+		p.updateCtrIDMap()
+		if p.mainFS != nil {
+			log.Info("Stopped root processes polling due to successful securing of main root fs")
+			break
+		}
+		if time.Now().After(deadline) {
+			log.Warnf("Polling root processes timed out (%v)", timeout)
+			break
+		}
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+func (p *PNSExecutor) GetOutputStream(containerID string, combinedOutput bool) (io.ReadCloser, error) {
+	if !combinedOutput {
+		log.Warn("non combined output unsupported")
+	}
+	opts := v1.PodLogOptions{
+		Container: common.MainContainerName,
+	}
+	return p.clientset.CoreV1().Pods(p.namespace).GetLogs(p.podName, &opts).Stream()
+}
+
+// Kill a list of containerIDs first with a SIGTERM then with a SIGKILL after a grace period
+func (p *PNSExecutor) Kill(containerIDs []string) error {
+	var asyncErr error
+	wg := sync.WaitGroup{}
+	for _, cid := range containerIDs {
+		wg.Add(1)
+		go func(containerID string) {
+			err := p.killContainer(containerID)
+			if err != nil && asyncErr == nil {
+				// record only the first error encountered
+				asyncErr = err
+			}
+			wg.Done()
+		}(cid)
+	}
+	wg.Wait()
+	return asyncErr
+}
+
+func (p *PNSExecutor) killContainer(containerID string) error {
+	pid, err := p.getContainerPID(containerID)
+	if err != nil {
+		log.Warnf("Ignoring kill container failure of %s: %v. Process assumed to have completed", containerID, err)
+		return nil
+	}
+	// On Unix systems, FindProcess always succeeds and returns a Process
+	// for the given pid, regardless of whether the process exists.
+	proc, _ := os.FindProcess(pid)
+	log.Infof("Sending SIGTERM to pid %d", pid)
+	err = proc.Signal(syscall.SIGTERM)
+	if err != nil {
+		log.Warnf("Failed to SIGTERM pid %d: %v", pid, err)
+	}
+
+	waitPIDOpts := executil.WaitPIDOpts{Timeout: execcommon.KillGracePeriod * time.Second}
+	err = executil.WaitPID(pid, waitPIDOpts)
+	if err == nil {
+		log.Infof("PID %d completed", pid)
+		return nil
+	}
+	if err != executil.ErrWaitPIDTimeout {
+		return err
+	}
+	log.Warnf("Timed out (%v) waiting for pid %d to complete after SIGTERM. Issuing SIGKILL", waitPIDOpts.Timeout, pid)
+	err = proc.Signal(syscall.SIGKILL)
+	if err != nil {
+		log.Warnf("Failed to SIGKILL pid %d: %v", pid, err)
+	}
+	return err
+}
+
+// getContainerPID returns the pid associated with the container id.
Returns error if it was unable +// to be determined because no running root processes exist with that container ID +func (p *PNSExecutor) getContainerPID(containerID string) (int, error) { + pid, ok := p.ctrIDToPid[containerID] + if ok { + return pid, nil + } + p.updateCtrIDMap() + pid, ok = p.ctrIDToPid[containerID] + if !ok { + return -1, errors.InternalErrorf("Failed to determine pid for containerID %s: container may have exited too quickly", containerID) + } + return pid, nil +} + +// updateCtrIDMap updates the mapping between container IDs to PIDs +func (p *PNSExecutor) updateCtrIDMap() { + allProcs, err := gops.Processes() + if err != nil { + log.Warnf("Failed to list processes: %v", err) + return + } + for _, proc := range allProcs { + pid := proc.Pid() + if pid == 1 || pid == p.thisPID || proc.PPid() != 0 { + // ignore the pause container, our own pid, and non-root processes + continue + } + + // Useful code for debugging: + if p.debug { + if data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/root", pid) + "/etc/os-release"); err == nil { + log.Infof("pid %d: %s", pid, string(data)) + _, _ = parseContainerID(pid) + } + } + + if p.hasOutputs && p.mainFS == nil { + rootPath := fmt.Sprintf("/proc/%d/root", pid) + currInfo, err := os.Stat(rootPath) + if err != nil { + log.Warnf("Failed to stat %s: %v", rootPath, err) + continue + } + log.Infof("pid %d: %v", pid, currInfo) + prevInfo := p.pidFileHandles[pid] + + // Secure the root filehandle of the process. NOTE if the file changed, it means that + // the main container may have switched (e.g. gone from busybox to the user's container) + if prevInfo == nil || !os.SameFile(prevInfo.info, currInfo) { + fs, err := os.Open(rootPath) + if err != nil { + log.Warnf("Failed to open %s: %v", rootPath, err) + continue + } + log.Infof("Secured filehandle on %s", rootPath) + p.pidFileHandles[pid] = &fileInfo{ + info: currInfo, + file: *fs, + } + if prevInfo != nil { + _ = prevInfo.file.Close() + } + } + } + + // Update maps of pids to container ids + if _, ok := p.pidToCtrID[pid]; !ok { + containerID, err := parseContainerID(pid) + if err != nil { + log.Warnf("Failed to identify containerID for process %d", pid) + continue + } + log.Infof("containerID %s mapped to pid %d", containerID, pid) + p.ctrIDToPid[containerID] = pid + p.pidToCtrID[pid] = containerID + } + } +} + +// parseContainerID parses the containerID of a pid +func parseContainerID(pid int) (string, error) { + cgroupPath := fmt.Sprintf("/proc/%d/cgroup", pid) + cgroupFile, err := os.OpenFile(cgroupPath, os.O_RDONLY, os.ModePerm) + if err != nil { + return "", errors.InternalWrapError(err) + } + defer func() { _ = cgroupFile.Close() }() + sc := bufio.NewScanner(cgroupFile) + for sc.Scan() { + // See https://www.systutorials.com/docs/linux/man/5-proc/ for /proc/XX/cgroup format. 
e.g.: + // 5:cpuacct,cpu,cpuset:/daemons + line := sc.Text() + log.Debugf("pid %d: %s", pid, line) + parts := strings.Split(line, "/") + if len(parts) > 1 { + if containerID := parts[len(parts)-1]; containerID != "" { + // need to check for empty string because the line may look like: 5:rdma:/ + return containerID, nil + } + } + } + return "", errors.InternalErrorf("Failed to parse container ID from %s", cgroupPath) +} diff --git a/workflow/metrics/collector.go b/workflow/metrics/collector.go index 960074d73cc4..40a3d2e30457 100644 --- a/workflow/metrics/collector.go +++ b/workflow/metrics/collector.go @@ -112,16 +112,12 @@ func (wc *workflowCollector) collectWorkflow(ch chan<- prometheus.Metric, wf wfv addGauge(descWorkflowInfo, 1, wf.Spec.Entrypoint, wf.Spec.ServiceAccountName, joinTemplates(wf.Spec.Templates)) - if phase := wf.Status.Phase; phase != "" { - // TODO: we do not have queuing feature yet so are not adding to a 'Pending' guague. - // Uncomment when we support queueing. - //addGauge(descWorkflowStatusPhase, boolFloat64(phase == wfv1.NodePending), string(wfv1.NodePending)) - addGauge(descWorkflowStatusPhase, boolFloat64(phase == wfv1.NodeRunning), string(wfv1.NodeRunning)) - addGauge(descWorkflowStatusPhase, boolFloat64(phase == wfv1.NodeSucceeded), string(wfv1.NodeSucceeded)) - addGauge(descWorkflowStatusPhase, boolFloat64(phase == wfv1.NodeSkipped), string(wfv1.NodeSkipped)) - addGauge(descWorkflowStatusPhase, boolFloat64(phase == wfv1.NodeFailed), string(wfv1.NodeFailed)) - addGauge(descWorkflowStatusPhase, boolFloat64(phase == wfv1.NodeError), string(wfv1.NodeError)) - } + addGauge(descWorkflowStatusPhase, boolFloat64(wf.Status.Phase == wfv1.NodePending || wf.Status.Phase == ""), string(wfv1.NodePending)) + addGauge(descWorkflowStatusPhase, boolFloat64(wf.Status.Phase == wfv1.NodeRunning), string(wfv1.NodeRunning)) + addGauge(descWorkflowStatusPhase, boolFloat64(wf.Status.Phase == wfv1.NodeSucceeded), string(wfv1.NodeSucceeded)) + addGauge(descWorkflowStatusPhase, boolFloat64(wf.Status.Phase == wfv1.NodeSkipped), string(wfv1.NodeSkipped)) + addGauge(descWorkflowStatusPhase, boolFloat64(wf.Status.Phase == wfv1.NodeFailed), string(wfv1.NodeFailed)) + addGauge(descWorkflowStatusPhase, boolFloat64(wf.Status.Phase == wfv1.NodeError), string(wfv1.NodeError)) if !wf.CreationTimestamp.IsZero() { addGauge(descWorkflowCreated, float64(wf.CreationTimestamp.Unix())) diff --git a/workflow/util/util.go b/workflow/util/util.go index fc25a6198662..842939aa3265 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -26,6 +26,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" + "k8s.io/utils/pointer" "github.com/argoproj/argo/errors" "github.com/argoproj/argo/pkg/apis/workflow" @@ -239,7 +240,7 @@ func SubmitWorkflow(wfIf v1alpha1.WorkflowInterface, wf *wfv1.Workflow, opts *Su wf.SetOwnerReferences(append(wf.GetOwnerReferences(), *opts.OwnerReference)) } - err := validate.ValidateWorkflow(wf) + err := validate.ValidateWorkflow(wf, validate.ValidateOpts{}) if err != nil { return nil, err } @@ -257,8 +258,7 @@ func SuspendWorkflow(wfIf v1alpha1.WorkflowInterface, workflowName string) error return false, errSuspendedCompletedWorkflow } if wf.Spec.Suspend == nil || *wf.Spec.Suspend != true { - t := true - wf.Spec.Suspend = &t + wf.Spec.Suspend = pointer.BoolPtr(true) wf, err = wfIf.Update(wf) if err != nil { if apierr.IsConflict(err) { diff --git a/workflow/validate/lint.go b/workflow/validate/lint.go index e14414e6cce6..d2b0d9327c73 
100644 --- a/workflow/validate/lint.go +++ b/workflow/validate/lint.go @@ -5,11 +5,12 @@ import ( "os" "path/filepath" + "github.com/argoproj/pkg/json" + "github.com/argoproj/argo/errors" "github.com/argoproj/argo/pkg/apis/workflow" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/workflow/common" - "github.com/argoproj/pkg/json" ) // LintWorkflowDir validates all workflow manifests in a directory. Ignores non-workflow manifests @@ -60,7 +61,7 @@ func LintWorkflowFile(filePath string, strict bool) error { return errors.Errorf(errors.CodeBadRequest, "%s failed to parse: %v", filePath, err) } for _, wf := range workflows { - err = ValidateWorkflow(&wf, true) + err = ValidateWorkflow(&wf, ValidateOpts{Lint: true}) if err != nil { return errors.Errorf(errors.CodeBadRequest, "%s: %s", filePath, err.Error()) } diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index acaabc593eae..007ae4b7d86a 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -8,16 +8,31 @@ import ( "regexp" "strings" + "github.com/valyala/fasttemplate" + apivalidation "k8s.io/apimachinery/pkg/util/validation" + "github.com/argoproj/argo/errors" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/workflow/artifacts/hdfs" "github.com/argoproj/argo/workflow/common" - "github.com/valyala/fasttemplate" - apivalidation "k8s.io/apimachinery/pkg/util/validation" ) +// ValidateOpts provides options when linting +type ValidateOpts struct { + // Lint indicates if this is performing validation in the context of linting. If true, will + // skip some validations which is permissible during linting but not submission (e.g. missing + // input parameters to the workflow) + Lint bool + // ContainerRuntimeExecutor will trigger additional validation checks specific to different + // types of executors. For example, the inability of kubelet/k8s executors to copy artifacts + // out of the base image layer. If unspecified, will use docker executor validation + ContainerRuntimeExecutor string +} + // wfValidationCtx is the context for validating a workflow spec type wfValidationCtx struct { + ValidateOpts + wf *wfv1.Workflow // globalParams keeps track of variables which are available the global // scope and can be referenced from anywhere. @@ -36,21 +51,19 @@ const ( anyItemMagicValue = "item.*" ) -// ValidateWorkflow accepts a workflow and performs validation against it. If lint is specified as -// true, will skip some validations which is permissible during linting but not submission -func ValidateWorkflow(wf *wfv1.Workflow, lint ...bool) error { +// ValidateWorkflow accepts a workflow and performs validation against it. +func ValidateWorkflow(wf *wfv1.Workflow, opts ValidateOpts) error { ctx := wfValidationCtx{ + ValidateOpts: opts, wf: wf, globalParams: make(map[string]string), results: make(map[string]bool), } - linting := len(lint) > 0 && lint[0] - err := validateWorkflowFieldNames(wf.Spec.Templates) if err != nil { return errors.Errorf(errors.CodeBadRequest, "spec.templates%s", err.Error()) } - if linting { + if ctx.Lint { // if we are just linting we don't care if spec.arguments.parameters.XXX doesn't have an // explicit value. 
workflows without a default value are a desired use case
 		err = validateArgumentsFieldNames("spec.arguments.", wf.Spec.Arguments)
@@ -154,6 +167,10 @@ func (ctx *wfValidationCtx) validateTemplate(tmpl *wfv1.Template, args wfv1.Argu
 	if err != nil {
 		return err
 	}
+	err = ctx.validateBaseImageOutputs(tmpl)
+	if err != nil {
+		return err
+	}
 	if tmpl.ArchiveLocation != nil {
 		err = validateArtifactLocation("templates.archiveLocation", *tmpl.ArchiveLocation)
 		if err != nil {
@@ -551,6 +568,51 @@ func validateOutputs(scope map[string]interface{}, tmpl *wfv1.Template) error {
 	return nil
 }

+// validateBaseImageOutputs detects if the template contains an output from the base image layer,
+// which not all container runtime executors support
+func (ctx *wfValidationCtx) validateBaseImageOutputs(tmpl *wfv1.Template) error {
+	switch ctx.ContainerRuntimeExecutor {
+	case "", common.ContainerRuntimeExecutorDocker:
+		// docker executor supports all modes of artifact outputs
+	case common.ContainerRuntimeExecutorPNS:
+		// pns supports copying from the base image, but only if there is no volume mount underneath it
+		errMsg := "pns executor does not support outputs from base image layer with volume mounts. must use emptyDir"
+		for _, out := range tmpl.Outputs.Artifacts {
+			if common.FindOverlappingVolume(tmpl, out.Path) == nil {
+				// output is in the base image layer. need to verify there are no volume mounts under it
+				if tmpl.Container != nil {
+					for _, volMnt := range tmpl.Container.VolumeMounts {
+						if strings.HasPrefix(volMnt.MountPath, out.Path+"/") {
+							return errors.Errorf(errors.CodeBadRequest, "templates.%s.outputs.artifacts.%s: %s", tmpl.Name, out.Name, errMsg)
+						}
+					}
+				}
+				if tmpl.Script != nil {
+					for _, volMnt := range tmpl.Script.VolumeMounts {
+						if strings.HasPrefix(volMnt.MountPath, out.Path+"/") {
+							return errors.Errorf(errors.CodeBadRequest, "templates.%s.outputs.artifacts.%s: %s", tmpl.Name, out.Name, errMsg)
+						}
+					}
+				}
+			}
+		}
+	case common.ContainerRuntimeExecutorK8sAPI, common.ContainerRuntimeExecutorKubelet:
+		// for kubelet/k8s fail validation if we detect artifact is copied from base image layer
+		errMsg := fmt.Sprintf("%s executor does not support outputs from base image layer. 
must use emptyDir", ctx.ContainerRuntimeExecutor) + for _, out := range tmpl.Outputs.Artifacts { + if common.FindOverlappingVolume(tmpl, out.Path) == nil { + return errors.Errorf(errors.CodeBadRequest, "templates.%s.outputs.artifacts.%s: %s", tmpl.Name, out.Name, errMsg) + } + } + for _, out := range tmpl.Outputs.Parameters { + if out.ValueFrom != nil && common.FindOverlappingVolume(tmpl, out.ValueFrom.Path) == nil { + return errors.Errorf(errors.CodeBadRequest, "templates.%s.outputs.parameters.%s: %s", tmpl.Name, out.Name, errMsg) + } + } + } + return nil +} + // validateOutputParameter verifies that only one of valueFrom is defined in an output func validateOutputParameter(paramRef string, param *wfv1.Parameter) error { if param.ValueFrom == nil { diff --git a/workflow/validate/validate_test.go b/workflow/validate/validate_test.go index fd9eee10bc72..25774083052b 100644 --- a/workflow/validate/validate_test.go +++ b/workflow/validate/validate_test.go @@ -3,17 +3,19 @@ package validate import ( "testing" - wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo/test" "github.com/ghodss/yaml" "github.com/stretchr/testify/assert" + + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/test" + "github.com/argoproj/argo/workflow/common" ) // validate is a test helper to accept YAML as a string and return // its validation result. func validate(yamlStr string) error { wf := unmarshalWf(yamlStr) - return ValidateWorkflow(wf) + return ValidateWorkflow(wf, ValidateOpts{}) } func unmarshalWf(yamlStr string) *wfv1.Workflow { @@ -819,13 +821,13 @@ spec: func TestVolumeMountArtifactPathCollision(t *testing.T) { // ensure we detect and reject path collisions wf := unmarshalWf(volumeMountArtifactPathCollision) - err := ValidateWorkflow(wf) + err := ValidateWorkflow(wf, ValidateOpts{}) if assert.NotNil(t, err) { assert.Contains(t, err.Error(), "already mounted") } // tweak the mount path and validation should now be successful wf.Spec.Templates[0].Container.VolumeMounts[0].MountPath = "/differentpath" - err = ValidateWorkflow(wf) + err = ValidateWorkflow(wf, ValidateOpts{}) assert.Nil(t, err) } @@ -1111,7 +1113,7 @@ func TestPodNameVariable(t *testing.T) { } func TestGlobalParamWithVariable(t *testing.T) { - err := ValidateWorkflow(test.LoadE2EWorkflow("functional/global-outputs-variable.yaml")) + err := ValidateWorkflow(test.LoadE2EWorkflow("functional/global-outputs-variable.yaml"), ValidateOpts{}) assert.Nil(t, err) } @@ -1136,9 +1138,9 @@ spec: // TestSpecArgumentNoValue we allow parameters to have no value at the spec level during linting func TestSpecArgumentNoValue(t *testing.T) { wf := unmarshalWf(specArgumentNoValue) - err := ValidateWorkflow(wf, true) + err := ValidateWorkflow(wf, ValidateOpts{Lint: true}) assert.Nil(t, err) - err = ValidateWorkflow(wf) + err = ValidateWorkflow(wf, ValidateOpts{}) assert.NotNil(t, err) } @@ -1173,7 +1175,7 @@ spec: // TestSpecArgumentSnakeCase we allow parameter and artifact names to be snake case func TestSpecArgumentSnakeCase(t *testing.T) { wf := unmarshalWf(specArgumentSnakeCase) - err := ValidateWorkflow(wf, true) + err := ValidateWorkflow(wf, ValidateOpts{Lint: true}) assert.Nil(t, err) } @@ -1203,13 +1205,12 @@ spec: container: image: alpine:latest command: [echo, "{{inputs.parameters.num}}"] - ` // TestSpecBadSequenceCountAndEnd verifies both count and end cannot be defined func TestSpecBadSequenceCountAndEnd(t *testing.T) { wf := unmarshalWf(specBadSequenceCountAndEnd) - err := 
ValidateWorkflow(wf, true) + err := ValidateWorkflow(wf, ValidateOpts{Lint: true}) assert.Error(t, err) } @@ -1229,6 +1230,129 @@ spec: // TestCustomTemplatVariable verifies custom template variable func TestCustomTemplatVariable(t *testing.T) { wf := unmarshalWf(customVariableInput) - err := ValidateWorkflow(wf, true) + err := ValidateWorkflow(wf, ValidateOpts{Lint: true}) assert.Equal(t, err, nil) } + +var baseImageOutputArtifact = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: base-image-out-art- +spec: + entrypoint: base-image-out-art + templates: + - name: base-image-out-art + container: + image: alpine:latest + command: [echo, hello] + outputs: + artifacts: + - name: tmp + path: /tmp +` + +var baseImageOutputParameter = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: base-image-out-art- +spec: + entrypoint: base-image-out-art + templates: + - name: base-image-out-art + container: + image: alpine:latest + command: [echo, hello] + outputs: + parameters: + - name: tmp + valueFrom: + path: /tmp/file +` + +var volumeMountOutputArtifact = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: base-image-out-art- +spec: + entrypoint: base-image-out-art + volumes: + - name: workdir + emptyDir: {} + templates: + - name: base-image-out-art + container: + image: alpine:latest + command: [echo, hello] + volumeMounts: + - name: workdir + mountPath: /mnt/vol + outputs: + artifacts: + - name: workdir + path: /mnt/vol +` + +var baseImageDirWithEmptyDirOutputArtifact = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: base-image-out-art- +spec: + entrypoint: base-image-out-art + volumes: + - name: workdir + emptyDir: {} + templates: + - name: base-image-out-art + container: + image: alpine:latest + command: [echo, hello] + volumeMounts: + - name: workdir + mountPath: /mnt/vol + outputs: + artifacts: + - name: workdir + path: /mnt +` + +// TestBaseImageOutputVerify verifies we error when we detect the condition when the container +// runtime executor doesn't support output artifacts from a base image layer, and fails validation +func TestBaseImageOutputVerify(t *testing.T) { + wfBaseOutArt := unmarshalWf(baseImageOutputArtifact) + wfBaseOutParam := unmarshalWf(baseImageOutputParameter) + wfEmptyDirOutArt := unmarshalWf(volumeMountOutputArtifact) + wfBaseWithEmptyDirOutArt := unmarshalWf(baseImageDirWithEmptyDirOutputArtifact) + var err error + + for _, executor := range []string{common.ContainerRuntimeExecutorK8sAPI, common.ContainerRuntimeExecutorKubelet, common.ContainerRuntimeExecutorPNS, common.ContainerRuntimeExecutorDocker, ""} { + switch executor { + case common.ContainerRuntimeExecutorK8sAPI, common.ContainerRuntimeExecutorKubelet: + err = ValidateWorkflow(wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.Error(t, err) + err = ValidateWorkflow(wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.Error(t, err) + err = ValidateWorkflow(wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.Error(t, err) + case common.ContainerRuntimeExecutorPNS: + err = ValidateWorkflow(wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.NoError(t, err) + err = ValidateWorkflow(wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.NoError(t, err) + err = ValidateWorkflow(wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.Error(t, err) + case 
common.ContainerRuntimeExecutorDocker, "": + err = ValidateWorkflow(wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.NoError(t, err) + err = ValidateWorkflow(wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.NoError(t, err) + err = ValidateWorkflow(wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.NoError(t, err) + } + err = ValidateWorkflow(wfEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + assert.NoError(t, err) + } +} From 40f9a87593d312a46f7fa24aaf32e125458cc701 Mon Sep 17 00:00:00 2001 From: Jesse Suen Date: Wed, 10 Apr 2019 03:04:25 -0700 Subject: [PATCH 116/145] Reorganize manifests to kustomize 2 and update version to v2.3.0-rc1 --- VERSION | 2 +- api/openapi-spec/swagger.json | 2 +- hack/update-manifests.sh | 25 +++++++++++++------ .../argo-ui-deployment.yaml} | 4 +-- .../argo-ui-sa.yaml} | 0 .../argo-ui-service.yaml} | 0 manifests/base/argo-ui/kustomization.yaml | 7 ++++++ manifests/base/crds/kustomization.yaml | 5 ++++ .../workflow-crd.yaml} | 0 manifests/base/kustomization.yaml | 15 +++++++++++ .../workflow-controller/kustomization.yaml | 7 ++++++ .../workflow-controller-configmap.yaml} | 0 .../workflow-controller-deployment.yaml} | 6 ++--- .../workflow-controller-sa.yaml} | 0 .../argo-ui-rbac/argo-ui-clusterrole.yaml} | 0 .../argo-ui-clusterrolebinding.yaml} | 0 .../argo-ui-rbac/kustomization.yaml | 6 +++++ manifests/cluster-install/kustomization.yaml | 20 +++++---------- .../kustomization.yaml | 7 ++++++ .../workflow-aggregate-roles.yaml} | 0 .../workflow-controller-clusterrole.yaml} | 0 ...rkflow-controller-clusterrolebinding.yaml} | 0 manifests/install.yaml | 16 ++++-------- manifests/namespace-install.yaml | 12 +++++---- .../argo-ui-role.yaml} | 0 .../argo-ui-rolebinding.yaml} | 0 .../argo-ui-rbac/kustomization.yaml | 6 +++++ .../namespace-install/kustomization.yaml | 24 ++++++++---------- ...eployment.yaml => argo-ui-deployment.yaml} | 2 +- .../workflow-controller-configmap.yaml} | 0 .../kustomization.yaml | 6 +++++ .../workflow-controller-role.yaml} | 0 .../workflow-controller-rolebinding.yaml} | 0 33 files changed, 112 insertions(+), 60 deletions(-) rename manifests/base/{03d_argo-ui-deployment.yaml => argo-ui/argo-ui-deployment.yaml} (89%) rename manifests/base/{03a_argo-ui-sa.yaml => argo-ui/argo-ui-sa.yaml} (100%) rename manifests/base/{03e_argo-ui-service.yaml => argo-ui/argo-ui-service.yaml} (100%) create mode 100644 manifests/base/argo-ui/kustomization.yaml create mode 100644 manifests/base/crds/kustomization.yaml rename manifests/base/{01a_workflow-crd.yaml => crds/workflow-crd.yaml} (100%) create mode 100644 manifests/base/kustomization.yaml create mode 100644 manifests/base/workflow-controller/kustomization.yaml rename manifests/base/{02d_workflow-controller-configmap.yaml => workflow-controller/workflow-controller-configmap.yaml} (100%) rename manifests/base/{02e_workflow-controller-deployment.yaml => workflow-controller/workflow-controller-deployment.yaml} (79%) rename manifests/base/{02a_workflow-controller-sa.yaml => workflow-controller/workflow-controller-sa.yaml} (100%) rename manifests/{base/03b_argo-ui-clusterrole.yaml => cluster-install/argo-ui-rbac/argo-ui-clusterrole.yaml} (100%) rename manifests/{base/03c_argo-ui-clusterrolebinding.yaml => cluster-install/argo-ui-rbac/argo-ui-clusterrolebinding.yaml} (100%) create mode 100644 manifests/cluster-install/argo-ui-rbac/kustomization.yaml create mode 100644 
manifests/cluster-install/workflow-controller-rbac/kustomization.yaml rename manifests/{base/01b_workflow-aggregate-roles.yaml => cluster-install/workflow-controller-rbac/workflow-aggregate-roles.yaml} (100%) rename manifests/{base/02b_workflow-controller-clusterrole.yaml => cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml} (100%) rename manifests/{base/02c_workflow-controller-clusterrolebinding.yaml => cluster-install/workflow-controller-rbac/workflow-controller-clusterrolebinding.yaml} (100%) rename manifests/namespace-install/{03b_argo-ui-role.yaml => argo-ui-rbac/argo-ui-role.yaml} (100%) rename manifests/namespace-install/{03c_argo-ui-rolebinding.yaml => argo-ui-rbac/argo-ui-rolebinding.yaml} (100%) create mode 100644 manifests/namespace-install/argo-ui-rbac/kustomization.yaml rename manifests/namespace-install/overlays/{03d_argo-ui-deployment.yaml => argo-ui-deployment.yaml} (88%) rename manifests/namespace-install/{02d_workflow-controller-configmap.yaml => overlays/workflow-controller-configmap.yaml} (100%) create mode 100644 manifests/namespace-install/workflow-controller-rbac/kustomization.yaml rename manifests/namespace-install/{02b_workflow-controller-role.yaml => workflow-controller-rbac/workflow-controller-role.yaml} (100%) rename manifests/namespace-install/{02c_workflow-controller-rolebinding.yaml => workflow-controller-rbac/workflow-controller-rolebinding.yaml} (100%) diff --git a/VERSION b/VERSION index 276cbf9e2858..d62e3a5a6d34 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.3.0 +2.3.0-rc1 diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index c30116a25eda..d06bb86382d5 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -2,7 +2,7 @@ "swagger": "2.0", "info": { "title": "Argo", - "version": "v2.3.0" + "version": "v2.3.0-rc1" }, "paths": {}, "definitions": { diff --git a/hack/update-manifests.sh b/hack/update-manifests.sh index b24787e3f489..e73c111ddc61 100755 --- a/hack/update-manifests.sh +++ b/hack/update-manifests.sh @@ -1,12 +1,21 @@ -#!/bin/sh +#!/bin/sh -x -e -IMAGE_NAMESPACE=${IMAGE_NAMESPACE:='argoproj'} -IMAGE_TAG=${IMAGE_TAG:='latest'} +SRCROOT="$( CDPATH='' cd -- "$(dirname "$0")/.." && pwd -P )" +AUTOGENMSG="# This is an auto-generated file. DO NOT EDIT" -autogen_warning="# This is an auto-generated file. 
DO NOT EDIT" +IMAGE_NAMESPACE="${IMAGE_NAMESPACE:-argoproj}" +IMAGE_TAG="${IMAGE_TAG:-latest}" -echo $autogen_warning > manifests/install.yaml -kustomize build manifests/cluster-install >> manifests/install.yaml +cd ${SRCROOT}/manifests/base && kustomize edit set image \ + argoproj/workflow-controller=${IMAGE_NAMESPACE}/workflow-controller:${IMAGE_TAG} \ + argoproj/argoui=${IMAGE_NAMESPACE}/argoui:${IMAGE_TAG} -echo $autogen_warning > manifests/namespace-install.yaml -kustomize build manifests/namespace-install >> manifests/namespace-install.yaml +echo "${AUTOGENMSG}" > "${SRCROOT}/manifests/install.yaml" +kustomize build "${SRCROOT}/manifests/cluster-install" >> "${SRCROOT}/manifests/install.yaml" +sed -i.bak "s@- .*/argoexec:.*@- ${IMAGE_NAMESPACE}/argoexec:${IMAGE_TAG}@" "${SRCROOT}/manifests/install.yaml" +rm -f "${SRCROOT}/manifests/install.yaml.bak" + +echo "${AUTOGENMSG}" > "${SRCROOT}/manifests/namespace-install.yaml" +kustomize build "${SRCROOT}/manifests/namespace-install" >> "${SRCROOT}/manifests/namespace-install.yaml" +sed -i.bak "s@- .*/argoexec:.*@- ${IMAGE_NAMESPACE}/argoexec:${IMAGE_TAG}@" "${SRCROOT}/manifests/namespace-install.yaml" +rm -f "${SRCROOT}/manifests/namespace-install.yaml.bak" diff --git a/manifests/base/03d_argo-ui-deployment.yaml b/manifests/base/argo-ui/argo-ui-deployment.yaml similarity index 89% rename from manifests/base/03d_argo-ui-deployment.yaml rename to manifests/base/argo-ui/argo-ui-deployment.yaml index eb3ded67da06..d59d3ef046e0 100644 --- a/manifests/base/03d_argo-ui-deployment.yaml +++ b/manifests/base/argo-ui/argo-ui-deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: argo-ui @@ -14,7 +14,7 @@ spec: serviceAccountName: argo-ui containers: - name: argo-ui - image: argoproj/argoui:v2.2.1 + image: argoproj/argoui:latest env: - name: ARGO_NAMESPACE valueFrom: diff --git a/manifests/base/03a_argo-ui-sa.yaml b/manifests/base/argo-ui/argo-ui-sa.yaml similarity index 100% rename from manifests/base/03a_argo-ui-sa.yaml rename to manifests/base/argo-ui/argo-ui-sa.yaml diff --git a/manifests/base/03e_argo-ui-service.yaml b/manifests/base/argo-ui/argo-ui-service.yaml similarity index 100% rename from manifests/base/03e_argo-ui-service.yaml rename to manifests/base/argo-ui/argo-ui-service.yaml diff --git a/manifests/base/argo-ui/kustomization.yaml b/manifests/base/argo-ui/kustomization.yaml new file mode 100644 index 000000000000..450d53863931 --- /dev/null +++ b/manifests/base/argo-ui/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- argo-ui-deployment.yaml +- argo-ui-sa.yaml +- argo-ui-service.yaml diff --git a/manifests/base/crds/kustomization.yaml b/manifests/base/crds/kustomization.yaml new file mode 100644 index 000000000000..bf13ea938de3 --- /dev/null +++ b/manifests/base/crds/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- workflow-crd.yaml diff --git a/manifests/base/01a_workflow-crd.yaml b/manifests/base/crds/workflow-crd.yaml similarity index 100% rename from manifests/base/01a_workflow-crd.yaml rename to manifests/base/crds/workflow-crd.yaml diff --git a/manifests/base/kustomization.yaml b/manifests/base/kustomization.yaml new file mode 100644 index 000000000000..c6815667188d --- /dev/null +++ b/manifests/base/kustomization.yaml @@ -0,0 +1,15 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: +- crds +- 
workflow-controller
+- argo-ui
+
+images:
+- name: argoproj/argoui
+  newName: argoproj/argoui
+  newTag: v2.3.0-rc1
+- name: argoproj/workflow-controller
+  newName: argoproj/workflow-controller
+  newTag: v2.3.0-rc1
diff --git a/manifests/base/workflow-controller/kustomization.yaml b/manifests/base/workflow-controller/kustomization.yaml
new file mode 100644
index 000000000000..609ba2517695
--- /dev/null
+++ b/manifests/base/workflow-controller/kustomization.yaml
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- workflow-controller-configmap.yaml
+- workflow-controller-deployment.yaml
+- workflow-controller-sa.yaml
diff --git a/manifests/base/02d_workflow-controller-configmap.yaml b/manifests/base/workflow-controller/workflow-controller-configmap.yaml
similarity index 100%
rename from manifests/base/02d_workflow-controller-configmap.yaml
rename to manifests/base/workflow-controller/workflow-controller-configmap.yaml
diff --git a/manifests/base/02e_workflow-controller-deployment.yaml b/manifests/base/workflow-controller/workflow-controller-deployment.yaml
similarity index 79%
rename from manifests/base/02e_workflow-controller-deployment.yaml
rename to manifests/base/workflow-controller/workflow-controller-deployment.yaml
index 5236c6415439..4c96b0e8e286 100644
--- a/manifests/base/02e_workflow-controller-deployment.yaml
+++ b/manifests/base/workflow-controller/workflow-controller-deployment.yaml
@@ -1,4 +1,4 @@
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
kind: Deployment
metadata:
  name: workflow-controller
@@ -14,11 +14,11 @@ spec:
      serviceAccountName: argo
      containers:
      - name: workflow-controller
-        image: argoproj/workflow-controller:v2.2.1
+        image: argoproj/workflow-controller:latest
        command:
        - workflow-controller
        args:
        - --configmap
        - workflow-controller-configmap
        - --executor-image
-        - argoproj/argoexec:v2.2.1
+        - argoproj/argoexec:latest
diff --git a/manifests/base/02a_workflow-controller-sa.yaml b/manifests/base/workflow-controller/workflow-controller-sa.yaml
similarity index 100%
rename from manifests/base/02a_workflow-controller-sa.yaml
rename to manifests/base/workflow-controller/workflow-controller-sa.yaml
diff --git a/manifests/base/03b_argo-ui-clusterrole.yaml b/manifests/cluster-install/argo-ui-rbac/argo-ui-clusterrole.yaml
similarity index 100%
rename from manifests/base/03b_argo-ui-clusterrole.yaml
rename to manifests/cluster-install/argo-ui-rbac/argo-ui-clusterrole.yaml
diff --git a/manifests/base/03c_argo-ui-clusterrolebinding.yaml b/manifests/cluster-install/argo-ui-rbac/argo-ui-clusterrolebinding.yaml
similarity index 100%
rename from manifests/base/03c_argo-ui-clusterrolebinding.yaml
rename to manifests/cluster-install/argo-ui-rbac/argo-ui-clusterrolebinding.yaml
diff --git a/manifests/cluster-install/argo-ui-rbac/kustomization.yaml b/manifests/cluster-install/argo-ui-rbac/kustomization.yaml
new file mode 100644
index 000000000000..1a7199bc9042
--- /dev/null
+++ b/manifests/cluster-install/argo-ui-rbac/kustomization.yaml
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- argo-ui-clusterrole.yaml
+- argo-ui-clusterrolebinding.yaml
diff --git a/manifests/cluster-install/kustomization.yaml b/manifests/cluster-install/kustomization.yaml
index d6d5c37b94a0..e27a756f4746 100644
--- a/manifests/cluster-install/kustomization.yaml
+++ b/manifests/cluster-install/kustomization.yaml
@@ -1,15 +1,7 @@
-namespace: argo
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization

-resources:
-- ../base/01a_workflow-crd.yaml
-- ../base/01b_workflow-aggregate-roles.yaml
-- ../base/02a_workflow-controller-sa.yaml
-- ../base/02b_workflow-controller-clusterrole.yaml
-- ../base/02c_workflow-controller-clusterrolebinding.yaml
-- ../base/02d_workflow-controller-configmap.yaml
-- ../base/02e_workflow-controller-deployment.yaml
-- ../base/03a_argo-ui-sa.yaml
-- ../base/03b_argo-ui-clusterrole.yaml
-- ../base/03c_argo-ui-clusterrolebinding.yaml
-- ../base/03d_argo-ui-deployment.yaml
-- ../base/03e_argo-ui-service.yaml
+bases:
+- ../base
+- ./workflow-controller-rbac
+- ./argo-ui-rbac
diff --git a/manifests/cluster-install/workflow-controller-rbac/kustomization.yaml b/manifests/cluster-install/workflow-controller-rbac/kustomization.yaml
new file mode 100644
index 000000000000..e941cf876ad0
--- /dev/null
+++ b/manifests/cluster-install/workflow-controller-rbac/kustomization.yaml
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- workflow-aggregate-roles.yaml
+- workflow-controller-clusterrole.yaml
+- workflow-controller-clusterrolebinding.yaml
diff --git a/manifests/base/01b_workflow-aggregate-roles.yaml b/manifests/cluster-install/workflow-controller-rbac/workflow-aggregate-roles.yaml
similarity index 100%
rename from manifests/base/01b_workflow-aggregate-roles.yaml
rename to manifests/cluster-install/workflow-controller-rbac/workflow-aggregate-roles.yaml
diff --git a/manifests/base/02b_workflow-controller-clusterrole.yaml b/manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml
similarity index 100%
rename from manifests/base/02b_workflow-controller-clusterrole.yaml
rename to manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml
diff --git a/manifests/base/02c_workflow-controller-clusterrolebinding.yaml b/manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrolebinding.yaml
similarity index 100%
rename from manifests/base/02c_workflow-controller-clusterrolebinding.yaml
rename to manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrolebinding.yaml
diff --git a/manifests/install.yaml b/manifests/install.yaml
index 5a12b2a15c17..e8d5afe5d588 100644
--- a/manifests/install.yaml
+++ b/manifests/install.yaml
@@ -17,13 +17,11 @@ apiVersion: v1
kind: ServiceAccount
metadata:
  name: argo-ui
-  namespace: argo
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argo
-  namespace: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -192,13 +190,11 @@ apiVersion: v1
kind: ConfigMap
metadata:
  name: workflow-controller-configmap
-  namespace: argo
---
apiVersion: v1
kind: Service
metadata:
  name: argo-ui
-  namespace: argo
spec:
  ports:
  - port: 80
@@ -206,11 +202,10 @@ spec:
  selector:
    app: argo-ui
---
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
kind: Deployment
metadata:
  name: argo-ui
-  namespace: argo
spec:
  selector:
    matchLabels:
@@ -233,15 +228,14 @@ spec:
        value: "false"
      - name: BASE_HREF
        value: /
-      image: argoproj/argoui:v2.2.1
+      image: argoproj/argoui:v2.3.0-rc1
      name: argo-ui
      serviceAccountName: argo-ui
---
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
kind: Deployment
metadata:
  name: workflow-controller
-  namespace: argo
spec:
  selector:
    matchLabels:
@@ -256,9 +250,9 @@ spec:
      - --configmap
      - workflow-controller-configmap
      - --executor-image
-      - argoproj/argoexec:v2.2.1
+      - argoproj/argoexec:v2.3.0-rc1
      command:
      - workflow-controller
-      image: argoproj/workflow-controller:v2.2.1
+      image: argoproj/workflow-controller:v2.3.0-rc1
      name: workflow-controller
      serviceAccountName: argo
diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml
index 8453fec54ca2..7b55936496c2 100644
--- a/manifests/namespace-install.yaml
+++ b/manifests/namespace-install.yaml
@@ -142,7 +142,7 @@ spec:
  selector:
    app: argo-ui
---
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
kind: Deployment
metadata:
  name: argo-ui
@@ -157,6 +157,8 @@ spec:
    spec:
      containers:
      - env:
+        - name: FORCE_NAMESPACE_ISOLATION
+          value: "true"
        - name: ARGO_NAMESPACE
          valueFrom:
            fieldRef:
@@ -168,11 +170,11 @@ spec:
          value: "false"
        - name: BASE_HREF
          value: /
-        image: argoproj/argoui:v2.2.1
+        image: argoproj/argoui:v2.3.0-rc1
        name: argo-ui
      serviceAccountName: argo-ui
---
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
kind: Deployment
metadata:
  name: workflow-controller
@@ -190,9 +192,9 @@ spec:
      - --configmap
      - workflow-controller-configmap
      - --executor-image
-      - argoproj/argoexec:v2.2.1
+      - argoproj/argoexec:v2.3.0-rc1
      command:
      - workflow-controller
-      image: argoproj/workflow-controller:v2.2.1
+      image: argoproj/workflow-controller:v2.3.0-rc1
      name: workflow-controller
      serviceAccountName: argo
diff --git a/manifests/namespace-install/03b_argo-ui-role.yaml b/manifests/namespace-install/argo-ui-rbac/argo-ui-role.yaml
similarity index 100%
rename from manifests/namespace-install/03b_argo-ui-role.yaml
rename to manifests/namespace-install/argo-ui-rbac/argo-ui-role.yaml
diff --git a/manifests/namespace-install/03c_argo-ui-rolebinding.yaml b/manifests/namespace-install/argo-ui-rbac/argo-ui-rolebinding.yaml
similarity index 100%
rename from manifests/namespace-install/03c_argo-ui-rolebinding.yaml
rename to manifests/namespace-install/argo-ui-rbac/argo-ui-rolebinding.yaml
diff --git a/manifests/namespace-install/argo-ui-rbac/kustomization.yaml b/manifests/namespace-install/argo-ui-rbac/kustomization.yaml
new file mode 100644
index 000000000000..a74d8797078e
--- /dev/null
+++ b/manifests/namespace-install/argo-ui-rbac/kustomization.yaml
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- argo-ui-role.yaml
+- argo-ui-rolebinding.yaml
diff --git a/manifests/namespace-install/kustomization.yaml b/manifests/namespace-install/kustomization.yaml
index 5533dbdabe13..038b641dab51 100644
--- a/manifests/namespace-install/kustomization.yaml
+++ b/manifests/namespace-install/kustomization.yaml
@@ -1,15 +1,11 @@
-resources:
-- ../base/01a_workflow-crd.yaml
-- ../base/02a_workflow-controller-sa.yaml
-- ./02b_workflow-controller-role.yaml
-- ./02c_workflow-controller-rolebinding.yaml
-- ./02d_workflow-controller-configmap.yaml
-- ../base/02e_workflow-controller-deployment.yaml
-- ../base/03a_argo-ui-sa.yaml
-- ./03b_argo-ui-role.yaml
-- ./03c_argo-ui-rolebinding.yaml
-- ../base/03d_argo-ui-deployment.yaml
-- ../base/03e_argo-ui-service.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization

-patches:
-- ./overlays/03d_argo-ui-deployment.yaml
+bases:
+- ../base
+- ./workflow-controller-rbac
+- ./argo-ui-rbac
+
+patchesStrategicMerge:
+- ./overlays/workflow-controller-configmap.yaml
+- ./overlays/argo-ui-deployment.yaml
diff --git a/manifests/namespace-install/overlays/03d_argo-ui-deployment.yaml b/manifests/namespace-install/overlays/argo-ui-deployment.yaml
similarity index 88%
rename from manifests/namespace-install/overlays/03d_argo-ui-deployment.yaml
rename to manifests/namespace-install/overlays/argo-ui-deployment.yaml
index 1efc2479f270..47582b47e996 100644
--- a/manifests/namespace-install/overlays/03d_argo-ui-deployment.yaml
+++ b/manifests/namespace-install/overlays/argo-ui-deployment.yaml
@@ -1,4 +1,4 @@
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
kind: Deployment
metadata:
  name: argo-ui
diff --git a/manifests/namespace-install/02d_workflow-controller-configmap.yaml b/manifests/namespace-install/overlays/workflow-controller-configmap.yaml
similarity index 100%
rename from manifests/namespace-install/02d_workflow-controller-configmap.yaml
rename to manifests/namespace-install/overlays/workflow-controller-configmap.yaml
diff --git a/manifests/namespace-install/workflow-controller-rbac/kustomization.yaml b/manifests/namespace-install/workflow-controller-rbac/kustomization.yaml
new file mode 100644
index 000000000000..f7b23b2b0bc5
--- /dev/null
+++ b/manifests/namespace-install/workflow-controller-rbac/kustomization.yaml
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- workflow-controller-role.yaml
+- workflow-controller-rolebinding.yaml
diff --git a/manifests/namespace-install/02b_workflow-controller-role.yaml b/manifests/namespace-install/workflow-controller-rbac/workflow-controller-role.yaml
similarity index 100%
rename from manifests/namespace-install/02b_workflow-controller-role.yaml
rename to manifests/namespace-install/workflow-controller-rbac/workflow-controller-role.yaml
diff --git a/manifests/namespace-install/02c_workflow-controller-rolebinding.yaml b/manifests/namespace-install/workflow-controller-rbac/workflow-controller-rolebinding.yaml
similarity index 100%
rename from manifests/namespace-install/02c_workflow-controller-rolebinding.yaml
rename to manifests/namespace-install/workflow-controller-rbac/workflow-controller-rolebinding.yaml

From 1c729a72a2ae431623332b65646c97cb689eab01 Mon Sep 17 00:00:00 2001
From: Jesse Suen
Date: Wed, 10 Apr 2019 09:50:32 -0700
Subject: [PATCH 117/145] Update v2.3.0 CHANGELOG.md

---
 CHANGELOG.md | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 58 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6ecd34f6c15b..1f44f4ab2320 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,63 @@
# Changelog

-## 2.3.0 (Not Yet Released)
+## 2.3.0-rc1 (2019-04-10)
++ Support for PNS (Process Namespace Sharing) executor (#1214)
++ Support for K8s API based Executor (#1010) (@dtaniwaki)
++ Adds limited support for Kubelet/K8s API artifact collection by mirroring volume mounts to wait sidecar
++ Support HDFS Artifact (#1159) (@dtaniwaki)
++ System level workflow parallelism limits & priorities (#1065)
++ Support larger workflows through node status compression (#1264)
++ Support nested steps workflow parallelism (#1046) (@WeiTang114)
++ Add feature to continue workflow on failed/error steps/tasks (#1205) (@schrodit)
++ Parameter and Argument names should support snake case (#1048) (@bbc88ks)
++ Add support for ppc64le and s390x (#1102) (@chenzhiwei)
++ Install mime-support in argoexec to set proper mime types for S3 artifacts
++ Allow owner reference to be set in submit util (#1120) (@nareshku)
++ Add support for hostNetwork & dnsPolicy config (#1161) (@Dreamheart)
++ Add schedulerName to workflow and template spec (#1184) (@houz42)
++ Executor can access the k8s apiserver with an out-of-cluster config file (@houz42)
++ Proxy Priority and PriorityClassName to pods (#1179) (@dtaniwaki)
++ Add the `mergeStrategy` option to resource patching (#1269) (@ian-howell)
++ Add workflow labels and annotations global vars (#1280) (@discordianfish)
++ Support for optional input/output artifacts (#1277)
++ Add dns config support (#1301) (@xianlubird)
++ Added support for artifact path references (#1300) (@Ark-kun)
++ Add support for init containers (#1183) (@dtaniwaki)
++ Secrets should be passed to pods using volumes instead of API calls (#1302)
++ Azure AKS authentication issues #1079 (@gerardaus)
+
+* Update dependencies to K8s v1.12 and client-go 9.0
+* Add namespace explicitly to pod metadata (#1059) (@dvavili)
+* Raise not implemented error when artifact saving is unsupported (#1062) (@dtaniwaki)
+* Retry logic to s3 load and save function (#1082) (@kshamajain99)
+* Remove docker_lib mount volume which is not needed anymore (#1115) (@ywskycn)
+* Documentation improvements and fixes (@protochron, @jmcarp, @locona, @kivio, @fischerjulian, @annawinkler, @jdfalko, @groodt, @migggy, @nstott, @adrienjt)
+* Validate ArchiveLocation artifacts (#1167) (@dtaniwaki)
+* Git cloning via SSH was not verifying host public key (#1261)
+* Speed up podReconciliation using parallel goroutine (#1286) (@xianlubird)
+
+- Initialize child node before marking phase. Fixes panic on invalid `When` (#1075) (@jmcarp)
+- Submodules are dirty after checkout -- need to update (#1052) (@andreimc)
+- Fix output artifact and parameter conflict (#1125) (@Ark-kun)
+- Remove container wait timeout from 'argo logs --follow' (#1142)
+- Fix panic in ttl controller (#1143)
+- Kill daemoned step if workflow consist of single daemoned step (#1144)
+- Fix global artifact overwriting in nested workflow (#1086) (@WeiTang114)
+- Fix issue where steps with exhausted retries would not complete (#1148)
+- Fix metadata for DAG with loops (#1149)
+- Replace exponential retry with poll (#1166) (@kzadorozhny)
+- Dockerfile: argoexec base image correction (#1213) (@elikatsis)
+- Set executor image pull policy for resource template (#1174) (@dtaniwaki)
+- Fix DAG retries (#1221) (@houz42)
+- Remove extra quotes around output parameter value (#1232) (@elikatsis)
+- Include stderr when retrieving docker logs (#1225) (@shahin)
+- Fix the Prometheus address references (#1237) (@spacez320)
+- Kubernetes Resource action: patch is not supported (#1245)
+- Fake outputs don't notify and task completes successfully (#1247)
+- Reduce redundant pod label actions (#1271) (@xianlubird)
+- Fix bug with DockerExecutor's CopyFile (#1275)
+- Fix for Resource creation where template has same parameter templating (#1283)
+- Fixes an issue where daemon steps were not getting terminated properly

### Deprecation Notice
The workflow-controller-configmap introduces a new config field, `executor`, which is a container

From 950de1b94efc18473a85e1f23c9ed5e6ff75ba93 Mon Sep 17 00:00:00 2001
From: Chris Chambers
Date: Thu, 11 Apr 2019 05:03:18 -0400
Subject: [PATCH 118/145] Export the methods of `KubernetesClientInterface` (#1294)

All calls to these methods previously generated a panic at runtime
because the calls resolved to the default, panic-always implementation,
not to the overrides provided by `k8sAPIClient` and `kubeletClient`.

Embedding an exported interface with unexported methods into a struct is
the only way to implement that interface in another package. When doing
this, the compiler generates default, panic-always implementations for
all methods from the interface. Implementors can override exported
methods, but it's not possible to override an unexported method from
the interface.
All invocations that go through the interface will come to the default
implementation, even if the struct tries to provide an override.
---
 workflow/executor/common/common.go  | 14 +++++++-------
 workflow/executor/k8sapi/client.go  | 16 ++++++++--------
 workflow/executor/kubelet/client.go | 10 +++++-----
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/workflow/executor/common/common.go b/workflow/executor/common/common.go
index 0ce8d251532f..dd218c447305 100644
--- a/workflow/executor/common/common.go
+++ b/workflow/executor/common/common.go
@@ -32,9 +32,9 @@ func GetContainerID(container *v1.ContainerStatus) string {
// KubernetesClientInterface is the interface to implement getContainerStatus method
type KubernetesClientInterface interface {
-	getContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error)
-	killContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error
-	createArchive(containerID, sourcePath string) (*bytes.Buffer, error)
+	GetContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error)
+	KillContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error
+	CreateArchive(containerID, sourcePath string) (*bytes.Buffer, error)
}

// WaitForTermination of the given containerID, set the timeout to 0 to discard it
@@ -52,7 +52,7 @@ func WaitForTermination(c KubernetesClientInterface, containerID string, timeout
	for {
		select {
		case <-ticker.C:
-			_, containerStatus, err := c.getContainerStatus(containerID)
+			_, containerStatus, err := c.GetContainerStatus(containerID)
			if err != nil {
				return err
			}
@@ -70,7 +70,7 @@ func WaitForTermination(c KubernetesClientInterface, containerID string, timeout
// TerminatePodWithContainerID invoke the given SIG against the PID1 of the container.
// No-op if the container is on the hostPID
func TerminatePodWithContainerID(c KubernetesClientInterface, containerID string, sig syscall.Signal) error {
-	pod, container, err := c.getContainerStatus(containerID)
+	pod, container, err := c.GetContainerStatus(containerID)
	if err != nil {
		return err
	}
@@ -84,7 +84,7 @@ func TerminatePodWithContainerID(c KubernetesClientInterface, containerID string
	if pod.Spec.RestartPolicy != "Never" {
		return fmt.Errorf("cannot terminate pod with a %q restart policy", pod.Spec.RestartPolicy)
	}
-	return c.killContainer(pod, container, sig)
+	return c.KillContainer(pod, container, sig)
}

// KillGracefully kills a container gracefully.
@@ -115,7 +115,7 @@ func KillGracefully(c KubernetesClientInterface, containerID string) error {
// CopyArchive downloads files and directories as a tarball and saves it to a specified path.
func CopyArchive(c KubernetesClientInterface, containerID, sourcePath, destPath string) error {
	log.Infof("Archiving %s:%s to %s", containerID, sourcePath, destPath)
-	b, err := c.createArchive(containerID, sourcePath)
+	b, err := c.CreateArchive(containerID, sourcePath)
	if err != nil {
		return err
	}
diff --git a/workflow/executor/k8sapi/client.go b/workflow/executor/k8sapi/client.go
index 025e69a86686..1da5433be3e6 100644
--- a/workflow/executor/k8sapi/client.go
+++ b/workflow/executor/k8sapi/client.go
@@ -21,14 +21,14 @@ import (
)

type k8sAPIClient struct {
-	execcommon.KubernetesClientInterface
-
	clientset *kubernetes.Clientset
	config    *restclient.Config
	podName   string
	namespace string
}

+var _ execcommon.KubernetesClientInterface = &k8sAPIClient{}
+
func newK8sAPIClient(clientset *kubernetes.Clientset, config *restclient.Config, podName, namespace string) (*k8sAPIClient, error) {
	return &k8sAPIClient{
		clientset: clientset,
@@ -39,7 +39,7 @@ func newK8sAPIClient(clientset *kubernetes.Clientset, config *restclient.Config,
}

func (c *k8sAPIClient) getFileContents(containerID, sourcePath string) (string, error) {
-	_, containerStatus, err := c.getContainerStatus(containerID)
+	_, containerStatus, err := c.GetContainerStatus(containerID)
	if err != nil {
		return "", err
	}
@@ -55,8 +55,8 @@ func (c *k8sAPIClient) getFileContents(containerID, sourcePath string) (string,
	return stdOut.String(), nil
}

-func (c *k8sAPIClient) createArchive(containerID, sourcePath string) (*bytes.Buffer, error) {
-	_, containerStatus, err := c.getContainerStatus(containerID)
+func (c *k8sAPIClient) CreateArchive(containerID, sourcePath string) (*bytes.Buffer, error) {
+	_, containerStatus, err := c.GetContainerStatus(containerID)
	if err != nil {
		return nil, err
	}
@@ -73,7 +73,7 @@ func (c *k8sAPIClient) createArchive(containerID, sourcePath string) (*bytes.Buf
}

func (c *k8sAPIClient) getLogsAsStream(containerID string) (io.ReadCloser, error) {
-	_, containerStatus, err := c.getContainerStatus(containerID)
+	_, containerStatus, err := c.GetContainerStatus(containerID)
	if err != nil {
		return nil, err
	}
@@ -114,7 +114,7 @@ func (c *k8sAPIClient) getPod() (*v1.Pod, error) {
	return c.clientset.CoreV1().Pods(c.namespace).Get(c.podName, metav1.GetOptions{})
}

-func (c *k8sAPIClient) getContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error) {
+func (c *k8sAPIClient) GetContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error) {
	pod, err := c.getPod()
	if err != nil {
		return nil, nil, err
@@ -132,7 +132,7 @@ func (c *k8sAPIClient) waitForTermination(containerID string, timeout time.Durat
	return execcommon.WaitForTermination(c, containerID, timeout)
}

-func (c *k8sAPIClient) killContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error {
+func (c *k8sAPIClient) KillContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error {
	command := []string{"/bin/sh", "-c", fmt.Sprintf("kill -%d 1", sig)}
	exec, err := common.ExecPodContainer(c.config, c.namespace, c.podName, container.Name, false, false, command...)
	if err != nil {
diff --git a/workflow/executor/kubelet/client.go b/workflow/executor/kubelet/client.go
index 4af47c6836b6..6fc7e5df69ab 100644
--- a/workflow/executor/kubelet/client.go
+++ b/workflow/executor/kubelet/client.go
@@ -28,8 +28,6 @@ const (
)

type kubeletClient struct {
-	execcommon.KubernetesClientInterface
-
	httpClient      *http.Client
	httpHeader      http.Header
	websocketDialer *websocket.Dialer
@@ -40,6 +38,8 @@ type kubeletClient struct {
	kubeletEndpoint string
}

+var _ execcommon.KubernetesClientInterface = &kubeletClient{}
+
func newKubeletClient() (*kubeletClient, error) {
	kubeletHost := os.Getenv(common.EnvVarDownwardAPINodeIP)
	if kubeletHost == "" {
@@ -165,7 +165,7 @@ func (k *kubeletClient) doRequestLogs(namespace, podName, containerName string)
	return resp, nil
}

-func (k *kubeletClient) getContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error) {
+func (k *kubeletClient) GetContainerStatus(containerID string) (*v1.Pod, *v1.ContainerStatus, error) {
	podList, err := k.getPodList()
	if err != nil {
		return nil, nil, errors.InternalWrapError(err)
@@ -242,7 +242,7 @@ func (k *kubeletClient) readFileContents(u *url.URL) (*bytes.Buffer, error) {
}

// createArchive exec in the given containerID and create a tarball of the given sourcePath. Works with directory
-func (k *kubeletClient) createArchive(containerID, sourcePath string) (*bytes.Buffer, error) {
+func (k *kubeletClient) CreateArchive(containerID, sourcePath string) (*bytes.Buffer, error) {
	return k.getCommandOutput(containerID, fmt.Sprintf("command=tar&command=-cf&command=-&command=%s&output=1", sourcePath))
}

@@ -284,7 +284,7 @@ func (k *kubeletClient) WaitForTermination(containerID string, timeout time.Dura
	return execcommon.WaitForTermination(k, containerID, timeout)
}

-func (k *kubeletClient) killContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error {
+func (k *kubeletClient) KillContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error {
	u, err := url.ParseRequestURI(fmt.Sprintf("wss://%s/exec/%s/%s/%s?command=/bin/sh&&command=-c&command=kill+-%d+1&output=1&error=1", k.kubeletEndpoint, pod.Namespace, pod.Name, container.Name, sig))
	if err != nil {
		return errors.InternalWrapError(err)
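The pitfall described in the `KubernetesClientInterface` patch above is easy to reproduce. Below is a minimal, single-package sketch (all names are hypothetical, and the real bug additionally involved method identity across packages): a struct that embeds an interface holds a nil interface value, so any method that is not explicitly overridden panics when it is promoted from that nil field.

```go
package main

import "fmt"

// ClientInterface is a stand-in for the executor client interface,
// reduced to a single method for illustration.
type ClientInterface interface {
	GetContainerStatus(containerID string) (string, error)
}

// brokenClient mirrors the pre-fix pattern: it embeds the interface rather
// than implementing it. The embedded field is a nil interface value, so the
// promoted GetContainerStatus dereferences nil and panics at runtime.
type brokenClient struct {
	ClientInterface
}

// fixedClient mirrors the post-fix pattern: the method is exported and
// implemented directly, so nothing can fall through to a nil default.
type fixedClient struct{}

func (fixedClient) GetContainerStatus(containerID string) (string, error) {
	return "running", nil
}

func main() {
	var c ClientInterface = fixedClient{}
	status, _ := c.GetContainerStatus("abc123")
	fmt.Println(status) // prints: running

	defer func() { fmt.Println("recovered:", recover()) }()
	c = brokenClient{}
	c.GetContainerStatus("abc123") // panics: call promoted from a nil embedded interface
}
```

This is also why the `var _ execcommon.KubernetesClientInterface = &k8sAPIClient{}` assertions in the patch are useful: they keep the compile-time interface check without reintroducing the embedded field.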
From a5a2bcf21900019d979328250009af4137f7ff2a Mon Sep 17 00:00:00 2001
From: Ed Lee
Date: Thu, 11 Apr 2019 18:54:56 -0700
Subject: [PATCH 119/145] Update README.md (#1321)

---
 community/README.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/community/README.md b/community/README.md
index b051276b1a2d..4e6a2dbe7053 100644
--- a/community/README.md
+++ b/community/README.md
@@ -35,9 +35,11 @@ manner.

## Contributing to Argo

-Read and abide by the [Argo Code of Conduct](https://github.com/argoproj/argo/blob/master/CODE_OF_CONDUCT.md) :
+Read and abide by the [Argo Code of Conduct](https://github.com/argoproj/argo/blob/master/CODE_OF_CONDUCT.md).
+
Before submitting a pull request, please sign the [CLA](https://github.com/argoproj/argo/blob/master/community/Argo%20Individual%20CLA.pdf). This agreement gives us permission to use and redistribute your contributions as part of the project.
+Contributors will be asked to read and sign a [CLA](https://github.com/argoproj/argo/blob/master/community/Argo%20Individual%20CLA.pdf). This agreement gives us permission to use and redistribute your contributions as part of the Argo Project and protects the users and contributors of the project.

## Community Meetings

From 6607dca93db6255a2abc30ae76b5f935fce5735d Mon Sep 17 00:00:00 2001
From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com>
Date: Thu, 11 Apr 2019 21:32:27 -0700
Subject: [PATCH 120/145] Issue1316 Pod creation with secret volumemount (#1318)

* CheckandEstimate implementation

* fixed variable rename

* fixed gofmt

* fixed feedbacks

* Fixed the duplicate mountpath issue
---
 workflow/controller/workflowpod.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index e582a281d2c5..04872ece41c2 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -848,7 +848,7 @@ func createSecretVolumes(tmpl *wfv1.Template) ([]apiv1.Volume, []apiv1.VolumeMou
			secretVolumes = append(secretVolumes, val)
			secretVolMounts = append(secretVolMounts, apiv1.VolumeMount{
				Name:      volMountName,
-				MountPath: common.SecretVolMountPath,
+				MountPath: common.SecretVolMountPath + "/" + val.Name,
				ReadOnly:  true,
			})
		}
@@ -900,7 +900,7 @@ func createSecretVal(volMap map[string]apiv1.Volume, secret *apiv1.SecretKeySele
	if vol, ok := volMap[secret.Name]; ok {
		key := apiv1.KeyToPath{
			Key:  secret.Key,
-			Path: secret.Name + "/" + secret.Key,
+			Path: secret.Key,
		}
		if val, _ := keyMap[secret.Name+"-"+secret.Key]; !val {
			keyMap[secret.Name+"-"+secret.Key] = true
@@ -915,7 +915,7 @@ func createSecretVal(volMap map[string]apiv1.Volume, secret *apiv1.SecretKeySele
				Items: []apiv1.KeyToPath{
					{
						Key:  secret.Key,
-						Path: secret.Name + "/" + secret.Key,
+						Path: secret.Key,
					},
				},
			},
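To make the effect of the duplicate-mountpath fix above concrete: each referenced secret now gets its own mount directory (`common.SecretVolMountPath + "/" + secretName`), and each key becomes a file named after the key alone, so two secrets can no longer collide on a single mount path. The sketch below reads such a key from inside the pod, assuming a hypothetical mount root of `/argo/secret` (the actual value of `common.SecretVolMountPath` is decided by the controller):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// secretVolMountPath is an assumption for illustration; the controller
// mounts each referenced secret under <mount root>/<secret name>.
const secretVolMountPath = "/argo/secret"

// loadSecretKey reads one key of a mounted secret. With the fix above, key
// "accessKey" of secret "my-creds" lives at /argo/secret/my-creds/accessKey.
func loadSecretKey(secretName, key string) (string, error) {
	b, err := ioutil.ReadFile(filepath.Join(secretVolMountPath, secretName, key))
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	val, err := loadSecretKey("my-creds", "accessKey")
	if err != nil {
		fmt.Println("secret not mounted:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(val))
}
```

Before the fix, every secret volume was mounted at the same `SecretVolMountPath` and keys were written as `<secretName>/<key>` inside it, which produced duplicate `mountPath` entries in the pod spec as soon as a template referenced more than one secret.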
From 64370a2d185db66a8d2188d986c52a3b73aaf92b Mon Sep 17 00:00:00 2001
From: Ilias Katsakioris
Date: Sat, 13 Apr 2019 01:09:27 +0300
Subject: [PATCH 121/145] Support parameter substitution in the volumes attribute (#1238)

---
 workflow/controller/dag.go              |  7 ++++
 workflow/controller/operator.go         | 37 +++++++++++++++++-
 workflow/controller/steps.go            | 15 ++++---
 workflow/controller/workflowpod.go      | 31 ++++++++-------
 workflow/controller/workflowpod_test.go | 52 ++++++++++++++++++++++---
 5 files changed, 115 insertions(+), 27 deletions(-)

diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go
index 1a3f5cb40ccc..6ebf1d3f2c76 100644
--- a/workflow/controller/dag.go
+++ b/workflow/controller/dag.go
@@ -380,6 +380,13 @@ func (woc *wfOperationCtx) resolveDependencyReferences(dagCtx *dagContext, task
	}

	// Perform replacement
+	// Replace woc.volumes
+	err := woc.substituteParamsInVolumes(scope.replaceMap())
+	if err != nil {
+		return nil, err
+	}
+
+	// Replace task's parameters
	taskBytes, err := json.Marshal(task)
	if err != nil {
		return nil, errors.InternalWrapError(err)
diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go
index 60ba3c4379c4..3e14d1b7611b 100644
--- a/workflow/controller/operator.go
+++ b/workflow/controller/operator.go
@@ -49,6 +49,9 @@ type wfOperationCtx struct {
	// globalParams holds any parameters that are available to be referenced
	// in the global scope (e.g. workflow.parameters.XXX).
	globalParams map[string]string
+	// volumes holds a DeepCopy of wf.Spec.Volumes to perform substitutions.
+	// It is then used in addVolumeReferences() when creating a pod.
+	volumes []apiv1.Volume
	// map of pods which need to be labeled with completed=true
	completedPods map[string]bool
	// deadline is the deadline time in which this operation should relinquish
@@ -93,6 +96,7 @@ func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOper
		}),
		controller:   wfc,
		globalParams: make(map[string]string),
+		volumes:      wf.Spec.DeepCopy().Volumes,
		completedPods: make(map[string]bool),
		deadline:     time.Now().UTC().Add(maxOperationTime),
	}
@@ -158,7 +162,14 @@ func (woc *wfOperationCtx) operate() {

	woc.setGlobalParameters()

-	err := woc.createPVCs()
+	err := woc.substituteParamsInVolumes(woc.globalParams)
+	if err != nil {
+		woc.log.Errorf("%s volumes global param substitution error: %+v", woc.wf.ObjectMeta.Name, err)
+		woc.markWorkflowError(err, true)
+		return
+	}
+
+	err = woc.createPVCs()
	if err != nil {
		woc.log.Errorf("%s pvc create error: %+v", woc.wf.ObjectMeta.Name, err)
		woc.markWorkflowError(err, true)
@@ -1667,3 +1678,27 @@ func (woc *wfOperationCtx) checkAndCompress() error {

	return nil
}
+
+func (woc *wfOperationCtx) substituteParamsInVolumes(params map[string]string) error {
+	if woc.volumes == nil {
+		return nil
+	}
+
+	volumes := woc.volumes
+	volumesBytes, err := json.Marshal(volumes)
+	if err != nil {
+		return errors.InternalWrapError(err)
+	}
+	fstTmpl := fasttemplate.New(string(volumesBytes), "{{", "}}")
+	newVolumesStr, err := common.Replace(fstTmpl, params, true)
+	if err != nil {
+		return err
+	}
+	var newVolumes []apiv1.Volume
+	err = json.Unmarshal([]byte(newVolumesStr), &newVolumes)
+	if err != nil {
+		return errors.InternalWrapError(err)
+	}
+	woc.volumes = newVolumes
+	return nil
+}
diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go
index e54ae2bb852a..9e55c88d3911 100644
--- a/workflow/controller/steps.go
+++ b/workflow/controller/steps.go
@@ -278,6 +278,12 @@ func shouldExecute(when string) (bool, error) {
func (woc *wfOperationCtx) resolveReferences(stepGroup []wfv1.WorkflowStep, scope *wfScope) ([]wfv1.WorkflowStep, error) {
	newStepGroup := make([]wfv1.WorkflowStep, len(stepGroup))

+	// Step 0: replace all parameter scope references for volumes
+	err := woc.substituteParamsInVolumes(scope.replaceMap())
+	if err != nil {
+		return nil, err
+	}
+
	for i, step := range stepGroup {
		// Step 1: replace all parameter scope references in the step
		// TODO: improve this
@@ -285,15 +291,8 @@ func (woc *wfOperationCtx) resolveReferences(stepGroup []wfv1.WorkflowStep, scop
		if err != nil {
			return nil, errors.InternalWrapError(err)
		}
-		replaceMap := make(map[string]string)
-		for key, val := range scope.scope {
-			valStr, ok := val.(string)
-			if ok {
-				replaceMap[key] = valStr
-			}
-		}
		fstTmpl := fasttemplate.New(string(stepBytes), "{{", "}}")
-		newStepStr, err := common.Replace(fstTmpl, replaceMap, true)
+		newStepStr, err := common.Replace(fstTmpl, scope.replaceMap(), true)
		if err != nil {
			return nil, err
		}
diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index 04872ece41c2..7a4d6113a7ec 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -148,7 +148,7 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont
	addSchedulingConstraints(pod, wfSpec, tmpl)
	woc.addMetadata(pod, tmpl)

-	err = addVolumeReferences(pod, wfSpec, tmpl, woc.wf.Status.PersistentVolumeClaims)
+	err = addVolumeReferences(pod, woc.volumes, tmpl, woc.wf.Status.PersistentVolumeClaims)
	if err != nil {
		return nil, err
	}
@@ -183,8 +183,13 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont
	pod.ObjectMeta.Annotations[common.AnnotationKeyTemplate] = string(tmplBytes)

	// Perform one last variable substitution here. Some variables come from the workflow
-	// configmap (e.g. archive location), and were not substituted in executeTemplate.
-	pod, err = substituteGlobals(pod, woc.globalParams)
+	// configmap (e.g. archive location) or volumes attribute, and were not substituted
+	// in executeTemplate.
+	podParams := woc.globalParams
+	for _, inParam := range tmpl.Inputs.Parameters {
+		podParams["inputs.parameters."+inParam.Name] = *inParam.Value
+	}
+	pod, err = substitutePodParams(pod, podParams)
	if err != nil {
		return nil, err
	}
@@ -220,20 +225,20 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont
	return created, nil
}

-// substituteGlobals returns a pod spec with global parameter references substituted as well as pod.name
-func substituteGlobals(pod *apiv1.Pod, globalParams map[string]string) (*apiv1.Pod, error) {
-	newGlobalParams := make(map[string]string)
-	for k, v := range globalParams {
-		newGlobalParams[k] = v
+// substitutePodParams returns a pod spec with parameter references substituted as well as pod.name
+func substitutePodParams(pod *apiv1.Pod, podParams map[string]string) (*apiv1.Pod, error) {
+	newPodParams := make(map[string]string)
+	for k, v := range podParams {
+		newPodParams[k] = v
	}
-	newGlobalParams[common.LocalVarPodName] = pod.Name
-	globalParams = newGlobalParams
+	newPodParams[common.LocalVarPodName] = pod.Name
+	podParams = newPodParams
	specBytes, err := json.Marshal(pod)
	if err != nil {
		return nil, err
	}
	fstTmpl := fasttemplate.New(string(specBytes), "{{", "}}")
-	newSpecBytes, err := common.Replace(fstTmpl, globalParams, true)
+	newSpecBytes, err := common.Replace(fstTmpl, podParams, true)
	if err != nil {
		return nil, err
	}
@@ -455,7 +460,7 @@ func addSchedulingConstraints(pod *apiv1.Pod, wfSpec *wfv1.WorkflowSpec, tmpl *w

// addVolumeReferences adds any volumeMounts that a container/sidecar is referencing, to the pod.spec.volumes
// These are either specified in the workflow.spec.volumes or the workflow.spec.volumeClaimTemplate section
-func addVolumeReferences(pod *apiv1.Pod, wfSpec *wfv1.WorkflowSpec, tmpl *wfv1.Template, pvcs []apiv1.Volume) error {
+func addVolumeReferences(pod *apiv1.Pod, vols []apiv1.Volume, tmpl *wfv1.Template, pvcs []apiv1.Volume) error {
	switch tmpl.GetType() {
	case wfv1.TemplateTypeContainer, wfv1.TemplateTypeScript:
	default:
@@ -464,7 +469,7 @@ func addVolumeReferences(pod *apiv1.Pod, wfSpec *wfv1.WorkflowSpec, tmpl *wfv1.T

	// getVolByName is a helper to retrieve a volume by its name, either from the volumes or claims section
	getVolByName := func(name string) *apiv1.Volume {
-		for _, vol := range wfSpec.Volumes {
+		for _, vol := range vols {
			if vol.Name == name {
				return &vol
			}
diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go
index 190c48d42c1c..4e5efe2c52dc 100644
--- a/workflow/controller/workflowpod_test.go
+++ b/workflow/controller/workflowpod_test.go
@@ -272,7 +272,7 @@ func TestVolumeAndVolumeMounts(t *testing.T) {
	// For Docker executor
	{
		woc := newWoc()
-		woc.wf.Spec.Volumes = volumes
+		woc.volumes = volumes
		woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
		woc.controller.Config.ContainerRuntimeExecutor = common.ContainerRuntimeExecutorDocker
@@ -291,7 +291,7 @@ func TestVolumeAndVolumeMounts(t *testing.T) {
	// For Kubelet executor
	{
		woc := newWoc()
-		woc.wf.Spec.Volumes = volumes
+		woc.volumes = volumes
		woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
		woc.controller.Config.ContainerRuntimeExecutor = common.ContainerRuntimeExecutorKubelet
@@ -309,7 +309,7 @@ func TestVolumeAndVolumeMounts(t *testing.T) {
	// For K8sAPI executor
	{
		woc := newWoc()
-		woc.wf.Spec.Volumes = volumes
+		woc.volumes = volumes
		woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
		woc.controller.Config.ContainerRuntimeExecutor = common.ContainerRuntimeExecutorK8sAPI
@@ -325,6 +325,48 @@ func TestVolumeAndVolumeMounts(t *testing.T) {
	}
}

+func TestVolumesPodSubstitution(t *testing.T) {
+	volumes := []apiv1.Volume{
+		{
+			Name: "volume-name",
+			VolumeSource: apiv1.VolumeSource{
+				PersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{
+					ClaimName: "{{inputs.parameters.volume-name}}",
+				},
+			},
+		},
+	}
+	volumeMounts := []apiv1.VolumeMount{
+		{
+			Name:      "volume-name",
+			MountPath: "/test",
+		},
+	}
+	tmpStr := "test-name"
+	inputParameters := []wfv1.Parameter{
+		{
+			Name:  "volume-name",
+			Value: &tmpStr,
+		},
+	}
+
+	woc := newWoc()
+	woc.volumes = volumes
+	woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
+	woc.wf.Spec.Templates[0].Inputs.Parameters = inputParameters
+	woc.controller.Config.ContainerRuntimeExecutor = common.ContainerRuntimeExecutorDocker
+
+	woc.executeContainer(woc.wf.Spec.Entrypoint, &woc.wf.Spec.Templates[0], "")
+	podName := getPodName(woc.wf)
+	pod, err := woc.controller.kubeclientset.CoreV1().Pods("").Get(podName, metav1.GetOptions{})
+	assert.Nil(t, err)
+	assert.Equal(t, 3, len(pod.Spec.Volumes))
+	assert.Equal(t, "volume-name", pod.Spec.Volumes[2].Name)
+	assert.Equal(t, "test-name", pod.Spec.Volumes[2].PersistentVolumeClaim.ClaimName)
+	assert.Equal(t, 1, len(pod.Spec.Containers[1].VolumeMounts))
+	assert.Equal(t, "volume-name", pod.Spec.Containers[1].VolumeMounts[0].Name)
+}
+
func TestOutOfCluster(t *testing.T) {

	verifyKubeConfigVolume := func(ctr apiv1.Container, volName, mountPath string) {
@@ -428,7 +470,7 @@ func TestInitContainers(t *testing.T) {
	mirrorVolumeMounts := true

	woc := newWoc()
-	woc.wf.Spec.Volumes = volumes
+	woc.volumes = volumes
	woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
	woc.wf.Spec.Templates[0].InitContainers = []wfv1.UserContainer{
		{
@@ -466,7 +508,7 @@ func TestSidecars(t *testing.T) {
	mirrorVolumeMounts := true

	woc := newWoc()
-	woc.wf.Spec.Volumes = volumes
+	woc.volumes = volumes
	woc.wf.Spec.Templates[0].Container.VolumeMounts = volumeMounts
	woc.wf.Spec.Templates[0].Sidecars = []wfv1.UserContainer{
		{
			Name: "sidevol",

From bd8d5cb4b7510afb7bd43bd75e5c5d26ccc85ca4 Mon Sep 17 00:00:00 2001
From: Jesse Suen
Date: Sun, 21 Apr 2019 00:21:02 -0700
Subject: [PATCH 122/145] `argo list` was not displaying non-zero priorities correctly

---
 cmd/argo/commands/list.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/cmd/argo/commands/list.go b/cmd/argo/commands/list.go
index 081a04a37f31..7f779aa13bb6 100644
--- a/cmd/argo/commands/list.go
+++ b/cmd/argo/commands/list.go
@@ -119,7 +119,11 @@ func printTable(wfList []wfv1.Workflow, listArgs *listFlags) {
		if listArgs.allNamespaces {
			fmt.Fprintf(w, "%s\t", wf.ObjectMeta.Namespace)
		}
-		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d", wf.ObjectMeta.Name, workflowStatus(&wf), ageStr, durationStr, wf.Spec.Priority)
+		var priority int
+		if wf.Spec.Priority != nil {
+			priority = int(*wf.Spec.Priority)
+		}
+		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d", wf.ObjectMeta.Name, workflowStatus(&wf), ageStr, durationStr, priority)
		if listArgs.output == "wide" {
			pending, running, completed := countPendingRunningCompleted(&wf)
			fmt.Fprintf(w, "\t%d/%d/%d", pending, running, completed)

From 34af5a065e42230148b48603fc81f57fb2b4c22c Mon Sep 17 00:00:00 2001
From: Jesse Suen
Date: Sun, 21 Apr 2019 00:22:17 -0700
Subject: [PATCH 123/145] Fix regression where argoexec wait would not return when podname was too long

---
 cmd/argoexec/commands/root.go      |  8 +++++---
 workflow/common/common.go          |  2 ++
 workflow/controller/workflowpod.go | 11 ++++++++++-
 3 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/cmd/argoexec/commands/root.go b/cmd/argoexec/commands/root.go
index 60405a8eb312..2cf792f06da5 100644
--- a/cmd/argoexec/commands/root.go
+++ b/cmd/argoexec/commands/root.go
@@ -74,8 +74,10 @@ func initExecutor() *executor.WorkflowExecutor {
	clientset, err := kubernetes.NewForConfig(config)
	checkErr(err)

-	podName, err := os.Hostname()
-	checkErr(err)
+	podName, ok := os.LookupEnv(common.EnvVarPodName)
+	if !ok {
+		log.Fatalf("Unable to determine pod name from environment variable %s", common.EnvVarPodName)
+	}

	tmpl, err := executor.LoadTemplate(podAnnotationsPath)
	checkErr(err)
@@ -96,7 +98,7 @@ func initExecutor() *executor.WorkflowExecutor {
	wfExecutor := executor.NewExecutor(clientset, podName, namespace, podAnnotationsPath, cre, *tmpl)
	yamlBytes, _ := json.Marshal(&wfExecutor.Template)
	vers := argo.GetVersion()
-	log.Infof("Executor (version: %s, build_date: %s) initialized with template:\n%s", vers, vers.BuildDate, string(yamlBytes))
+	log.Infof("Executor (version: %s, build_date: %s) initialized (pod: %s/%s) with template:\n%s", vers, vers.BuildDate, namespace, podName, string(yamlBytes))

	return &wfExecutor
}
diff --git a/workflow/common/common.go b/workflow/common/common.go
index 5b68158d5d4b..68909a755877 100644
--- a/workflow/common/common.go
+++ b/workflow/common/common.go
@@ -76,6 +76,8 @@ const (

	// Various environment variables containing pod information exposed to the executor container(s)

+	// EnvVarPodName contains the name of the pod
+	EnvVarPodName = "ARGO_POD_NAME"
	// EnvVarContainerRuntimeExecutor contains the name of the container runtime executor to use, empty is equal to "docker"
	EnvVarContainerRuntimeExecutor = "ARGO_CONTAINER_RUNTIME_EXECUTOR"
	// EnvVarDownwardAPINodeIP is the envvar used to get the `status.hostIP`
diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index 7a4d6113a7ec..a6dc9d9a3ff9 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -277,8 +277,17 @@ func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Contain

func (woc *wfOperationCtx) createEnvVars() []apiv1.EnvVar {
	var execEnvVars []apiv1.EnvVar
+	execEnvVars = append(execEnvVars, apiv1.EnvVar{
+		Name: common.EnvVarPodName,
+		ValueFrom: &apiv1.EnvVarSource{
+			FieldRef: &apiv1.ObjectFieldSelector{
+				APIVersion: "v1",
+				FieldPath:  "metadata.name",
+			},
+		},
+	})
	if woc.controller.Config.Executor != nil {
-		execEnvVars = woc.controller.Config.Executor.Env
+		execEnvVars = append(execEnvVars, woc.controller.Config.Executor.Env...)
	}
	switch woc.controller.Config.ContainerRuntimeExecutor {
	case common.ContainerRuntimeExecutorK8sAPI:

From 49a6b6d7ac1bb5f6b390eff1b218205d995142cb Mon Sep 17 00:00:00 2001
From: Jesse Suen
Date: Sun, 21 Apr 2019 00:51:23 -0700
Subject: [PATCH 124/145] wait will conditionally become privileged if main/sidecar privileged (resolves #1323)

---
 workflow/controller/workflowpod.go | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index a6dc9d9a3ff9..81dac30793aa 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -269,12 +269,37 @@ func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Contain
				},
			},
		}
+		if hasPrivilegedContainers(tmpl) {
+			// if the main or sidecar is privileged, the wait sidecar must also run privileged,
+			// in order to SIGTERM/SIGKILL the pid
+			ctr.SecurityContext.Privileged = pointer.BoolPtr(true)
+		}
	case "", common.ContainerRuntimeExecutorDocker:
		ctr.VolumeMounts = append(ctr.VolumeMounts, volumeMountDockerSock)
	}
	return ctr, nil
}

+// hasPrivilegedContainers tests if the main container or any sidecar is privileged
+func hasPrivilegedContainers(tmpl *wfv1.Template) bool {
+	if containerIsPrivileged(tmpl.Container) {
+		return true
+	}
+	for _, side := range tmpl.Sidecars {
+		if containerIsPrivileged(&side.Container) {
+			return true
+		}
+	}
+	return false
+}
+
+func containerIsPrivileged(ctr *apiv1.Container) bool {
+	if ctr != nil && ctr.SecurityContext != nil && ctr.SecurityContext.Privileged != nil && *ctr.SecurityContext.Privileged {
+		return true
+	}
+	return false
+}
+
func (woc *wfOperationCtx) createEnvVars() []apiv1.EnvVar {
	var execEnvVars []apiv1.EnvVar
	execEnvVars = append(execEnvVars, apiv1.EnvVar{

From bb1bfdd9106d9b64aa2dccf8d3554bdd31513cf8 Mon Sep 17 00:00:00 2001
From: Jesse Suen
Date: Sun, 21 Apr 2019 01:06:07 -0700
Subject: [PATCH 125/145] Update version to v2.3.0-rc2. Update changelog

---
 CHANGELOG.md                            | 36 ++++++++++++++++++-----
 ROADMAP.md                              | 13 +++------
 VERSION                                 |  2 +-
 api/openapi-spec/swagger.json           |  2 +-
 docs/workflow-controller-configmap.yaml |  3 ++-
 manifests/base/kustomization.yaml       |  4 +--
 manifests/install.yaml                  |  6 ++---
 manifests/namespace-install.yaml        |  6 ++---
 8 files changed, 45 insertions(+), 27 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1f44f4ab2320..bd85bfee8910 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,34 @@
# Changelog

+## 2.3.0-rc2 (2019-04-21)
+
+### Changes since 2.3.0-rc1
++ Support parameter substitution in the volumes attribute (#1238)
+- Fix regression where argoexec wait would not return when podname was too long
+- wait will conditionally become privileged if main/sidecar privileged (issue #1323)
+- `argo list` was not displaying non-zero priorities correctly
+- Pod creation with secret volumemount (#1318)
+- Export the methods of `KubernetesClientInterface` (#1294)
+
+
## 2.3.0-rc1 (2019-04-10)
+
+### Notes about upgrading from v2.2
+
+* Secrets are passed to the wait sidecar using volumeMounts instead of K8s API calls
+  performed by the executor. This is much more secure since it limits the privileges of the workflow pod
+  to no longer require namespace level secret access. However, as a consequence, workflow pods which
+  reference a secret that does not exist will now stay in a Pending state indefinitely, as opposed
+  to the previous behavior of failing during runtime.
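The headline rc2 change listed above, parameter substitution in the volumes attribute (#1238), is implemented in the earlier patch as a marshal-substitute-unmarshal round trip over `woc.volumes`. The standalone sketch below reproduces that flow using fasttemplate directly, with a stripped-down volume type and a hypothetical parameter name:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/valyala/fasttemplate"
)

// volume is a stripped-down stand-in for apiv1.Volume, just enough to
// carry a templated PVC claim name.
type volume struct {
	Name      string `json:"name"`
	ClaimName string `json:"claimName"`
}

func main() {
	vols := []volume{{Name: "workdir", ClaimName: "{{inputs.parameters.volume-name}}"}}

	// Serialize the volumes, substitute {{...}} references, and deserialize
	// the result -- the same round trip that substituteParamsInVolumes
	// performs in the controller.
	b, err := json.Marshal(vols)
	if err != nil {
		panic(err)
	}
	tmpl := fasttemplate.New(string(b), "{{", "}}")
	out := tmpl.ExecuteString(map[string]interface{}{
		"inputs.parameters.volume-name": "test-name",
	})

	var substituted []volume
	if err := json.Unmarshal([]byte(out), &substituted); err != nil {
		panic(err)
	}
	fmt.Println(substituted[0].ClaimName) // prints: test-name
}
```

The JSON round trip is what lets a single substitution pass cover every string field of every volume without walking the struct by hand.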
+
+
+### Deprecation Notice
+The workflow-controller-configmap introduces a new config field, `executor`, which is a container
+spec and provides controls over the executor sidecar container (i.e. `init`/`wait`). The fields
+`executorImage`, `executorResources`, and `executorImagePullPolicy` are deprecated and will be
+removed in a future release.
+
+### New Features:
+ Support for PNS (Process Namespace Sharing) executor (#1214)
+ Support for K8s API based Executor (#1010) (@dtaniwaki)
+ Adds limited support for Kubelet/K8s API artifact collection by mirroring volume mounts to wait sidecar
@@ -26,6 +54,7 @@
+ Secrets should be passed to pods using volumes instead of API calls (#1302)
+ Azure AKS authentication issues #1079 (@gerardaus)

+### Other Improvements:
* Update dependencies to K8s v1.12 and client-go 9.0
* Add namespace explicitly to pod metadata (#1059) (@dvavili)
* Raise not implemented error when artifact saving is unsupported (#1062) (@dtaniwaki)
* Retry logic to s3 load and save function (#1082) (@kshamajain99)
* Remove docker_lib mount volume which is not needed anymore (#1115) (@ywskycn)
* Documentation improvements and fixes (@protochron, @jmcarp, @locona, @kivio, @fischerjulian, @annawinkler, @jdfalko, @groodt, @migggy, @nstott, @adrienjt)
* Validate ArchiveLocation artifacts (#1167) (@dtaniwaki)
* Git cloning via SSH was not verifying host public key (#1261)
* Speed up podReconciliation using parallel goroutine (#1286) (@xianlubird)
+
- Initialize child node before marking phase. Fixes panic on invalid `When` (#1075) (@jmcarp)
- Submodules are dirty after checkout -- need to update (#1052) (@andreimc)
- Fix output artifact and parameter conflict (#1125) (@Ark-kun)
- Remove container wait timeout from 'argo logs --follow' (#1142)
- Fix panic in ttl controller (#1143)
- Kill daemoned step if workflow consist of single daemoned step (#1144)
- Fix global artifact overwriting in nested workflow (#1086) (@WeiTang114)
- Fix issue where steps with exhausted retries would not complete (#1148)
- Fix metadata for DAG with loops (#1149)
- Replace exponential retry with poll (#1166) (@kzadorozhny)
- Dockerfile: argoexec base image correction (#1213) (@elikatsis)
- Set executor image pull policy for resource template (#1174) (@dtaniwaki)
- Fix DAG retries (#1221) (@houz42)
- Remove extra quotes around output parameter value (#1232) (@elikatsis)
- Include stderr when retrieving docker logs (#1225) (@shahin)
- Fix the Prometheus address references (#1237) (@spacez320)
- Kubernetes Resource action: patch is not supported (#1245)
- Fake outputs don't notify and task completes successfully (#1247)
- Reduce redundant pod label actions (#1271) (@xianlubird)
- Fix bug with DockerExecutor's CopyFile (#1275)
- Fix for Resource creation where template has same parameter templating (#1283)
- Fixes an issue where daemon steps were not getting terminated properly

-### Deprecation Notice
-The workflow-controller-configmap introduces a new config field, `executor`, which is a container
-spec and provides controls over the executor sidecar container (i.e. `init`/`wait`). The fields
-`executorImage`, `executorResources`, and `executorImagePullPolicy` are deprecated and will be
-removed in a future release.
-
## 2.2.1 (2018-10-18)

### Changelog since v2.2.0
diff --git a/ROADMAP.md b/ROADMAP.md
index 425b1d2583bd..cf31798b54a4 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -1,19 +1,12 @@
# Roadmap

-## v2.3
-* Priority - ability to set a priority per workflow
-* Queuing - ability to limit number of concurrent workflows
-* PNS (Proccess Namespace Sharing) Executor
-
## v2.4
* Persistence - support offloading of workflow state into database layer
* Large workflow support (enabled by persistence feature)
-* Argo API server (integration with argo events, persistence layer)
-
-## v2.5
-* Argo API server enhancements (pagination, SSO, etc...)
+Backlog and bug fixes

-### Proposed Items
+## Proposed Items
+* Argo API server
* Best effort workflow steps
* Template level finalizers
* Artifact loop aggregation
diff --git a/VERSION b/VERSION
index d62e3a5a6d34..dd909b333d33 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.3.0-rc1
+2.3.0-rc2
diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json
index d06bb86382d5..2a2cbeb2f1e3 100644
--- a/api/openapi-spec/swagger.json
+++ b/api/openapi-spec/swagger.json
@@ -2,7 +2,7 @@
  "swagger": "2.0",
  "info": {
    "title": "Argo",
-    "version": "v2.3.0-rc1"
+    "version": "v2.3.0-rc2"
  },
  "paths": {},
  "definitions": {
diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml
index d82b8c42a62a..5f2e6bd2ba52 100644
--- a/docs/workflow-controller-configmap.yaml
+++ b/docs/workflow-controller-configmap.yaml
@@ -76,6 +76,7 @@ data:
        key: secretKey

    # Specifies the container runtime interface to use (default: docker)
+    # must be one of: docker, kubelet, k8sapi, pns
    containerRuntimeExecutor: docker

    # kubelet port when using kubelet executor (default: 10250)
@@ -103,7 +104,7 @@ data:
      - --gloglevel
      - "6"
      env:
-      - name: DEBUG_FLAG
+      - name: SOME_ENV_VAR
        value: "1"

    # metricsConfig controls the path and port for prometheus metrics
diff --git a/manifests/base/kustomization.yaml b/manifests/base/kustomization.yaml
index c6815667188d..c6e19a358a08 100644
--- a/manifests/base/kustomization.yaml
+++ b/manifests/base/kustomization.yaml
@@ -9,7 +9,7 @@ bases:
images:
- name: argoproj/argoui
  newName: argoproj/argoui
-  newTag: v2.3.0-rc1
+  newTag: v2.3.0-rc2
- name: argoproj/workflow-controller
  newName: argoproj/workflow-controller
-  newTag: v2.3.0-rc1
+  newTag: v2.3.0-rc2
diff --git a/manifests/install.yaml b/manifests/install.yaml
index e8d5afe5d588..3a65c4bf5a75 100644
--- a/manifests/install.yaml
+++ b/manifests/install.yaml
@@ -228,7 +228,7 @@ spec:
        value: "false"
      - name: BASE_HREF
        value: /
-      image: argoproj/argoui:v2.3.0-rc1
+      image: argoproj/argoui:v2.3.0-rc2
      name: argo-ui
      serviceAccountName: argo-ui
---
@@ -250,9 +250,9 @@ spec:
      - --configmap
      - workflow-controller-configmap
      - --executor-image
-      - argoproj/argoexec:v2.3.0-rc1
+      - argoproj/argoexec:v2.3.0-rc2
      command:
      - workflow-controller
-      image: argoproj/workflow-controller:v2.3.0-rc1
+      image: argoproj/workflow-controller:v2.3.0-rc2
      name: workflow-controller
      serviceAccountName: argo
diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml
index 7b55936496c2..de6273cad283 100644
--- a/manifests/namespace-install.yaml
+++ b/manifests/namespace-install.yaml
@@ -170,7 +170,7 @@ spec:
        value: "false"
      - name: BASE_HREF
        value: /
-      image: argoproj/argoui:v2.3.0-rc1
+      image: argoproj/argoui:v2.3.0-rc2
      name: argo-ui
      serviceAccountName: argo-ui
---
@@ -192,9 +192,9 @@ spec:
      - --configmap
      - workflow-controller-configmap
      - --executor-image
-      - argoproj/argoexec:v2.3.0-rc1
+      - argoproj/argoexec:v2.3.0-rc2
      command:
      - workflow-controller
-      image: argoproj/workflow-controller:v2.3.0-rc1
+      image: argoproj/workflow-controller:v2.3.0-rc2
      name: workflow-controller
      serviceAccountName: argo

From 4e37a444bde2a034885d0db35f7b38684505063e Mon Sep 17 00:00:00 2001
From: Jesse Suen
Date: Sun, 21 Apr 2019 01:35:52 -0700
Subject: [PATCH 126/145] Add documentation on releasing

---
 docs/releasing.md | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 docs/releasing.md

diff --git a/docs/releasing.md b/docs/releasing.md
new file mode 100644
index 000000000000..4a71834f56de
--- /dev/null
+++ b/docs/releasing.md
@@ -0,0 +1,38 @@
+# Release Instructions
+
+1. Update CHANGELOG.md with changes in the release
+
+2. Update VERSION with new tag
+
+3. Update codegen, manifests with new tag
+
+```
+make codegen manifests IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z
+```
+
+4. Commit VERSION and manifest changes
+
+5. git tag the release
+
+```
+git tag vX.Y.Z
+```
+
+6. Build the release
+
+```
+make release IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z
+```
+
+7. If successful, publish the release:
+```
+export ARGO_RELEASE=vX.Y.Z
+docker push argoproj/workflow-controller:${ARGO_RELEASE}
+docker push argoproj/argoexec:${ARGO_RELEASE}
+docker push argoproj/argocli:${ARGO_RELEASE}
+git push upstream ${ARGO_RELEASE}
+```
+
+8. Draft GitHub release with the content from CHANGELOG.md, and CLI binaries produced in the `dist` directory
+
+* https://github.com/argoproj/argo/releases/new

From 5a0d2b9206f78b6348009d5f32f09f25eac25517 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Tue, 23 Apr 2019 14:59:53 -0400
Subject: [PATCH 127/145] use a secret selector for getting credentials

---
 Gopkg.lock                                      | 360 +++++-------------
 Gopkg.toml                                      |   8 +
 api/openapi-spec/swagger.json                   |  10 +-
 .../workflow/v1alpha1/openapi_generated.go      |  20 +-
 pkg/apis/workflow/v1alpha1/types.go             |   3 +-
 .../v1alpha1/zz_generated.deepcopy.go           |   5 +-
 workflow/artifacts/gcs/gcs.go                   |   9 +-
 workflow/common/common.go                       |  10 -
 workflow/controller/workflowpod.go              |  27 --
 workflow/executor/executor.go                   |   8 +-
 10 files changed, 149 insertions(+), 311 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 233bb7612a7c..3bf47c2d8d9c 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -2,39 +2,44 @@

[[projects]]
-  digest = "1:8b95956b70e181b19025c7ba3578fdfd8efbec4ce916490700488afb9218972c"
  name = "cloud.google.com/go"
-  packages = ["compute/metadata"]
-  pruneopts = ""
+  packages = [
+    "compute/metadata",
+    "iam",
+    "internal",
+    "internal/optional",
+    "internal/trace",
+    "internal/version",
+    "storage"
+  ]
  revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239"
  version = "v0.26.0"

[[projects]]
-  digest = "1:b9660f5e3522b899d32b1f9bb98056203d6f76f673e1843eaa00869330103ba5"
+  name = "github.com/BurntSushi/toml"
+  packages = ["."]
+  revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
+  version = "v0.3.1"
+
+[[projects]]
  name = "github.com/Knetic/govaluate"
  packages = ["."]
-  pruneopts = ""
  revision = "9aa49832a739dcd78a5542ff189fb82c3e423116"

[[projects]]
-  digest = "1:8e47871087b94913898333f37af26732faaab30cdb41571136cf7aec9921dae7"
  name = "github.com/PuerkitoBio/purell"
  packages = ["."]
-  pruneopts = ""
  revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
  version = "v1.1.0"

[[projects]]
  branch = "master"
-  digest = "1:331a419049c2be691e5ba1d24342fc77c7e767a80c666a18fd8a9f7b82419c1c"
  name = "github.com/PuerkitoBio/urlesc"
  packages = ["."]
-  pruneopts = ""
  revision = "de5bf2ad457846296e2031421a34e2568e304e35"

[[projects]]
  branch = "master"
-  digest = "1:c3b7ed058146643b16d3a9827550fba317dbff9f55249dfafac7eb6c3652ad23"
  name = "github.com/argoproj/pkg"
  packages = [
    "errors",
    "exec",
    "file",
    "humanize",
    "json",
    "kube/errors",
    "s3",
    "stats",
    "strftime",
-    "time",
+    "time"
  ]
-  pruneopts = ""
  revision = "a581a48d63014312c4f2762787f669e46bdb1fd9"

[[projects]]
  branch = "master"
-  digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb"
  name = "github.com/beorn7/perks"
  packages = ["quantile"]
-  pruneopts = ""
  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"

[[projects]]
-  digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
-  pruneopts = ""
  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
  version = "v1.1.0"

[[projects]]
  branch = "master"
-  digest = "1:d6c13a378213e3de60445e49084b8a0a9ce582776dfc77927775dbeb3ff72a35"
  name = "github.com/docker/spdystream"
  packages = [
    ".",
-    "spdy",
+    "spdy"
  ]
-  pruneopts = ""
  revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85"

[[projects]]
  branch = "master"
-  digest = "1:f1a75a8e00244e5ea77ff274baa9559eb877437b240ee7b278f3fc560d9f08bf"
  name = "github.com/dustin/go-humanize"
  packages = ["."]
-  pruneopts = ""
  revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e"

[[projects]]
-  digest = "1:8a34d7a37b8f07239487752e14a5faafcbbc718fc385ad429a2c4ac6f27a207f"
  name = "github.com/emicklei/go-restful"
  packages = [
    ".",
-    "log",
+    "log"
  ]
-  pruneopts = ""
  revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0"
  version = "v2.8.0"

[[projects]]
-  digest = "1:ba7c75e38d81b9cf3e8601c081567be3b71bccca8c11aee5de98871360aa4d7b"
  name = "github.com/emirpasic/gods"
  packages = [
    "containers",
    "lists",
    "lists/arraylist",
    "trees",
    "trees/binaryheap",
-    "utils",
+    "utils"
  ]
-  pruneopts = ""
  revision = "f6c17b524822278a87e3b3bd809fec33b51f5b46"
  version = "v1.9.0"

[[projects]]
-  digest = "1:dcefbadf4534c5ecac8573698fba6e6e601157bfa8f96aafe29df31ae582ef2a"
  name = "github.com/evanphx/json-patch"
  packages = ["."]
-  pruneopts = ""
  revision = "afac545df32f2287a079e2dfb7ba2745a643747e"
  version = "v3.0.0"

[[projects]]
-  digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
  name = "github.com/fsnotify/fsnotify"
  packages = ["."]
-  pruneopts = ""
  revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
  version = "v1.4.7"

[[projects]]
  branch = "master"
-  digest = "1:ac2bf6881c6a96d07773dee3b9b2b369bc209c988505bd6cb283a8d549cb8699"
  name = "github.com/ghodss/yaml"
  packages = ["."]
-  pruneopts = ""
  revision = "c7ce16629ff4cd059ed96ed06419dd3856fd3577"

[[projects]]
-  digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356"
  name = "github.com/go-ini/ini"
  packages = ["."]
-  pruneopts = ""
  revision = "358ee7663966325963d4e8b2e1fbd570c5195153"
  version = "v1.38.1"

[[projects]]
-  digest = "1:e116a4866bffeec941056a1fcfd37e520fad1ee60e4e3579719f19a43c392e10"
  name = "github.com/go-openapi/jsonpointer"
  packages = ["."]
-  pruneopts = ""
  revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2"
  version = "0.15.0"

[[projects]]
-  digest = "1:3830527ef0f4f9b268d9286661c0f52f9115f8aefd9f45ee7352516f93489ac9"
  name = "github.com/go-openapi/jsonreference"
  packages = ["."]
-  pruneopts = ""
  revision = "3fb327e6747da3043567ee86abd02bb6376b6be2"
  version = "0.15.0"

[[projects]]
-  digest = "1:6caee195f5da296689270037c5a25c0bc3cc6e54ae5a356e395aa8946356dbc9"
  name = "github.com/go-openapi/spec"
  packages = ["."]
-  pruneopts = ""
  revision = "bce47c9386f9ecd6b86f450478a80103c3fe1402"
  version = "0.15.0"

[[projects]]
-  digest = "1:22da48dbccb0539f511efbbbdeba68081866892234e57a9d7c7f9848168ae30c"
  name = "github.com/go-openapi/swag"
  packages = ["."]
-  pruneopts = ""
  revision = "2b0bd4f193d011c203529df626a65d63cb8a79e8"
  version = "0.15.0"

[[projects]]
-  digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918"
  name = "github.com/gogo/protobuf"
  packages = [
    "proto",
-    "sortkeys",
+    "sortkeys"
  ]
-  pruneopts = ""
  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
  version = "v1.1.1"

[[projects]]
  branch = "master"
-  digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a"
  name = "github.com/golang/glog"
  packages = ["."]
-  pruneopts = ""
  revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"

[[projects]]
-  digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b"
  name = "github.com/golang/protobuf"
  packages = [
    "proto",
    "protoc-gen-go/descriptor",
    "ptypes",
    "ptypes/any",
    "ptypes/duration",
-    "ptypes/timestamp",
+    "ptypes/timestamp"
  ]
-  pruneopts = ""
  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
  version = "v1.1.0"

[[projects]]
  branch = "master"
-  digest = "1:1e5b1e14524ed08301977b7b8e10c719ed853cbf3f24ecb66fae783a46f207a6"
  name = "github.com/google/btree"
  packages = ["."]
-  pruneopts = ""
  revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"

[[projects]]
  branch = "master"
-  digest = "1:754f77e9c839b24778a4b64422236d38515301d2baeb63113aa3edc42e6af692"
  name = "github.com/google/gofuzz"
  packages = ["."]
-  pruneopts = ""
  revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"

[[projects]]
@@ -233,108 +199,85 @@
  version = "v2.0.0"

[[projects]]
-  digest = "1:16b2837c8b3cf045fa2cdc82af0cf78b19582701394484ae76b2c3bc3c99ad73"
  name = "github.com/googleapis/gnostic"
  packages = [
    "OpenAPIv2",
    "compiler",
-    "extensions",
+    "extensions"
  ]
-  pruneopts = ""
  revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
  version = "v0.2.0"

[[projects]]
-  digest = "1:64d212c703a2b94054be0ce470303286b177ad260b2f89a307e3d1bb6c073ef6"
  name = "github.com/gorilla/websocket"
  packages = ["."]
-  pruneopts = ""
  revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
  version = "v1.2.0"

[[projects]]
  branch = "master"
-  digest = "1:009a1928b8c096338b68b5822d838a72b4d8520715c1463614476359f3282ec8"
  name = "github.com/gregjones/httpcache"
  packages = [
    ".",
-    "diskcache",
+    "diskcache"
  ]
-  pruneopts = ""
  revision = "9cad4c3443a7200dd6400aef47183728de563a38"

[[projects]]
  branch = "master"
-  digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94"
  name = "github.com/hashicorp/golang-lru"
  packages = [
    ".",
-    "simplelru",
+    "simplelru"
  ]
-  pruneopts = ""
  revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"

[[projects]]
-  digest = "1:7ab38c15bd21e056e3115c8b526d201eaf74e0308da9370997c6b3c187115d36"
  name = "github.com/imdario/mergo"
  packages = ["."]
-  pruneopts = ""
  revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
  version = "v0.3.6"

[[projects]]
-  digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
  name = "github.com/inconshreveable/mousetrap"
  packages = ["."]
-  pruneopts = ""
  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
  version = "v1.0"

[[projects]]
  branch = "master"
-  digest = "1:95abc4eba158a39873bd4fabdee576d0ae13826b550f8b710881d80ae4093a0f"
  name = "github.com/jbenet/go-context"
  packages = ["io"]
-  pruneopts = ""
  revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"

[[projects]]
-  digest = "1:31c6f3c4f1e15fcc24fcfc9f5f24603ff3963c56d6fa162116493b4025fb6acc"
  name = "github.com/json-iterator/go"
  packages = ["."]
-  pruneopts = ""
  revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"

[[projects]]
-  digest = "1:7fe04787f53bb61c1ba9c659b1a90ee3da16b4d6a1c41566bcb5077efbd30f97"
  name = "github.com/kevinburke/ssh_config"
  packages = ["."]
-  pruneopts = ""
  revision = "9fc7bb800b555d63157c65a904c86a2cc7b4e795"
  version = "0.4"

[[projects]]
  branch = "master"
-  digest = "1:e977ed7b0619844e394c4e725d008ade0840f1882c500a66e797b98bde70cf87"
  name = "github.com/mailru/easyjson"
  packages = [
    "buffer",
    "jlexer",
-    "jwriter",
+    "jwriter"
  ]
-  pruneopts = ""
  revision = "03f2033d19d5860aef995fe360ac7d395cd8ce65"

[[projects]]
-  digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28"
  name = "github.com/matttproud/golang_protobuf_extensions"
  packages = ["pbutil"]
-  pruneopts = ""
  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
  version = "v1.0.1"

[[projects]]
-  digest = "1:619ff8becfc8080f2cc4532ea21437e804038e0431c88e171c381fde96eb06ae"
  name = "github.com/minio/minio-go"
  packages = [
    ".",
    "pkg/credentials",
    "pkg/encrypt",
    "pkg/s3signer",
    "pkg/s3utils",
-    "pkg/set",
+    "pkg/set"
  ]
-  pruneopts = ""
  revision = "70799fe8dae6ecfb6c7d7e9e048fce27f23a1992"
  version = "v6.0.5"

[[projects]]
  branch = "master"
-  digest = "1:83854f6b1d2ce047b69657e3a87ba7602f4c5505e8bdfd02ab857db8e983bde1"
  name = "github.com/mitchellh/go-homedir"
  packages = ["."]
-  pruneopts = ""
  revision = "58046073cbffe2f25d425fe1331102f55cf719de"

[[projects]]
-  digest = "1:0c0ff2a89c1bb0d01887e1dac043ad7efbf3ec77482ef058ac423d13497e16fd"
  name = "github.com/modern-go/concurrent"
  packages = ["."]
-  pruneopts = ""
  revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
  version = "1.0.3"

[[projects]]
-  digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855"
  name = "github.com/modern-go/reflect2"
  packages = ["."]
-  pruneopts = ""
  revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
  version = "1.0.1"

[[projects]]
-  digest = "1:049b5bee78dfdc9628ee0e557219c41f683e5b06c5a5f20eaba0105ccc586689"
  name = "github.com/pelletier/go-buffruneio"
  packages = ["."]
-  pruneopts = ""
  revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
  version = "v0.2.0"

[[projects]]
  branch = "master"
-  digest = "1:c24598ffeadd2762552269271b3b1510df2d83ee6696c1e543a0ff653af494bc"
  name = "github.com/petar/GoLLRB"
  packages = ["llrb"]
-  pruneopts = ""
  revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"

[[projects]]
-  digest = "1:b46305723171710475f2dd37547edd57b67b9de9f2a6267cafdd98331fd6897f"
  name = "github.com/peterbourgon/diskv"
  packages = ["."]
-  pruneopts = ""
  revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
  version = "v2.0.1"

[[projects]]
-  digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
  name = "github.com/pkg/errors"
  packages = ["."]
-  pruneopts = ""
  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
  version = "v0.8.0"

[[projects]]
-  digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
  name = "github.com/pmezard/go-difflib"
  packages = ["difflib"]
-  pruneopts = ""
  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
  version = "v1.0.0"

[[projects]]
-  digest = "1:4142d94383572e74b42352273652c62afec5b23f325222ed09198f46009022d1"
  name = "github.com/prometheus/client_golang"
  packages = [
    "prometheus",
-    "prometheus/promhttp",
+    "prometheus/promhttp"
  ]
-  pruneopts = ""
  revision = "c5b7fccd204277076155f10851dad72b76a49317"
  version = "v0.8.0"

[[projects]]
  branch = "master"
-  digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561"
  name = "github.com/prometheus/client_model"
  packages = ["go"]
-  pruneopts = ""
  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"

[[projects]]
  branch = "master"
-  digest = "1:f477ef7b65d94fb17574fc6548cef0c99a69c1634ea3b6da248b63a61ebe0498"
  name = "github.com/prometheus/common"
  packages = [
    "expfmt",
    "internal/bitbucket.org/ww/goautoneg",
-    "model",
+    "model"
  ]
-  pruneopts = ""
  revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"

[[projects]]
  branch = "master"
-  digest = "1:e04aaa0e8f8da0ed3d6c0700bd77eda52a47f38510063209d72d62f0ef807d5e"
  name = "github.com/prometheus/procfs"
  packages = [
    ".",
    "internal/util",
    "nfs",
-    "xfs",
+    "xfs"
  ]
-  pruneopts = ""
  revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"

[[projects]]
-  digest = "1:3962f553b77bf6c03fc07cd687a22dd3b00fe11aa14d31194f5505f5bb65cdc8"
  name = "github.com/sergi/go-diff"
  packages = ["diffmatchpatch"]
-  pruneopts = ""
  revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
  version = "v1.0.0"

[[projects]]
-  digest = "1:3fcbf733a8d810a21265a7f2fe08a3353db2407da052b233f8b204b5afc03d9b"
  name = "github.com/sirupsen/logrus"
  packages = ["."]
-  pruneopts = ""
  revision = "3e01752db0189b9157070a0e1668a620f9a85da2"
  version = "v1.0.6"

[[projects]]
  branch = "master"
-  digest = "1:c8f6919ab9f140506fd4ad3f4a9c9c2af9ee7921e190af0c67b2fca2f903083c"
  name = "github.com/spf13/cobra"
  packages = ["."]
-  pruneopts = ""
  revision = "7c4570c3ebeb8129a1f7456d0908a8b676b6f9f1"

[[projects]]
-  digest = "1:8e243c568f36b09031ec18dff5f7d2769dcf5ca4d624ea511c8e3197dc3d352d"
  name = "github.com/spf13/pflag"
  packages = ["."]
-  pruneopts = ""
  revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
  version = "v1.0.1"

[[projects]]
-  digest = "1:b1861b9a1aa0801b0b62945ed7477c1ab61a4bd03b55dfbc27f6d4f378110c8c"
  name = "github.com/src-d/gcfg"
  packages = [
    ".",
    "scanner",
    "token",
-    "types",
+    "types"
  ]
-  pruneopts = ""
  revision = "f187355171c936ac84a82793659ebb4936bc1c23"
  version = "v1.3.0"

[[projects]]
-  digest = "1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6"
  name = "github.com/stretchr/objx"
  packages = ["."]
-  pruneopts = ""
  revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
  version = "v0.1.1"

[[projects]]
-  digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
  name = "github.com/stretchr/testify"
  packages = [
    "assert",
    "mock",
    "require",
-    "suite",
+    "suite"
  ]
-  pruneopts = ""
  revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
  version = "v1.2.2"

[[projects]]
-  digest = "1:3ddca2bd5496c6922a2a9e636530e178a43c2a534ea6634211acdc7d10222794"
  name = "github.com/tidwall/gjson"
  packages = ["."]
-  pruneopts = ""
  revision = "1e3f6aeaa5bad08d777ea7807b279a07885dd8b2"
  version = "v1.1.3"

[[projects]]
  branch = "master"
-  digest = "1:4db4f92bb9cb04cfc4fccb36aba2598b02a988008c4cc0692b241214ad8ac96e"
  name = "github.com/tidwall/match"
  packages = ["."]
-  pruneopts = ""
  revision = "1731857f09b1f38450e2c12409748407822dc6be"

[[projects]]
  branch = "master"
-  digest = "1:857a9ecd5cb13379ecc8f798f6e6b6b574c98b9355657d91e068275f1120aaf7"
  name = "github.com/valyala/bytebufferpool"
  packages = ["."]
-  pruneopts = ""
  revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7"

[[projects]]
  branch = "master"
-  digest = "1:bf6f8915c0338e875383cb7fdebd58a4d360a232f461d9a029d7ccb12f90c5d7"
  name = "github.com/valyala/fasttemplate"
  packages = ["."]
-  pruneopts = ""
  revision = "dcecefd839c4193db0d35b88ec65b4c12d360ab0"

[[projects]]
-  digest = "1:afc0b8068986a01e2d8f449917829753a54f6bd4d1265c2b4ad9cba75560020f"
  name = "github.com/xanzy/ssh-agent"
  packages = ["."]
-  pruneopts = ""
  revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c"
  version = "v0.2.0"

[[projects]]
  branch = "master"
-  digest = "1:53c4b75f22ea7757dea07eae380ea42de547ae6865a5e3b41866754a8a8219c9"
  name = "golang.org/x/crypto"
  packages = [
    "argon2",
    "blake2b",
    "cast5",
    "curve25519",
    "ed25519",
    "ed25519/internal/edwards25519",
    "internal/chacha20",
    "internal/subtle",
    "openpgp",
    "openpgp/armor",
    "openpgp/elgamal",
    "openpgp/errors",
    "openpgp/packet",
    "openpgp/s2k",
    "poly1305",
    "ssh",
    "ssh/agent",
    "ssh/knownhosts",
-    "ssh/terminal",
+    "ssh/terminal"
  ]
-  pruneopts = ""
  revision = "f027049dab0ad238e394a753dba2d14753473a04"

[[projects]]
  branch = "master"
-  digest = "1:67c2d940f2d5c017ef88e9847709dca9b38d5fe82f1e33fb42ace515219f22f1"
+  name = "golang.org/x/lint"
+  packages = [
+    ".",
+    "golint"
+  ]
  revision =
"959b441ac422379a43da2230f62be024250818b0" + +[[projects]] + branch = "master" name = "golang.org/x/net" packages = [ "context", @@ -622,40 +522,33 @@ "http2/hpack", "idna", "internal/timeseries", - "lex/httplex", "trace" ] - pruneopts = "" revision = "f9ce57c11b242f0f1599cf25c89d8cb02c45295a" [[projects]] branch = "master" - digest = "1:a8172cf4304ef01f0c7dd634c331880247d10f9e28b041821f2321a8e4bb3b7c" name = "golang.org/x/oauth2" packages = [ ".", "google", "internal", "jws", - "jwt", + "jwt" ] - pruneopts = "" revision = "3d292e4d0cdc3a0113e6d207bb137145ef1de42f" [[projects]] branch = "master" - digest = "1:6d9c86494d97c7fc8bbab029c17fc0ce9dc517aaae92a25d790d01b0e8732832" name = "golang.org/x/sys" packages = [ "cpu", "unix", - "windows", + "windows" ] - pruneopts = "" revision = "904bdc257025c7b3f43c19360ad3ab85783fad78" [[projects]] - digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" name = "golang.org/x/text" packages = [ "collate", @@ -672,36 +565,41 @@ "unicode/cldr", "unicode/norm", "unicode/rangetable", - "width", + "width" ] - pruneopts = "" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" - digest = "1:55a681cb66f28755765fa5fa5104cbd8dc85c55c02d206f9f89566451e3fe1aa" name = "golang.org/x/time" packages = ["rate"] - pruneopts = "" revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" [[projects]] branch = "master" - digest = "1:c73b8c7b4bfb2e69de55a3549d6a8089d7757899cc5b62ff1c180bd76e9ee7f6" name = "golang.org/x/tools" packages = [ + "cmd/goimports", "go/ast/astutil", + "go/buildutil", + "go/gcexportdata", + "go/internal/cgo", + "go/internal/gcimporter", + "go/loader", + "go/packages", + "go/packages/golist", + "go/packages/raw", + "go/types/typeutil", "imports", - "internal/fastwalk", + "internal/fastwalk" ] - pruneopts = "" revision = "ca6481ae56504398949d597084558e50ad07117a" [[projects]] - branch = "master" name = "google.golang.org/api" packages = [ + ".", "gensupport", "googleapi", "googleapi/internal/uritemplates", @@ -713,10 +611,10 @@ "transport/http", "transport/http/internal/propagation" ] - revision = "44c6748ece026e0fe668793d8f92e521356400a3" + revision = "0cbcb99a9ea0c8023c794b2693cbe1def82ed4d7" + version = "v0.3.2" [[projects]] - digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" name = "google.golang.org/appengine" packages = [ ".", @@ -728,9 +626,8 @@ "internal/modules", "internal/remote_api", "internal/urlfetch", - "urlfetch", + "urlfetch" ] - pruneopts = "" revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" version = "v1.1.0" @@ -740,6 +637,7 @@ packages = [ "googleapis/api/annotations", "googleapis/iam/v1", + "googleapis/rpc/code", "googleapis/rpc/status" ] revision = "0e822944c569bf5c9afd034adaa56208bd2906ac" @@ -778,29 +676,24 @@ version = "v1.15.0" [[projects]] - digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" name = "gopkg.in/inf.v0" packages = ["."] - pruneopts = "" revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" [[projects]] - digest = "1:6715e0bec216255ab784fe04aa4d5a0a626ae07a3a209080182e469bc142761a" name = "gopkg.in/src-d/go-billy.v4" packages = [ ".", "helper/chroot", "helper/polyfill", "osfs", - "util", + "util" ] - pruneopts = "" revision = "83cf655d40b15b427014d7875d10850f96edba14" version = "v4.2.0" [[projects]] - digest = "1:d014bc54441ee96e8306ea6a767264864d2fd0898962a9dee152e992b2e672da" name = "gopkg.in/src-d/go-git.v4" packages = [ ".", @@ -842,31 +735,53 @@ 
"utils/merkletrie/filesystem", "utils/merkletrie/index", "utils/merkletrie/internal/frame", - "utils/merkletrie/noder", + "utils/merkletrie/noder" ] - pruneopts = "" revision = "3bd5e82b2512d85becae9677fa06b5a973fd4cfb" version = "v4.5.0" [[projects]] - digest = "1:ceec7e96590fb8168f36df4795fefe17051d4b0c2acc7ec4e260d8138c4dafac" name = "gopkg.in/warnings.v0" packages = ["."] - pruneopts = "" revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b" version = "v0.1.2" [[projects]] - digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2" name = "gopkg.in/yaml.v2" packages = ["."] - pruneopts = "" revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" +[[projects]] + name = "honnef.co/go/tools" + packages = [ + "arg", + "callgraph", + "callgraph/static", + "cmd/staticcheck", + "config", + "deprecated", + "functions", + "internal/sharedcheck", + "lint", + "lint/lintdsl", + "lint/lintutil", + "lint/lintutil/format", + "simple", + "ssa", + "ssa/ssautil", + "ssautil", + "staticcheck", + "staticcheck/vrp", + "stylecheck", + "unused", + "version" + ] + revision = "95959eaf5e3c41c66151dcfd91779616b84077a8" + version = "2019.1.1" + [[projects]] branch = "release-1.12" - digest = "1:ed04c5203ecbf6358fb6a774b0ecd40ea992d6dcc42adc1d3b7cf9eceb66b6c8" name = "k8s.io/api" packages = [ "admissionregistration/v1alpha1", @@ -899,14 +814,12 @@ "settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1", + "storage/v1beta1" ] - pruneopts = "" revision = "475331a8afff5587f47d0470a93f79c60c573c03" [[projects]] branch = "release-1.12" - digest = "1:5899da40e41bcc8c1df101b72954096bba9d85b763bc17efc846062ccc111c7b" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -954,14 +867,12 @@ "pkg/watch", "third_party/forked/golang/json", "third_party/forked/golang/netutil", - "third_party/forked/golang/reflect", + "third_party/forked/golang/reflect" ] - pruneopts = "" revision = "f71dbbc36e126f5a371b85f6cca96bc8c57db2b6" [[projects]] branch = "release-9.0" - digest = "1:77bf3d9f18ec82e08ac6c4c7e2d9d1a2ef8d16b25d3ff72fcefcf9256d751573" name = "k8s.io/client-go" packages = [ "discovery", @@ -1065,14 +976,12 @@ "util/integer", "util/jsonpath", "util/retry", - "util/workqueue", + "util/workqueue" ] - pruneopts = "" revision = "13596e875accbd333e0b5bd5fd9462185acd9958" [[projects]] branch = "release-1.12" - digest = "1:e6fffdf0dfeb0d189a7c6d735e76e7564685d3b6513f8b19d3651191cb6b084b" name = "k8s.io/code-generator" packages = [ "cmd/client-gen", @@ -1091,14 +1000,12 @@ "cmd/lister-gen", "cmd/lister-gen/args", "cmd/lister-gen/generators", - "pkg/util", + "pkg/util" ] - pruneopts = "" revision = "3dcf91f64f638563e5106f21f50c31fa361c918d" [[projects]] branch = "master" - digest = "1:74eb4556b4379d0d76a3a5ada504ff6c5ef76cd85cbf1347cb649e4c1cc8ca9e" name = "k8s.io/gengo" packages = [ "args", @@ -1107,95 +1014,22 @@ "generator", "namer", "parser", - "types", + "types" ] - pruneopts = "" revision = "c42f3cdacc394f43077ff17e327d1b351c0304e4" [[projects]] branch = "master" - digest = "1:951bc2047eea6d316a17850244274554f26fd59189360e45f4056b424dadf2c1" name = "k8s.io/kube-openapi" packages = [ "pkg/common", - "pkg/util/proto", + "pkg/util/proto" ] - pruneopts = "" revision = "e3762e86a74c878ffed47484592986685639c2cd" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/Knetic/govaluate", - "github.com/argoproj/pkg/errors", - "github.com/argoproj/pkg/file", - "github.com/argoproj/pkg/humanize", - "github.com/argoproj/pkg/json", - 
"github.com/argoproj/pkg/kube/cli", - "github.com/argoproj/pkg/kube/errors", - "github.com/argoproj/pkg/s3", - "github.com/argoproj/pkg/stats", - "github.com/argoproj/pkg/strftime", - "github.com/argoproj/pkg/time", - "github.com/evanphx/json-patch", - "github.com/fsnotify/fsnotify", - "github.com/ghodss/yaml", - "github.com/go-openapi/spec", - "github.com/golang/glog", - "github.com/gorilla/websocket", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/sirupsen/logrus", - "github.com/spf13/cobra", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "github.com/stretchr/testify/suite", - "github.com/tidwall/gjson", - "github.com/valyala/fasttemplate", - "golang.org/x/crypto/ssh", - "gopkg.in/src-d/go-git.v4", - "gopkg.in/src-d/go-git.v4/plumbing/transport", - "gopkg.in/src-d/go-git.v4/plumbing/transport/http", - "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh", - "k8s.io/api/core/v1", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/resource", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "k8s.io/apimachinery/pkg/fields", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/runtime/serializer", - "k8s.io/apimachinery/pkg/selection", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/clock", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/validation", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/client-go/discovery", - "k8s.io/client-go/discovery/fake", - "k8s.io/client-go/dynamic", - "k8s.io/client-go/informers/internalinterfaces", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/fake", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "k8s.io/client-go/plugin/pkg/client/auth/oidc", - "k8s.io/client-go/rest", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/tools/remotecommand", - "k8s.io/client-go/util/flowcontrol", - "k8s.io/client-go/util/workqueue", - "k8s.io/code-generator/cmd/client-gen", - "k8s.io/code-generator/cmd/deepcopy-gen", - "k8s.io/code-generator/cmd/informer-gen", - "k8s.io/code-generator/cmd/lister-gen", - "k8s.io/kube-openapi/pkg/common", - ] + inputs-digest = "b51e8453cbf883cd75a2b28342c7be0c860aaccaca1fff2ebc1d5bbaef288599" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 36431d39c9e7..b77d4eae0533 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -48,3 +48,11 @@ required = [ [[constraint]] name = "github.com/ghodss/yaml" branch = "master" + +[[constraint]] + name = "cloud.google.com/go" + version = "0.26.0" + +[[constraint]] + name = "google.golang.org/api" + version = "0.3.2" diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 29254c385f40..536ee35208c2 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -233,12 +233,16 @@ "description": "GCSArtifact is the location of a GCS artifact", "required": [ "bucket", + "credentialsSecret", "key" ], "properties": { "bucket": { "type": "string" }, + "credentialsSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, "key": { "type": "string" } @@ -247,11 +251,15 @@ "io.argoproj.workflow.v1alpha1.GCSBucket": { "description": "GCSBucket contains the access 
information required for acting with a GCS bucket", "required": [ - "bucket" + "bucket", + "credentialsSecret" ], "properties": { "bucket": { "type": "string" + }, + "credentialsSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" } } }, diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 0f898464dbd7..18d8a2494ceb 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -459,6 +459,11 @@ func schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref common.ReferenceCallback) Format: "", }, }, + "credentialsSecret": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, "key": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, @@ -466,10 +471,11 @@ func schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref common.ReferenceCallback) }, }, }, - Required: []string{"bucket", "key"}, + Required: []string{"bucket", "credentialsSecret", "key"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -485,11 +491,17 @@ func schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref common.ReferenceCallback) c Format: "", }, }, + "credentialsSecret": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, }, - Required: []string{"bucket"}, + Required: []string{"bucket", "credentialsSecret"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, } } diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index d2815274acd8..47da4c941dbe 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -599,7 +599,8 @@ func (s *S3Artifact) String() string { // GCSBucket contains the access information required for acting with a GCS bucket type GCSBucket struct { - Bucket string `json:"bucket"` + Bucket string `json:"bucket"` + CredentialsSecret apiv1.SecretKeySelector `json:"credentialsSecret"` } // GCSArtifact is the location of a GCS artifact diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index 6b6ccec7ba76..8cd12c7e9420 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -128,7 +128,7 @@ func (in *ArtifactLocation) DeepCopyInto(out *ArtifactLocation) { if in.GCS != nil { in, out := &in.GCS, &out.GCS *out = new(GCSArtifact) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -246,7 +246,7 @@ func (in *DAGTemplate) DeepCopy() *DAGTemplate { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCSArtifact) DeepCopyInto(out *GCSArtifact) { *out = *in - out.GCSBucket = in.GCSBucket + in.GCSBucket.DeepCopyInto(&out.GCSBucket) return } @@ -263,6 +263,7 @@ func (in *GCSArtifact) DeepCopy() *GCSArtifact { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GCSBucket) DeepCopyInto(out *GCSBucket) { *out = *in + in.CredentialsSecret.DeepCopyInto(&out.CredentialsSecret) return } diff --git a/workflow/artifacts/gcs/gcs.go b/workflow/artifacts/gcs/gcs.go index 5802076c3ae8..49a8e55abdf9 100644 --- a/workflow/artifacts/gcs/gcs.go +++ b/workflow/artifacts/gcs/gcs.go @@ -4,20 +4,25 @@ import ( "cloud.google.com/go/storage" "context" "errors" + "fmt" argoErrors "github.com/argoproj/argo/errors" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" log "github.com/sirupsen/logrus" + "google.golang.org/api/option" "io" "os" ) type GCSArtifactDriver struct { - Context context.Context + Context context.Context + CredsJSONData []byte } func (gcsDriver *GCSArtifactDriver) newGcsClient() (client *storage.Client, err error) { gcsDriver.Context = context.Background() - client, err = storage.NewClient(gcsDriver.Context) + + fmt.Println(string(gcsDriver.CredsJSONData)) + client, err = storage.NewClient(gcsDriver.Context, option.WithCredentialsJSON(gcsDriver.CredsJSONData)) if err != nil { return nil, argoErrors.InternalWrapError(err) } diff --git a/workflow/common/common.go b/workflow/common/common.go index e3dbb079169b..432339e10cbe 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -1,7 +1,6 @@ package common import ( - "os" "time" "github.com/argoproj/argo/pkg/apis/workflow" @@ -37,11 +36,6 @@ const ( // DockerSockVolumeName is the volume name for the /var/run/docker.sock host path volume DockerSockVolumeName = "docker-sock" - // GoogleSecretVolumeName is the volume name for the /var/secrets/google volume - GoogleSecretVolumeName = "google-cloud-key" - // EvnVarGoogleSecret contains the name of the google credentials file used fro GCS access - EnvVarGoogleSecret = "GOOGLE_CREDENTIALS_SECRET" - // AnnotationKeyNodeName is the pod metadata annotation key containing the workflow node name AnnotationKeyNodeName = workflow.FullName + "/node-name" // AnnotationKeyNodeMessage is the pod metadata annotation key the executor will use to @@ -122,10 +116,6 @@ const ( LocalVarPodName = "pod.name" ) -var ( - GoogleSecretName = os.Getenv(EnvVarGoogleSecret) -) - // ExecutionControl contains execution control parameters for executor to decide how to execute the container type ExecutionControl struct { // Deadline is a max timestamp in which an executor can run the container before terminating it diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 4700ecd996a0..e28179a0f998 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -87,25 +87,6 @@ var ( execEnvVars = []apiv1.EnvVar{ envFromField(common.EnvVarPodName, "metadata.name"), } - - volumeMountGoogleSecret = apiv1.VolumeMount{ - Name: common.GoogleSecretVolumeName, - MountPath: "/var/secrets/google", - } - - googleCredentialSecretEnvVar = apiv1.EnvVar{ - Name: "GOOGLE_APPLICATION_CREDENTIALS", - Value: "/var/secrets/google/key.json", - } - - volumeGoogleSecret = apiv1.Volume{ - Name: common.GoogleSecretVolumeName, - VolumeSource: apiv1.VolumeSource{ - Secret: &apiv1.SecretVolumeSource{ - SecretName: common.GoogleSecretName, - }, - }, - } ) // envFromField is a helper to return a EnvVar with the name and field @@ -157,10 +138,6 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont pod.ObjectMeta.Labels[common.LabelKeyControllerInstanceID] = woc.controller.Config.InstanceID } - if common.GoogleSecretName != "" { - pod.Spec.Volumes = append(pod.Spec.Volumes, volumeGoogleSecret) - } 
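The deletions above remove the statically mounted Google service-account key in favor of credential bytes that the executor fetches from a Kubernetes secret at runtime. As a minimal sketch of the client-construction pattern this enables, assuming only the public cloud.google.com/go/storage and google.golang.org/api/option packages (newGCSClientFromSecret is an illustrative name, not part of the patch):

package gcsutil

import (
	"context"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

// newGCSClientFromSecret authenticates with credential JSON that was read
// out of a Kubernetes secret at runtime, rather than a key file mounted
// into the pod and pointed at by GOOGLE_APPLICATION_CREDENTIALS.
func newGCSClientFromSecret(ctx context.Context, credsJSON []byte) (*storage.Client, error) {
	return storage.NewClient(ctx, option.WithCredentialsJSON(credsJSON))
}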
- if tmpl.GetType() != wfv1.TemplateTypeResource { // we do not need the wait container for resource templates because // argoexec runs as the main container and will perform the job of @@ -373,10 +350,6 @@ func (woc *wfOperationCtx) newExecContainer(name string, privileged bool) *apiv1 exec.Resources = *woc.controller.Config.ExecutorResources } - if common.GoogleSecretName != "" { - exec.VolumeMounts = append(exec.VolumeMounts, volumeMountGoogleSecret) - exec.Env = append(exec.Env, googleCredentialSecretEnvVar) - } return &exec } diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index a545e7ef58aa..25a2999ed7b1 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -441,7 +441,13 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv return &driver, nil } if art.GCS != nil { - driver := gcs.GCSArtifactDriver{} + credsJSONData, err := we.getSecrets(we.Namespace, art.GCS.CredentialsSecret.Name, art.GCS.CredentialsSecret.Key) + if err != nil { + return nil, err + } + driver := gcs.GCSArtifactDriver{ + CredsJSONData: []byte(credsJSONData), + } return &driver, nil } if art.HTTP != nil { From 805231f18bc1f1fe23166e3a43e8a5125ba6090b Mon Sep 17 00:00:00 2001 From: Sam DeLuca Date: Wed, 24 Apr 2019 09:21:55 -0400 Subject: [PATCH 128/145] fixing build issues --- Gopkg.lock | 175 +--------------------------- pkg/apis/workflow/v1alpha1/types.go | 4 + workflow/artifacts/gcs/gcs.go | 2 - workflow/controller/config.go | 2 +- workflow/controller/workflowpod.go | 1 - workflow/executor/executor.go | 2 +- 6 files changed, 12 insertions(+), 174 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index c47cd77506a7..27ed97c56d94 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -16,23 +16,15 @@ version = "v0.26.0" [[projects]] - digest = "1:d62e9a41f2e45c103f6c15ffabb3466b3548db41b8cc135a4669794033ee761f" name = "github.com/Azure/go-autorest" packages = [ "autorest", "autorest/adal", "autorest/azure", - "autorest/date", + "autorest/date" ] - pruneopts = "" revision = "1ff28809256a84bb6966640ff3d0371af82ccba4" -[[projects]] - name = "github.com/BurntSushi/toml" - packages = ["."] - revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" - version = "v0.3.1" - [[projects]] name = "github.com/Knetic/govaluate" packages = ["."] @@ -67,7 +59,7 @@ "strftime", "time" ] - revision = "a581a48d63014312c4f2762787f669e46bdb1fd9" + revision = "7e3ef65c8d44303738c7e815bd9b1b297b39f5c8" [[projects]] branch = "master" @@ -76,15 +68,13 @@ revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] - digest = "1:5cf8a8393124ac3d5632a8c51d08d8ff2aa29b6b328306cb8b7560a7e83cf760" name = "github.com/colinmarc/hdfs" packages = [ ".", "protocol/hadoop_common", "protocol/hadoop_hdfs", - "rpc", + "rpc" ] - pruneopts = "" revision = "48eb8d6c34a97ffc73b406356f0f2e1c569b42a5" [[projects]] @@ -94,10 +84,8 @@ version = "v1.1.0" [[projects]] - digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" name = "github.com/dgrijalva/jwt-go" packages = ["."] - pruneopts = "" revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" version = "v3.2.0" @@ -144,12 +132,6 @@ revision = "afac545df32f2287a079e2dfb7ba2745a643747e" version = "v3.0.0" -[[projects]] - name = "github.com/fsnotify/fsnotify" - packages = ["."] - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - [[projects]] branch = "master" name = "github.com/ghodss/yaml" @@ -258,10 +240,8 @@ revision = "9cad4c3443a7200dd6400aef47183728de563a38" [[projects]] - digest = 
"1:d35e07e002ccc51cb01fa77e932ea62206c4d3b2fb0fa1f1b052885942108a96" name = "github.com/hashicorp/go-uuid" packages = ["."] - pruneopts = "" revision = "de160f5c59f693fed329e73e291bb751fe4ea4dc" version = "v1.0.0" @@ -294,13 +274,11 @@ [[projects]] branch = "master" - digest = "1:1c030807110db46f33e7abd02c08dd98dc2c1c6620eea6941185025f16ad8bbb" name = "github.com/jcmturner/gofork" packages = [ "encoding/asn1", - "x/crypto/pbkdf2", + "x/crypto/pbkdf2" ] - pruneopts = "" revision = "2aebee971930cd0dd525873330952ab7df5ac95c" [[projects]] @@ -351,10 +329,8 @@ [[projects]] branch = "master" - digest = "1:1dee6133ab829c8559a39031ad1e0e3538e4a7b34d3e0509d1fc247737e928c1" name = "github.com/mitchellh/go-ps" packages = ["."] - pruneopts = "" revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062" [[projects]] @@ -448,10 +424,8 @@ version = "v1.0.6" [[projects]] - digest = "1:9ba49264cef4386aded205f9cb5b1f2d30f983d7dc37a21c780d9db3edfac9a7" name = "github.com/spf13/cobra" packages = ["."] - pruneopts = "" revision = "fe5e611709b0c57fa4a89136deaa8e1d4004d053" [[projects]] @@ -566,15 +540,6 @@ ] revision = "f027049dab0ad238e394a753dba2d14753473a04" -[[projects]] - branch = "master" - name = "golang.org/x/lint" - packages = [ - ".", - "golint" - ] - revision = "959b441ac422379a43da2230f62be024250818b0" - [[projects]] branch = "master" name = "golang.org/x/net" @@ -644,17 +609,7 @@ branch = "master" name = "golang.org/x/tools" packages = [ - "cmd/goimports", "go/ast/astutil", - "go/buildutil", - "go/gcexportdata", - "go/internal/cgo", - "go/internal/gcimporter", - "go/loader", - "go/packages", - "go/packages/golist", - "go/packages/raw", - "go/types/typeutil", "imports", "internal/fastwalk" ] @@ -663,7 +618,6 @@ [[projects]] name = "google.golang.org/api" packages = [ - ".", "gensupport", "googleapi", "googleapi/internal/uritemplates", @@ -746,23 +700,18 @@ version = "v0.9.1" [[projects]] - digest = "1:4777ba481cc12866b89aafb0a67529e7ac48b9aea06a25f3737b2cf5a3ffda12" name = "gopkg.in/jcmturner/aescts.v1" packages = ["."] - pruneopts = "" revision = "f6abebb3171c4c1b1fea279cb7c7325020a26290" version = "v1.0.1" [[projects]] - digest = "1:84c5b1392ef65ad1bb64da4b4d0beb2f204eefc769d6d96082347bb7057cb7b1" name = "gopkg.in/jcmturner/dnsutils.v1" packages = ["."] - pruneopts = "" revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c" version = "v1.0.1" [[projects]] - digest = "1:f727cb776135c090d4043eca9cd921b9967f75704a97309172fde92591b3c828" name = "gopkg.in/jcmturner/gokrb5.v5" packages = [ "asn1tools", @@ -795,17 +744,14 @@ "messages", "mstypes", "pac", - "types", + "types" ] - pruneopts = "" revision = "32ba44ca5b42f17a4a9f33ff4305e70665a1bc0f" version = "v5.3.0" [[projects]] - digest = "1:269a70a6997455a9130b3005af6d2983323e4b8c712f3288a0df0e6013c18ee1" name = "gopkg.in/jcmturner/rpc.v0" packages = ["ndr"] - pruneopts = "" revision = "4480c480c9cd343b54b0acb5b62261cbd33d7adf" version = "v0.0.2" @@ -880,34 +826,6 @@ revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" -[[projects]] - name = "honnef.co/go/tools" - packages = [ - "arg", - "callgraph", - "callgraph/static", - "cmd/staticcheck", - "config", - "deprecated", - "functions", - "internal/sharedcheck", - "lint", - "lint/lintdsl", - "lint/lintutil", - "lint/lintutil/format", - "simple", - "ssa", - "ssa/ssautil", - "ssautil", - "staticcheck", - "staticcheck/vrp", - "stylecheck", - "unused", - "version" - ] - revision = "95959eaf5e3c41c66151dcfd91779616b84077a8" - version = "2019.1.1" - [[projects]] branch = "release-1.12" name = 
"k8s.io/api" @@ -1159,94 +1077,13 @@ [[projects]] branch = "master" - digest = "1:f6c19347011ba9a072aa55f5c7fa630c0b88303ac4ca83008454aef95b0c2078" name = "k8s.io/utils" packages = ["pointer"] - pruneopts = "" revision = "21c4ce38f2a793ec01e925ddc31216500183b773" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/Knetic/govaluate", - "github.com/argoproj/pkg/cli", - "github.com/argoproj/pkg/errors", - "github.com/argoproj/pkg/exec", - "github.com/argoproj/pkg/file", - "github.com/argoproj/pkg/humanize", - "github.com/argoproj/pkg/json", - "github.com/argoproj/pkg/kube/cli", - "github.com/argoproj/pkg/kube/errors", - "github.com/argoproj/pkg/s3", - "github.com/argoproj/pkg/stats", - "github.com/argoproj/pkg/strftime", - "github.com/argoproj/pkg/time", - "github.com/colinmarc/hdfs", - "github.com/evanphx/json-patch", - "github.com/ghodss/yaml", - "github.com/go-openapi/spec", - "github.com/gorilla/websocket", - "github.com/mitchellh/go-ps", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/sirupsen/logrus", - "github.com/spf13/cobra", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "github.com/stretchr/testify/suite", - "github.com/tidwall/gjson", - "github.com/valyala/fasttemplate", - "golang.org/x/crypto/ssh", - "gopkg.in/jcmturner/gokrb5.v5/client", - "gopkg.in/jcmturner/gokrb5.v5/config", - "gopkg.in/jcmturner/gokrb5.v5/credentials", - "gopkg.in/jcmturner/gokrb5.v5/keytab", - "gopkg.in/src-d/go-git.v4", - "gopkg.in/src-d/go-git.v4/plumbing/transport", - "gopkg.in/src-d/go-git.v4/plumbing/transport/http", - "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh", - "k8s.io/api/core/v1", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/resource", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "k8s.io/apimachinery/pkg/fields", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/runtime/serializer", - "k8s.io/apimachinery/pkg/selection", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/clock", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/validation", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/client-go/discovery", - "k8s.io/client-go/discovery/fake", - "k8s.io/client-go/dynamic", - "k8s.io/client-go/informers/internalinterfaces", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/fake", - "k8s.io/client-go/plugin/pkg/client/auth/azure", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "k8s.io/client-go/plugin/pkg/client/auth/oidc", - "k8s.io/client-go/rest", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/tools/remotecommand", - "k8s.io/client-go/tools/watch", - "k8s.io/client-go/util/flowcontrol", - "k8s.io/client-go/util/workqueue", - "k8s.io/code-generator/cmd/client-gen", - "k8s.io/code-generator/cmd/deepcopy-gen", - "k8s.io/code-generator/cmd/informer-gen", - "k8s.io/code-generator/cmd/lister-gen", - "k8s.io/kube-openapi/pkg/common", - "k8s.io/utils/pointer", - ] + inputs-digest = "efb1ee6ec26ba4329ef206c18289aa8369361a967d6444c0bbfe6193354fbb29" solver-name = "gps-cdcl" solver-version = 1 diff --git a/pkg/apis/workflow/v1alpha1/types.go b/pkg/apis/workflow/v1alpha1/types.go index 
4be64baec875..7307f0464622 100644 --- a/pkg/apis/workflow/v1alpha1/types.go +++ b/pkg/apis/workflow/v1alpha1/types.go @@ -688,6 +688,10 @@ func (s *GCSArtifact) String() string { return fmt.Sprintf("gs://%s/%s", s.Bucket, s.Key) } +func (s *GCSArtifact) HasLocation() bool { + return s != nil && s.Bucket != "" +} + // GitArtifact is the location of an git artifact type GitArtifact struct { // Repo is the git repository diff --git a/workflow/artifacts/gcs/gcs.go b/workflow/artifacts/gcs/gcs.go index 49a8e55abdf9..89328d430898 100644 --- a/workflow/artifacts/gcs/gcs.go +++ b/workflow/artifacts/gcs/gcs.go @@ -4,7 +4,6 @@ import ( "cloud.google.com/go/storage" "context" "errors" - "fmt" argoErrors "github.com/argoproj/argo/errors" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" log "github.com/sirupsen/logrus" @@ -21,7 +20,6 @@ type GCSArtifactDriver struct { func (gcsDriver *GCSArtifactDriver) newGcsClient() (client *storage.Client, err error) { gcsDriver.Context = context.Background() - fmt.Println(string(gcsDriver.CredsJSONData)) client, err = storage.NewClient(gcsDriver.Context, option.WithCredentialsJSON(gcsDriver.CredsJSONData)) if err != nil { return nil, argoErrors.InternalWrapError(err) diff --git a/workflow/controller/config.go b/workflow/controller/config.go index 7823203154b6..0cd87416fa27 100644 --- a/workflow/controller/config.go +++ b/workflow/controller/config.go @@ -96,7 +96,7 @@ type ArtifactRepository struct { Artifactory *ArtifactoryArtifactRepository `json:"artifactory,omitempty"` // HDFS stores artifacts in HDFS HDFS *HDFSArtifactRepository `json:"hdfs,omitempty"` - GCS *GCSArtifactRepository `json:"gcs,omitempty"` + GCS *GCSArtifactRepository `json:"gcs,omitempty"` } // S3ArtifactRepository defines the controller configuration for an S3 artifact repository diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 00ae2cf09fea..d5456c08922d 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -387,7 +387,6 @@ func (woc *wfOperationCtx) newExecContainer(name string) *apiv1.Container { Name: name, Image: woc.controller.executorImage(), ImagePullPolicy: woc.controller.executorImagePullPolicy(), - VolumeMounts: []apiv1.VolumeMount{}, Env: woc.createEnvVars(), VolumeMounts: []apiv1.VolumeMount{ volumeMountPodMetadata, diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index 1aa5fb2f755d..78bb8b1b62f5 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -549,7 +549,7 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv return &driver, nil } if art.GCS != nil { - credsJSONData, err := we.getSecrets(we.Namespace, art.GCS.CredentialsSecret.Name, art.GCS.CredentialsSecret.Key) + credsJSONData, err := we.GetSecrets(we.Namespace, art.GCS.CredentialsSecret.Name, art.GCS.CredentialsSecret.Key) if err != nil { return nil, err } From 4352e8949ffa84c5fdf0d46bfb491aa871ffb55c Mon Sep 17 00:00:00 2001 From: Sam DeLuca Date: Wed, 24 Apr 2019 10:44:31 -0400 Subject: [PATCH 129/145] linter issues --- workflow/artifacts/gcs/gcs.go | 5 +++-- workflow/executor/executor.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/workflow/artifacts/gcs/gcs.go b/workflow/artifacts/gcs/gcs.go index 89328d430898..55977468bd7d 100644 --- a/workflow/artifacts/gcs/gcs.go +++ b/workflow/artifacts/gcs/gcs.go @@ -6,6 +6,7 @@ import ( "errors" argoErrors "github.com/argoproj/argo/errors" wfv1 
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/util" log "github.com/sirupsen/logrus" "google.golang.org/api/option" "io" @@ -59,7 +60,7 @@ func (gcsDriver *GCSArtifactDriver) saveToFile(inputArtifact *wfv1.Artifact, fil if err != nil { return err } - defer r.Close() + defer util.Close(r) _, err = io.Copy(outputFile, r) if err != nil { @@ -97,7 +98,7 @@ func (gcsDriver *GCSArtifactDriver) saveToGCS(outputArtifact *wfv1.Artifact, fil return errors.New("only single files can be saved to GCS, not entire directories") } - defer inputFile.Close() + defer util.Close(inputFile) bucket := gcsClient.Bucket(outputArtifact.GCS.Bucket) object := bucket.Object(outputArtifact.GCS.Key) diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index 78bb8b1b62f5..c11ce2ae55ab 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -554,7 +554,7 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv return nil, err } driver := gcs.GCSArtifactDriver{ - CredsJSONData: []byte(credsJSONData), + CredsJSONData: credsJSONData, } return &driver, nil } From 6511bc7b1c70a8768bca4d649689336e7f1498c6 Mon Sep 17 00:00:00 2001 From: Sam DeLuca Date: Wed, 24 Apr 2019 13:01:08 -0400 Subject: [PATCH 130/145] fixing jenkinsfile(?) --- Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 38a1de3db6ad..456ea638d568 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -41,7 +41,7 @@ pipeline { stage('build utility container') { steps { - sh "docker build -t argo-builder -f Dockerfile-builder ." + sh "docker build -t argo-builder --target builder ." } } @@ -54,14 +54,14 @@ pipeline { stage('build controller') { steps { runUtilityCommand("make controller") - sh "docker build -t workflow-controller:${VERSION} -f Dockerfile-workflow-controller ." + sh "docker build -t workflow-controller:${VERSION} --target workflow-controller ." } } stage('build executor') { steps { runUtilityCommand("make executor") - sh "docker build -t argoexec:${VERSION} -f Dockerfile-argoexec ." + sh "docker build -t argoexec:${VERSION} --target argoexec-base ." 
      }
    }

From 280ca06a530a19d89b9434cce1a5593950f10677 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Wed, 24 Apr 2019 13:14:29 -0400
Subject: [PATCH 131/145] jenkins

---
 Jenkinsfile | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 456ea638d568..6139946a8d7a 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -10,9 +10,9 @@ def NAMESPACE = ''
 def runUtilityCommand(buildCommand) {
     // Run an arbitrary command inside the docker builder image
     sh "docker run --rm " +
-            "-v ${pwd()}/dist/pkg:/root/go/pkg " +
-            "-v ${pwd()}:/root/go/src/github.com/cyrusbiotechnology/argo " +
-            "-w /root/go/src/github.com/cyrusbiotechnology/argo argo-builder ${buildCommand}"
+            "-v ${pwd()}/dist/pkg:/usr/local/go/pkg " +
+            "-v ${pwd()}:/go/src/github.com/cyrusbiotechnology/argo " +
+            "-w /go/src/github.com/cyrusbiotechnology/argo argo-builder ${buildCommand}"
 }
 
 pipeline {

From 7f66c88d8cbe99c39d1a18b81e38b43e8485ed42 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Wed, 24 Apr 2019 13:24:29 -0400
Subject: [PATCH 132/145] jenkins

---
 Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 6139946a8d7a..1e50ababc2cc 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -47,7 +47,7 @@ pipeline {
 
    stage('run tests') {
      steps {
-        runUtilityCommand("go test ./...")
+        runUtilityCommand("dep ensure && go test ./...")
      }
    }
 
From 9cbff08125fecb44f709fbe85eb7c6770cc60cd4 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Wed, 24 Apr 2019 13:31:06 -0400
Subject: [PATCH 133/145] jenkins

---
 Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 1e50ababc2cc..9a17f83db517 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -47,7 +47,7 @@ pipeline {
 
    stage('run tests') {
      steps {
-        runUtilityCommand("dep ensure && go test ./...")
+        runUtilityCommand("'dep ensure && go test ./...'")
      }
    }
 
From 11266b8d831ec1b7c13f6f62366c706f51e04cc7 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Wed, 24 Apr 2019 14:08:00 -0400
Subject: [PATCH 134/145] jenkins

---
 Jenkinsfile | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 9a17f83db517..d680fae99e43 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -12,7 +12,7 @@ def runUtilityCommand(buildCommand) {
     sh "docker run --rm " +
             "-v ${pwd()}/dist/pkg:/usr/local/go/pkg " +
             "-v ${pwd()}:/go/src/github.com/cyrusbiotechnology/argo " +
-            "-w /go/src/github.com/cyrusbiotechnology/argo argo-builder ${buildCommand}"
+            "-w /go/src/github.com/cyrusbiotechnology/argo argoexec:${VERSION} ${buildCommand}"
 }
 
 pipeline {
@@ -39,17 +39,12 @@
      }
    }
 
-    stage('build utility container') {
-      steps {
-        sh "docker build -t argo-builder --target builder ."
-      }
-    }
+//    stage('build utility container') {
+//      steps {
+//        sh "docker build -t argo-builder --target builder ."
+//      }
+//    }
 
-    stage('run tests') {
-      steps {
-        runUtilityCommand("'dep ensure && go test ./...'")
-      }
-    }
 
    stage('build controller') {
      steps {
@@ -65,6 +60,13 @@
      }
    }
 
+
+    stage('run tests') {
+      steps {
+        runUtilityCommand("go test ./...")
+      }
+    }
+
    stage('build Linux and MacOS CLIs') {
      steps {
        runUtilityCommand("make cli CGO_ENABLED=0 LDFLAGS='-extldflags \"-static\"' ARGO_CLI_NAME=argo-linux-amd64")

From 3b169841de2c9e64514af59f3aeac0501ba99f43 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Wed, 24 Apr 2019 14:37:44 -0400
Subject: [PATCH 135/145] jenkins?
---
 Dockerfile  |  7 +++++--
 Jenkinsfile | 12 ++++++------
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 707a9dc556e9..a0464961ef65 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -54,7 +54,7 @@ COPY --from=builder /usr/local/bin/docker /usr/local/bin/
 ####################################################################################################
 # Argo Build stage which performs the actual build of Argo binaries
 ####################################################################################################
-FROM builder as argo-build
+FROM builder as builder-base
 
 # A dummy directory is created under $GOPATH/src/dummy so we are able to use dep
 # to install all the packages of our dep lock file
 RUN cd ${GOPATH}/src/dummy && \
     mv vendor/* ${GOPATH}/src/ && \
     rmdir vendor
 
-# Perform the build
 WORKDIR /go/src/github.com/cyrusbiotechnology/argo
 COPY . .
+
+FROM builder-base as argo-build
+# Perform the build
+
 ARG MAKE_TARGET="controller executor cli-linux-amd64"
 RUN make $MAKE_TARGET

diff --git a/Jenkinsfile b/Jenkinsfile
index d680fae99e43..32b2c1d6d6d5 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -12,7 +12,7 @@ def runUtilityCommand(buildCommand) {
     sh "docker run --rm " +
             "-v ${pwd()}/dist/pkg:/usr/local/go/pkg " +
             "-v ${pwd()}:/go/src/github.com/cyrusbiotechnology/argo " +
-            "-w /go/src/github.com/cyrusbiotechnology/argo argoexec:${VERSION} ${buildCommand}"
+            "-w /go/src/github.com/cyrusbiotechnology/argo argo-builder ${buildCommand}"
 }
 
 pipeline {
@@ -39,11 +39,11 @@
      }
    }
 
-//    stage('build utility container') {
-//      steps {
-//        sh "docker build -t argo-builder --target builder ."
-//      }
-//    }
+    stage('build utility container') {
+      steps {
+        sh "docker build -t argo-builder --target builder-base ."
+      }
+    }
 
From fee62d3d32642ce0cbdebfdb2f9b4631824317b6 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Wed, 24 Apr 2019 14:45:46 -0400
Subject: [PATCH 136/145] jenkins :(

---
 Jenkinsfile | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 32b2c1d6d6d5..9a72713731d0 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -48,14 +48,12 @@ pipeline {
 
    stage('build controller') {
      steps {
-        runUtilityCommand("make controller")
        sh "docker build -t workflow-controller:${VERSION} --target workflow-controller ."
      }
    }
 
    stage('build executor') {
      steps {
-        runUtilityCommand("make executor")
        sh "docker build -t argoexec:${VERSION} --target argoexec-base ."
      }
    }

From 5856cb6a16155f22c23679cf45e461d85d95efbc Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Wed, 24 Apr 2019 14:52:23 -0400
Subject: [PATCH 137/145] jenkins :(

---
 Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 9a72713731d0..ead506335449 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -61,7 +61,7 @@
 
    stage('run tests') {
      steps {
-        runUtilityCommand("go test ./...")
+        runUtilityCommand("make test")
      }
    }
 
From 5756fee719c263cd563316e532370fe669566809 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Thu, 25 Apr 2019 09:54:48 -0400
Subject: [PATCH 138/145] jenkins

---
 Jenkinsfile | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index ead506335449..5f229828c1b4 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -9,10 +9,7 @@ def NAMESPACE = ''
 
 def runUtilityCommand(buildCommand) {
     // Run an arbitrary command inside the docker builder image
-    sh "docker run --rm " +
-            "-v ${pwd()}/dist/pkg:/usr/local/go/pkg " +
-            "-v ${pwd()}:/go/src/github.com/cyrusbiotechnology/argo " +
-            "-w /go/src/github.com/cyrusbiotechnology/argo argo-builder ${buildCommand}"
+    sh "docker run --rm builder-base:latest ${buildCommand}"
 }
 
 pipeline {
@@ -41,10 +38,15 @@
 
    stage('build utility container') {
      steps {
-        sh "docker build -t argo-builder --target builder-base ."
+        sh "docker build -t builder-base --target builder-base ."
      }
    }
 
+    stage('run tests') {
+      steps {
+        runUtilityCommand("go test ./...")
+      }
+    }
 
    stage('build controller') {
      steps {
@@ -59,11 +61,7 @@
      }
    }
 
-    stage('run tests') {
-      steps {
-        runUtilityCommand("make test")
-      }
-    }
+
 
    stage('build Linux and MacOS CLIs') {
      steps {

From 1afa5539045b20c8440a50cf8101617c6413c74e Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Thu, 25 Apr 2019 10:13:21 -0400
Subject: [PATCH 139/145] jenkins

---
 Jenkinsfile | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 5f229828c1b4..7887051d0fa5 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -9,7 +9,7 @@
 
 def runUtilityCommand(buildCommand) {
     // Run an arbitrary command inside the docker builder image
-    sh "docker run --rm builder-base:latest ${buildCommand}"
+    sh "docker run -v ${pwd)}/dist:/go/src/github.com/cyrusbiotechnology/argo/dist --rm builder-base:latest ${buildCommand}"
 }
 
 pipeline {
@@ -42,11 +42,14 @@
      }
    }
 
+// TODO uncomment once we have the infrastructure to submit tests
+/*
    stage('run tests') {
      steps {
        runUtilityCommand("go test ./...")
      }
    }
+*/
 
    stage('build controller') {
      steps {

From 9eab6bfb3f313cead2d21afba532ddd7ce2c2eb0 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Thu, 25 Apr 2019 10:16:36 -0400
Subject: [PATCH 140/145] jenkins

---
 Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 7887051d0fa5..c738b0eca946 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -9,7 +9,7 @@ def NAMESPACE = ''
 
 def runUtilityCommand(buildCommand) {
     // Run an arbitrary command inside the docker builder image
-    sh "docker run -v ${pwd)}/dist:/go/src/github.com/cyrusbiotechnology/argo/dist --rm builder-base:latest ${buildCommand}"
+    sh "docker run -v ${pwd()}/dist:/go/src/github.com/cyrusbiotechnology/argo/dist --rm builder-base:latest ${buildCommand}"
 }
 
 pipeline {

From 716baf967da3dd7455854a15600f48372f24f094 Mon Sep 17 00:00:00 2001
From: Sam DeLuca
Date: Thu, 25 Apr 2019 13:29:43 -0400
Subject: [PATCH 141/145] jenkins

---
 Gopkg.lock                           | 1095
-------------------------- Jenkinsfile | 9 +- util/file/fileutil_test.go | 2 +- workflow/controller/operator.go | 2 +- workflow/controller/operator_test.go | 2 + 5 files changed, 8 insertions(+), 1102 deletions(-) delete mode 100644 Gopkg.lock diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index fc86176400d1..000000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,1095 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "cloud.google.com/go" - packages = [ - "compute/metadata", - "iam", - "internal", - "internal/optional", - "internal/trace", - "internal/version", - "storage" - ] - revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239" - version = "v0.26.0" - -[[projects]] - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/date" - ] - revision = "1ff28809256a84bb6966640ff3d0371af82ccba4" - -[[projects]] - name = "github.com/Knetic/govaluate" - packages = ["."] - revision = "9aa49832a739dcd78a5542ff189fb82c3e423116" - -[[projects]] - name = "github.com/PuerkitoBio/purell" - packages = ["."] - revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/PuerkitoBio/urlesc" - packages = ["."] - revision = "de5bf2ad457846296e2031421a34e2568e304e35" - -[[projects]] - name = "github.com/argoproj/argo" - packages = ["util/file"] - revision = "bb1bfdd9106d9b64aa2dccf8d3554bdd31513cf8" - version = "v2.3.0-rc2" - -[[projects]] - branch = "master" - name = "github.com/argoproj/pkg" - packages = [ - "cli", - "errors", - "exec", - "file", - "humanize", - "json", - "kube/cli", - "kube/errors", - "s3", - "stats", - "strftime", - "time" - ] - revision = "7e3ef65c8d44303738c7e815bd9b1b297b39f5c8" - -[[projects]] - branch = "master" - name = "github.com/beorn7/perks" - packages = ["quantile"] - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - name = "github.com/colinmarc/hdfs" - packages = [ - ".", - "protocol/hadoop_common", - "protocol/hadoop_hdfs", - "rpc" - ] - revision = "48eb8d6c34a97ffc73b406356f0f2e1c569b42a5" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - branch = "master" - name = "github.com/docker/spdystream" - packages = [ - ".", - "spdy" - ] - revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85" - -[[projects]] - branch = "master" - name = "github.com/dustin/go-humanize" - packages = ["."] - revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e" - -[[projects]] - name = "github.com/emicklei/go-restful" - packages = [ - ".", - "log" - ] - revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0" - version = "v2.8.0" - -[[projects]] - name = "github.com/emirpasic/gods" - packages = [ - "containers", - "lists", - "lists/arraylist", - "trees", - "trees/binaryheap", - "utils" - ] - revision = "f6c17b524822278a87e3b3bd809fec33b51f5b46" - version = "v1.9.0" - -[[projects]] - name = "github.com/evanphx/json-patch" - packages = ["."] - revision = "afac545df32f2287a079e2dfb7ba2745a643747e" - version = "v3.0.0" - -[[projects]] - branch = "master" - name = "github.com/ghodss/yaml" - packages = ["."] - revision = "c7ce16629ff4cd059ed96ed06419dd3856fd3577" - -[[projects]] - name = 
"github.com/go-ini/ini" - packages = ["."] - revision = "358ee7663966325963d4e8b2e1fbd570c5195153" - version = "v1.38.1" - -[[projects]] - name = "github.com/go-openapi/jsonpointer" - packages = ["."] - revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" - version = "0.15.0" - -[[projects]] - name = "github.com/go-openapi/jsonreference" - packages = ["."] - revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" - version = "0.15.0" - -[[projects]] - name = "github.com/go-openapi/spec" - packages = ["."] - revision = "bce47c9386f9ecd6b86f450478a80103c3fe1402" - version = "0.15.0" - -[[projects]] - name = "github.com/go-openapi/swag" - packages = ["."] - revision = "2b0bd4f193d011c203529df626a65d63cb8a79e8" - version = "0.15.0" - -[[projects]] - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys" - ] - revision = "636bf0302bc95575d69441b25a2603156ffdddf1" - version = "v1.1.1" - -[[projects]] - branch = "master" - name = "github.com/golang/glog" - packages = ["."] - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - name = "github.com/golang/protobuf" - packages = [ - "proto", - "protoc-gen-go/descriptor", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp" - ] - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/google/btree" - packages = ["."] - revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" - -[[projects]] - branch = "master" - name = "github.com/google/gofuzz" - packages = ["."] - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" - -[[projects]] - name = "github.com/googleapis/gax-go" - packages = ["."] - revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f" - version = "v2.0.0" - -[[projects]] - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - "extensions" - ] - revision = "7c663266750e7d82587642f65e60bc4083f1f84e" - version = "v0.2.0" - -[[projects]] - name = "github.com/gorilla/websocket" - packages = ["."] - revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" - version = "v1.2.0" - -[[projects]] - branch = "master" - name = "github.com/gregjones/httpcache" - packages = [ - ".", - "diskcache" - ] - revision = "9cad4c3443a7200dd6400aef47183728de563a38" - -[[projects]] - name = "github.com/hashicorp/go-uuid" - packages = ["."] - revision = "de160f5c59f693fed329e73e291bb751fe4ea4dc" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru" - ] - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" - -[[projects]] - name = "github.com/imdario/mergo" - packages = ["."] - revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" - version = "v0.3.6" - -[[projects]] - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - branch = "master" - name = "github.com/jbenet/go-context" - packages = ["io"] - revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4" - -[[projects]] - branch = "master" - name = "github.com/jcmturner/gofork" - packages = [ - "encoding/asn1", - "x/crypto/pbkdf2" - ] - revision = "2aebee971930cd0dd525873330952ab7df5ac95c" - -[[projects]] - name = "github.com/json-iterator/go" - packages = ["."] - revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" - -[[projects]] - name = "github.com/kevinburke/ssh_config" - packages = ["."] - revision = "9fc7bb800b555d63157c65a904c86a2cc7b4e795" - version = "0.4" - 
-[[projects]] - branch = "master" - name = "github.com/mailru/easyjson" - packages = [ - "buffer", - "jlexer", - "jwriter" - ] - revision = "03f2033d19d5860aef995fe360ac7d395cd8ce65" - -[[projects]] - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - name = "github.com/minio/minio-go" - packages = [ - ".", - "pkg/credentials", - "pkg/encrypt", - "pkg/s3signer", - "pkg/s3utils", - "pkg/set" - ] - revision = "70799fe8dae6ecfb6c7d7e9e048fce27f23a1992" - version = "v6.0.5" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - revision = "58046073cbffe2f25d425fe1331102f55cf719de" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/go-ps" - packages = ["."] - revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062" - -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - name = "github.com/modern-go/reflect2" - packages = ["."] - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - name = "github.com/pelletier/go-buffruneio" - packages = ["."] - revision = "c37440a7cf42ac63b919c752ca73a85067e05992" - version = "v0.2.0" - -[[projects]] - branch = "master" - name = "github.com/petar/GoLLRB" - packages = ["llrb"] - revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" - -[[projects]] - name = "github.com/peterbourgon/diskv" - packages = ["."] - revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" - version = "v2.0.1" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp" - ] - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - name = "github.com/prometheus/client_model" - packages = ["go"] - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model" - ] - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs" - ] - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - name = "github.com/sergi/go-diff" - packages = ["diffmatchpatch"] - revision = "1744e2970ca51c86172c8190fadad617561ed6e7" - version = "v1.0.0" - -[[projects]] - name = "github.com/sirupsen/logrus" - packages = ["."] - revision = "3e01752db0189b9157070a0e1668a620f9a85da2" - version = "v1.0.6" - -[[projects]] - name = "github.com/spf13/cobra" - packages = ["."] - revision = "fe5e611709b0c57fa4a89136deaa8e1d4004d053" - -[[projects]] - name = "github.com/spf13/pflag" - packages = ["."] - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" - -[[projects]] - name = "github.com/src-d/gcfg" - packages = [ - ".", - "scanner", - "token", - "types" - ] - revision = "f187355171c936ac84a82793659ebb4936bc1c23" - version = "v1.3.0" - -[[projects]] - name = 
"github.com/stretchr/objx" - packages = ["."] - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - "require", - "suite" - ] - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - name = "github.com/tidwall/gjson" - packages = ["."] - revision = "1e3f6aeaa5bad08d777ea7807b279a07885dd8b2" - version = "v1.1.3" - -[[projects]] - branch = "master" - name = "github.com/tidwall/match" - packages = ["."] - revision = "1731857f09b1f38450e2c12409748407822dc6be" - -[[projects]] - branch = "master" - name = "github.com/valyala/bytebufferpool" - packages = ["."] - revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7" - -[[projects]] - branch = "master" - name = "github.com/valyala/fasttemplate" - packages = ["."] - revision = "dcecefd839c4193db0d35b88ec65b4c12d360ab0" - -[[projects]] - name = "github.com/xanzy/ssh-agent" - packages = ["."] - revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c" - version = "v0.2.0" - -[[projects]] - name = "go.opencensus.io" - packages = [ - ".", - "internal", - "internal/tagencoding", - "plugin/ochttp", - "plugin/ochttp/propagation/b3", - "stats", - "stats/internal", - "stats/view", - "tag", - "trace", - "trace/internal", - "trace/propagation", - "trace/tracestate" - ] - revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6" - version = "v0.17.0" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = [ - "argon2", - "blake2b", - "cast5", - "curve25519", - "ed25519", - "ed25519/internal/edwards25519", - "internal/chacha20", - "internal/subtle", - "md4", - "openpgp", - "openpgp/armor", - "openpgp/elgamal", - "openpgp/errors", - "openpgp/packet", - "openpgp/s2k", - "pbkdf2", - "poly1305", - "ssh", - "ssh/agent", - "ssh/knownhosts", - "ssh/terminal" - ] - revision = "f027049dab0ad238e394a753dba2d14753473a04" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace" - ] - revision = "f9ce57c11b242f0f1599cf25c89d8cb02c45295a" - -[[projects]] - branch = "master" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt" - ] - revision = "3d292e4d0cdc3a0113e6d207bb137145ef1de42f" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = [ - "cpu", - "unix", - "windows" - ] - revision = "904bdc257025c7b3f43c19360ad3ab85783fad78" - -[[projects]] - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - "width" - ] - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - name = "golang.org/x/time" - packages = ["rate"] - revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" - -[[projects]] - branch = "master" - name = "golang.org/x/tools" - packages = [ - "go/ast/astutil", - "imports", - "internal/fastwalk" - ] - revision = "ca6481ae56504398949d597084558e50ad07117a" - -[[projects]] - name = "google.golang.org/api" - packages = [ - "gensupport", - "googleapi", - "googleapi/internal/uritemplates", - "googleapi/transport", - "internal", - "iterator", - "option", - "storage/v1", - "transport/http", - 
"transport/http/internal/propagation" - ] - revision = "0cbcb99a9ea0c8023c794b2693cbe1def82ed4d7" - version = "v0.3.2" - -[[projects]] - name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch" - ] - revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "google.golang.org/genproto" - packages = [ - "googleapis/api/annotations", - "googleapis/iam/v1", - "googleapis/rpc/code", - "googleapis/rpc/status" - ] - revision = "0e822944c569bf5c9afd034adaa56208bd2906ac" - -[[projects]] - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap" - ] - revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" - version = "v1.15.0" - -[[projects]] - name = "gopkg.in/inf.v0" - packages = ["."] - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - name = "gopkg.in/jcmturner/aescts.v1" - packages = ["."] - revision = "f6abebb3171c4c1b1fea279cb7c7325020a26290" - version = "v1.0.1" - -[[projects]] - name = "gopkg.in/jcmturner/dnsutils.v1" - packages = ["."] - revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c" - version = "v1.0.1" - -[[projects]] - name = "gopkg.in/jcmturner/gokrb5.v5" - packages = [ - "asn1tools", - "client", - "config", - "credentials", - "crypto", - "crypto/common", - "crypto/etype", - "crypto/rfc3961", - "crypto/rfc3962", - "crypto/rfc4757", - "crypto/rfc8009", - "gssapi", - "iana", - "iana/addrtype", - "iana/adtype", - "iana/asnAppTag", - "iana/chksumtype", - "iana/errorcode", - "iana/etypeID", - "iana/flags", - "iana/keyusage", - "iana/msgtype", - "iana/nametype", - "iana/patype", - "kadmin", - "keytab", - "krberror", - "messages", - "mstypes", - "pac", - "types" - ] - revision = "32ba44ca5b42f17a4a9f33ff4305e70665a1bc0f" - version = "v5.3.0" - -[[projects]] - name = "gopkg.in/jcmturner/rpc.v0" - packages = ["ndr"] - revision = "4480c480c9cd343b54b0acb5b62261cbd33d7adf" - version = "v0.0.2" - -[[projects]] - name = "gopkg.in/src-d/go-billy.v4" - packages = [ - ".", - "helper/chroot", - "helper/polyfill", - "osfs", - "util" - ] - revision = "83cf655d40b15b427014d7875d10850f96edba14" - version = "v4.2.0" - -[[projects]] - name = "gopkg.in/src-d/go-git.v4" - packages = [ - ".", - "config", - "internal/revision", - "plumbing", - "plumbing/cache", - "plumbing/filemode", - "plumbing/format/config", - "plumbing/format/diff", - "plumbing/format/gitignore", - "plumbing/format/idxfile", - "plumbing/format/index", - "plumbing/format/objfile", - "plumbing/format/packfile", - "plumbing/format/pktline", - "plumbing/object", - "plumbing/protocol/packp", - "plumbing/protocol/packp/capability", - "plumbing/protocol/packp/sideband", - "plumbing/revlist", - "plumbing/storer", - "plumbing/transport", - "plumbing/transport/client", - "plumbing/transport/file", - "plumbing/transport/git", - "plumbing/transport/http", - "plumbing/transport/internal/common", - "plumbing/transport/server", - 
"plumbing/transport/ssh", - "storage", - "storage/filesystem", - "storage/filesystem/dotgit", - "storage/memory", - "utils/binary", - "utils/diff", - "utils/ioutil", - "utils/merkletrie", - "utils/merkletrie/filesystem", - "utils/merkletrie/index", - "utils/merkletrie/internal/frame", - "utils/merkletrie/noder" - ] - revision = "3bd5e82b2512d85becae9677fa06b5a973fd4cfb" - version = "v4.5.0" - -[[projects]] - name = "gopkg.in/warnings.v0" - packages = ["."] - revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b" - version = "v0.1.2" - -[[projects]] - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[[projects]] - branch = "release-1.12" - name = "k8s.io/api" - packages = [ - "admissionregistration/v1alpha1", - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "autoscaling/v2beta2", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "coordination/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1" - ] - revision = "475331a8afff5587f47d0470a93f79c60c573c03" - -[[projects]] - branch = "release-1.12" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/internalversion", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1beta1", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/cache", - "pkg/util/clock", - "pkg/util/diff", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/httpstream", - "pkg/util/httpstream/spdy", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/mergepatch", - "pkg/util/naming", - "pkg/util/net", - "pkg/util/remotecommand", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/strategicpatch", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/json", - "third_party/forked/golang/netutil", - "third_party/forked/golang/reflect" - ] - revision = "f71dbbc36e126f5a371b85f6cca96bc8c57db2b6" - -[[projects]] - branch = "release-9.0" - name = "k8s.io/client-go" - packages = [ - "discovery", - "discovery/fake", - "dynamic", - "informers/internalinterfaces", - "kubernetes", - "kubernetes/fake", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1alpha1", - "kubernetes/typed/admissionregistration/v1alpha1/fake", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/admissionregistration/v1beta1/fake", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1/fake", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta1/fake", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/apps/v1beta2/fake", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1/fake", - 
"kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authentication/v1beta1/fake", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1/fake", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/authorization/v1beta1/fake", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v1/fake", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/autoscaling/v2beta1/fake", - "kubernetes/typed/autoscaling/v2beta2", - "kubernetes/typed/autoscaling/v2beta2/fake", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1/fake", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v1beta1/fake", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/batch/v2alpha1/fake", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/certificates/v1beta1/fake", - "kubernetes/typed/coordination/v1beta1", - "kubernetes/typed/coordination/v1beta1/fake", - "kubernetes/typed/core/v1", - "kubernetes/typed/core/v1/fake", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/events/v1beta1/fake", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/extensions/v1beta1/fake", - "kubernetes/typed/networking/v1", - "kubernetes/typed/networking/v1/fake", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/policy/v1beta1/fake", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1/fake", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1alpha1/fake", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/rbac/v1beta1/fake", - "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1alpha1/fake", - "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/scheduling/v1beta1/fake", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/settings/v1alpha1/fake", - "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1/fake", - "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1alpha1/fake", - "kubernetes/typed/storage/v1beta1", - "kubernetes/typed/storage/v1beta1/fake", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth/azure", - "plugin/pkg/client/auth/exec", - "plugin/pkg/client/auth/gcp", - "plugin/pkg/client/auth/oidc", - "rest", - "rest/watch", - "testing", - "third_party/forked/golang/template", - "tools/auth", - "tools/cache", - "tools/clientcmd", - "tools/clientcmd/api", - "tools/clientcmd/api/latest", - "tools/clientcmd/api/v1", - "tools/metrics", - "tools/pager", - "tools/reference", - "tools/remotecommand", - "tools/watch", - "transport", - "transport/spdy", - "util/buffer", - "util/cert", - "util/connrotation", - "util/exec", - "util/flowcontrol", - "util/homedir", - "util/integer", - "util/jsonpath", - "util/retry", - "util/workqueue" - ] - revision = "13596e875accbd333e0b5bd5fd9462185acd9958" - -[[projects]] - branch = "release-1.12" - name = "k8s.io/code-generator" - packages = [ - "cmd/client-gen", - "cmd/client-gen/args", - "cmd/client-gen/generators", - "cmd/client-gen/generators/fake", - "cmd/client-gen/generators/scheme", - "cmd/client-gen/generators/util", - "cmd/client-gen/path", - "cmd/client-gen/types", - "cmd/deepcopy-gen", - "cmd/deepcopy-gen/args", - "cmd/informer-gen", - "cmd/informer-gen/args", - "cmd/informer-gen/generators", - "cmd/lister-gen", - "cmd/lister-gen/args", - "cmd/lister-gen/generators", - "pkg/util" - ] - revision = "3dcf91f64f638563e5106f21f50c31fa361c918d" - -[[projects]] - branch = "master" - name = 
"k8s.io/gengo" - packages = [ - "args", - "examples/deepcopy-gen/generators", - "examples/set-gen/sets", - "generator", - "namer", - "parser", - "types" - ] - revision = "c42f3cdacc394f43077ff17e327d1b351c0304e4" - -[[projects]] - branch = "master" - name = "k8s.io/kube-openapi" - packages = [ - "pkg/common", - "pkg/util/proto" - ] - revision = "e3762e86a74c878ffed47484592986685639c2cd" - -[[projects]] - branch = "master" - name = "k8s.io/utils" - packages = ["pointer"] - revision = "21c4ce38f2a793ec01e925ddc31216500183b773" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "efb1ee6ec26ba4329ef206c18289aa8369361a967d6444c0bbfe6193354fbb29" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Jenkinsfile b/Jenkinsfile index c738b0eca946..8b6ab84f21ae 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -42,14 +42,13 @@ pipeline { } } -// TODO uncomment once we have the infrastructure to submit tests -/* + stage('run tests') { steps { runUtilityCommand("go test ./...") } } -*/ + stage('build controller') { steps { @@ -86,8 +85,8 @@ pipeline { stage('push CLI to artifactory') { steps { withCredentials([usernamePassword(credentialsId: 'Artifactory', usernameVariable: 'ARTI_NAME', passwordVariable: 'ARTI_PASS')]) { - runUtilityCommand("curl -u ${ARTI_NAME}:${ARTI_PASS} -T /root/go/src/github.com/cyrusbiotechnology/argo/dist/argo-darwin-amd64 https://cyrusbio.jfrog.io/cyrusbio/argo-cli/argo-mac-${VERSION}") - runUtilityCommand("curl -u ${ARTI_NAME}:${ARTI_PASS} -T /root/go/src/github.com/cyrusbiotechnology/argo/dist/argo-linux-amd64 https://cyrusbio.jfrog.io/cyrusbio/argo-cli/argo-linux-${VERSION}") + runUtilityCommand("curl -u ${ARTI_NAME}:${ARTI_PASS} -T /go/src/github.com/cyrusbiotechnology/argo/dist/argo-darwin-amd64 https://cyrusbio.jfrog.io/cyrusbio/argo-cli/argo-mac-${VERSION}") + runUtilityCommand("curl -u ${ARTI_NAME}:${ARTI_PASS} -T /go/src/github.com/cyrusbiotechnology/argo/dist/argo-linux-amd64 https://cyrusbio.jfrog.io/cyrusbio/argo-cli/argo-linux-${VERSION}") } } } diff --git a/util/file/fileutil_test.go b/util/file/fileutil_test.go index 32379866afb2..f5b1fd1bae82 100644 --- a/util/file/fileutil_test.go +++ b/util/file/fileutil_test.go @@ -3,7 +3,7 @@ package file_test import ( "archive/tar" "bytes" - "github.com/argoproj/argo/util/file" + "github.com/cyrusbiotechnology/argo/util/file" "github.com/stretchr/testify/assert" "os" "testing" diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index e5fc2e9063de..e5dd72663b75 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -548,7 +548,7 @@ func (woc *wfOperationCtx) podReconciliation() error { wg.Add(1) go func(tmpPod apiv1.Pod) { defer wg.Done() - err = performAssessment(&pod) + err = performAssessment(&tmpPod) if err != nil { woc.log.Errorf("Failed to collect extended errors and warnings from pod %s: %s", pod.Name, err.Error()) } diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index 48d468fe9af7..a5fc682a62fc 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -225,11 +225,13 @@ func TestWorkflowParallelismLimit(t *testing.T) { assert.Equal(t, 2, len(pods.Items)) // operate again and make sure we don't schedule any more pods makePodsRunning(t, controller.kubeclientset, wf.ObjectMeta.Namespace) + assert.Equal(t, int64(2), woc.countActivePods()) wf, err = wfcset.Get(wf.ObjectMeta.Name, metav1.GetOptions{}) assert.Nil(t, err) // wfBytes, _ := 
json.MarshalIndent(wf, "", " ") // log.Printf("%s", wfBytes) woc = newWorkflowOperationCtx(wf, controller) + assert.Equal(t, int64(2), woc.countActivePods()) woc.operate() pods, err = controller.kubeclientset.CoreV1().Pods("").List(metav1.ListOptions{}) assert.Nil(t, err) From 43e21815423a4b855afbba2b8e0a908a7abb1618 Mon Sep 17 00:00:00 2001 From: Sam DeLuca Date: Thu, 25 Apr 2019 13:43:48 -0400 Subject: [PATCH 142/145] gopkg --- Gopkg.lock | 1188 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1188 insertions(+) create mode 100644 Gopkg.lock diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000000..e276f7df7c25 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,1188 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "cloud.google.com/go" + packages = [ + "compute/metadata", + "iam", + "internal", + "internal/optional", + "internal/trace", + "internal/version", + "storage" + ] + revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239" + version = "v0.26.0" + +[[projects]] + name = "github.com/Azure/go-autorest" + packages = [ + "autorest", + "autorest/adal", + "autorest/azure", + "autorest/date" + ] + revision = "1ff28809256a84bb6966640ff3d0371af82ccba4" + +[[projects]] + name = "github.com/BurntSushi/toml" + packages = ["."] + revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" + version = "v0.3.1" + +[[projects]] + name = "github.com/Knetic/govaluate" + packages = ["."] + revision = "9aa49832a739dcd78a5542ff189fb82c3e423116" + +[[projects]] + name = "github.com/PuerkitoBio/purell" + packages = ["."] + revision = "44968752391892e1b0d0b821ee79e9a85fa13049" + version = "v1.1.1" + +[[projects]] + branch = "master" + name = "github.com/PuerkitoBio/urlesc" + packages = ["."] + revision = "de5bf2ad457846296e2031421a34e2568e304e35" + +[[projects]] + branch = "master" + name = "github.com/argoproj/pkg" + packages = [ + "cli", + "errors", + "exec", + "file", + "humanize", + "json", + "kube/cli", + "kube/errors", + "s3", + "stats", + "strftime", + "time" + ] + revision = "7e3ef65c8d44303738c7e815bd9b1b297b39f5c8" + +[[projects]] + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "4b2b341e8d7715fae06375aa633dbb6e91b3fb46" + version = "v1.0.0" + +[[projects]] + name = "github.com/colinmarc/hdfs" + packages = [ + ".", + "protocol/hadoop_common", + "protocol/hadoop_hdfs", + "rpc" + ] + revision = "48eb8d6c34a97ffc73b406356f0f2e1c569b42a5" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + branch = "master" + name = "github.com/docker/spdystream" + packages = [ + ".", + "spdy" + ] + revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85" + +[[projects]] + name = "github.com/dustin/go-humanize" + packages = ["."] + revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e" + version = "v1.0.0" + +[[projects]] + name = "github.com/emicklei/go-restful" + packages = [ + ".", + "log" + ] + revision = "b9bbc5664f49b6deec52393bd68f39830687a347" + version = "v2.9.3" + +[[projects]] + name = "github.com/emirpasic/gods" + packages = [ + "containers", + "lists", + "lists/arraylist", + "trees", + "trees/binaryheap", + "utils" + ] + revision = "1615341f118ae12f353cc8a983f35b584342c9b3" + version = "v1.12.0" + +[[projects]] + name = 
"github.com/evanphx/json-patch" + packages = ["."] + revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" + version = "v4.1.0" + +[[projects]] + branch = "master" + name = "github.com/ghodss/yaml" + packages = ["."] + revision = "25d852aebe32c875e9c044af3eef9c7dc6bc777f" + +[[projects]] + name = "github.com/go-openapi/jsonpointer" + packages = ["."] + revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" + version = "v0.19.0" + +[[projects]] + name = "github.com/go-openapi/jsonreference" + packages = ["."] + revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3" + version = "v0.19.0" + +[[projects]] + name = "github.com/go-openapi/spec" + packages = ["."] + revision = "53d776530bf78a11b03a7b52dd8a083086b045e5" + version = "v0.19.0" + +[[projects]] + name = "github.com/go-openapi/swag" + packages = ["."] + revision = "b3e2804c8535ee0d1b89320afd98474d5b8e9e3b" + version = "v0.19.0" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys" + ] + revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" + version = "v1.2.1" + +[[projects]] + branch = "master" + name = "github.com/golang/glog" + packages = ["."] + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + name = "github.com/golang/protobuf" + packages = [ + "proto", + "protoc-gen-go", + "protoc-gen-go/descriptor", + "protoc-gen-go/generator", + "protoc-gen-go/generator/internal/remap", + "protoc-gen-go/grpc", + "protoc-gen-go/plugin", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp" + ] + revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" + version = "v1.3.1" + +[[projects]] + name = "github.com/google/btree" + packages = ["."] + revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" + version = "v1.0.0" + +[[projects]] + name = "github.com/google/gofuzz" + packages = ["."] + revision = "f140a6486e521aad38f5917de355cbf147cc0496" + version = "v1.0.0" + +[[projects]] + name = "github.com/googleapis/gax-go" + packages = [ + ".", + "v2" + ] + revision = "beaecbbdd8af86aa3acf14180d53828ce69400b2" + version = "v2.0.4" + +[[projects]] + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions" + ] + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + version = "v0.2.0" + +[[projects]] + name = "github.com/gorilla/websocket" + packages = ["."] + revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" + version = "v1.4.0" + +[[projects]] + branch = "master" + name = "github.com/gregjones/httpcache" + packages = [ + ".", + "diskcache" + ] + revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f" + +[[projects]] + name = "github.com/hashicorp/go-uuid" + packages = ["."] + revision = "4f571afc59f3043a65f8fe6bf46d887b10a01d43" + version = "v1.0.1" + +[[projects]] + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru" + ] + revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" + version = "v0.5.1" + +[[projects]] + name = "github.com/imdario/mergo" + packages = ["."] + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" + +[[projects]] + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + branch = "master" + name = "github.com/jbenet/go-context" + packages = ["io"] + revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4" + +[[projects]] + branch = "master" + name = "github.com/jcmturner/gofork" + packages = [ + "encoding/asn1", + "x/crypto/pbkdf2" + ] + revision = 
"dc7c13fece037a4a36e2b3c69db4991498d30692" + +[[projects]] + name = "github.com/json-iterator/go" + packages = ["."] + revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29" + version = "v1.1.6" + +[[projects]] + name = "github.com/kevinburke/ssh_config" + packages = ["."] + revision = "81db2a75821ed34e682567d48be488a1c3121088" + version = "0.5" + +[[projects]] + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" + version = "v1.0.2" + +[[projects]] + branch = "master" + name = "github.com/mailru/easyjson" + packages = [ + "buffer", + "jlexer", + "jwriter" + ] + revision = "1ea4449da9834f4d333f1cc461c374aea217d249" + +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + name = "github.com/minio/minio-go" + packages = [ + ".", + "pkg/credentials", + "pkg/encrypt", + "pkg/s3signer", + "pkg/s3utils", + "pkg/set" + ] + revision = "a8704b60278f98501c10f694a9c4df8bdd1fac56" + version = "v6.0.23" + +[[projects]] + name = "github.com/mitchellh/go-homedir" + packages = ["."] + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/go-ps" + packages = ["."] + revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062" + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + name = "github.com/pelletier/go-buffruneio" + packages = ["."] + revision = "c37440a7cf42ac63b919c752ca73a85067e05992" + version = "v0.2.0" + +[[projects]] + branch = "master" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + name = "github.com/peterbourgon/diskv" + packages = ["."] + revision = "0be1b92a6df0e4f5cb0a5d15fb7f643d0ad93ce6" + version = "v3.0.0" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp" + ] + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" + +[[projects]] + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "a82f4c12f983cc2649298185f296632953e50d3e" + version = "v0.3.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + packages = ["."] + revision = "87a4384529e0652f5035fb5cc8095faf73ea9b0b" + +[[projects]] + name = "github.com/sergi/go-diff" + packages = ["diffmatchpatch"] + revision = "1744e2970ca51c86172c8190fadad617561ed6e7" + version = "v1.0.0" + +[[projects]] + name = "github.com/sirupsen/logrus" + packages = ["."] + revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f" + version = "v1.4.1" + +[[projects]] + 
name = "github.com/spf13/cobra" + packages = ["."] + revision = "fe5e611709b0c57fa4a89136deaa8e1d4004d053" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + name = "github.com/src-d/gcfg" + packages = [ + ".", + "scanner", + "token", + "types" + ] + revision = "1ac3a1ac202429a54835fe8408a92880156b489d" + version = "v1.4.0" + +[[projects]] + name = "github.com/stretchr/objx" + packages = ["."] + revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" + version = "v0.1.1" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "mock", + "require", + "suite" + ] + revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" + version = "v1.3.0" + +[[projects]] + name = "github.com/tidwall/gjson" + packages = ["."] + revision = "eee0b6226f0d1db2675a176fdfaa8419bcad4ca8" + version = "v1.2.1" + +[[projects]] + name = "github.com/tidwall/match" + packages = ["."] + revision = "33827db735fff6510490d69a8622612558a557ed" + version = "v1.0.1" + +[[projects]] + branch = "master" + name = "github.com/tidwall/pretty" + packages = ["."] + revision = "1166b9ac2b65e46a43d8618d30d1554f4652d49b" + +[[projects]] + name = "github.com/valyala/bytebufferpool" + packages = ["."] + revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7" + version = "v1.0.0" + +[[projects]] + name = "github.com/valyala/fasttemplate" + packages = ["."] + revision = "8b5e4e491ab636663841c42ea3c5a9adebabaf36" + version = "v1.0.1" + +[[projects]] + name = "github.com/xanzy/ssh-agent" + packages = ["."] + revision = "6a3e2ff9e7c564f36873c2e36413f634534f1c44" + version = "v0.2.1" + +[[projects]] + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate" + ] + revision = "df6e2001952312404b06f5f6f03fcb4aec1648e5" + version = "v0.21.0" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = [ + "argon2", + "blake2b", + "cast5", + "curve25519", + "ed25519", + "ed25519/internal/edwards25519", + "internal/chacha20", + "internal/subtle", + "md4", + "openpgp", + "openpgp/armor", + "openpgp/elgamal", + "openpgp/errors", + "openpgp/packet", + "openpgp/s2k", + "pbkdf2", + "poly1305", + "ssh", + "ssh/agent", + "ssh/knownhosts", + "ssh/terminal" + ] + revision = "c05e17bb3b2dca130fc919668a96b4bec9eb9442" + +[[projects]] + branch = "master" + name = "golang.org/x/exp" + packages = [ + "apidiff", + "cmd/apidiff" + ] + revision = "8c7d1c524af6eaf18eadc4f57955a748e7001194" + +[[projects]] + branch = "master" + name = "golang.org/x/lint" + packages = [ + ".", + "golint" + ] + revision = "959b441ac422379a43da2230f62be024250818b0" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "publicsuffix", + "trace" + ] + revision = "4829fb13d2c62012c17688fa7f629f371014946d" + +[[projects]] + branch = "master" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt" + ] + revision = "9f3314589c9a9136388751d9adae6b0ed400978a" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = [ + "cpu", + "unix", + "windows" + ] + revision = 
"16072639606ea9e22c7d86e4cbd6af6314f4193c" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + "width" + ] + revision = "c942b20a5d85b458c4dce1589326051d85e25d6d" + version = "v0.3.1" + +[[projects]] + branch = "master" + name = "golang.org/x/time" + packages = ["rate"] + revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" + +[[projects]] + branch = "master" + name = "golang.org/x/tools" + packages = [ + "cmd/goimports", + "go/ast/astutil", + "go/buildutil", + "go/gcexportdata", + "go/internal/cgo", + "go/internal/gcimporter", + "go/internal/packagesdriver", + "go/loader", + "go/packages", + "go/types/typeutil", + "imports", + "internal/fastwalk", + "internal/gopathwalk", + "internal/module", + "internal/semver" + ] + revision = "36563e24a2627da92566d43aa1c7a2dd895fc60d" + +[[projects]] + name = "google.golang.org/api" + packages = [ + "gensupport", + "googleapi", + "googleapi/internal/uritemplates", + "googleapi/transport", + "internal", + "iterator", + "option", + "storage/v1", + "transport/http", + "transport/http/internal/propagation" + ] + revision = "0cbcb99a9ea0c8023c794b2693cbe1def82ed4d7" + version = "v0.3.2" + +[[projects]] + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch" + ] + revision = "54a98f90d1c46b7731eb8fb305d2a321c30ef610" + version = "v1.5.0" + +[[projects]] + branch = "master" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/annotations", + "googleapis/iam/v1", + "googleapis/rpc/code", + "googleapis/rpc/status" + ] + revision = "e7d98fc518a78c9f8b5ee77be7b0b317475d89e1" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap" + ] + revision = "25c4f928eaa6d96443009bd842389fb4fa48664e" + version = "v1.20.1" + +[[projects]] + name = "gopkg.in/inf.v0" + packages = ["."] + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + name = "gopkg.in/ini.v1" + packages = ["."] + revision = "c85607071cf08ca1adaf48319cd1aa322e81d8c1" + version = "v1.42.0" + +[[projects]] + name = "gopkg.in/jcmturner/aescts.v1" + packages = ["."] + revision = "f6abebb3171c4c1b1fea279cb7c7325020a26290" + version = "v1.0.1" + +[[projects]] + name = "gopkg.in/jcmturner/dnsutils.v1" + packages = ["."] + revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c" + version = "v1.0.1" + +[[projects]] + name = "gopkg.in/jcmturner/gokrb5.v5" + packages = [ + "asn1tools", + "client", + "config", + "credentials", + "crypto", + "crypto/common", + "crypto/etype", + 
"crypto/rfc3961", + "crypto/rfc3962", + "crypto/rfc4757", + "crypto/rfc8009", + "gssapi", + "iana", + "iana/addrtype", + "iana/adtype", + "iana/asnAppTag", + "iana/chksumtype", + "iana/errorcode", + "iana/etypeID", + "iana/flags", + "iana/keyusage", + "iana/msgtype", + "iana/nametype", + "iana/patype", + "kadmin", + "keytab", + "krberror", + "messages", + "mstypes", + "pac", + "types" + ] + revision = "32ba44ca5b42f17a4a9f33ff4305e70665a1bc0f" + version = "v5.3.0" + +[[projects]] + name = "gopkg.in/jcmturner/rpc.v0" + packages = ["ndr"] + revision = "4480c480c9cd343b54b0acb5b62261cbd33d7adf" + version = "v0.0.2" + +[[projects]] + name = "gopkg.in/src-d/go-billy.v4" + packages = [ + ".", + "helper/chroot", + "helper/polyfill", + "osfs", + "util" + ] + revision = "982626487c60a5252e7d0b695ca23fb0fa2fd670" + version = "v4.3.0" + +[[projects]] + name = "gopkg.in/src-d/go-git.v4" + packages = [ + ".", + "config", + "internal/revision", + "internal/url", + "plumbing", + "plumbing/cache", + "plumbing/filemode", + "plumbing/format/config", + "plumbing/format/diff", + "plumbing/format/gitignore", + "plumbing/format/idxfile", + "plumbing/format/index", + "plumbing/format/objfile", + "plumbing/format/packfile", + "plumbing/format/pktline", + "plumbing/object", + "plumbing/protocol/packp", + "plumbing/protocol/packp/capability", + "plumbing/protocol/packp/sideband", + "plumbing/revlist", + "plumbing/storer", + "plumbing/transport", + "plumbing/transport/client", + "plumbing/transport/file", + "plumbing/transport/git", + "plumbing/transport/http", + "plumbing/transport/internal/common", + "plumbing/transport/server", + "plumbing/transport/ssh", + "storage", + "storage/filesystem", + "storage/filesystem/dotgit", + "storage/memory", + "utils/binary", + "utils/diff", + "utils/ioutil", + "utils/merkletrie", + "utils/merkletrie/filesystem", + "utils/merkletrie/index", + "utils/merkletrie/internal/frame", + "utils/merkletrie/noder" + ] + revision = "aa6f288c256ff8baf8a7745546a9752323dc0d89" + version = "v4.11.0" + +[[projects]] + name = "gopkg.in/warnings.v0" + packages = ["."] + revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b" + version = "v0.1.2" + +[[projects]] + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + name = "honnef.co/go/tools" + packages = [ + "arg", + "callgraph", + "callgraph/static", + "cmd/staticcheck", + "config", + "deprecated", + "functions", + "internal/sharedcheck", + "lint", + "lint/lintdsl", + "lint/lintutil", + "lint/lintutil/format", + "simple", + "ssa", + "ssa/ssautil", + "ssautil", + "staticcheck", + "staticcheck/vrp", + "stylecheck", + "unused", + "version" + ] + revision = "95959eaf5e3c41c66151dcfd91779616b84077a8" + version = "2019.1.1" + +[[projects]] + branch = "release-1.12" + name = "k8s.io/api" + packages = [ + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1" + ] + 
revision = "6db15a15d2d3874a6c3ddb2140ac9f3bc7058428" + +[[projects]] + branch = "release-1.12" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/httpstream", + "pkg/util/httpstream/spdy", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/remotecommand", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/strategicpatch", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/netutil", + "third_party/forked/golang/reflect" + ] + revision = "01f179d85dbce0f2e0e4351a92394b38694b7cae" + +[[projects]] + branch = "release-9.0" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/fake", + "dynamic", + "informers/internalinterfaces", + "kubernetes", + "kubernetes/fake", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1alpha1/fake", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/admissionregistration/v1beta1/fake", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1/fake", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta1/fake", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/apps/v1beta2/fake", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1/fake", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authentication/v1beta1/fake", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1/fake", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/authorization/v1beta1/fake", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v1/fake", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta1/fake", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/autoscaling/v2beta2/fake", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1/fake", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v1beta1/fake", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/batch/v2alpha1/fake", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/certificates/v1beta1/fake", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/coordination/v1beta1/fake", + "kubernetes/typed/core/v1", + "kubernetes/typed/core/v1/fake", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/events/v1beta1/fake", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/extensions/v1beta1/fake", + "kubernetes/typed/networking/v1", + "kubernetes/typed/networking/v1/fake", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/policy/v1beta1/fake", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1/fake", + "kubernetes/typed/rbac/v1alpha1", + 
"kubernetes/typed/rbac/v1alpha1/fake", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/rbac/v1beta1/fake", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1alpha1/fake", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/scheduling/v1beta1/fake", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/settings/v1alpha1/fake", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1/fake", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1alpha1/fake", + "kubernetes/typed/storage/v1beta1", + "kubernetes/typed/storage/v1beta1/fake", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/azure", + "plugin/pkg/client/auth/exec", + "plugin/pkg/client/auth/gcp", + "plugin/pkg/client/auth/oidc", + "rest", + "rest/watch", + "testing", + "third_party/forked/golang/template", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/metrics", + "tools/pager", + "tools/reference", + "tools/remotecommand", + "tools/watch", + "transport", + "transport/spdy", + "util/buffer", + "util/cert", + "util/connrotation", + "util/exec", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/jsonpath", + "util/retry", + "util/workqueue" + ] + revision = "77e032213d34c856222b4d4647c1c175ba8d22b9" + +[[projects]] + branch = "release-1.12" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "cmd/deepcopy-gen", + "cmd/deepcopy-gen/args", + "cmd/informer-gen", + "cmd/informer-gen/args", + "cmd/informer-gen/generators", + "cmd/lister-gen", + "cmd/lister-gen/args", + "cmd/lister-gen/generators", + "pkg/util" + ] + revision = "b1289fc74931d4b6b04bd1a259acfc88a2cb0a66" + +[[projects]] + branch = "master" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types" + ] + revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985" + +[[projects]] + name = "k8s.io/klog" + packages = ["."] + revision = "e531227889390a39d9533dde61f590fe9f4b0035" + version = "v0.3.0" + +[[projects]] + branch = "master" + name = "k8s.io/kube-openapi" + packages = [ + "pkg/common", + "pkg/util/proto" + ] + revision = "6b3d3b2d5666c5912bab8b7bf26bf50f75a8f887" + +[[projects]] + branch = "master" + name = "k8s.io/utils" + packages = ["pointer"] + revision = "21c4ce38f2a793ec01e925ddc31216500183b773" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "f32bcd98041871575601108af8703d15a31ac8b6c27338818fd2cb0033d9b01c" + solver-name = "gps-cdcl" + solver-version = 1 From 35c1071d8e2de7e174494b1b2fd39ed706daada2 Mon Sep 17 00:00:00 2001 From: Sam DeLuca Date: Thu, 25 Apr 2019 17:43:37 -0400 Subject: [PATCH 143/145] use GetSecretFromVolMount instead of GetSecrets --- workflow/controller/workflowpod.go | 4 ++++ workflow/executor/executor.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index d5456c08922d..80feeb0a5b69 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -919,6 +919,8 @@ func 
createArchiveLocationSecret(tmpl *wfv1.Template, volMap map[string]apiv1.Vo createSecretVal(volMap, gitRepo.UsernameSecret, uniqueKeyMap) createSecretVal(volMap, gitRepo.PasswordSecret, uniqueKeyMap) createSecretVal(volMap, gitRepo.SSHPrivateKeySecret, uniqueKeyMap) + } else if gcsRepo := tmpl.ArchiveLocation.GCS; gcsRepo != nil { + createSecretVal(volMap, &gcsRepo.CredentialsSecret, uniqueKeyMap) } } @@ -936,6 +938,8 @@ func createSecretVolume(volMap map[string]apiv1.Volume, art wfv1.Artifact, keyMa } else if art.HDFS != nil { createSecretVal(volMap, art.HDFS.KrbCCacheSecret, keyMap) createSecretVal(volMap, art.HDFS.KrbKeytabSecret, keyMap) + } else if art.GCS != nil { + createSecretVal(volMap, &art.GCS.CredentialsSecret, keyMap) } } diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index c11ce2ae55ab..9db2e8f08095 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -549,7 +549,7 @@ func (we *WorkflowExecutor) InitDriver(art wfv1.Artifact) (artifact.ArtifactDriv return &driver, nil } if art.GCS != nil { - credsJSONData, err := we.GetSecrets(we.Namespace, art.GCS.CredentialsSecret.Name, art.GCS.CredentialsSecret.Key) + credsJSONData, err := we.GetSecretFromVolMount(art.GCS.CredentialsSecret.Name, art.GCS.CredentialsSecret.Key) if err != nil { return nil, err } From afc87c8a0732fa0169fa770e115bfdc59f001791 Mon Sep 17 00:00:00 2001 From: Sam DeLuca Date: Fri, 26 Apr 2019 10:21:01 -0400 Subject: [PATCH 144/145] actually build argoexec --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 8b6ab84f21ae..543ce07ae7a5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -58,7 +58,7 @@ pipeline { stage('build executor') { steps { - sh "docker build -t argoexec:${VERSION} --target argoexec-base ." + sh "docker build -t argoexec:${VERSION} --target argoexec ." } } From e9f2a46af1f691e3953aa3e473f80d6e1b55c692 Mon Sep 17 00:00:00 2001 From: Ilias Katsakioris Date: Sun, 28 Apr 2019 02:08:40 +0300 Subject: [PATCH 145/145] Fix #1340 parameter substitution bug Signed-off-by: Ilias Katsakioris --- workflow/controller/workflowpod.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 81dac30793aa..d3b0d14d5acd 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -185,7 +185,10 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont // Perform one last variable substitution here. Some variables come from the workflow // configmap (e.g. archive location) or volumes attribute, and were not substituted // in executeTemplate. - podParams := woc.globalParams + podParams := make(map[string]string) + for gkey, gval := range woc.globalParams { + podParams[gkey] = gval + } for _, inParam := range tmpl.Inputs.Parameters { podParams["inputs.parameters."+inParam.Name] = *inParam.Value }
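The `workflow/controller/operator.go` change in the first patch of this series (`performAssessment(&tmpPod)` in place of `performAssessment(&pod)`) addresses a classic Go pitfall: before Go 1.22, a `for ... range` loop reused a single loop variable, so a goroutine that captured it by reference could observe whichever element the loop had advanced to by the time the goroutine actually ran. Passing the element as an argument gives each goroutine its own copy. A minimal, self-contained sketch of the bug class (names are illustrative, not Argo's):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	pods := []string{"pod-a", "pod-b", "pod-c"}
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		// Passing `pod` as an argument copies it per goroutine. Capturing
		// it directly in the closure would (pre-Go 1.22) race against the
		// loop mutating the single shared loop variable.
		go func(tmpPod string) {
			defer wg.Done()
			fmt.Println(tmpPod)
		}(pod)
	}
	wg.Wait()
}
```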
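[PATCH 143/145] switches the executor from fetching the GCS credentials secret through the Kubernetes API (`GetSecrets`) to reading it from a mounted volume (`GetSecretFromVolMount`), pairing with the `createSecretVal` changes that mount `CredentialsSecret` into the pod. A mounted secret is a plain file read, so it needs no API round trip and no `get`-secret RBAC permission for the executor's service account. A minimal sketch of such a read, assuming the standard Kubernetes secret-volume layout of one file per key under a per-secret directory; the base path is an assumption for illustration, not Argo's actual mount point:

```go
package executor

import (
	"io/ioutil"
	"path/filepath"
)

// readSecretFromVolMount reads one key of a secret that Kubernetes has
// mounted as a volume: each key appears as a file named after the key,
// inside a directory named after the secret. The base path below is
// hypothetical, for illustration only.
func readSecretFromVolMount(secretName, key string) ([]byte, error) {
	path := filepath.Join("/var/run/secrets/argo", secretName, key)
	return ioutil.ReadFile(path)
}
```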
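[PATCH 145/145] fixes the parameter substitution bug (#1340) by replacing `podParams := woc.globalParams` with an explicit copy. In Go, assigning a map copies only the map header, not its contents, so the old code let each pod's input-parameter writes leak into `woc.globalParams` and thereby into every pod created afterwards. A minimal, self-contained illustration of the aliasing (variable names are illustrative):

```go
package main

import "fmt"

func main() {
	globalParams := map[string]string{"workflow.name": "wf-1"}

	// Assignment aliases: both names refer to the same underlying map.
	aliased := globalParams
	aliased["inputs.parameters.x"] = "pod-specific"
	fmt.Println(globalParams["inputs.parameters.x"]) // "pod-specific" — leaked

	// An explicit element-by-element copy, as in the patch, isolates
	// per-pod writes from the shared map.
	podParams := make(map[string]string, len(globalParams))
	for k, v := range globalParams {
		podParams[k] = v
	}
	podParams["inputs.parameters.y"] = "isolated"
	fmt.Println(globalParams["inputs.parameters.y"]) // "" — original untouched
}
```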