diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index ea65160986e..00000000000 --- a/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM golang:1.10.2-alpine3.7 AS builder - -RUN apk add --no-cache git -RUN go get github.com/golang/dep/cmd/dep - -WORKDIR /go/src/github.com/kubeflow/pipelines/ -COPY . . - -RUN dep ensure -vendor-only -v -RUN go build -o /bin/controller ./resources/scheduledworkflow/*.go - -FROM alpine:3.7 -COPY --from=builder /bin/controller /bin/controller - -CMD ["/bin/controller"] diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 0655c35024e..00000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,539 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "cloud.google.com/go" - packages = ["compute/metadata"] - revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479" - version = "v0.23.0" - -[[projects]] - name = "github.com/PuerkitoBio/purell" - packages = ["."] - revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/PuerkitoBio/urlesc" - packages = ["."] - revision = "de5bf2ad457846296e2031421a34e2568e304e35" - -[[projects]] - name = "github.com/argoproj/argo" - packages = [ - "errors", - "pkg/apis/workflow", - "pkg/apis/workflow/v1alpha1", - "pkg/client/clientset/versioned", - "pkg/client/clientset/versioned/scheme", - "pkg/client/clientset/versioned/typed/workflow/v1alpha1", - "pkg/client/informers/externalversions", - "pkg/client/informers/externalversions/internalinterfaces", - "pkg/client/informers/externalversions/workflow", - "pkg/client/informers/externalversions/workflow/v1alpha1", - "pkg/client/listers/workflow/v1alpha1", - "util/retry", - "workflow/common" - ] - revision = "9379638189cc194f1b34ff7295f0832eac1c1651" - version = "v2.1.0" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/docker/spdystream" - packages = [ - ".", - "spdy" - ] - revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db" - -[[projects]] - name = "github.com/emicklei/go-restful" - packages = [ - ".", - "log" - ] - revision = "3658237ded108b4134956c1b3050349d93e7b895" - version = "v2.7.1" - -[[projects]] - name = "github.com/ghodss/yaml" - packages = ["."] - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/go-openapi/jsonpointer" - packages = ["."] - revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" - -[[projects]] - branch = "master" - name = "github.com/go-openapi/jsonreference" - packages = ["."] - revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" - -[[projects]] - branch = "master" - name = "github.com/go-openapi/spec" - packages = ["."] - revision = "bcff419492eeeb01f76e77d2ebc714dc97b607f5" - -[[projects]] - branch = "master" - name = "github.com/go-openapi/swag" - packages = ["."] - revision = "811b1089cde9dad18d4d0c2d09fbdbf28dbd27a5" - -[[projects]] - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys" - ] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/golang/glog" - packages = ["."] - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - branch = "master" - name = "github.com/golang/groupcache" - packages = ["lru"] - revision = 
"24b0969c4cb722950103eed87108c8d291a8df00" - -[[projects]] - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp" - ] - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/google/btree" - packages = ["."] - revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4" - -[[projects]] - branch = "master" - name = "github.com/google/gofuzz" - packages = ["."] - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" - -[[projects]] - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - "extensions" - ] - revision = "7c663266750e7d82587642f65e60bc4083f1f84e" - version = "v0.2.0" - -[[projects]] - branch = "master" - name = "github.com/gregjones/httpcache" - packages = [ - ".", - "diskcache" - ] - revision = "9cad4c3443a7200dd6400aef47183728de563a38" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru" - ] - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" - -[[projects]] - branch = "master" - name = "github.com/howeyc/gopass" - packages = ["."] - revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8" - -[[projects]] - name = "github.com/imdario/mergo" - packages = ["."] - revision = "9d5f1277e9a8ed20c3684bda8fde67c05628518c" - version = "v0.3.4" - -[[projects]] - name = "github.com/json-iterator/go" - packages = ["."] - revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4" - version = "1.1.3" - -[[projects]] - name = "github.com/juju/ratelimit" - packages = ["."] - revision = "59fac5042749a5afb9af70e813da1dd5474f0167" - version = "1.0.1" - -[[projects]] - branch = "master" - name = "github.com/mailru/easyjson" - packages = [ - "buffer", - "jlexer", - "jwriter" - ] - revision = "9825584555aa620c53c265d4a09ace0df1346fd9" - -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - name = "github.com/modern-go/reflect2" - packages = ["."] - revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f" - version = "1.0.0" - -[[projects]] - branch = "master" - name = "github.com/petar/GoLLRB" - packages = ["llrb"] - revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" - -[[projects]] - name = "github.com/peterbourgon/diskv" - packages = ["."] - revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" - version = "v2.0.1" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/robfig/cron" - packages = ["."] - revision = "b41be1df696709bb6395fe435af20370037c0b4c" - version = "v1.1" - -[[projects]] - name = "github.com/sirupsen/logrus" - packages = ["."] - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" - -[[projects]] - name = "github.com/spf13/pflag" - packages = ["."] - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" - -[[projects]] - name = "github.com/stretchr/testify" - packages = ["assert"] - revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" - version = "v1.2.1" - -[[projects]] - branch = "master" - name = "github.com/valyala/bytebufferpool" - packages = ["."] - revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7" 
- -[[projects]] - branch = "master" - name = "github.com/valyala/fasttemplate" - packages = ["."] - revision = "dcecefd839c4193db0d35b88ec65b4c12d360ab0" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = ["ssh/terminal"] - revision = "b47b1587369238182299fe4dad77d05b8b461e06" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "http/httpguts", - "http2", - "http2/hpack", - "idna" - ] - revision = "1e491301e022f8f977054da4c2d852decd59571f" - -[[projects]] - branch = "master" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt" - ] - revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows" - ] - revision = "c11f84a56e43e20a78cee75a7c034031ecf57d1f" - -[[projects]] - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - "width" - ] - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch" - ] - revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" - version = "v1.0.0" - -[[projects]] - name = "gopkg.in/inf.v0" - packages = ["."] - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[[projects]] - branch = "release-1.9" - name = "k8s.io/api" - packages = [ - "admissionregistration/v1alpha1", - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1alpha1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1" - ] - revision = "9273ee02527c608cecc74969b3e489f5dba686da" - -[[projects]] - branch = "release-1.9" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/internalversion", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1alpha1", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/cache", - "pkg/util/clock", - "pkg/util/diff", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/httpstream", - "pkg/util/httpstream/spdy", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/mergepatch", - "pkg/util/net", - 
"pkg/util/remotecommand", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/strategicpatch", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/json", - "third_party/forked/golang/netutil", - "third_party/forked/golang/reflect" - ] - revision = "21efb2924c7cf1920f76af05b1fd6a325bf46dfc" - -[[projects]] - branch = "release-6.0" - name = "k8s.io/client-go" - packages = [ - "discovery", - "discovery/fake", - "kubernetes", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1alpha1", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/core/v1", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/networking/v1", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1beta1", - "pkg/version", - "plugin/pkg/client/auth/gcp", - "rest", - "rest/watch", - "testing", - "third_party/forked/golang/template", - "tools/auth", - "tools/cache", - "tools/clientcmd", - "tools/clientcmd/api", - "tools/clientcmd/api/latest", - "tools/clientcmd/api/v1", - "tools/metrics", - "tools/pager", - "tools/record", - "tools/reference", - "tools/remotecommand", - "transport", - "transport/spdy", - "util/buffer", - "util/cert", - "util/exec", - "util/flowcontrol", - "util/homedir", - "util/integer", - "util/jsonpath", - "util/workqueue" - ] - revision = "1692bdde78a61f6316cfb6988015bd460067b238" - -[[projects]] - branch = "master" - name = "k8s.io/kube-openapi" - packages = [ - "pkg/common", - "pkg/util/proto" - ] - revision = "8a9b82f00b3a86eac24681da3f9fe6c34c01cea2" - -[[projects]] - name = "k8s.io/kubernetes" - packages = ["pkg/apis/core"] - revision = "2bba0127d85d5a46ab4b778548be28623b32d0b0" - version = "v1.10.3" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "4cd7d782592a9a5abbf214589c767e7ccca736c733215bcd2d0efe222b5e29ce" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 9e6824437db..00000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,35 +0,0 @@ -[[constraint]] - name = "github.com/argoproj/argo" - version = "2.1.0" - -[[constraint]] - name = "github.com/pkg/errors" - version = "0.8.0" - -[[constraint]] - name = "github.com/robfig/cron" - version = "1.1.0" - -[[constraint]] - name = "github.com/sirupsen/logrus" - version = "1.0.5" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.2.1" - -[[constraint]] - branch = "release-1.9" - name = "k8s.io/api" - -[[constraint]] - branch = "release-1.9" - name = "k8s.io/apimachinery" - -[[constraint]] - branch = "release-6.0" - name = "k8s.io/client-go" - -[[constraint]] - name = "k8s.io/kubernetes" - version = "1.10.3" diff 
--git a/Makefile b/Makefile deleted file mode 100644 index 0c0de20a3d5..00000000000 --- a/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -.PHONY: build-controller -build-controller: - go build -o ./bin/controller ./resources/scheduledworkflow - -.PHONY: build-controller-image -build-controller-image: - docker build -t pipelines/scheduledworkflow-controller . diff --git a/README.md b/README.md deleted file mode 100644 index 92cb775aef5..00000000000 --- a/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# Pipelines - -This repository provides Custom Resource Definitions and tools for ML pipeline orchestration. - -## Scheduled Workflow CRD/controller - -### How to run the ScheduledWorkflow controller from the command line? - -The following assumes that your Kubernetes configuration file is located at '$HOME/.kube/config'. - -To create the resource for the CRD, execute the following command: - -``` -kubectl create -f ./install/manifests/scheduledworkflow-crd.yaml -``` - -Output: - -``` -customresourcedefinition.apiextensions.k8s.io "scheduledworkflows.kubeflow.org" created -``` - -To run the controller locally using this -configuration file, execute: - -``` -go run ./resources/scheduledworkflow/*.go -kubeconfig=$HOME/.kube/config -alsologtostderr=true -``` - -Output: - -``` -Starting workers -Started workers -Wait for shut down -``` - -To run a sample workflow on a schedule, execute: - -``` -kubectl create -f ./example/every-minute-cron.yaml -``` - -Output: - -``` -scheduledworkflow.kubeflow.org "every-minute-cron" created -``` - -To see the current list of ScheduledWorkflows, execute: - -``` -kubectl get swf -``` - -Output: - -``` -NAME AGE -every-minute-cron 1m -``` - -To see the current status of the ScheduledWorkflow named 'every-minute-cron', execute: - -``` -kubectl describe swf every-minute-cron -``` - -Output: - -``` -Name: every-minute-cron -Namespace: default -Labels: scheduledworkflows.kubeflow.org/enabled=true - scheduledworkflows.kubeflow.org/status=Enabled -Annotations: -API Version: kubeflow.org/v1alpha1 -Kind: ScheduledWorkflow -Metadata: - Cluster Name: - Creation Timestamp: 2018-06-06T01:24:55Z - Generation: 0 - Initializers: - Resource Version: 3056202 - Self Link: /apis/kubeflow.org/v1alpha1/namespaces/default/scheduledworkflows/every-minute-cron - UID: 6b11874e-6928-11e8-9fd5-42010a8a0021 -Spec: - Enabled: true - Max History: 10 - Trigger: - Cron Schedule: - Cron: 1 * * * * * - Workflow: - Spec: - Arguments: - Parameters: - Name: message - Value: hello world - Entrypoint: whalesay - Templates: - Container: - Args: - {{inputs.parameters.message}} - Command: - cowsay - Image: docker/whalesay - Name: - Resources: - Inputs: - Parameters: - Name: message - Metadata: - Name: whalesay - Outputs: -Status: - Conditions: - Last Heartbeat Time: 2018-06-06T01:41:40Z - Last Transition Time: 2018-06-06T01:41:40Z - Message: The schedule is enabled. - Reason: Enabled - Status: True - Type: Enabled - Trigger: - Last Index: 17 - Last Triggered Time: 2018-06-06T01:41:01Z - Next Triggered Time: 2018-06-06T01:42:01Z - Workflow History: - Completed: - Phase: Succeeded - Created At: 2018-06-06T01:41:10Z - Finished At: 2018-06-06T01:41:13Z - Index: 17 - Name: every-minute-cron-17-2173648469 - Namespace: default - Scheduled At: 2018-06-06T01:41:01Z - Self Link: /apis/argoproj.io/v1alpha1/namespaces/default/workflows/every-minute-cron-17-2173648469 - Started At: 2018-06-06T01:41:10Z - UID: b0b63a82-692a-11e8-9fd5-42010a8a0021 - [...] 
-``` -### diff --git a/example/concurrency.yaml b/example/concurrency.yaml deleted file mode 100644 index 8f3df0f37a1..00000000000 --- a/example/concurrency.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: kubeflow.org/v1alpha1 -kind: ScheduledWorkflow -metadata: - name: concurrency -spec: - description: "concurrency" - enabled: true - maxHistory: 10 - maxConcurrency: 5 - trigger: - cronSchedule: - cron: "1-10 * * * * *" - workflow: - spec: - entrypoint: sleep-n-sec - arguments: - parameters: - - name: seconds - value: "60" - templates: - - name: sleep-n-sec - inputs: - parameters: - - name: seconds - container: - image: alpine:latest - command: [sh, -c] - args: ["echo sleeping for {{inputs.parameters.seconds}} seconds; sleep {{inputs.parameters.seconds}}; echo done"] - - diff --git a/example/every-minute-cron.yaml b/example/every-minute-cron.yaml deleted file mode 100644 index 7f71fa28cb4..00000000000 --- a/example/every-minute-cron.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: kubeflow.org/v1alpha1 -kind: ScheduledWorkflow -metadata: - name: every-minute-cron -spec: - description: "every-minute-cron" - enabled: true - maxHistory: 10 - trigger: - cronSchedule: - cron: 1 * * * * * - workflow: - spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: hello world - templates: - - name: whalesay - inputs: - parameters: - - name: message - container: - image: docker/whalesay - command: [cowsay] - args: ["{{inputs.parameters.message}}"] diff --git a/example/every-minute-periodic.yaml b/example/every-minute-periodic.yaml deleted file mode 100644 index 4f1cf9c95f6..00000000000 --- a/example/every-minute-periodic.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: kubeflow.org/v1alpha1 -kind: ScheduledWorkflow -metadata: - name: every-minute-periodic -spec: - description: "every-minute-periodic" - enabled: true - maxHistory: 10 - trigger: - periodicSchedule: - intervalSecond: 60 - workflow: - spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: hello world - templates: - - name: whalesay - inputs: - parameters: - - name: message - container: - image: docker/whalesay - command: [cowsay] - args: ["{{inputs.parameters.message}}"] diff --git a/example/invalid.yaml b/example/invalid.yaml deleted file mode 100644 index 3ead1640b91..00000000000 --- a/example/invalid.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kubeflow.org/v1alpha1 -kind: ScheduledWorkflow -metadata: - name: invalid -spec: - description: "invalid" - enabled: true - maxHistory: !@#$%^&*()_+ - trigger: - cronSchedule: - cron: "1-10 * * * * *" - workflow: - spec: - entrypoint: sleep-n-sec - arguments: - parameters: - - name: seconds - value: "60" - templates: - - name: sleep-n-sec - inputs: - parameters: - - name: seconds - container: - image: alpine:latest - command: [sh, -c] - args: ["echo sleeping for {{inputs.parameters.seconds}} seconds; sleep {{inputs.parameters.seconds}}; echo done"] - - diff --git a/example/no-history.yaml b/example/no-history.yaml deleted file mode 100644 index 225c4587fda..00000000000 --- a/example/no-history.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kubeflow.org/v1alpha1 -kind: ScheduledWorkflow -metadata: - name: no-history -spec: - description: "no-history" - enabled: true - maxHistory: 0 - trigger: - cronSchedule: - cron: 1 * * * * * - workflow: - spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: hello world - templates: - - name: whalesay - inputs: - parameters: - - name: message - container: - image: 
docker/whalesay - command: [cowsay] - args: ["{{inputs.parameters.message}}"] - - diff --git a/example/parameterized.yaml b/example/parameterized.yaml deleted file mode 100644 index 4fe2d3b6201..00000000000 --- a/example/parameterized.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: "kubeflow.org/v1alpha1" -kind: ScheduledWorkflow -metadata: - name: parameterized -spec: - description: "parameterized" - enabled: true - maxHistory: 10 - trigger: - cronSchedule: - cron: 1 * * * * * - workflow: - parameters: - - name: message - # [[ScheduledTime]] is substituted by the scheduled time of the workflow (default format) - # [[CurrentTime]] is substituted by the current time (default format) - # [[Index]] is substituted by the index of the workflow (e.g. 3 means that it was the 3rd workflow created) - # [[ScheduledTime.15-04-05]] is substituted by the scheduled time (custom format specified as a Go time format: https://golang.org/pkg/time/#Parse) - # [[CurrentTime.15-04-05]] is substituted by the current time (custom format specified as a Go time format: https://golang.org/pkg/time/#Parse) - value: "hello world [[ScheduledTime]] - [[CurrentTime]] - [[Index]] - [[ScheduledTime.Mon Jan]] - [[CurrentTime.15-04-05]]" - spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: "my message" - templates: - - name: whalesay - inputs: - parameters: - - name: message - container: - image: docker/whalesay - command: [cowsay] - args: ["{{inputs.parameters.message}}"] diff --git a/example/single-run.yaml b/example/single-run.yaml deleted file mode 100644 index 5dfaccac476..00000000000 --- a/example/single-run.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: kubeflow.org/v1alpha1 -kind: ScheduledWorkflow -metadata: - name: single-run -spec: - description: "single-run" - enabled: true - maxHistory: 10 - workflow: - spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: hello world - templates: - - name: whalesay - inputs: - parameters: - - name: message - container: - image: docker/whalesay - command: [cowsay] - args: ["{{inputs.parameters.message}}"] - diff --git a/example/start-end-cron.yaml b/example/start-end-cron.yaml deleted file mode 100644 index 5297b7d04fe..00000000000 --- a/example/start-end-cron.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: kubeflow.org/v1alpha1 -kind: ScheduledWorkflow -metadata: - name: start-end-cron -spec: - description: "start-end-cron" - enabled: true - maxHistory: 10 - trigger: - cronSchedule: - cron: 1 * * * * * - startTime: 2018-05-19T05:18:01Z - endTime: 2020-05-19T05:22:01Z - workflow: - spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: hello world - templates: - - name: whalesay - inputs: - parameters: - - name: message - container: - image: docker/whalesay - command: [cowsay] - args: ["{{inputs.parameters.message}}"] - - diff --git a/example/start-end-periodic.yaml b/example/start-end-periodic.yaml deleted file mode 100644 index b0f25cef98e..00000000000 --- a/example/start-end-periodic.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kubeflow.org/v1alpha1 -kind: ScheduledWorkflow -metadata: - name: start-end-periodic -spec: - description: "start-end-periodic" - enabled: true - maxHistory: 10 - trigger: - periodicSchedule: - intervalSecond: 60 - startTime: 2018-05-19T05:17:01Z - endTime: 2020-05-19T05:20:01Z - workflow: - spec: - entrypoint: whalesay - arguments: - parameters: - - name: message
- container: - image: docker/whalesay - command: [cowsay] - args: ["{{inputs.parameters.message}}"] \ No newline at end of file diff --git a/hack/custom-boilerplate.go.txt b/hack/custom-boilerplate.go.txt deleted file mode 100644 index adcfb4f1395..00000000000 --- a/hack/custom-boilerplate.go.txt +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright YEAR The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh deleted file mode 100755 index ad4944151cc..00000000000 --- a/hack/update-codegen.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This shell is used to auto generate some useful tools for k8s, such as lister, -# informer, deepcopy, defaulter and so on. - -set -o errexit -set -o nounset -set -o pipefail - -SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. -echo "SCRIPT_ROOT is $SCRIPT_ROOT" -CODEGEN_PKG=${SCRIPT_ROOT}/../../../k8s.io/code-generator -echo "CODEGEN_PKG is $CODEGEN_PKG" - -${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ - github.com/kubeflow/pipelines/pkg/client github.com/kubeflow/pipelines/pkg/apis \ - scheduledworkflow:v1alpha1 \ - --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt diff --git a/hack/verify-codegen.sh b/hack/verify-codegen.sh deleted file mode 100755 index 5048d36c22a..00000000000 --- a/hack/verify-codegen.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This shell is used to auto generate some useful tools for k8s, such as lister, -# informer, deepcopy, defaulter and so on. - -set -o errexit -set -o nounset -set -o pipefail - -SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. 
- -DIFFROOT="${SCRIPT_ROOT}/pkg" -TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" -_tmp="${SCRIPT_ROOT}/_tmp" - -cleanup() { - rm -rf "${_tmp}" -} -trap "cleanup" EXIT SIGINT - -cleanup - -mkdir -p "${TMP_DIFFROOT}" -cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}" - -"${SCRIPT_ROOT}/hack/update-codegen.sh" -echo "diffing ${DIFFROOT} against freshly generated codegen" -ret=0 -diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$? -cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}" -if [[ $ret -eq 0 ]] -then - echo "${DIFFROOT} up to date." -else - echo "${DIFFROOT} is out of date. Please run hack/update-codegen.sh" - exit 1 -fi diff --git a/install/manifests/scheduledworkflow-crd.yaml b/install/manifests/scheduledworkflow-crd.yaml deleted file mode 100644 index fe7d5bc91f5..00000000000 --- a/install/manifests/scheduledworkflow-crd.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: scheduledworkflows.kubeflow.org -spec: - group: kubeflow.org - version: v1alpha1 - scope: Namespaced - names: - kind: "ScheduledWorkflow" - listKind: "ScheduledWorkflowList" - plural: "scheduledworkflows" - shortNames: - - "swf" - singular: "scheduledworkflow" \ No newline at end of file diff --git a/pkg/apis/scheduledworkflow/register.go b/pkg/apis/scheduledworkflow/register.go deleted file mode 100644 index fdc5b74e2f5..00000000000 --- a/pkg/apis/scheduledworkflow/register.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scheduledworkflow - -const ( - Kind string = "ScheduledWorkflow" - GroupName string = "kubeflow.org" - Singular string = "scheduledWorkflow" - Plural string = "scheduledworkflows" - FullName string = Plural + "." + GroupName -) diff --git a/pkg/apis/scheduledworkflow/v1alpha1/doc.go b/pkg/apis/scheduledworkflow/v1alpha1/doc.go deleted file mode 100644 index f87c127b6e3..00000000000 --- a/pkg/apis/scheduledworkflow/v1alpha1/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +k8s:deepcopy-gen=package - -// Package v1alpha1 is the v1alpha1 version of the API. 
-// +groupName=scheduledworkflow.kubeflow.org -package v1alpha1 diff --git a/pkg/apis/scheduledworkflow/v1alpha1/register.go b/pkg/apis/scheduledworkflow/v1alpha1/register.go deleted file mode 100644 index 66b698357f4..00000000000 --- a/pkg/apis/scheduledworkflow/v1alpha1/register.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - controller "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: controller.GroupName, Version: "v1alpha1"} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &ScheduledWorkflow{}, - &ScheduledWorkflowList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/pkg/apis/scheduledworkflow/v1alpha1/types.go b/pkg/apis/scheduledworkflow/v1alpha1/types.go deleted file mode 100644 index 97abad6da9e..00000000000 --- a/pkg/apis/scheduledworkflow/v1alpha1/types.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
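The parameterized example above and the WorkflowResource type defined below both document placeholder substitution in parameter values ([[ScheduledTime]], [[CurrentTime]], [[Index]], plus custom-format variants using Go time layouts). A minimal sketch of those documented semantics follows; the `substitute` helper is hypothetical and is not the controller's actual implementation, only an illustration of how the placeholders are described to behave.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// substitute is a hypothetical helper mirroring the documented placeholder
// semantics for a single parameter value: [[Index]] becomes the workflow
// index, and [[ScheduledTime.15-04-05]] becomes the scheduled time rendered
// with the Go reference layout "15-04-05" (hour-minute-second).
func substitute(value string, index int64, scheduledTime time.Time) string {
	value = strings.Replace(value, "[[Index]]", strconv.FormatInt(index, 10), -1)
	value = strings.Replace(value, "[[ScheduledTime.15-04-05]]", scheduledTime.Format("15-04-05"), -1)
	return value
}

func main() {
	scheduled := time.Date(2018, time.June, 6, 1, 41, 1, 0, time.UTC)
	// Prints: hello world 17 at 01-41-01
	fmt.Println(substitute("hello world [[Index]] at [[ScheduledTime.15-04-05]]", 17, scheduled))
}
```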
- -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/apis/core" -) - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ScheduledWorkflow is a specification for a ScheduledWorkflow resource -type ScheduledWorkflow struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ScheduledWorkflowSpec `json:"spec"` - Status ScheduledWorkflowStatus `json:"status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ScheduledWorkflowList is a list of ScheduledWorkflow resources -type ScheduledWorkflowList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []ScheduledWorkflow `json:"items"` -} - -// ScheduledWorkflowSpec is the spec for a ScheduledWorkflow resource -type ScheduledWorkflowSpec struct { - // If the schedule is disabled, it does not create any new workflow. - Enabled bool `json:"enabled,omitempty"` - - // Max number of created workflows that can coexist. - // If MaxConcurrency is not specified, MaxConcurrency is 1. - // MaxConcurrency cannot be smaller than 1. - // MaxConcurrency cannot be larger than 10. - // +optional - MaxConcurrency *int64 `json:"maxConcurrency,omitempty"` - - // Max number of completed workflows to keep track of. - // If MaxHistory is not specified, MaxHistory is 10. - // MaxHistory cannot be smaller than 0. - // MaxHistory cannot be larger than 100. - // +optional - MaxHistory *int64 `json:"maxHistory,omitempty"` - - // Trigger describes when to create a new workflow. - Trigger `json:"trigger,omitempty"` - - // Specification of the workflow to schedule. - // +optional - Workflow *WorkflowResource `json:"workflow,omitempty"` - - // TODO: support additional resource types: K8s jobs, etc. - -} - -type WorkflowResource struct { - // List of parameters to substitute in the workflow template. - // The parameter values may include special strings that the controller will substitute: - // [[ScheduledTime]] is substituted by the scheduled time of the workflow (default format) - // [[CurrentTime]] is substituted by the current time (default format) - // [[Index]] is substituted by the index of the workflow (e.g. 3 means that it was the 3rd workflow created) - // [[ScheduledTime.15-04-05]] is substituted by the scheduled time (custom format specified as a Go time format: https://golang.org/pkg/time/#Parse) - // [[CurrentTime.15-04-05]] is substituted by the current time (custom format specified as a Go time format: https://golang.org/pkg/time/#Parse) - - Parameters []Parameter `json:"parameters,omitempty"` - - // Specification of the workflow to start. - Spec v1alpha1.WorkflowSpec `json:"spec,omitempty"` -} - -type Parameter struct { - // Name of the parameter. - Name string `json:"name,omitempty"` - - // Value of the parameter. - Value string `json:"value,omitempty"` -} - -// Trigger specifies when to create a new workflow. -type Trigger struct { - // If all the following fields are nil, the schedule creates a single workflow - // immediately. - - // Create workflows according to a cron schedule. - CronSchedule *CronSchedule `json:"cronSchedule,omitempty"` - - // Create workflows periodically. - PeriodicSchedule *PeriodicSchedule `json:"periodicSchedule,omitempty"` -} - -type CronSchedule struct { - // Time at which scheduling starts.
- // If no start time is specified, the StartTime is the creation time of the schedule. - // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - - // Time at which scheduling ends. - // If no end time is specified, the EndTime is the end of time. - // +optional - EndTime *metav1.Time `json:"endTime,omitempty"` - - // Cron string describing when a workflow should be created within the - // time interval defined by StartTime and EndTime. - // +optional - Cron string `json:"cron,omitempty"` -} - -type PeriodicSchedule struct { - // Time at which scheduling starts. - // If no start time is specified, the StartTime is the creation time of the schedule. - // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - - // Time at which scheduling ends. - // If no end time is specified, the EndTime is the end of time. - // +optional - EndTime *metav1.Time `json:"endTime,omitempty"` - - // Interval, in seconds, at which a workflow should be created within the - // time interval defined by StartTime and EndTime. - // +optional - IntervalSecond int64 `json:"intervalSecond,omitempty"` -} - -// ScheduledWorkflowStatus is the status for a ScheduledWorkflow resource. -type ScheduledWorkflowStatus struct { - - // The latest available observations of an object's current state. - // +optional - Conditions []ScheduledWorkflowCondition `json:"conditions,omitempty"` - - // TriggerStatus provides status info depending on the type of triggering. - Trigger TriggerStatus `json:"trigger,omitempty"` - - // Status of workflow resources. - WorkflowHistory *WorkflowHistory `json:"workflowHistory,omitempty"` -} - -type ScheduledWorkflowConditionType string - -// These are valid conditions of a ScheduledWorkflow. -const ( - ScheduledWorkflowEnabled ScheduledWorkflowConditionType = "Enabled" - ScheduledWorkflowDisabled ScheduledWorkflowConditionType = "Disabled" - ScheduledWorkflowRunning ScheduledWorkflowConditionType = "Running" - ScheduledWorkflowSucceeded ScheduledWorkflowConditionType = "Succeeded" - ScheduledWorkflowError ScheduledWorkflowConditionType = "Error" -) - -type ScheduledWorkflowCondition struct { - // Type of the condition. - Type ScheduledWorkflowConditionType `json:"type,omitempty"` - // Status of the condition, one of True, False, Unknown. - Status core.ConditionStatus `json:"status,omitempty"` - // Last time the condition was checked. - // +optional - LastProbeTime metav1.Time `json:"lastHeartbeatTime,omitempty"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - -type TriggerStatus struct { - // Time of the last creation of a workflow. - LastTriggeredTime *metav1.Time `json:"lastTriggeredTime,omitempty"` - - // Time of the next creation of a workflow (assuming that the schedule is enabled). - NextTriggeredTime *metav1.Time `json:"nextTriggeredTime,omitempty"` - - // Index of the last workflow created. - LastIndex *int64 `json:"lastWorkflowIndex,omitempty"` -} - -type WorkflowHistory struct { - // The list of active workflows started by this schedule. - Active []WorkflowStatus `json:"active,omitempty"` - - // The list of completed workflows started by this schedule.
- Completed []WorkflowStatus `json:"completed,omitempty"` -} - -type WorkflowStatus struct { - // The name of the workflow. - Name string `json:"name,omitempty"` - - // The namespace of the workflow. - Namespace string `json:"namespace,omitempty"` - - // URL representing this object. - SelfLink string `json:"selfLink,omitempty"` - - // UID is the unique identifier in time and space for the workflow. - UID types.UID `json:"uid,omitempty"` - - // Phase is a high level summary of the status of the workflow. - Phase v1alpha1.NodePhase `json:phase,omitempty` - - // A human readable message indicating details about why the workflow is in - // this condition. - Message string `json:"message,omitempty"` - - // Time at which this workflow was created. - CreatedAt metav1.Time `json:"createdAt,omitempty"` - - // Time at which this workflow started. - StartedAt metav1.Time `json:"startedAt,omitempty"` - - // Time at which this workflow completed - FinishedAt metav1.Time `json:"finishedAt,omitempty"` - - // Time at which the workflow was triggered. - ScheduledAt metav1.Time `json:"scheduledAt,omitempty"` - - // The index of the workflow. For instance, if this workflow is the second one - // to execute as part of this schedule, the index is 1. - Index int64 `json:"index,omitempty"` -} \ No newline at end of file diff --git a/pkg/apis/scheduledworkflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/scheduledworkflow/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 026f511ff47..00000000000 --- a/pkg/apis/scheduledworkflow/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,406 +0,0 @@ -// +build !ignore_autogenerated - -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronSchedule) DeepCopyInto(out *CronSchedule) { - *out = *in - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } - } - if in.EndTime != nil { - in, out := &in.EndTime, &out.EndTime - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronSchedule. -func (in *CronSchedule) DeepCopy() *CronSchedule { - if in == nil { - return nil - } - out := new(CronSchedule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Parameter) DeepCopyInto(out *Parameter) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter. 
-func (in *Parameter) DeepCopy() *Parameter { - if in == nil { - return nil - } - out := new(Parameter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PeriodicSchedule) DeepCopyInto(out *PeriodicSchedule) { - *out = *in - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } - } - if in.EndTime != nil { - in, out := &in.EndTime, &out.EndTime - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeriodicSchedule. -func (in *PeriodicSchedule) DeepCopy() *PeriodicSchedule { - if in == nil { - return nil - } - out := new(PeriodicSchedule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScheduledWorkflow) DeepCopyInto(out *ScheduledWorkflow) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledWorkflow. -func (in *ScheduledWorkflow) DeepCopy() *ScheduledWorkflow { - if in == nil { - return nil - } - out := new(ScheduledWorkflow) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ScheduledWorkflow) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScheduledWorkflowCondition) DeepCopyInto(out *ScheduledWorkflowCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledWorkflowCondition. -func (in *ScheduledWorkflowCondition) DeepCopy() *ScheduledWorkflowCondition { - if in == nil { - return nil - } - out := new(ScheduledWorkflowCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScheduledWorkflowList) DeepCopyInto(out *ScheduledWorkflowList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ScheduledWorkflow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledWorkflowList. -func (in *ScheduledWorkflowList) DeepCopy() *ScheduledWorkflowList { - if in == nil { - return nil - } - out := new(ScheduledWorkflowList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ScheduledWorkflowList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ScheduledWorkflowSpec) DeepCopyInto(out *ScheduledWorkflowSpec) { - *out = *in - if in.MaxConcurrency != nil { - in, out := &in.MaxConcurrency, &out.MaxConcurrency - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.MaxHistory != nil { - in, out := &in.MaxHistory, &out.MaxHistory - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - in.Trigger.DeepCopyInto(&out.Trigger) - if in.Workflow != nil { - in, out := &in.Workflow, &out.Workflow - if *in == nil { - *out = nil - } else { - *out = new(WorkflowResource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledWorkflowSpec. -func (in *ScheduledWorkflowSpec) DeepCopy() *ScheduledWorkflowSpec { - if in == nil { - return nil - } - out := new(ScheduledWorkflowSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScheduledWorkflowStatus) DeepCopyInto(out *ScheduledWorkflowStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ScheduledWorkflowCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Trigger.DeepCopyInto(&out.Trigger) - if in.WorkflowHistory != nil { - in, out := &in.WorkflowHistory, &out.WorkflowHistory - if *in == nil { - *out = nil - } else { - *out = new(WorkflowHistory) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledWorkflowStatus. -func (in *ScheduledWorkflowStatus) DeepCopy() *ScheduledWorkflowStatus { - if in == nil { - return nil - } - out := new(ScheduledWorkflowStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Trigger) DeepCopyInto(out *Trigger) { - *out = *in - if in.CronSchedule != nil { - in, out := &in.CronSchedule, &out.CronSchedule - if *in == nil { - *out = nil - } else { - *out = new(CronSchedule) - (*in).DeepCopyInto(*out) - } - } - if in.PeriodicSchedule != nil { - in, out := &in.PeriodicSchedule, &out.PeriodicSchedule - if *in == nil { - *out = nil - } else { - *out = new(PeriodicSchedule) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Trigger. -func (in *Trigger) DeepCopy() *Trigger { - if in == nil { - return nil - } - out := new(Trigger) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TriggerStatus) DeepCopyInto(out *TriggerStatus) { - *out = *in - if in.LastTriggeredTime != nil { - in, out := &in.LastTriggeredTime, &out.LastTriggeredTime - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } - } - if in.NextTriggeredTime != nil { - in, out := &in.NextTriggeredTime, &out.NextTriggeredTime - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } - } - if in.LastIndex != nil { - in, out := &in.LastIndex, &out.LastIndex - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatus. 
-func (in *TriggerStatus) DeepCopy() *TriggerStatus { - if in == nil { - return nil - } - out := new(TriggerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowHistory) DeepCopyInto(out *WorkflowHistory) { - *out = *in - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = make([]WorkflowStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Completed != nil { - in, out := &in.Completed, &out.Completed - *out = make([]WorkflowStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowHistory. -func (in *WorkflowHistory) DeepCopy() *WorkflowHistory { - if in == nil { - return nil - } - out := new(WorkflowHistory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowResource) DeepCopyInto(out *WorkflowResource) { - *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make([]Parameter, len(*in)) - copy(*out, *in) - } - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowResource. -func (in *WorkflowResource) DeepCopy() *WorkflowResource { - if in == nil { - return nil - } - out := new(WorkflowResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { - *out = *in - in.CreatedAt.DeepCopyInto(&out.CreatedAt) - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.FinishedAt.DeepCopyInto(&out.FinishedAt) - in.ScheduledAt.DeepCopyInto(&out.ScheduledAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus. -func (in *WorkflowStatus) DeepCopy() *WorkflowStatus { - if in == nil { - return nil - } - out := new(WorkflowStatus) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go deleted file mode 100644 index 11311ab0d51..00000000000 --- a/pkg/client/clientset/versioned/clientset.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. 
- -package versioned - -import ( - scheduledworkflowv1alpha1 "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1" - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - ScheduledworkflowV1alpha1() scheduledworkflowv1alpha1.ScheduledworkflowV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Scheduledworkflow() scheduledworkflowv1alpha1.ScheduledworkflowV1alpha1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - scheduledworkflowV1alpha1 *scheduledworkflowv1alpha1.ScheduledworkflowV1alpha1Client -} - -// ScheduledworkflowV1alpha1 retrieves the ScheduledworkflowV1alpha1Client -func (c *Clientset) ScheduledworkflowV1alpha1() scheduledworkflowv1alpha1.ScheduledworkflowV1alpha1Interface { - return c.scheduledworkflowV1alpha1 -} - -// Deprecated: Scheduledworkflow retrieves the default version of ScheduledworkflowClient. -// Please explicitly pick a version. -func (c *Clientset) Scheduledworkflow() scheduledworkflowv1alpha1.ScheduledworkflowV1alpha1Interface { - return c.scheduledworkflowV1alpha1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.scheduledworkflowV1alpha1, err = scheduledworkflowv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.scheduledworkflowV1alpha1 = scheduledworkflowv1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.scheduledworkflowV1alpha1 = scheduledworkflowv1alpha1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/doc.go deleted file mode 100644 index bce96cb6a1f..00000000000 --- a/pkg/client/clientset/versioned/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package versioned diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go deleted file mode 100644 index 2e325337605..00000000000 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - clientset "github.com/kubeflow/pipelines/pkg/client/clientset/versioned" - scheduledworkflowv1alpha1 "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1" - fakescheduledworkflowv1alpha1 "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/testing" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. 
-type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -var _ clientset.Interface = &Clientset{} - -// ScheduledworkflowV1alpha1 retrieves the ScheduledworkflowV1alpha1Client -func (c *Clientset) ScheduledworkflowV1alpha1() scheduledworkflowv1alpha1.ScheduledworkflowV1alpha1Interface { - return &fakescheduledworkflowv1alpha1.FakeScheduledworkflowV1alpha1{Fake: &c.Fake} -} - -// Scheduledworkflow retrieves the ScheduledworkflowV1alpha1Client -func (c *Clientset) Scheduledworkflow() scheduledworkflowv1alpha1.ScheduledworkflowV1alpha1Interface { - return &fakescheduledworkflowv1alpha1.FakeScheduledworkflowV1alpha1{Fake: &c.Fake} -} diff --git a/pkg/client/clientset/versioned/fake/doc.go b/pkg/client/clientset/versioned/fake/doc.go deleted file mode 100644 index ed08b811440..00000000000 --- a/pkg/client/clientset/versioned/fake/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated fake clientset. -package fake diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go deleted file mode 100644 index a020cce62d7..00000000000 --- a/pkg/client/clientset/versioned/fake/register.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - scheduledworkflowv1alpha1 "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - AddToScheme(scheme) -} - -// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -func AddToScheme(scheme *runtime.Scheme) { - scheduledworkflowv1alpha1.AddToScheme(scheme) -} diff --git a/pkg/client/clientset/versioned/scheme/doc.go b/pkg/client/clientset/versioned/scheme/doc.go deleted file mode 100644 index a9e70c54909..00000000000 --- a/pkg/client/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go deleted file mode 100644 index 6bb8aee8cd7..00000000000 --- a/pkg/client/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - scheduledworkflowv1alpha1 "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - AddToScheme(Scheme) -} - -// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -func AddToScheme(scheme *runtime.Scheme) { - scheduledworkflowv1alpha1.AddToScheme(scheme) -} diff --git a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/doc.go deleted file mode 100644 index 7c6f02e5302..00000000000 --- a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/doc.go deleted file mode 100644 index c3f1566b39f..00000000000 --- a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/fake_scheduledworkflow.go b/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/fake_scheduledworkflow.go deleted file mode 100644 index a905035f5d4..00000000000 --- a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/fake_scheduledworkflow.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeScheduledWorkflows implements ScheduledWorkflowInterface -type FakeScheduledWorkflows struct { - Fake *FakeScheduledworkflowV1alpha1 - ns string -} - -var scheduledworkflowsResource = schema.GroupVersionResource{Group: "scheduledworkflow.kubeflow.org", Version: "v1alpha1", Resource: "scheduledworkflows"} - -var scheduledworkflowsKind = schema.GroupVersionKind{Group: "scheduledworkflow.kubeflow.org", Version: "v1alpha1", Kind: "ScheduledWorkflow"} - -// Get takes name of the scheduledWorkflow, and returns the corresponding scheduledWorkflow object, and an error if there is any. -func (c *FakeScheduledWorkflows) Get(name string, options v1.GetOptions) (result *v1alpha1.ScheduledWorkflow, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(scheduledworkflowsResource, c.ns, name), &v1alpha1.ScheduledWorkflow{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ScheduledWorkflow), err -} - -// List takes label and field selectors, and returns the list of ScheduledWorkflows that match those selectors. -func (c *FakeScheduledWorkflows) List(opts v1.ListOptions) (result *v1alpha1.ScheduledWorkflowList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(scheduledworkflowsResource, scheduledworkflowsKind, c.ns, opts), &v1alpha1.ScheduledWorkflowList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ScheduledWorkflowList{ListMeta: obj.(*v1alpha1.ScheduledWorkflowList).ListMeta} - for _, item := range obj.(*v1alpha1.ScheduledWorkflowList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested scheduledWorkflows. -func (c *FakeScheduledWorkflows) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(scheduledworkflowsResource, c.ns, opts)) - -} - -// Create takes the representation of a scheduledWorkflow and creates it. Returns the server's representation of the scheduledWorkflow, and an error, if there is any. -func (c *FakeScheduledWorkflows) Create(scheduledWorkflow *v1alpha1.ScheduledWorkflow) (result *v1alpha1.ScheduledWorkflow, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(scheduledworkflowsResource, c.ns, scheduledWorkflow), &v1alpha1.ScheduledWorkflow{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ScheduledWorkflow), err -} - -// Update takes the representation of a scheduledWorkflow and updates it. 
Returns the server's representation of the scheduledWorkflow, and an error, if there is any. -func (c *FakeScheduledWorkflows) Update(scheduledWorkflow *v1alpha1.ScheduledWorkflow) (result *v1alpha1.ScheduledWorkflow, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(scheduledworkflowsResource, c.ns, scheduledWorkflow), &v1alpha1.ScheduledWorkflow{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ScheduledWorkflow), err -} - -// Delete takes name of the scheduledWorkflow and deletes it. Returns an error if one occurs. -func (c *FakeScheduledWorkflows) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(scheduledworkflowsResource, c.ns, name), &v1alpha1.ScheduledWorkflow{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeScheduledWorkflows) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(scheduledworkflowsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.ScheduledWorkflowList{}) - return err -} - -// Patch applies the patch and returns the patched scheduledWorkflow. -func (c *FakeScheduledWorkflows) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ScheduledWorkflow, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(scheduledworkflowsResource, c.ns, name, data, subresources...), &v1alpha1.ScheduledWorkflow{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ScheduledWorkflow), err -} diff --git a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/fake_scheduledworkflow_client.go b/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/fake_scheduledworkflow_client.go deleted file mode 100644 index 33698639dda..00000000000 --- a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/fake/fake_scheduledworkflow_client.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeScheduledworkflowV1alpha1 struct { - *testing.Fake -} - -func (c *FakeScheduledworkflowV1alpha1) ScheduledWorkflows(namespace string) v1alpha1.ScheduledWorkflowInterface { - return &FakeScheduledWorkflows{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeScheduledworkflowV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/generated_expansion.go deleted file mode 100644 index cab82287301..00000000000 --- a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type ScheduledWorkflowExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/scheduledworkflow.go b/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/scheduledworkflow.go deleted file mode 100644 index ae886112f1d..00000000000 --- a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/scheduledworkflow.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - scheme "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ScheduledWorkflowsGetter has a method to return a ScheduledWorkflowInterface. -// A group's client should implement this interface. -type ScheduledWorkflowsGetter interface { - ScheduledWorkflows(namespace string) ScheduledWorkflowInterface -} - -// ScheduledWorkflowInterface has methods to work with ScheduledWorkflow resources. 
-type ScheduledWorkflowInterface interface { - Create(*v1alpha1.ScheduledWorkflow) (*v1alpha1.ScheduledWorkflow, error) - Update(*v1alpha1.ScheduledWorkflow) (*v1alpha1.ScheduledWorkflow, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ScheduledWorkflow, error) - List(opts v1.ListOptions) (*v1alpha1.ScheduledWorkflowList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ScheduledWorkflow, err error) - ScheduledWorkflowExpansion -} - -// scheduledWorkflows implements ScheduledWorkflowInterface -type scheduledWorkflows struct { - client rest.Interface - ns string -} - -// newScheduledWorkflows returns a ScheduledWorkflows -func newScheduledWorkflows(c *ScheduledworkflowV1alpha1Client, namespace string) *scheduledWorkflows { - return &scheduledWorkflows{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the scheduledWorkflow, and returns the corresponding scheduledWorkflow object, and an error if there is any. -func (c *scheduledWorkflows) Get(name string, options v1.GetOptions) (result *v1alpha1.ScheduledWorkflow, err error) { - result = &v1alpha1.ScheduledWorkflow{} - err = c.client.Get(). - Namespace(c.ns). - Resource("scheduledworkflows"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ScheduledWorkflows that match those selectors. -func (c *scheduledWorkflows) List(opts v1.ListOptions) (result *v1alpha1.ScheduledWorkflowList, err error) { - result = &v1alpha1.ScheduledWorkflowList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("scheduledworkflows"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested scheduledWorkflows. -func (c *scheduledWorkflows) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("scheduledworkflows"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a scheduledWorkflow and creates it. Returns the server's representation of the scheduledWorkflow, and an error, if there is any. -func (c *scheduledWorkflows) Create(scheduledWorkflow *v1alpha1.ScheduledWorkflow) (result *v1alpha1.ScheduledWorkflow, err error) { - result = &v1alpha1.ScheduledWorkflow{} - err = c.client.Post(). - Namespace(c.ns). - Resource("scheduledworkflows"). - Body(scheduledWorkflow). - Do(). - Into(result) - return -} - -// Update takes the representation of a scheduledWorkflow and updates it. Returns the server's representation of the scheduledWorkflow, and an error, if there is any. -func (c *scheduledWorkflows) Update(scheduledWorkflow *v1alpha1.ScheduledWorkflow) (result *v1alpha1.ScheduledWorkflow, err error) { - result = &v1alpha1.ScheduledWorkflow{} - err = c.client.Put(). - Namespace(c.ns). - Resource("scheduledworkflows"). - Name(scheduledWorkflow.Name). - Body(scheduledWorkflow). - Do(). - Into(result) - return -} - -// Delete takes name of the scheduledWorkflow and deletes it. Returns an error if one occurs. -func (c *scheduledWorkflows) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). 
- Resource("scheduledworkflows"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *scheduledWorkflows) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("scheduledworkflows"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched scheduledWorkflow. -func (c *scheduledWorkflows) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ScheduledWorkflow, err error) { - result = &v1alpha1.ScheduledWorkflow{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("scheduledworkflows"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/scheduledworkflow_client.go b/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/scheduledworkflow_client.go deleted file mode 100644 index f83104ad1fa..00000000000 --- a/pkg/client/clientset/versioned/typed/scheduledworkflow/v1alpha1/scheduledworkflow_client.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" -) - -type ScheduledworkflowV1alpha1Interface interface { - RESTClient() rest.Interface - ScheduledWorkflowsGetter -} - -// ScheduledworkflowV1alpha1Client is used to interact with features provided by the scheduledworkflow.kubeflow.org group. -type ScheduledworkflowV1alpha1Client struct { - restClient rest.Interface -} - -func (c *ScheduledworkflowV1alpha1Client) ScheduledWorkflows(namespace string) ScheduledWorkflowInterface { - return newScheduledWorkflows(c, namespace) -} - -// NewForConfig creates a new ScheduledworkflowV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*ScheduledworkflowV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ScheduledworkflowV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new ScheduledworkflowV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ScheduledworkflowV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ScheduledworkflowV1alpha1Client for the given RESTClient. 
-func New(c rest.Interface) *ScheduledworkflowV1alpha1Client { - return &ScheduledworkflowV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ScheduledworkflowV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go deleted file mode 100644 index 7f0f6ec6c0d..00000000000 --- a/pkg/client/informers/externalversions/factory.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - reflect "reflect" - sync "sync" - time "time" - - versioned "github.com/kubeflow/pipelines/pkg/client/clientset/versioned" - internalinterfaces "github.com/kubeflow/pipelines/pkg/client/informers/externalversions/internalinterfaces" - scheduledworkflow "github.com/kubeflow/pipelines/pkg/client/informers/externalversions/scheduledworkflow" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" -) - -// SharedInformerOption defines the functional option type for SharedInformerFactory. -type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory - -type sharedInformerFactory struct { - client versioned.Interface - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc - lock sync.Mutex - defaultResync time.Duration - customResync map[reflect.Type]time.Duration - - informers map[reflect.Type]cache.SharedIndexInformer - // startedInformers is used for tracking which informers have been started. - // This allows Start() to be called multiple times safely. - startedInformers map[reflect.Type]bool -} - -// WithCustomResyncConfig sets a custom resync period for the specified informer types. -func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - for k, v := range resyncConfig { - factory.customResync[reflect.TypeOf(k)] = v - } - return factory - } -} - -// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
-func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.tweakListOptions = tweakListOptions - return factory - } -} - -// WithNamespace limits the SharedInformerFactory to the specified namespace. -func WithNamespace(namespace string) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.namespace = namespace - return factory - } -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. -func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync) -} - -// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. -// Listers obtained via this SharedInformerFactory will be subject to the same filters -// as specified here. -// Deprecated: Please use NewSharedInformerFactoryWithOptions instead -func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) -} - -// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. -func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { - factory := &sharedInformerFactory{ - client: client, - namespace: v1.NamespaceAll, - defaultResync: defaultResync, - informers: make(map[reflect.Type]cache.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - customResync: make(map[reflect.Type]time.Duration), - } - - // Apply all options - for _, opt := range options { - factory = opt(factory) - } - - return factory -} - -// Start initializes all requested informers. -func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - go informer.Run(stopCh) - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. -func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - informers := func() map[reflect.Type]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[reflect.Type]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer - } - } - return informers - }() - - res := map[reflect.Type]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -// InternalInformerFor returns the SharedIndexInformer for obj using an internal -// client. 
-func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(obj) - informer, exists := f.informers[informerType] - if exists { - return informer - } - - resyncPeriod, exists := f.customResync[informerType] - if !exists { - resyncPeriod = f.defaultResync - } - - informer = newFunc(f.client, resyncPeriod) - f.informers[informerType] = informer - - return informer -} - -// SharedInformerFactory provides shared informers for resources in all known -// API group versions. -type SharedInformerFactory interface { - internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - - Scheduledworkflow() scheduledworkflow.Interface -} - -func (f *sharedInformerFactory) Scheduledworkflow() scheduledworkflow.Interface { - return scheduledworkflow.New(f, f.namespace, f.tweakListOptions) -} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go deleted file mode 100644 index 71fc73f2d60..00000000000 --- a/pkg/client/informers/externalversions/generic.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - "fmt" - - v1alpha1 "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" -) - -// GenericInformer is type of SharedIndexInformer which will locate and delegate to other -// sharedInformers based on type -type GenericInformer interface { - Informer() cache.SharedIndexInformer - Lister() cache.GenericLister -} - -type genericInformer struct { - informer cache.SharedIndexInformer - resource schema.GroupResource -} - -// Informer returns the SharedIndexInformer. -func (f *genericInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -// Lister returns the GenericLister. 
-func (f *genericInformer) Lister() cache.GenericLister { - return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) -} - -// ForResource gives generic access to a shared informer of the matching type -// TODO extend this to unknown resources with a client pool -func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { - switch resource { - // Group=scheduledworkflow.kubeflow.org, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("scheduledworkflows"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduledworkflow().V1alpha1().ScheduledWorkflows().Informer()}, nil - - } - - return nil, fmt.Errorf("no informer found for %v", resource) -} diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go deleted file mode 100644 index 74b01203081..00000000000 --- a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by informer-gen. DO NOT EDIT. - -package internalinterfaces - -import ( - time "time" - - versioned "github.com/kubeflow/pipelines/pkg/client/clientset/versioned" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - cache "k8s.io/client-go/tools/cache" -) - -type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer - -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle -type SharedInformerFactory interface { - Start(stopCh <-chan struct{}) - InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer -} - -type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/pkg/client/informers/externalversions/scheduledworkflow/interface.go b/pkg/client/informers/externalversions/scheduledworkflow/interface.go deleted file mode 100644 index d5e344f95c4..00000000000 --- a/pkg/client/informers/externalversions/scheduledworkflow/interface.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by informer-gen. DO NOT EDIT. 
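For reference, a minimal sketch of how the shared informer factory, informer, and lister deleted in this change were typically wired together. The 30-second resync period and the "kubeflow" namespace are illustrative assumptions; the factory constructor and accessor methods themselves come from the deleted code.

package main

import (
	"fmt"
	"time"

	swfclientset "github.com/kubeflow/pipelines/pkg/client/clientset/versioned"
	swfinformers "github.com/kubeflow/pipelines/pkg/client/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

func watchScheduledWorkflows(clientSet swfclientset.Interface, stopCh <-chan struct{}) {
	// One factory per process; informers obtained through it share caches and API connections.
	factory := swfinformers.NewSharedInformerFactory(clientSet, 30*time.Second)

	swfInformer := factory.Scheduledworkflow().V1alpha1().ScheduledWorkflows()
	swfInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("scheduled workflow added") },
	})

	// Start every requested informer, then block until their caches have synced.
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// Reads go through the lister, which is served from the informer's local cache.
	swfs, err := swfInformer.Lister().ScheduledWorkflows("kubeflow").List(labels.Everything())
	if err != nil {
		fmt.Println("listing from cache failed:", err)
		return
	}
	fmt.Printf("cache holds %d scheduled workflows\n", len(swfs))
}

The deleted resources/scheduledworkflow/client wrappers later in this diff build on the same informer and lister accessors.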
- -package scheduledworkflow - -import ( - internalinterfaces "github.com/kubeflow/pipelines/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/kubeflow/pipelines/pkg/client/informers/externalversions/scheduledworkflow/v1alpha1" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/pkg/client/informers/externalversions/scheduledworkflow/v1alpha1/interface.go b/pkg/client/informers/externalversions/scheduledworkflow/v1alpha1/interface.go deleted file mode 100644 index 4a5f2ac98e5..00000000000 --- a/pkg/client/informers/externalversions/scheduledworkflow/v1alpha1/interface.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - internalinterfaces "github.com/kubeflow/pipelines/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // ScheduledWorkflows returns a ScheduledWorkflowInformer. - ScheduledWorkflows() ScheduledWorkflowInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// ScheduledWorkflows returns a ScheduledWorkflowInformer. 
-func (v *version) ScheduledWorkflows() ScheduledWorkflowInformer { - return &scheduledWorkflowInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/pkg/client/informers/externalversions/scheduledworkflow/v1alpha1/scheduledworkflow.go b/pkg/client/informers/externalversions/scheduledworkflow/v1alpha1/scheduledworkflow.go deleted file mode 100644 index 837e2898548..00000000000 --- a/pkg/client/informers/externalversions/scheduledworkflow/v1alpha1/scheduledworkflow.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - scheduledworkflow_v1alpha1 "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - versioned "github.com/kubeflow/pipelines/pkg/client/clientset/versioned" - internalinterfaces "github.com/kubeflow/pipelines/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/kubeflow/pipelines/pkg/client/listers/scheduledworkflow/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// ScheduledWorkflowInformer provides access to a shared informer and lister for -// ScheduledWorkflows. -type ScheduledWorkflowInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.ScheduledWorkflowLister -} - -type scheduledWorkflowInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewScheduledWorkflowInformer constructs a new informer for ScheduledWorkflow type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewScheduledWorkflowInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredScheduledWorkflowInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredScheduledWorkflowInformer constructs a new informer for ScheduledWorkflow type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredScheduledWorkflowInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ScheduledworkflowV1alpha1().ScheduledWorkflows(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ScheduledworkflowV1alpha1().ScheduledWorkflows(namespace).Watch(options) - }, - }, - &scheduledworkflow_v1alpha1.ScheduledWorkflow{}, - resyncPeriod, - indexers, - ) -} - -func (f *scheduledWorkflowInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredScheduledWorkflowInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *scheduledWorkflowInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&scheduledworkflow_v1alpha1.ScheduledWorkflow{}, f.defaultInformer) -} - -func (f *scheduledWorkflowInformer) Lister() v1alpha1.ScheduledWorkflowLister { - return v1alpha1.NewScheduledWorkflowLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/listers/scheduledworkflow/v1alpha1/expansion_generated.go b/pkg/client/listers/scheduledworkflow/v1alpha1/expansion_generated.go deleted file mode 100644 index 52e8db5820e..00000000000 --- a/pkg/client/listers/scheduledworkflow/v1alpha1/expansion_generated.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// ScheduledWorkflowListerExpansion allows custom methods to be added to -// ScheduledWorkflowLister. -type ScheduledWorkflowListerExpansion interface{} - -// ScheduledWorkflowNamespaceListerExpansion allows custom methods to be added to -// ScheduledWorkflowNamespaceLister. -type ScheduledWorkflowNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/scheduledworkflow/v1alpha1/scheduledworkflow.go b/pkg/client/listers/scheduledworkflow/v1alpha1/scheduledworkflow.go deleted file mode 100644 index 200c7fa53c3..00000000000 --- a/pkg/client/listers/scheduledworkflow/v1alpha1/scheduledworkflow.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScheduledWorkflowLister helps list ScheduledWorkflows. -type ScheduledWorkflowLister interface { - // List lists all ScheduledWorkflows in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.ScheduledWorkflow, err error) - // ScheduledWorkflows returns an object that can list and get ScheduledWorkflows. - ScheduledWorkflows(namespace string) ScheduledWorkflowNamespaceLister - ScheduledWorkflowListerExpansion -} - -// scheduledWorkflowLister implements the ScheduledWorkflowLister interface. -type scheduledWorkflowLister struct { - indexer cache.Indexer -} - -// NewScheduledWorkflowLister returns a new ScheduledWorkflowLister. -func NewScheduledWorkflowLister(indexer cache.Indexer) ScheduledWorkflowLister { - return &scheduledWorkflowLister{indexer: indexer} -} - -// List lists all ScheduledWorkflows in the indexer. -func (s *scheduledWorkflowLister) List(selector labels.Selector) (ret []*v1alpha1.ScheduledWorkflow, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ScheduledWorkflow)) - }) - return ret, err -} - -// ScheduledWorkflows returns an object that can list and get ScheduledWorkflows. -func (s *scheduledWorkflowLister) ScheduledWorkflows(namespace string) ScheduledWorkflowNamespaceLister { - return scheduledWorkflowNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScheduledWorkflowNamespaceLister helps list and get ScheduledWorkflows. -type ScheduledWorkflowNamespaceLister interface { - // List lists all ScheduledWorkflows in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.ScheduledWorkflow, err error) - // Get retrieves the ScheduledWorkflow from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.ScheduledWorkflow, error) - ScheduledWorkflowNamespaceListerExpansion -} - -// scheduledWorkflowNamespaceLister implements the ScheduledWorkflowNamespaceLister -// interface. -type scheduledWorkflowNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ScheduledWorkflows in the indexer for a given namespace. -func (s scheduledWorkflowNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ScheduledWorkflow, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ScheduledWorkflow)) - }) - return ret, err -} - -// Get retrieves the ScheduledWorkflow from the indexer for a given namespace and name. 
-func (s scheduledWorkflowNamespaceLister) Get(name string) (*v1alpha1.ScheduledWorkflow, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("scheduledworkflow"), name) - } - return obj.(*v1alpha1.ScheduledWorkflow), nil -} diff --git a/pkg/signals/signal.go b/pkg/signals/signal.go deleted file mode 100644 index 52b41deb30f..00000000000 --- a/pkg/signals/signal.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package signals - -import ( - "os" - "os/signal" -) - -var onlyOneSignalHandler = make(chan struct{}) - -// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned -// which is closed on one of these signals. If a second signal is caught, the program -// is terminated with exit code 1. -func SetupSignalHandler() (stopCh <-chan struct{}) { - close(onlyOneSignalHandler) // panics when called twice - - stop := make(chan struct{}) - c := make(chan os.Signal, 2) - signal.Notify(c, shutdownSignals...) - go func() { - <-c - close(stop) - <-c - os.Exit(1) // second signal. Exit directly. - }() - - return stop -} \ No newline at end of file diff --git a/pkg/signals/signal_posix.go b/pkg/signals/signal_posix.go deleted file mode 100644 index 2abdc64f1af..00000000000 --- a/pkg/signals/signal_posix.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package signals - -import ( -"os" -"syscall" -) - -var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} \ No newline at end of file diff --git a/resources/scheduledworkflow/client/kube_client.go b/resources/scheduledworkflow/client/kube_client.go deleted file mode 100644 index b29aba7e5ec..00000000000 --- a/resources/scheduledworkflow/client/kube_client.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/record" -) - -const ( - successSynced = "Synced" - failedSynced = "Failed" - messageResourceSuccessSynced = "Scheduled workflow synced successfully" - messageResourceFailedSynced = "Scheduled workflow sync failed" -) - -// KubeClient is a client to call the core Kubernetes APIs. -type KubeClient struct { - // The Kubernetes API client. - kubeClientSet kubernetes.Interface - // Recorder is an event recorder for recording Event resources to the Kubernetes API. - recorder record.EventRecorder -} - -// NewKubeClient creates a new client to call the core Kubernetes APIs. -func NewKubeClient(kubeClientSet kubernetes.Interface, recorder record.EventRecorder) *KubeClient { - return &KubeClient{ - kubeClientSet: kubeClientSet, - recorder: recorder, - } -} - -// RecordSyncSuccess records the success of a sync. -func (k *KubeClient) RecordSyncSuccess(swf *swfapi.ScheduledWorkflow, message string) { - k.recorder.Event(swf, corev1.EventTypeNormal, successSynced, - fmt.Sprintf("%v: %v", messageResourceSuccessSynced, message)) -} - -// RecordSyncFailure records the failure of a sync. -func (k *KubeClient) RecordSyncFailure(swf *swfapi.ScheduledWorkflow, message string) { - k.recorder.Event(swf, corev1.EventTypeWarning, failedSynced, - fmt.Sprintf("%v: %v", messageResourceFailedSynced, message)) -} diff --git a/resources/scheduledworkflow/client/swf_client.go b/resources/scheduledworkflow/client/swf_client.go deleted file mode 100644 index 70bf5cca239..00000000000 --- a/resources/scheduledworkflow/client/swf_client.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - swfclientset "github.com/kubeflow/pipelines/pkg/client/clientset/versioned" - "github.com/kubeflow/pipelines/pkg/client/informers/externalversions/scheduledworkflow/v1alpha1" - "github.com/kubeflow/pipelines/resources/scheduledworkflow/util" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/cache" -) - -// ScheduledWorkflowClient is a client to call the ScheduledWorkflow API. -type ScheduledWorkflowClient struct { - clientSet swfclientset.Interface - informer v1alpha1.ScheduledWorkflowInformer -} - -// NewScheduledWorkflowClient creates an instance of the client. -func NewScheduledWorkflowClient(clientSet swfclientset.Interface, - informer v1alpha1.ScheduledWorkflowInformer) *ScheduledWorkflowClient { - return &ScheduledWorkflowClient{ - clientSet: clientSet, - informer: informer, - } -} - -// AddEventHandler adds an event handler.
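The KubeClient above only consumes an EventRecorder; the recorder itself is wired up in NewController further down in this diff. Below is a condensed sketch of that wiring, assuming nothing beyond what the controller code shows; newKubeClient is hypothetical.

```go
package main

import (
	swfScheme "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/scheme"
	"github.com/kubeflow/pipelines/resources/scheduledworkflow/client"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newKubeClient condenses the event-recorder wiring from NewController below:
// register the ScheduledWorkflow types with the scheme, create a broadcaster
// that posts Events to the API server, and hand the resulting recorder to the
// KubeClient.
func newKubeClient(kubeClientSet kubernetes.Interface) *client.KubeClient {
	swfScheme.AddToScheme(scheme.Scheme)

	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: kubeClientSet.CoreV1().Events(""),
	})
	recorder := broadcaster.NewRecorder(scheme.Scheme,
		corev1.EventSource{Component: "scheduled-workflow-controller"})

	return client.NewKubeClient(kubeClientSet, recorder)
}
```

With this in place, RecordSyncSuccess surfaces as a Normal event on the ScheduledWorkflow and RecordSyncFailure as a Warning event.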
-func (p *ScheduledWorkflowClient) AddEventHandler(funcs *cache.ResourceEventHandlerFuncs) { - p.informer.Informer().AddEventHandler(funcs) -} - -// HasSynced returns true if the shared informer's store has synced. -func (p *ScheduledWorkflowClient) HasSynced() func() bool { - return p.informer.Informer().HasSynced -} - -// Get returns a ScheduledWorkflow, given a namespace and a name. -func (p *ScheduledWorkflowClient) Get(namespace string, name string) (*util.ScheduledWorkflow, error) { - schedule, err := p.informer.Lister().ScheduledWorkflows(namespace).Get(name) - if err != nil { - return nil, err - } - - return util.NewScheduledWorkflow(schedule), nil -} - -// Update Updates a ScheduledWorkflow in the Kubernetes API server. -func (p *ScheduledWorkflowClient) Update(namespace string, - schedule *util.ScheduledWorkflow) error { - _, err := p.clientSet.ScheduledworkflowV1alpha1().ScheduledWorkflows(namespace). - Update(schedule.Get()) - return err -} diff --git a/resources/scheduledworkflow/client/workflow_client.go b/resources/scheduledworkflow/client/workflow_client.go deleted file mode 100644 index 5ce1f6d43ee..00000000000 --- a/resources/scheduledworkflow/client/workflow_client.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - workflowapi "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - workflowclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" - "github.com/argoproj/argo/pkg/client/informers/externalversions/workflow/v1alpha1" - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "github.com/kubeflow/pipelines/resources/scheduledworkflow/util" - wraperror "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/cache" - "time" -) - -// WorkflowClient is a client to call the Workflow API. -type WorkflowClient struct { - clientSet workflowclientset.Interface - informer v1alpha1.WorkflowInformer -} - -// NewWorkflowClient creates an instance of the WorkflowClient. -func NewWorkflowClient(clientSet workflowclientset.Interface, - informer v1alpha1.WorkflowInformer) *WorkflowClient { - return &WorkflowClient{ - clientSet: clientSet, - informer: informer, - } -} - -// AddEventHandler adds an event handler. -func (p *WorkflowClient) AddEventHandler(funcs *cache.ResourceEventHandlerFuncs) { - p.informer.Informer().AddEventHandler(funcs) -} - -// HasSynced returns true if the shared informer's store has synced. -func (p *WorkflowClient) HasSynced() func() bool { - return p.informer.Informer().HasSynced -} - -// Get returns a Workflow, given a namespace and name. 
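Because Get serves objects straight from the informer cache, callers are expected to deep-copy before mutating and then write the copy back with Update, which is exactly what updateStatus does later in this diff. A hypothetical helper making that cycle explicit:

```go
package client

import (
	"github.com/kubeflow/pipelines/resources/scheduledworkflow/util"
)

// touchScheduledWorkflow is a hypothetical helper illustrating the intended
// read-modify-update cycle: Get returns the cached object, so it must be
// deep-copied before mutation and then written back with Update.
func touchScheduledWorkflow(c *ScheduledWorkflowClient, namespace, name string,
	mutate func(*util.ScheduledWorkflow)) error {
	cached, err := c.Get(namespace, name)
	if err != nil {
		return err
	}
	// NEVER modify the cached object directly; work on a deep copy.
	swfCopy := util.NewScheduledWorkflow(cached.Get().DeepCopy())
	mutate(swfCopy)
	return c.Update(namespace, swfCopy)
}
```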
-func (p *WorkflowClient) Get(namespace string, name string) ( - wf *util.Workflow, isNotFoundError bool, err error) { - workflow, err := p.informer.Lister().Workflows(namespace).Get(name) - if err != nil { - return nil, util.IsNotFound(err), wraperror.Wrapf(err, - "Error retrieving workflow (%v) in namespace (%v): %v", name, namespace, err) - } - return util.NewWorkflow(workflow), false, nil -} - -// List returns a list of workflows given the name of their ScheduledWorkflow, -// whether they are completed, and their minimum index (to avoid returning the whole list). -func (p *WorkflowClient) List(swfName string, completed bool, minIndex int64) ( - status []swfapi.WorkflowStatus, err error) { - - labelSelector := getLabelSelectorToGetWorkflows(swfName, completed, minIndex) - - workflows, err := p.informer.Lister().List(*labelSelector) - if err != nil { - return nil, wraperror.Wrapf(err, - "Could not retrieve workflows for scheduled workflow (%v): %v", swfName, err) - } - - result := toWorkflowStatuses(workflows) - - return result, nil -} - -func toWorkflowStatuses(workflows []*workflowapi.Workflow) []swfapi.WorkflowStatus { - result := make([]swfapi.WorkflowStatus, 0) - for _, workflow := range workflows { - result = append(result, *toWorkflowStatus(workflow)) - } - return result -} - -func toWorkflowStatus(workflow *workflowapi.Workflow) *swfapi.WorkflowStatus { - return &swfapi.WorkflowStatus{ - Name: workflow.Name, - Namespace: workflow.Namespace, - SelfLink: workflow.SelfLink, - UID: workflow.UID, - Phase: workflow.Status.Phase, - Message: workflow.Status.Message, - CreatedAt: workflow.CreationTimestamp, - StartedAt: workflow.Status.StartedAt, - FinishedAt: workflow.Status.FinishedAt, - ScheduledAt: retrieveScheduledTime(workflow), - Index: retrieveIndex(workflow), - } -} - -func retrieveScheduledTime(workflow *workflowapi.Workflow) metav1.Time { - value, ok := workflow.Labels[util.LabelKeyWorkflowEpoch] - if !ok { - return workflow.CreationTimestamp - } - result, err := util.RetrieveInt64FromLabel(value) - if err != nil { - return workflow.CreationTimestamp - } - return metav1.NewTime(time.Unix(result, 0).UTC()) -} - -func retrieveIndex(workflow *workflowapi.Workflow) int64 { - value, ok := workflow.Labels[util.LabelKeyWorkflowIndex] - if !ok { - return 0 - } - result, err := util.RetrieveInt64FromLabel(value) - if err != nil { - return 0 - } - return result -} - -// Create creates a workflow given a namespace and its specification. -func (p *WorkflowClient) Create(namespace string, workflow *util.Workflow) ( - *util.Workflow, error) { - result, err := p.clientSet.ArgoprojV1alpha1().Workflows(namespace).Create(workflow.Get()) - if err != nil { - return nil, wraperror.Wrapf(err, "Error creating workflow in namespace (%v): %v: %+v", namespace, - err, workflow.Get()) - } - return util.NewWorkflow(result), nil -} - -func getLabelSelectorToGetWorkflows(swfName string, completed bool, minIndex int64) *labels.Selector { - labelSelector := labels.NewSelector() - // The Argo workflow should be active or completed - labelSelector = labelSelector.Add(*util.GetRequirementForCompletedWorkflowOrFatal(completed)) - // The Argo workflow should be labelled with this scheduled workflow name. - labelSelector = labelSelector.Add(*util.GetRequirementForScheduleNameOrFatal(swfName)) - // The Argo workflow should have an index greater than... 
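The two sketches below mirror how this client is used by the controller's syncHandler later in the diff: List is called once for active workflows and once for completed workflows with an index greater than a minimum, and the same three label requirements used by getLabelSelectorToGetWorkflows can be composed and rendered to see what is actually handed to the lister. Both helper names are hypothetical.

```go
package client

import (
	"fmt"

	swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1"
	"github.com/kubeflow/pipelines/resources/scheduledworkflow/util"
	"k8s.io/apimachinery/pkg/labels"
)

// summarizeWorkflows mirrors the two List calls made by syncHandler: active
// workflows (completed == false, all indexes) and completed workflows with an
// index greater than minIndex.
func summarizeWorkflows(c *WorkflowClient, swfName string, minIndex int64) error {
	active, err := c.List(swfName, false /* completed */, 0 /* all indexes */)
	if err != nil {
		return err
	}
	completed, err := c.List(swfName, true /* completed */, minIndex)
	if err != nil {
		return err
	}
	report := func(kind string, statuses []swfapi.WorkflowStatus) {
		for _, s := range statuses {
			fmt.Printf("%s workflow %s/%s: phase=%s index=%d\n",
				kind, s.Namespace, s.Name, s.Phase, s.Index)
		}
	}
	report("active", active)
	report("completed", completed)
	return nil
}

// printWorkflowSelector composes the same three requirements that
// getLabelSelectorToGetWorkflows uses, purely to make the resulting label
// selector visible; the rendered string depends on the label key constants.
func printWorkflowSelector(swfName string, completed bool, minIndex int64) {
	selector := labels.NewSelector().
		Add(*util.GetRequirementForCompletedWorkflowOrFatal(completed)).
		Add(*util.GetRequirementForScheduleNameOrFatal(swfName)).
		Add(*util.GetRequirementForMinIndexOrFatal(minIndex))
	fmt.Println(selector.String())
}
```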
- labelSelector = labelSelector.Add(*util.GetRequirementForMinIndexOrFatal(minIndex)) - return &labelSelector -} diff --git a/resources/scheduledworkflow/client/workflow_client_test.go b/resources/scheduledworkflow/client/workflow_client_test.go deleted file mode 100644 index 475ea8d18d3..00000000000 --- a/resources/scheduledworkflow/client/workflow_client_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - workflowapi "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - workflowcommon "github.com/argoproj/argo/workflow/common" - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "github.com/kubeflow/pipelines/resources/scheduledworkflow/util" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "testing" - "time" -) - -func TestToWorkflowStatuses(t *testing.T) { - workflow := &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - Namespace: "NAMESPACE", - SelfLink: "SELF_LINK", - UID: "UID", - CreationTimestamp: metav1.NewTime(time.Unix(50, 0).UTC()), - Labels: map[string]string{ - util.LabelKeyWorkflowEpoch: "54", - util.LabelKeyWorkflowIndex: "55", - }, - }, - Status: workflowapi.WorkflowStatus{ - Phase: workflowapi.NodeRunning, - Message: "WORKFLOW_MESSAGE", - StartedAt: metav1.NewTime(time.Unix(51, 0).UTC()), - FinishedAt: metav1.NewTime(time.Unix(52, 0).UTC()), - }, - } - - result := toWorkflowStatuses([]*workflowapi.Workflow{workflow}) - - expected := &swfapi.WorkflowStatus{ - Name: "WORKFLOW_NAME", - Namespace: "NAMESPACE", - SelfLink: "SELF_LINK", - UID: "UID", - Phase: workflowapi.NodeRunning, - Message: "WORKFLOW_MESSAGE", - CreatedAt: metav1.NewTime(time.Unix(50, 0).UTC()), - StartedAt: metav1.NewTime(time.Unix(51, 0).UTC()), - FinishedAt: metav1.NewTime(time.Unix(52, 0).UTC()), - ScheduledAt: metav1.NewTime(time.Unix(54, 0).UTC()), - Index: 55, - } - - assert.Equal(t, []swfapi.WorkflowStatus{*expected}, result) -} - -func TestToWorkflowStatuses_NullOrEmpty(t *testing.T) { - workflow := &workflowapi.Workflow{} - - result := toWorkflowStatuses([]*workflowapi.Workflow{workflow}) - - expected := &swfapi.WorkflowStatus{ - Name: "", - Namespace: "", - SelfLink: "", - UID: "", - Phase: "", - Message: "", - CreatedAt: metav1.NewTime(time.Time{}.UTC()), - StartedAt: metav1.NewTime(time.Time{}.UTC()), - FinishedAt: metav1.NewTime(time.Time{}.UTC()), - ScheduledAt: metav1.NewTime(time.Time{}.UTC()), - Index: 0, - } - - assert.Equal(t, []swfapi.WorkflowStatus{*expected}, result) -} - -func TestRetrieveScheduledTime(t *testing.T) { - - // Base case. 
- workflow := &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: metav1.NewTime(time.Unix(50, 0).UTC()), - Labels: map[string]string{ - util.LabelKeyWorkflowEpoch: "54", - }, - }, - } - result := retrieveScheduledTime(workflow) - assert.Equal(t, metav1.NewTime(time.Unix(54, 0).UTC()), result) - - // No label - workflow = &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: metav1.NewTime(time.Unix(50, 0).UTC()), - Labels: map[string]string{ - "WRONG_LABEL": "54", - }, - }, - } - result = retrieveScheduledTime(workflow) - assert.Equal(t, metav1.NewTime(time.Unix(50, 0).UTC()), result) - - // Parsing problem - workflow = &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: metav1.NewTime(time.Unix(50, 0).UTC()), - Labels: map[string]string{ - util.LabelKeyWorkflowEpoch: "UNPARSABLE_@%^%@^#%", - }, - }, - } - result = retrieveScheduledTime(workflow) - assert.Equal(t, metav1.NewTime(time.Unix(50, 0).UTC()), result) -} - -func TestRetrieveIndex(t *testing.T) { - - // Base case. - workflow := &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - util.LabelKeyWorkflowIndex: "100", - }, - }, - } - result := retrieveIndex(workflow) - assert.Equal(t, int64(100), result) - - // No label - workflow = &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "WRONG_LABEL": "100", - }, - }, - } - result = retrieveIndex(workflow) - assert.Equal(t, int64(0), result) - - // Parsing problem - workflow = &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - util.LabelKeyWorkflowIndex: "UNPARSABLE_LABEL_!@^^!%@#%", - }, - }, - } - result = retrieveIndex(workflow) - assert.Equal(t, int64(0), result) -} - -func TestLabelSelectorToGetWorkflows(t *testing.T) { - - // Completed - result := getLabelSelectorToGetWorkflows( - "PIPELINE_NAME", - true, /* completed */ - 50 /* min index */) - - expected := labels.NewSelector() - - req, err := labels.NewRequirement(workflowcommon.LabelKeyCompleted, selection.Equals, - []string{"true"}) - assert.Nil(t, err) - expected = expected.Add(*req) - - req, err = labels.NewRequirement(util.LabelKeyWorkflowScheduledWorkflowName, selection.Equals, - []string{"PIPELINE_NAME"}) - assert.Nil(t, err) - expected = expected.Add(*req) - - req, err = labels.NewRequirement(util.LabelKeyWorkflowIndex, selection.GreaterThan, - []string{"50"}) - assert.Nil(t, err) - expected = expected.Add(*req) - - assert.Equal(t, expected, *result) - - // Not completed - result = getLabelSelectorToGetWorkflows( - "PIPELINE_NAME", - false, /* completed */ - 50 /* min index */) - - expected = labels.NewSelector() - - req, err = labels.NewRequirement(workflowcommon.LabelKeyCompleted, selection.NotEquals, - []string{"true"}) - assert.Nil(t, err) - expected = expected.Add(*req) - - req, err = labels.NewRequirement(util.LabelKeyWorkflowScheduledWorkflowName, selection.Equals, - []string{"PIPELINE_NAME"}) - assert.Nil(t, err) - expected = expected.Add(*req) - - req, err = labels.NewRequirement(util.LabelKeyWorkflowIndex, selection.GreaterThan, - []string{"50"}) - assert.Nil(t, err) - expected = expected.Add(*req) - - assert.Equal(t, expected, *result) -} diff --git a/resources/scheduledworkflow/controller.go b/resources/scheduledworkflow/controller.go deleted file mode 100644 index d780a4c62ba..00000000000 --- a/resources/scheduledworkflow/controller.go +++ /dev/null @@ -1,523 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the 
Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - workflowapi "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - workflowclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" - workflowinformers "github.com/argoproj/argo/pkg/client/informers/externalversions" - swfregister "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow" - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - swfclientset "github.com/kubeflow/pipelines/pkg/client/clientset/versioned" - swfScheme "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/scheme" - swfinformers "github.com/kubeflow/pipelines/pkg/client/informers/externalversions" - "github.com/kubeflow/pipelines/resources/scheduledworkflow/client" - "github.com/kubeflow/pipelines/resources/scheduledworkflow/util" - wraperror "github.com/pkg/errors" - log "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - "time" -) - -const ( - Workflow = "Workflow" - ScheduledWorkflow = "ScheduledWorkflow" -) - -var ( - // DefaultJobBackOff is the max backoff period - DefaultJobBackOff = 10 * time.Second - // MaxJobBackOff is the max backoff period - MaxJobBackOff = 360 * time.Second -) - -// Controller is the controller implementation for ScheduledWorkflow resources -type Controller struct { - kubeClient *client.KubeClient - swfClient *client.ScheduledWorkflowClient - workflowClient *client.WorkflowClient - - // workqueue is a rate limited work queue. This is used to queue work to be - // processed instead of performing it as soon as a change happens. This - // means we can ensure we only process a fixed amount of resources at a - // time, and makes it easy to ensure we are never processing the same item - // simultaneously in two different workers. - workqueue workqueue.RateLimitingInterface - - // An interface to generate the current time. - time util.TimeInterface -} - -// NewController returns a new sample controller -func NewController( - kubeClientSet kubernetes.Interface, - swfClientSet swfclientset.Interface, - workflowClientSet workflowclientset.Interface, - swfInformerFactory swfinformers.SharedInformerFactory, - workflowInformerFactory workflowinformers.SharedInformerFactory, - time util.TimeInterface) *Controller { - - // obtain references to shared informers - swfInformer := swfInformerFactory.Scheduledworkflow().V1alpha1().ScheduledWorkflows() - workflowInformer := workflowInformerFactory.Argoproj().V1alpha1().Workflows() - - // Add controller types to the default Kubernetes Scheme so Events can be - // logged for controller types. 
- swfScheme.AddToScheme(scheme.Scheme) - - // Create event broadcaster - log.Info("Creating event broadcaster") - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(log.Infof) - eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClientSet.CoreV1().Events("")}) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: util.ControllerAgentName}) - - controller := &Controller{ - kubeClient: client.NewKubeClient(kubeClientSet, recorder), - swfClient: client.NewScheduledWorkflowClient(swfClientSet, swfInformer), - workflowClient: client.NewWorkflowClient(workflowClientSet, workflowInformer), - workqueue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemExponentialFailureRateLimiter(DefaultJobBackOff, MaxJobBackOff), swfregister.Kind), - time: time, - } - - log.Info("Setting up event handlers") - - // Set up an event handler for when the Scheduled Workflow changes - controller.swfClient.AddEventHandler(&cache.ResourceEventHandlerFuncs{ - AddFunc: controller.enqueueScheduledWorkflow, - UpdateFunc: func(old, new interface{}) { - controller.enqueueScheduledWorkflow(new) - }, - DeleteFunc: controller.enqueueScheduledWorkflowForDelete, - }) - - // Set up an event handler for when WorkflowHistory resources change. This - // handler will lookup the owner of the given WorkflowHistory, and if it is - // owned by a ScheduledWorkflow, it will enqueue that ScheduledWorkflow for - // processing. This way, we don't need to implement custom logic for - // handling WorkflowHistory resources. More info on this pattern: - // https://github.com/kubernetes/community/blob/8cafef897a22026d42f5e5bb3f104febe7e29830/contributors/devel/controllers.md - controller.workflowClient.AddEventHandler(&cache.ResourceEventHandlerFuncs{ - AddFunc: controller.handleWorkflow, - UpdateFunc: func(old, new interface{}) { - newWorkflow := new.(*workflowapi.Workflow) - oldWorkflow := old.(*workflowapi.Workflow) - if newWorkflow.ResourceVersion == oldWorkflow.ResourceVersion { - // Periodic resync will send update events for all known Workflows. - // Two different versions of the same WorkflowHistory will always have different RVs. - return - } - controller.handleWorkflow(new) - }, - DeleteFunc: controller.handleWorkflow, - }) - - return controller -} - -// Run will set up the event handlers for types we are interested in, as well -// as syncing informer caches and starting workers. It will block until stopCh -// is closed, at which point it will shutdown the workqueue and wait for -// workers to finish processing their current work items. 
-func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { - defer runtime.HandleCrash() - defer c.workqueue.ShutDown() - - // Start the informer factories to begin populating the informer caches - log.Info("Starting ScheduledWorkflow controller") - - // Wait for the caches to be synced before starting workers - log.Info("Waiting for informer caches to sync") - - if ok := cache.WaitForCacheSync(stopCh, - c.workflowClient.HasSynced(), - c.swfClient.HasSynced()); !ok { - return fmt.Errorf("Failed to wait for caches to sync") - } - - // Launch multiple workers to process ScheduledWorkflows - log.Info("Starting workers") - for i := 0; i < threadiness; i++ { - go wait.Until(c.runWorker, time.Second, stopCh) - } - log.Info("Started workers") - - log.Info("Wait for shut down") - <-stopCh - log.Info("Shutting down workers") - - return nil -} - -// runWorker is a long-running function that will continually call the -// processNextWorkItem function in order to read and process a message on the -// workqueue. It enforces that the syncHandler is never invoked concurrently with the same key. -func (c *Controller) runWorker() { - for c.processNextWorkItem() { - } -} - -// enqueueScheduledWorkflow takes a ScheduledWorkflow and converts it into a namespace/name -// string which is then put onto the work queue. This method should *not* be -// passed resources of any type other than ScheduledWorkflow. -func (c *Controller) enqueueScheduledWorkflow(obj interface{}) { - var key string - var err error - if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { - runtime.HandleError(fmt.Errorf("Equeuing object: error: %v: %+v", err, obj)) - return - } - c.workqueue.AddRateLimited(key) -} - -func (c *Controller) enqueueScheduledWorkflowForDelete(obj interface{}) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err == nil { - c.workqueue.Add(key) - } -} - -// handleWorkflow will take any resource implementing metav1.Object and attempt -// to find the ScheduledWorkflow that 'owns' it. It does this by looking at the -// objects metadata.ownerReferences field for an appropriate OwnerReference. -// It then enqueues that ScheduledWorkflow to be processed. If the object does not -// have an appropriate OwnerReference, it will simply be skipped. -func (c *Controller) handleWorkflow(obj interface{}) { - var object metav1.Object - var ok bool - if object, ok = obj.(metav1.Object); !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - runtime.HandleError(fmt.Errorf("Error decoding object, invalid type.")) - return - } - object, ok = tombstone.Obj.(metav1.Object) - if !ok { - runtime.HandleError(fmt.Errorf("Error decoding object tombstone, invalid type.")) - return - } - log.WithFields(log.Fields{ - Workflow: object.GetName(), - }).Infof("Recovered deleted object '%s' from tombstone.", object.GetName()) - } - - if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { - // If this object is not owned by a ScheduledWorkflow, we should not do anything more - // with it. 
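The only thing that ever sits on the workqueue is a namespace/name string; syncHandler later splits it back apart. A small, self-contained illustration of that round trip (the namespace and name are made up):

```go
package main

import (
	"fmt"

	swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// keyRoundTrip illustrates the key format flowing through the workqueue:
// enqueueScheduledWorkflow stores "namespace/name" strings, and syncHandler
// later splits them apart again with cache.SplitMetaNamespaceKey.
func keyRoundTrip() {
	swf := &swfapi.ScheduledWorkflow{
		ObjectMeta: metav1.ObjectMeta{Namespace: "kubeflow", Name: "nightly-run"},
	}

	key, err := cache.MetaNamespaceKeyFunc(swf)
	if err != nil {
		panic(err)
	}
	fmt.Println(key) // "kubeflow/nightly-run"

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(namespace, name) // "kubeflow nightly-run"
}
```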
- if ownerRef.Kind != swfregister.Kind { - log.WithFields(log.Fields{ - Workflow: object.GetName(), - }).Infof("Processing object (%s): owner is not a Scheduled Workflow.", object.GetName()) - return - } - - swf, err := c.swfClient.Get(object.GetNamespace(), ownerRef.Name) - if err != nil { - log.WithFields(log.Fields{ - Workflow: object.GetName(), - }).Infof("Processing object (%s): ignoring orphaned object of scheduled Workflow (%s).", - object.GetName(), ownerRef.Name) - return - } - - log.WithFields(log.Fields{ - Workflow: object.GetName(), - ScheduledWorkflow: ownerRef.Name, - }).Infof("Processing object (%s): owner is a ScheduledWorkflow (%s).", object.GetName(), - ownerRef.Name) - c.enqueueScheduledWorkflow(swf.Get()) - return - } - log.WithFields(log.Fields{ - Workflow: object.GetName(), - }).Infof("Processing object (%s): object has no owner.", object.GetName()) - return -} - -// processNextWorkItem will read a single work item off the workqueue and -// attempt to process it, by calling the syncHandler. -func (c *Controller) processNextWorkItem() bool { - obj, shutdown := c.workqueue.Get() - - if shutdown { - return false - } - - // We wrap this block in a func so we can defer c.workqueue.Done. - return func(obj interface{}) bool { - // We call Done here so the workqueue knows we have finished - // processing this item. We also must remember to call Forget if we - // do not want this work item being re-queued. For example, we do - // not call Forget if a transient error occurs, instead the item is - // put back on the workqueue and attempted again after a back-off - // period. - defer c.workqueue.Done(obj) - var key string - var ok bool - // We expect strings to come off the workqueue. These are of the - // form namespace/name. We do this as the delayed nature of the - // workqueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // workqueue. - if key, ok = obj.(string); !ok { - // As the item in the workqueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. - c.workqueue.Forget(obj) - runtime.HandleError(fmt.Errorf("Expected string in workqueue but got %#v", obj)) - return true - } - - // Notes on workqueues: - // - when using: workqueue.Forget - // The item is reprocessed after the next SharedInformerFactory defaultResync. - // - when using: workqueue.Forget && workqueue.Add() - // The item is reprocessed immediately. - // This is not recommended as the status changes may not have propagated, leading to - // a (recoverable) versioning error. - // - when using: workqueue.Forget && workqueue.AddAfter(X seconds) - // The item is reprocessed after X seconds. - // It can be re-processes earlier depending on SharedInformerFactory defaultResync. - // Deleting and recreating the resource using kubectl does not trigger early processing. - // - when using: workqueue.Forget && workqueue.AddRateLimited() - // The item is reprocessed after the baseDelay - // - when using: workqueue.AddRateLimited() - // The item is reprocessed folowing the exponential backoff strategy: - // baseDelay * 10^(failure count) - // It is not reprocessed earlier due to SharedInformerFactory defaultResync. - // It is not reprocessed earlier even if the resource is deleted/re-created. 
- // - when using: workqueue.Add() - // The item is reprocessed immediately (not recommended) - // - when using: workqueue.AddAfter(X seconds) - // The item is reprocessed immediately - // - when using: nothing - // The item is reprocessed using the exponential backoff strategy. - - // Run the syncHandler, passing it the namespace/name string of the - // ScheduledWorkflow to be synced. - syncAgain, retryOnError, swf, err := c.syncHandler(key) - if err != nil && retryOnError { - // Transient failure. We will retry. - c.workqueue.AddRateLimited(obj) // Exponential backoff. - runtime.HandleError(fmt.Errorf("Transient failure: %+v", err)) - if swf != nil { - c.kubeClient.RecordSyncFailure(swf.Get(), - fmt.Sprintf("Transient failure: %v", err.Error())) - } - return true - } else if err != nil && !retryOnError { - // Permanent failure. We won't retry. - // Will resync after the SharedInformerFactory defaultResync delay. - c.workqueue.Forget(obj) - runtime.HandleError(fmt.Errorf("Permanent failure: %+v", err)) - if swf != nil { - c.kubeClient.RecordSyncFailure(swf.Get(), - fmt.Sprintf("Permanent failure: %v", err.Error())) - } - return true - } else if err == nil && !syncAgain { - // Success. - // Will resync after the SharedInformerFactory defaultResync delay. - c.workqueue.Forget(obj) - if swf != nil { - c.kubeClient.RecordSyncSuccess(swf.Get(), "All done") - } - return true - } else { - // Success and sync again soon. - c.workqueue.Forget(obj) - c.workqueue.AddAfter(obj, 10*time.Second) // Need status changes to propagate. - if swf != nil { - c.kubeClient.RecordSyncSuccess(swf.Get(), "Partially done, syncing again soon") - } - return true - } - }(obj) -} - -// syncHandler compares the actual state with the desired, and attempts to -// converge the two. It then updates the Status block of the ScheduledWorkflow -// with the current status of the resource. -func (c *Controller) syncHandler(key string) ( - syncAgain bool, retryOnError bool, swf *util.ScheduledWorkflow, err error) { - - // Convert the namespace/name string into a distinct namespace and name - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - // Permanent failure. - return false, false, nil, - wraperror.Wrapf(err, "Invalid resource key (%s): %v", key, err) - } - - // Get the ScheduledWorkflow with this namespace/name - swf, err = c.swfClient.Get(namespace, name) - if err != nil { - // Permanent failure. - // The ScheduledWorkflow may no longer exist, we stop processing and do not retry. - return false, false, nil, - wraperror.Wrapf(err, "ScheduledWorkflow (%s) in work queue no longer exists: %v", key, err) - } - - // Get the current time - // NOTE: call time.Now() only once per event so that all the functions have a consistent - // number for the current time. - nowEpoch := c.time.Now().Unix() - - // Get active workflows for this ScheduledWorkflow. - active, err := c.workflowClient.List(swf.Name, - false, /* active workflow */ - 0 /* retrieve all workflows */) - if err != nil { - return false, true, swf, - wraperror.Wrapf(err, "Syncing ScheduledWorkflow (%v): transient failure, can't fetch active workflows: %v", name, err) - } - - // Get completed workflows for this ScheduledWorkflow. 
- completed, err := c.workflowClient.List(swf.Name, - true, /* completed workflows */ - swf.MinIndex()) - if err != nil { - return false, true, swf, - wraperror.Wrapf(err, "Syncing ScheduledWorkflow (%v): transient failure, can't fetch completed workflows: %v", name, err) - } - - workflow, nextScheduledEpoch, err := c.submitNextWorkflowIfNeeded(swf, len(active), nowEpoch) - if err != nil { - return false, true, swf, - wraperror.Wrapf(err, "Syncing ScheduledWorkflow (%v): transient failure, can't fetch completed workflows: %v", name, err) - } - - err = c.updateStatus(swf, workflow, active, completed, nextScheduledEpoch, nowEpoch) - if err != nil { - return false, true, swf, - wraperror.Wrapf(err, "Syncing ScheduledWorkflow (%v): transient failure, can't update swf status: %v", name, err) - } - - if workflow != nil { - // Success. Since we created a new workflow, sync again soon since there might be one more - // resource to create. - log.WithFields(log.Fields{ - ScheduledWorkflow: name, - }).Infof("Syncing ScheduledWorkflow (%v): success, requeuing for further processing.", name) - return true, false, swf, nil - } - - // Success. We did not create any new resource. We can sync again when something changes. - log.WithFields(log.Fields{ - ScheduledWorkflow: name, - }).Infof("Syncing ScheduledWorkflow (%v): success, processing complete.", name) - return false, false, swf, nil -} - -// Submits the next workflow if a workflow is due to execute. Returns the submitted workflow, -// an error (if any), and a boolean indicating (in case of an error) whether handling the -// ScheduledWorkflow should be attempted again at a later time. -func (c *Controller) submitNextWorkflowIfNeeded(swf *util.ScheduledWorkflow, - activeWorkflowCount int, nowEpoch int64) ( - workflow *util.Workflow, nextScheduledEpoch int64, err error) { - // Compute the next scheduled time. - nextScheduledEpoch, shouldRunNow := swf.GetNextScheduledEpoch( - int64(activeWorkflowCount), nowEpoch) - - if !shouldRunNow { - log.WithFields(log.Fields{ - ScheduledWorkflow: swf.Name, - }).Infof("Submitting workflow for ScheduledWorkflow (%v): nothing to submit (next scheduled at: %v)", - swf.Name, util.FormatTimeForLogging(nextScheduledEpoch)) - return nil, nextScheduledEpoch, nil - } - - workflow, err = c.submitNewWorkflowIfNotAlreadySubmitted(swf, nextScheduledEpoch, nowEpoch) - if err != nil { - log.WithFields(log.Fields{ - ScheduledWorkflow: swf.Name, - }).Errorf("Submitting workflow for ScheduledWorkflow (%v): transient error while submitting workflow: %v", - swf.Name, err) - // There was an error submitting a new workflow. - // We should attempt to handle the schedule again at a later time. - return nil, nextScheduledEpoch, err - } - log.WithFields(log.Fields{ - ScheduledWorkflow: swf.Name, - Workflow: workflow.Get().Name, - }).Infof("Submitting workflow for ScheduledWorkflow (%v): workflow (%v) successfully submitted (scheduled at: %v)", - swf.Name, workflow.Get().Name, util.FormatTimeForLogging(nextScheduledEpoch)) - return workflow, nextScheduledEpoch, nil -} - -func (c *Controller) submitNewWorkflowIfNotAlreadySubmitted( - swf *util.ScheduledWorkflow, nextScheduledEpoch int64, nowEpoch int64) ( - *util.Workflow, error) { - - workflowName := swf.NextResourceName() - - // Try to fetch this workflow - // If it already exists, it means that it was already created in a previous iteration - // of this controller but that the controller failed to save this data. 
- foundWorkflow, isNotFoundError, err := c.workflowClient.Get(swf.Namespace, - workflowName) - if err == nil { - // The workflow was already created by a previous iteration of this controller. - // Nothing to do except returning the information needed by the controller to update - // the ScheduledWorkflow status. - return foundWorkflow, nil - } - - if !isNotFoundError { - // There was an error while attempting to retrieve the workflow - return nil, err - } - - // If the workflow is not found, we need to create it. - newWorkflow := swf.NewWorkflow(nextScheduledEpoch, nowEpoch) - createdWorkflow, err := c.workflowClient.Create(swf.Namespace, newWorkflow) - - if err != nil { - return nil, err - } - return createdWorkflow, nil -} - -func (c *Controller) updateStatus( - swf *util.ScheduledWorkflow, - workflow *util.Workflow, - active []swfapi.WorkflowStatus, - completed []swfapi.WorkflowStatus, - nextScheduledEpoch int64, - nowEpoch int64) error { - // NEVER modify objects from the store. It's a read-only, local cache. - // You can use DeepCopy() to make a deep copy of original object and modify this copy - // Or create a copy manually for better performance - swfCopy := util.NewScheduledWorkflow(swf.Get().DeepCopy()) - swfCopy.UpdateStatus(nowEpoch, workflow, nextScheduledEpoch, active, completed) - - // Until #38113 is merged, we must use Update instead of UpdateStatus to - // update the Status block of the ScheduledWorkflow. UpdateStatus will not - // allow changes to the Spec of the resource, which is ideal for ensuring - // nothing other than resource status has been updated. - return c.swfClient.Update(swf.Namespace, swfCopy) -} diff --git a/resources/scheduledworkflow/main.go b/resources/scheduledworkflow/main.go deleted file mode 100644 index 41579254a8c..00000000000 --- a/resources/scheduledworkflow/main.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "flag" - workflowclientSet "github.com/argoproj/argo/pkg/client/clientset/versioned" - workflowinformers "github.com/argoproj/argo/pkg/client/informers/externalversions" - swfclientset "github.com/kubeflow/pipelines/pkg/client/clientset/versioned" - swfinformers "github.com/kubeflow/pipelines/pkg/client/informers/externalversions" - "github.com/kubeflow/pipelines/pkg/signals" - "github.com/kubeflow/pipelines/resources/scheduledworkflow/util" - log "github.com/sirupsen/logrus" - "k8s.io/client-go/kubernetes" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/clientcmd" - "time" -) - -var ( - masterURL string - kubeconfig string -) - -func main() { - flag.Parse() - - // set up signals so we handle the first shutdown signal gracefully - stopCh := signals.SetupSignalHandler() - - cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) - if err != nil { - log.Fatalf("Error building kubeconfig: %s", err.Error()) - } - - kubeClient, err := kubernetes.NewForConfig(cfg) - if err != nil { - log.Fatalf("Error building kubernetes clientset: %s", err.Error()) - } - - scheduleClient, err := swfclientset.NewForConfig(cfg) - if err != nil { - log.Fatalf("Error building schedule clientset: %s", err.Error()) - } - - workflowClient, err := workflowclientSet.NewForConfig(cfg) - if err != nil { - log.Fatalf("Error building workflow clientset: %s", err.Error()) - } - - scheduleInformerFactory := swfinformers.NewSharedInformerFactory(scheduleClient, time.Second*30) - workflowInformerFactory := workflowinformers.NewSharedInformerFactory(workflowClient, time.Second*30) - - controller := NewController( - kubeClient, - scheduleClient, - workflowClient, - scheduleInformerFactory, - workflowInformerFactory, - util.NewRealTime()) - - go scheduleInformerFactory.Start(stopCh) - go workflowInformerFactory.Start(stopCh) - - if err = controller.Run(2, stopCh); err != nil { - log.Fatalf("Error running controller: %s", err.Error()) - } -} - -func init() { - flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") - flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") -} diff --git a/resources/scheduledworkflow/util/constants.go b/resources/scheduledworkflow/util/constants.go deleted file mode 100644 index a927f10f0ce..00000000000 --- a/resources/scheduledworkflow/util/constants.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - constants "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow" -) - -const ( - // ControllerAgentName is the name of the controller. - ControllerAgentName = "scheduled-workflow-controller" - - // LabelKeyWorkflowEpoch is a label on a Workflow. - // It captures the epoch at which the workflow was scheduled. 
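For local testing it can be convenient to build the controller against in-memory clientsets instead of a real API server. The sketch below assumes the standard code-generated fake packages (k8s.io/client-go/kubernetes/fake and the fake subpackages of the two generated clientsets) are available; newTestController is hypothetical and is written as if it sat next to controller.go in package main.

```go
package main

import (
	"testing"

	argofake "github.com/argoproj/argo/pkg/client/clientset/versioned/fake"
	workflowinformers "github.com/argoproj/argo/pkg/client/informers/externalversions"
	swffake "github.com/kubeflow/pipelines/pkg/client/clientset/versioned/fake"
	swfinformers "github.com/kubeflow/pipelines/pkg/client/informers/externalversions"
	"github.com/kubeflow/pipelines/resources/scheduledworkflow/util"
	k8sfake "k8s.io/client-go/kubernetes/fake"
)

// newTestController builds the controller against in-memory fake clientsets,
// which is handy for unit tests that should not talk to a real API server.
func newTestController(t *testing.T) *Controller {
	t.Helper()

	kubeClient := k8sfake.NewSimpleClientset()
	swfClient := swffake.NewSimpleClientset()
	workflowClient := argofake.NewSimpleClientset()

	return NewController(
		kubeClient,
		swfClient,
		workflowClient,
		swfinformers.NewSharedInformerFactory(swfClient, 0 /* no resync */),
		workflowinformers.NewSharedInformerFactory(workflowClient, 0 /* no resync */),
		util.NewRealTime())
}
```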
- LabelKeyWorkflowEpoch = constants.FullName + "/workflowEpoch" - // LabelKeyWorkflowIndex is a label on a Workflow. - // It captures the index at which the ScheduledWorkflow created the workflow. - LabelKeyWorkflowIndex = constants.FullName + "/workflowIndex" - // LabelKeyWorkflowIsOwnedByScheduledWorkflow is a label on a Workflow. - // It captures whether the workflow is owned by a ScheduledWorkflow. - LabelKeyWorkflowIsOwnedByScheduledWorkflow = constants.FullName + "/isOwnedByScheduledWorkflow" - // LabelKeyWorkflowScheduledWorkflowName is a label on a Workflow. - // It captures the name of the owning ScheduledWorkflow. - LabelKeyWorkflowScheduledWorkflowName = constants.FullName + "/scheduledWorkflowName" - - // LabelKeyScheduledWorkflowEnabled is a label on a ScheduledWorkflow. - // It captures whether the ScheduledWorkflow is enabled. - LabelKeyScheduledWorkflowEnabled = constants.FullName + "/enabled" - // LabelKeyScheduledWorkflowStatus is a label on a ScheduledWorkflow. - // It captures the status of the scheduled workflow. - LabelKeyScheduledWorkflowStatus = constants.FullName + "/status" -) diff --git a/resources/scheduledworkflow/util/convert.go b/resources/scheduledworkflow/util/convert.go deleted file mode 100644 index 0f5f98c0dc7..00000000000 --- a/resources/scheduledworkflow/util/convert.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StringPointer converts a string to a string pointer. -func StringPointer(s string) *string { - return &s -} - -// BooleanPointer converts a bool to a bool pointer. -func BooleanPointer(b bool) *bool { - return &b -} - -// Metav1TimePointer converts a metav1.Time to a pointer. -func Metav1TimePointer(t metav1.Time) *metav1.Time { - return &t -} - -// Int64Pointer converts an int64 to a pointer. -func Int64Pointer(i int64) *int64 { - return &i -} - -func toInt64Pointer(t *metav1.Time) *int64 { - if t == nil { - return nil - } else { - return Int64Pointer(t.Unix()) - } -} diff --git a/resources/scheduledworkflow/util/cron_schedule.go b/resources/scheduledworkflow/util/cron_schedule.go deleted file mode 100644 index 9bd43be1d1b..00000000000 --- a/resources/scheduledworkflow/util/cron_schedule.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
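The pointer helpers in convert.go exist because optional fields on the ScheduledWorkflow API types are pointers. A small sketch, mirroring how the cron-schedule tests further down construct their fixtures (newHourlyCronSchedule is hypothetical):

```go
package util

import (
	"time"

	swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newHourlyCronSchedule shows why the pointer helpers exist: optional fields
// such as StartTime and EndTime are pointers on the API type, so literal
// values have to be converted before they can be assigned.
func newHourlyCronSchedule(start, end time.Time) *swfapi.CronSchedule {
	return &swfapi.CronSchedule{
		StartTime: Metav1TimePointer(metav1.NewTime(start.UTC())),
		EndTime:   Metav1TimePointer(metav1.NewTime(end.UTC())),
		// Six-field cron spec (with seconds), as used in the tests below:
		// fire at second 0 of every minute.
		Cron: "0 * * * * *",
	}
}
```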
- -package util - -import ( - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - wraperror "github.com/pkg/errors" - "github.com/robfig/cron" - log "github.com/sirupsen/logrus" - "math" - "time" -) - -// CronSchedule is a type to help manipulate CronSchedule objects. -type CronSchedule struct { - *swfapi.CronSchedule -} - -// NewCronSchedule creates a CronSchedule. -func NewCronSchedule(cronSchedule *swfapi.CronSchedule) *CronSchedule { - if cronSchedule == nil { - log.Fatalf("The cronSchedule should never be nil") - } - - return &CronSchedule{ - cronSchedule, - } -} - -// GetNextScheduledEpoch returns the next epoch at which a workflow must be -// scheduled. -func (s *CronSchedule) GetNextScheduledEpoch(lastJobEpoch *int64, - defaultStartEpoch int64) int64 { - effectiveLastJobEpoch := defaultStartEpoch - if lastJobEpoch != nil { - effectiveLastJobEpoch = *lastJobEpoch - } else if s.StartTime != nil { - effectiveLastJobEpoch = s.StartTime.Unix() - } - return s.getNextScheduledEpoch(effectiveLastJobEpoch) -} - -func (s *CronSchedule) getNextScheduledEpoch(lastJobEpoch int64) int64 { - schedule, err := cron.Parse(s.Cron) - if err != nil { - // This should never happen, validation should have caught this at resource creation. - log.Errorf("%+v", wraperror.Errorf( - "Found invalid schedule (%v): %v", s.Cron, err)) - return math.MaxInt64 - } - - startEpoch := lastJobEpoch - if s.StartTime != nil && s.StartTime.Unix() > startEpoch { - startEpoch = s.StartTime.Unix() - } - result := schedule.Next(time.Unix(startEpoch, 0).UTC()).Unix() - - if s.EndTime != nil && - s.EndTime.Unix() < result { - return math.MaxInt64 - } - - return result -} diff --git a/resources/scheduledworkflow/util/cron_schedule_test.go b/resources/scheduledworkflow/util/cron_schedule_test.go deleted file mode 100644 index 7e7b107b969..00000000000 --- a/resources/scheduledworkflow/util/cron_schedule_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "math" - "testing" - "time" -) - -const ( - second = 1 - minute = 60 * second - hour = 60 * minute -) - -func TestCronSchedule_getNextScheduledEpoch_Cron_StartDate_EndDate(t *testing.T) { - // First job. - schedule := NewCronSchedule(&swfapi.CronSchedule{ - StartTime: Metav1TimePointer(v1.NewTime(time.Unix(10*hour, 0).UTC())), - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - Cron: "0 * * * * * ", - }) - lastJobEpoch := int64(0) - assert.Equal(t, int64(10*hour+minute), - schedule.getNextScheduledEpoch(lastJobEpoch)) - - // Not the first job. 
- lastJobEpoch = int64(10*hour + 5*minute) - assert.Equal(t, int64(10*hour+6*minute), - schedule.getNextScheduledEpoch(lastJobEpoch)) - - // Last job - lastJobEpoch = int64(13 * hour) - assert.Equal(t, int64(math.MaxInt64), - schedule.getNextScheduledEpoch(lastJobEpoch)) - -} - -func TestCronSchedule_getNextScheduledEpoch_CronOnly(t *testing.T) { - schedule := NewCronSchedule(&swfapi.CronSchedule{ - Cron: "0 * * * * * ", - }) - lastJobEpoch := int64(10 * hour) - assert.Equal(t, int64(10*hour+minute), - schedule.getNextScheduledEpoch(lastJobEpoch)) -} - -func TestCronSchedule_getNextScheduledEpoch_NoCron(t *testing.T) { - schedule := NewCronSchedule(&swfapi.CronSchedule{ - StartTime: Metav1TimePointer(v1.NewTime(time.Unix(10*hour, 0).UTC())), - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - Cron: "", - }) - lastJobEpoch := int64(0) - assert.Equal(t, int64(math.MaxInt64), - schedule.getNextScheduledEpoch(lastJobEpoch)) -} - -func TestCronSchedule_getNextScheduledEpoch_InvalidCron(t *testing.T) { - schedule := NewCronSchedule(&swfapi.CronSchedule{ - StartTime: Metav1TimePointer(v1.NewTime(time.Unix(10*hour, 0).UTC())), - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - Cron: "*$&%*(W&", - }) - lastJobEpoch := int64(0) - assert.Equal(t, int64(math.MaxInt64), - schedule.getNextScheduledEpoch(lastJobEpoch)) -} - -func TestCronSchedule_GetNextScheduledEpoch(t *testing.T) { - // There was a previous job. - schedule := NewCronSchedule(&swfapi.CronSchedule{ - StartTime: Metav1TimePointer(v1.NewTime(time.Unix(10*hour+10*minute, 0).UTC())), - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - Cron: "0 * * * * * ", - }) - lastJobEpoch := int64(10*hour + 20*minute) - defaultStartEpoch := int64(10*hour + 15*minute) - assert.Equal(t, int64(10*hour+20*minute+minute), - schedule.GetNextScheduledEpoch(&lastJobEpoch, defaultStartEpoch)) - - // There is no previous job, falling back on the start date of the schedule. - assert.Equal(t, int64(10*hour+10*minute+minute), - schedule.GetNextScheduledEpoch(nil, defaultStartEpoch)) - - // There is no previous job, no schedule start date, falling back on the - // creation date of the workflow. - schedule = NewCronSchedule(&swfapi.CronSchedule{ - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - Cron: "0 * * * * * ", - }) - assert.Equal(t, int64(10*hour+15*minute+minute), - schedule.GetNextScheduledEpoch(nil, defaultStartEpoch)) -} diff --git a/resources/scheduledworkflow/util/error.go b/resources/scheduledworkflow/util/error.go deleted file mode 100644 index ddd2f1ed4c0..00000000000 --- a/resources/scheduledworkflow/util/error.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// IsNotFound returns whether an error indicates that a resource was "not found". 
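Putting the cron pieces together, here is a usage sketch of GetNextScheduledEpoch under the fallback order implemented above: the last job's epoch if one exists, otherwise the schedule's StartTime, otherwise the caller-supplied default. nextRunExample and the timestamps are illustrative only.

```go
package util

import (
	"fmt"
	"time"

	swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nextRunExample sketches the fallback order used by GetNextScheduledEpoch:
// the last job's epoch if there was one, otherwise the schedule's StartTime,
// otherwise the caller-supplied default (typically the resource creation time).
func nextRunExample() {
	start := metav1.NewTime(time.Date(2018, 7, 1, 10, 0, 0, 0, time.UTC))
	schedule := NewCronSchedule(&swfapi.CronSchedule{
		StartTime: Metav1TimePointer(start),
		Cron:      "0 * * * * *", // second 0 of every minute
	})

	defaultStartEpoch := time.Date(2018, 7, 1, 9, 0, 0, 0, time.UTC).Unix()

	// No previous job: the next run is one minute after StartTime.
	next := schedule.GetNextScheduledEpoch(nil, defaultStartEpoch)
	fmt.Println(time.Unix(next, 0).UTC()) // 2018-07-01 10:01:00 +0000 UTC

	// With a previous job, scheduling continues from that job's epoch instead.
	last := time.Date(2018, 7, 1, 12, 30, 0, 0, time.UTC).Unix()
	next = schedule.GetNextScheduledEpoch(&last, defaultStartEpoch)
	fmt.Println(time.Unix(next, 0).UTC()) // 2018-07-01 12:31:00 +0000 UTC
}
```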
-func IsNotFound(err error) bool { - return reasonForError(err) == metav1.StatusReasonNotFound -} - -// ReasonForError returns the HTTP status for a particular error. -func reasonForError(err error) metav1.StatusReason { - switch t := err.(type) { - case errors.APIStatus: - return t.Status().Reason - case *errors.StatusError: - return t.Status().Reason - } - return metav1.StatusReasonUnknown -} diff --git a/resources/scheduledworkflow/util/error_test.go b/resources/scheduledworkflow/util/error_test.go deleted file mode 100644 index 02d0f2d0418..00000000000 --- a/resources/scheduledworkflow/util/error_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "testing" -) - -func TestIsNotFound(t *testing.T) { - assert.Equal(t, true, IsNotFound(errors.NewNotFound(schema.GroupResource{}, "NAME"))) - assert.Equal(t, false, IsNotFound(errors.NewAlreadyExists(schema.GroupResource{}, "NAME"))) -} diff --git a/resources/scheduledworkflow/util/label.go b/resources/scheduledworkflow/util/label.go deleted file mode 100644 index 0eab4581e89..00000000000 --- a/resources/scheduledworkflow/util/label.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "fmt" - "github.com/argoproj/argo/workflow/common" - log "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "strconv" -) - -// GetRequirementForCompletedWorkflowOrFatal returns a label requirement indicating -// whether a workflow is completed. -func GetRequirementForCompletedWorkflowOrFatal(completed bool) *labels.Requirement { - operator := selection.NotEquals - if completed == true { - operator = selection.Equals - } - req, err := labels.NewRequirement(common.LabelKeyCompleted, operator, - []string{"true"}) - if err != nil { - log.Fatalf("Error while creating requirement: %s", err) - } - return req -} - -// GetRequirementForScheduleNameOrFatal returns a label requirement for a specific -// ScheduledWorkflow name. 
-func GetRequirementForScheduleNameOrFatal(swf string) *labels.Requirement { - req, err := labels.NewRequirement(LabelKeyWorkflowScheduledWorkflowName, selection.Equals, []string{swf}) - if err != nil { - log.Fatalf("Error while creating requirement: %s", err) - } - return req -} - -// GetRequirementForMinIndexOrFatal returns a label requirement for a minimum -// creation index of a workflow (to avoid querying the whole list). -func GetRequirementForMinIndexOrFatal(minIndex int64) *labels.Requirement { - req, err := labels.NewRequirement(LabelKeyWorkflowIndex, selection.GreaterThan, - []string{formatInt64ForLabel(minIndex)}) - if err != nil { - log.Fatalf("Error while creating requirement: %s", err) - } - return req -} - -func formatInt64ForLabel(epoch int64) string { - return fmt.Sprintf("%d", epoch) -} - -// RetrieveInt64FromLabel converts a string label value into an epoch. -func RetrieveInt64FromLabel(epoch string) (int64, error) { - return strconv.ParseInt(epoch, 10, 64) -} diff --git a/resources/scheduledworkflow/util/label_test.go b/resources/scheduledworkflow/util/label_test.go deleted file mode 100644 index e744dbceb7e..00000000000 --- a/resources/scheduledworkflow/util/label_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestFormatInt64ForLabel(t *testing.T) { - assert.Equal(t, "100", formatInt64ForLabel(100)) -} - -func TestRetrieveInt64FromLabel(t *testing.T) { - result, err := RetrieveInt64FromLabel("100") - assert.Nil(t, err) - assert.Equal(t, int64(100), result) -} diff --git a/resources/scheduledworkflow/util/parameter_formatter.go b/resources/scheduledworkflow/util/parameter_formatter.go deleted file mode 100644 index 87514549467..00000000000 --- a/resources/scheduledworkflow/util/parameter_formatter.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "fmt" - "regexp" - "strings" - "time" -) - -const ( - scheduledTimeExpression = "[[ScheduledTime]]" - currentTimeExpression = "[[CurrentTime]]" - IndexExpression = "[[Index]]" - scheduledTimePrefix = "[[ScheduledTime." - currentTimePrefix = "[[CurrentTime."
- defaultTimeFormat = "20060102150405" - suffix = "]]" -) - -// ParameterFormatter is an object that substitutes specific strings -// in workflow parameters by information about the workflow execution (time at -// which the workflow was started, time at which the workflow was scheduled, etc.) -type ParameterFormatter struct { - scheduledEpoch int64 - nowEpoch int64 - index int64 -} - -// NewParameterFormatter returns a new ParameterFormatter. -func NewParameterFormatter(scheduledEpoch int64, nowEpoch int64, - index int64) *ParameterFormatter { - return &ParameterFormatter{ - scheduledEpoch: scheduledEpoch, - nowEpoch: nowEpoch, - index: index, - } -} - -// Format substitutes special strings in the provided string. -func (p *ParameterFormatter) Format(s string) string { - re := regexp.MustCompile(`\[\[(.*?)\]\]`) - matches := re.FindAllString(s, -1) - if matches == nil { - return s - } - - result := s - - for _, match := range matches { - substitute := p.createSubtitute(match) - result = strings.Replace(result, match, substitute, 1) - } - - return result -} - -func (p *ParameterFormatter) createSubtitute(match string) string { - - if strings.HasPrefix(match, scheduledTimeExpression) { - return time.Unix(p.scheduledEpoch, 0).UTC().Format(defaultTimeFormat) - } else if strings.HasPrefix(match, currentTimeExpression) { - return time.Unix(p.nowEpoch, 0).UTC().Format(defaultTimeFormat) - } else if strings.HasPrefix(match, IndexExpression) { - return fmt.Sprintf("%v", p.index) - } else if strings.HasPrefix(match, scheduledTimePrefix) { - match = strings.Replace(match, scheduledTimePrefix, "", 1) - match = strings.Replace(match, suffix, "", 1) - return time.Unix(p.scheduledEpoch, 0).UTC().Format(match) - } else if strings.HasPrefix(match, currentTimePrefix) { - match = strings.Replace(match, currentTimePrefix, "", 1) - match = strings.Replace(match, suffix, "", 1) - return time.Unix(p.nowEpoch, 0).UTC().Format(match) - } else { - return match - } -} diff --git a/resources/scheduledworkflow/util/parameter_formatter_test.go b/resources/scheduledworkflow/util/parameter_formatter_test.go deleted file mode 100644 index 3e0b5969722..00000000000 --- a/resources/scheduledworkflow/util/parameter_formatter_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
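To make the substitution rules above concrete, here is an illustrative snippet (exampleFormat is an illustrative name assumed to live in the same util package; the epochs 25, 26 and 27 mirror the values used by the test file that follows, and the expected strings follow from Go's reference layout and the default 20060102150405 format):

func exampleFormat() {
    f := NewParameterFormatter(25 /* scheduled */, 26 /* now */, 27 /* index */)
    _ = f.Format("run-[[Index]]")                // "run-27"
    _ = f.Format("[[ScheduledTime]]")            // "19700101000025" (default layout)
    _ = f.Format("[[ScheduledTime.2006-01-02]]") // "1970-01-01" (the layout between the dot and "]]")
    _ = f.Format("no placeholders")              // returned unchanged
}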
- -package util - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestParameterFormatter_Format(t *testing.T) { - formatter := NewParameterFormatter( - 25, /* scheduled time */ - 26, /* current time */ - 27 /* index */) - - // Test [[ScheduledTime]] substitution - assert.Equal(t, "FOO 19700101000025 FOO", formatter.Format("FOO [[ScheduledTime]] FOO")) - - // Test [[CurrentTime]] substitution - assert.Equal(t, "FOO 19700101000026 FOO", formatter.Format("FOO [[CurrentTime]] FOO")) - - // Test [[Index]] - assert.Equal(t, "FOO 27 FOO", formatter.Format("FOO [[Index]] FOO")) - - // Test [[ScheduledTime.15-04-05]] substition - assert.Equal(t, "FOO 00-00-25 FOO", formatter.Format("FOO [[ScheduledTime.15-04-05]] FOO")) - - // Test [[CurrentTime.15-04-05]] substitution - assert.Equal(t, "FOO 00-00-26 FOO", formatter.Format("FOO [[CurrentTime.15-04-05]] FOO")) - - // Test multiple substitution - assert.Equal(t, "19700101000025 19700101000025 27", formatter.Format("[[ScheduledTime]] [[ScheduledTime]] [[Index]]")) - - // Test no substitution - assert.Equal(t, "FOO FOO FOO", formatter.Format("FOO FOO FOO")) - - // Test empty string - assert.Equal(t, "", formatter.Format("")) -} diff --git a/resources/scheduledworkflow/util/periodic_schedule.go b/resources/scheduledworkflow/util/periodic_schedule.go deleted file mode 100644 index 13b99f3423e..00000000000 --- a/resources/scheduledworkflow/util/periodic_schedule.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - log "github.com/sirupsen/logrus" - "math" -) - -// PeriodicSchedule is a type to help manipulate PeriodicSchedule objects. -type PeriodicSchedule struct { - *swfapi.PeriodicSchedule -} - -// NewPeriodicSchedule creates a new PeriodicSchedule. -func NewPeriodicSchedule(periodicSchedule *swfapi.PeriodicSchedule) *PeriodicSchedule { - if periodicSchedule == nil { - log.Fatalf("The periodicSchedule should never be nil") - } - - return &PeriodicSchedule{ - periodicSchedule, - } -} - -// GetNextScheduledEpoch returns the next epoch at which a workflow should be -// scheduled. 
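Before the implementation, a short illustrative sketch of the contract just described (examplePeriodicNext is an illustrative name; same-package assumption, with swfapi being the scheduledworkflow v1alpha1 API already imported by this file):

func examplePeriodicNext() {
    s := NewPeriodicSchedule(&swfapi.PeriodicSchedule{IntervalSecond: 300})
    last := int64(1000)
    _ = s.GetNextScheduledEpoch(&last, 900) // 1300: last job epoch + interval
    _ = s.GetNextScheduledEpoch(nil, 900)   // 1200: no previous job, falls back to the default start epoch
}

A zero IntervalSecond is treated as one second, and once EndTime has passed the implementation below returns math.MaxInt64, i.e. never.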
-func (s *PeriodicSchedule) GetNextScheduledEpoch(lastJobEpoch *int64, - defaultStartEpoch int64) int64 { - effectiveLastJobEpoch := defaultStartEpoch - if lastJobEpoch != nil { - effectiveLastJobEpoch = *lastJobEpoch - } else if s.StartTime != nil { - effectiveLastJobEpoch = s.StartTime.Unix() - } - return s.getNextScheduledEpoch(effectiveLastJobEpoch) -} - -func (s *PeriodicSchedule) getNextScheduledEpoch(lastJobEpoch int64) int64 { - startEpoch := lastJobEpoch - if s.StartTime != nil && s.StartTime.Unix() > startEpoch { - startEpoch = s.StartTime.Unix() - } - - interval := s.IntervalSecond - if interval == 0 { - interval = 1 - } - - result := startEpoch + interval - - if s.EndTime != nil && - s.EndTime.Unix() < result { - return math.MaxInt64 - } - - return result -} diff --git a/resources/scheduledworkflow/util/periodic_schedule_test.go b/resources/scheduledworkflow/util/periodic_schedule_test.go deleted file mode 100644 index 4399616b514..00000000000 --- a/resources/scheduledworkflow/util/periodic_schedule_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "math" - "testing" - "time" -) - -func TestPeriodicSchedule_getNextScheduledEpoch_StartDate_EndDate(t *testing.T) { - // First job. - schedule := NewPeriodicSchedule(&swfapi.PeriodicSchedule{ - StartTime: Metav1TimePointer(v1.NewTime(time.Unix(10*hour, 0).UTC())), - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - IntervalSecond: minute, - }) - lastJobEpoch := int64(0) - assert.Equal(t, int64(10*hour+minute), - schedule.getNextScheduledEpoch(lastJobEpoch)) - - // Not the first job. - lastJobEpoch = int64(10*hour + 5*minute) - assert.Equal(t, int64(10*hour+6*minute), - schedule.getNextScheduledEpoch(lastJobEpoch)) - - // Last job - lastJobEpoch = int64(13 * hour) - assert.Equal(t, int64(math.MaxInt64), - schedule.getNextScheduledEpoch(lastJobEpoch)) - -} - -func TestPeriodicSchedule_getNextScheduledEpoch_PeriodOnly(t *testing.T) { - schedule := NewPeriodicSchedule(&swfapi.PeriodicSchedule{ - IntervalSecond: minute, - }) - lastJobEpoch := int64(10 * hour) - assert.Equal(t, int64(10*hour+minute), - schedule.getNextScheduledEpoch(lastJobEpoch)) -} - -func TestPeriodicSchedule_getNextScheduledEpoch_NoPeriod(t *testing.T) { - schedule := NewPeriodicSchedule(&swfapi.PeriodicSchedule{ - StartTime: Metav1TimePointer(v1.NewTime(time.Unix(10*hour, 0).UTC())), - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - IntervalSecond: 0, - }) - lastJobEpoch := int64(10 * hour) - assert.Equal(t, int64(10*hour+second), - schedule.getNextScheduledEpoch(lastJobEpoch)) -} - -func TestPeriodicSchedule_GetNextScheduledEpoch(t *testing.T) { - // There was a previous job. 
- schedule := NewPeriodicSchedule(&swfapi.PeriodicSchedule{ - StartTime: Metav1TimePointer(v1.NewTime(time.Unix(10*hour+10*minute, 0).UTC())), - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - IntervalSecond: 60, - }) - lastJobEpoch := int64(10*hour + 20*minute) - defaultStartEpoch := int64(10*hour + 15*minute) - assert.Equal(t, int64(10*hour+20*minute+minute), - schedule.GetNextScheduledEpoch(&lastJobEpoch, defaultStartEpoch)) - - // There is no previous job, falling back on the start date of the schedule. - assert.Equal(t, int64(10*hour+10*minute+minute), - schedule.GetNextScheduledEpoch(nil, defaultStartEpoch)) - - // There is no previous job, no schedule start date, falling back on the - // creation date of the workflow. - schedule = NewPeriodicSchedule(&swfapi.PeriodicSchedule{ - EndTime: Metav1TimePointer(v1.NewTime(time.Unix(11*hour, 0).UTC())), - IntervalSecond: 60, - }) - assert.Equal(t, int64(10*hour+15*minute+minute), - schedule.GetNextScheduledEpoch(nil, defaultStartEpoch)) -} diff --git a/resources/scheduledworkflow/util/scheduled_workflow.go b/resources/scheduledworkflow/util/scheduled_workflow.go deleted file mode 100644 index ebd97069766..00000000000 --- a/resources/scheduledworkflow/util/scheduled_workflow.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "fmt" - workflowapi "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "hash/fnv" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/core" - "math" - "sort" - "strconv" - "time" -) - -const ( - defaultMaxConcurrency = int64(1) - minMaxConcurrency = int64(1) - maxMaxConcurrency = int64(10) - defaultMaxHistory = int64(10) - minMaxHistory = int64(0) - maxMaxHistory = int64(100) -) - -// ScheduledWorkflow is a type to help manipulate ScheduledWorkflow objects. -type ScheduledWorkflow struct { - *swfapi.ScheduledWorkflow -} - -// NewScheduledWorkflow creates an instance of ScheduledWorkflow. -func NewScheduledWorkflow(swf *swfapi.ScheduledWorkflow) *ScheduledWorkflow { - return &ScheduledWorkflow{ - swf, - } -} - -// Get converts this object to a swfapi.ScheduledWorkflow. 
-func (s *ScheduledWorkflow) Get() *swfapi.ScheduledWorkflow { - return s.ScheduledWorkflow -} - -func (s *ScheduledWorkflow) creationEpoch() int64 { - return s.CreationTimestamp.Unix() -} - -func (s *ScheduledWorkflow) enabled() bool { - return s.Spec.Enabled -} - -func (s *ScheduledWorkflow) maxConcurrency() int64 { - if s.Spec.MaxConcurrency == nil { - return defaultMaxConcurrency - } - - if *s.Spec.MaxConcurrency < minMaxConcurrency { - return minMaxConcurrency - } - - if *s.Spec.MaxConcurrency > maxMaxConcurrency { - return maxMaxConcurrency - } - - return *s.Spec.MaxConcurrency -} - -func (s *ScheduledWorkflow) maxHistory() int64 { - if s.Spec.MaxHistory == nil { - return defaultMaxHistory - } - - if *s.Spec.MaxHistory < minMaxHistory { - return minMaxHistory - } - - if *s.Spec.MaxHistory > maxMaxHistory { - return maxMaxHistory - } - - return *s.Spec.MaxHistory -} - -func (s *ScheduledWorkflow) hasRunAtLeastOnce() bool { - return s.Status.Trigger.LastTriggeredTime != nil -} - -func (s *ScheduledWorkflow) lastIndex() int64 { - if s.Status.Trigger.LastIndex == nil { - return 0 - } else { - return *s.Status.Trigger.LastIndex - } -} - -func (s *ScheduledWorkflow) nextIndex() int64 { - return s.lastIndex() + 1 -} - -// MinIndex returns the minimum index of the workflow to retrieve as part of the workflow -// history. -func (s *ScheduledWorkflow) MinIndex() int64 { - result := s.lastIndex() - s.maxHistory() - if result < 0 { - return 0 - } - return result -} - -func (s *ScheduledWorkflow) isOneOffRun() bool { - return s.Spec.Trigger.CronSchedule == nil && - s.Spec.Trigger.PeriodicSchedule == nil -} - -func (s *ScheduledWorkflow) nextResourceID() string { - return s.Name + "-" + strconv.FormatInt(s.nextIndex(), 10) -} - -// NextResourceName creates a deterministic resource name for the next resource. -func (s *ScheduledWorkflow) NextResourceName() string { - nextResourceID := s.nextResourceID() - h := fnv.New32a() - _, _ = h.Write([]byte(nextResourceID)) - return fmt.Sprintf("%s-%v", nextResourceID, h.Sum32()) -} - -func (s *ScheduledWorkflow) getWorkflowParametersAsMap() map[string]string { - resultAsArray := s.Spec.Workflow.Parameters - resultAsMap := make(map[string]string) - for _, param := range resultAsArray { - resultAsMap[param.Name] = param.Value - } - return resultAsMap -} - -func (s *ScheduledWorkflow) getFormattedWorkflowParametersAsMap( - formatter *ParameterFormatter) map[string]string { - - result := make(map[string]string) - for key, value := range s.getWorkflowParametersAsMap() { - formatted := formatter.Format(value) - result[key] = formatted - } - return result -} - -// NewWorkflow creates a workflow for this schedule. It also sets -// the appropriate OwnerReferences on the resource so handleObject can discover -// the Schedule resource that 'owns' it. -func (s *ScheduledWorkflow) NewWorkflow( - nextScheduledEpoch int64, nowEpoch int64) *Workflow { - - const ( - workflowKind = "Workflow" - workflowApiVersion = "argoproj.io/v1alpha1" - ) - - // Creating the workflow. - workflow := &workflowapi.Workflow{ - Spec: *s.Spec.Workflow.Spec.DeepCopy(), - } - workflow.Kind = workflowKind - workflow.APIVersion = workflowApiVersion - result := NewWorkflow(workflow) - - // Set the name of the worfklow. - result.OverrideName(s.NextResourceName()) - - // Get the workflow parameters and format them. - formatter := NewParameterFormatter(nextScheduledEpoch, nowEpoch, s.nextIndex()) - formattedParams := s.getFormattedWorkflowParametersAsMap(formatter) - - // Set the parameters. 
- result.OverrideParameters(formattedParams) - - // Set the labels. - result.SetCanonicalLabels(s.Name, nextScheduledEpoch, s.nextIndex()) - - // The the owner references. - result.SetOwnerReferences(s.ScheduledWorkflow) - - return result -} - -// GetNextScheduledEpoch returns the next epoch at which a workflow should be scheduled, -// and whether it should be run now. -func (s *ScheduledWorkflow) GetNextScheduledEpoch(activeWorkflowCount int64, nowEpoch int64) ( - nextScheduleEpoch int64, shouldRunNow bool) { - - // Get the next scheduled time. - nextScheduledEpoch := s.getNextScheduledEpoch() - - // If the schedule is not enabled, we should not schedule the workflow now. - if s.enabled() == false { - return nextScheduledEpoch, false - } - - // If the maxConcurrency is exceeded, return. - if activeWorkflowCount >= s.maxConcurrency() { - return nextScheduledEpoch, false - } - - // If it is not yet time to schedule the next workflow... - if nextScheduledEpoch > nowEpoch { - return nextScheduledEpoch, false - } - - return nextScheduledEpoch, true -} - -func (s *ScheduledWorkflow) getNextScheduledEpoch() int64 { - // Periodic schedule - if s.Spec.Trigger.PeriodicSchedule != nil { - return NewPeriodicSchedule(s.Spec.Trigger.PeriodicSchedule). - GetNextScheduledEpoch( - toInt64Pointer(s.Status.Trigger.LastTriggeredTime), - s.creationEpoch()) - } - - // Cron schedule - if s.Spec.Trigger.CronSchedule != nil { - return NewCronSchedule(s.Spec.Trigger.CronSchedule). - GetNextScheduledEpoch( - toInt64Pointer(s.Status.Trigger.LastTriggeredTime), - s.creationEpoch()) - } - - return s.getNextScheduledEpochForOneTimeRun() -} - -func (s *ScheduledWorkflow) getNextScheduledEpochForOneTimeRun() int64 { - if s.Status.Trigger.LastTriggeredTime != nil { - return math.MaxInt64 - } - - return s.creationEpoch() -} - -func (s *ScheduledWorkflow) setLabel(key string, value string) { - if s.Labels == nil { - s.Labels = make(map[string]string) - } - s.Labels[key] = value -} - -// UpdateStatus updates the status of a workflow in the Kubernetes API server. -func (s *ScheduledWorkflow) UpdateStatus(updatedEpoch int64, workflow *Workflow, - scheduledEpoch int64, active []swfapi.WorkflowStatus, - completed []swfapi.WorkflowStatus) { - - updatedTime := metav1.NewTime(time.Unix(updatedEpoch, 0).UTC()) - - conditionType, status, message := s.getStatusAndMessage(len(active)) - - condition := swfapi.ScheduledWorkflowCondition{ - Type: conditionType, - Status: status, - LastProbeTime: updatedTime, - LastTransitionTime: updatedTime, - Reason: string(conditionType), - Message: message, - } - - conditions := make([]swfapi.ScheduledWorkflowCondition, 0) - conditions = append(conditions, condition) - - s.Status.Conditions = conditions - - // Sort and set inactive workflows. - sort.Slice(active, func(i, j int) bool { - return active[i].ScheduledAt.Unix() > active[j].ScheduledAt.Unix() - }) - - sort.Slice(completed, func(i, j int) bool { - return completed[i].ScheduledAt.Unix() > completed[j].ScheduledAt.Unix() - }) - - s.Status.WorkflowHistory = &swfapi.WorkflowHistory{ - Active: active, - Completed: completed, - } - - s.setLabel(LabelKeyScheduledWorkflowEnabled, strconv.FormatBool( - s.enabled())) - s.setLabel(LabelKeyScheduledWorkflowStatus, string(conditionType)) - - if workflow != nil { - s.updateLastTriggeredTime(scheduledEpoch) - s.Status.Trigger.LastIndex = Int64Pointer(s.nextIndex()) - s.updateNextTriggeredTime(s.getNextScheduledEpoch()) - } else { - // LastTriggeredTime is unchanged. 
- s.updateNextTriggeredTime(scheduledEpoch) - // LastIndex is unchanged - } -} - -func (s *ScheduledWorkflow) updateLastTriggeredTime(epoch int64) { - s.Status.Trigger.LastTriggeredTime = Metav1TimePointer( - metav1.NewTime(time.Unix(epoch, 0).UTC())) -} - -func (s *ScheduledWorkflow) updateNextTriggeredTime(epoch int64) { - if epoch != math.MaxInt64 { - s.Status.Trigger.NextTriggeredTime = Metav1TimePointer( - metav1.NewTime(time.Unix(epoch, 0).UTC())) - } else { - s.Status.Trigger.NextTriggeredTime = nil - } -} - -func (s *ScheduledWorkflow) getStatusAndMessage(activeCount int) ( - conditionType swfapi.ScheduledWorkflowConditionType, - status core.ConditionStatus, message string) { - // Schedule messages - const ( - ScheduleEnabledMessage = "The schedule is enabled." - ScheduleDisabledMessage = "The schedule is disabled." - ScheduleRunningMessage = "The one-off schedule is running." - ScheduleSucceededMessage = "The one-off schedule has succeeded." - ) - - if s.isOneOffRun() { - if s.hasRunAtLeastOnce() && activeCount == 0 { - return swfapi.ScheduledWorkflowSucceeded, core.ConditionTrue, ScheduleSucceededMessage - } else { - return swfapi.ScheduledWorkflowRunning, core.ConditionTrue, ScheduleRunningMessage - } - } else { - if s.enabled() { - return swfapi.ScheduledWorkflowEnabled, core.ConditionTrue, ScheduleEnabledMessage - } else { - return swfapi.ScheduledWorkflowDisabled, core.ConditionTrue, ScheduleDisabledMessage - } - } -} diff --git a/resources/scheduledworkflow/util/scheduled_workflow_test.go b/resources/scheduledworkflow/util/scheduled_workflow_test.go deleted file mode 100644 index e41a4111268..00000000000 --- a/resources/scheduledworkflow/util/scheduled_workflow_test.go +++ /dev/null @@ -1,675 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package util - -import ( - workflowapi "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/core" - "math" - "strconv" - "testing" - "time" -) - -func TestScheduledWorkflow_maxConcurrency(t *testing.T) { - // nil - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{}) - assert.Equal(t, int64(1), schedule.maxConcurrency()) - - // lower than min - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Spec: swfapi.ScheduledWorkflowSpec{ - MaxConcurrency: Int64Pointer(0), - }, - }) - assert.Equal(t, int64(1), schedule.maxConcurrency()) - - // higher than max - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Spec: swfapi.ScheduledWorkflowSpec{ - MaxConcurrency: Int64Pointer(2000000), - }, - }) - assert.Equal(t, int64(10), schedule.maxConcurrency()) -} - -func TestScheduledWorkflow_maxHistory(t *testing.T) { - // nil - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{}) - assert.Equal(t, int64(10), schedule.maxHistory()) - - // lower than min - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Spec: swfapi.ScheduledWorkflowSpec{ - MaxHistory: Int64Pointer(0), - }, - }) - assert.Equal(t, int64(0), schedule.maxHistory()) - - // higher than max - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Spec: swfapi.ScheduledWorkflowSpec{ - MaxHistory: Int64Pointer(2000000), - }, - }) - assert.Equal(t, int64(100), schedule.maxHistory()) -} - -func TestScheduledWorkflow_hasRunAtLeastOnce(t *testing.T) { - // Never ran a workflow - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastTriggeredTime: nil, - }, - }, - }) - assert.Equal(t, false, schedule.hasRunAtLeastOnce()) - - // Ran one workflow - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastTriggeredTime: Metav1TimePointer(metav1.NewTime(time.Unix(50, 0).UTC())), - }, - }, - }) - assert.Equal(t, true, schedule.hasRunAtLeastOnce()) -} - -func TestScheduledWorkflow_lastIndex(t *testing.T) { - // Never ran a workflow - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{}) - assert.Equal(t, int64(0), schedule.lastIndex()) - - // Ran one workflow - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastIndex: Int64Pointer(50), - }, - }, - }) - assert.Equal(t, int64(50), schedule.lastIndex()) -} - -func TestScheduledWorkflow_nextIndex(t *testing.T) { - // Never ran a workflow - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{}) - assert.Equal(t, int64(1), schedule.nextIndex()) - - // Ran one workflow - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastIndex: Int64Pointer(50), - }, - }, - }) - assert.Equal(t, int64(51), schedule.nextIndex()) -} - -func TestScheduledWorkflow_MinIndex(t *testing.T) { - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Spec: swfapi.ScheduledWorkflowSpec{ - MaxHistory: Int64Pointer(100), - }, - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastIndex: Int64Pointer(50), - }, - }, - }) - assert.Equal(t, int64(0), schedule.MinIndex()) - - schedule = 
NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Spec: swfapi.ScheduledWorkflowSpec{ - MaxHistory: Int64Pointer(20), - }, - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastIndex: Int64Pointer(50), - }, - }, - }) - assert.Equal(t, int64(30), schedule.MinIndex()) -} - -func TestScheduledWorkflow_isOneOffRun(t *testing.T) { - // No schedule - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{}) - assert.Equal(t, true, schedule.isOneOffRun()) - - // Cron schedule - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Spec: swfapi.ScheduledWorkflowSpec{ - Trigger: swfapi.Trigger{ - CronSchedule: &swfapi.CronSchedule{}, - }, - }, - }) - assert.Equal(t, false, schedule.isOneOffRun()) - - // Periodic schedule - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - Spec: swfapi.ScheduledWorkflowSpec{ - Trigger: swfapi.Trigger{ - PeriodicSchedule: &swfapi.PeriodicSchedule{}, - }, - }, - }) - assert.Equal(t, false, schedule.isOneOffRun()) -} - -func TestScheduledWorkflow_nextResourceID(t *testing.T) { - // No schedule - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - }, - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastIndex: Int64Pointer(50), - }, - }, - }) - assert.Equal(t, "WORKFLOW_NAME-51", schedule.nextResourceID()) -} - -func TestScheduledWorkflow_NextResourceName(t *testing.T) { - // No schedule - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - }, - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastIndex: Int64Pointer(50), - }, - }, - }) - assert.Equal(t, "WORKFLOW_NAME-51-2626342551", schedule.NextResourceName()) -} - -func TestScheduledWorkflow_GetNextScheduledEpoch_OneTimeRun(t *testing.T) { - - // Must run now - nowEpoch := int64(10 * hour) - pastEpoch := int64(1 * hour) - creationTimestamp := metav1.NewTime(time.Unix(9*hour, 0).UTC()) - lastTimeRun := metav1.NewTime(time.Unix(11*hour, 0).UTC()) - never := int64(math.MaxInt64) - - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - }, - }) - nextScheduledEpoch, mustRunNow := schedule.GetNextScheduledEpoch( - int64(0) /* active workflow count */, nowEpoch) - assert.Equal(t, true, mustRunNow) - assert.Equal(t, creationTimestamp.Unix(), nextScheduledEpoch) - - // Has already run - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - }, - Status: swfapi.ScheduledWorkflowStatus{ - Trigger: swfapi.TriggerStatus{ - LastTriggeredTime: &lastTimeRun, - }, - }, - }) - nextScheduledEpoch, mustRunNow = schedule.GetNextScheduledEpoch( - int64(0) /* active workflow count */, nowEpoch) - assert.Equal(t, false, mustRunNow) - assert.Equal(t, never, nextScheduledEpoch) - - // Should not run yet because it is not time - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - }, - }) - nextScheduledEpoch, mustRunNow = schedule.GetNextScheduledEpoch( - int64(0) /* active workflow count */, pastEpoch) - assert.Equal(t, false, mustRunNow) - assert.Equal(t, creationTimestamp.Unix(), 
nextScheduledEpoch) - - // Should not run because the schedule is disabled - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: false, - }, - }) - nextScheduledEpoch, mustRunNow = schedule.GetNextScheduledEpoch( - int64(0) /* active workflow count */, nowEpoch) - assert.Equal(t, false, mustRunNow) - assert.Equal(t, creationTimestamp.Unix(), nextScheduledEpoch) - - // Should not run because there are active workflows - schedule = NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - }, - }) - nextScheduledEpoch, mustRunNow = schedule.GetNextScheduledEpoch( - int64(1) /* active workflow count */, nowEpoch) - assert.Equal(t, false, mustRunNow) - assert.Equal(t, creationTimestamp.Unix(), nextScheduledEpoch) -} - -func TestScheduledWorkflow_GetNextScheduledEpoch_CronSchedule(t *testing.T) { - - // Must run now - nowEpoch := int64(10 * hour) - pastEpoch := int64(3 * hour) - creationTimestamp := metav1.NewTime(time.Unix(9*hour, 0).UTC()) - - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - MaxConcurrency: Int64Pointer(int64(10)), - Trigger: swfapi.Trigger{ - CronSchedule: &swfapi.CronSchedule{ - Cron: "0 * * * * *", - }, - }, - }, - }) - nextScheduledEpoch, mustRunNow := schedule.GetNextScheduledEpoch( - int64(9) /* active workflow count */, nowEpoch) - assert.Equal(t, true, mustRunNow) - assert.Equal(t, int64(9*hour+minute), nextScheduledEpoch) - - // Must run later - nextScheduledEpoch, mustRunNow = schedule.GetNextScheduledEpoch( - int64(9) /* active workflow count */, pastEpoch) - assert.Equal(t, false, mustRunNow) - assert.Equal(t, int64(9*hour+minute), nextScheduledEpoch) - - // Cannot run because of concurrency - nextScheduledEpoch, mustRunNow = schedule.GetNextScheduledEpoch( - int64(10) /* active workflow count */, nowEpoch) - assert.Equal(t, false, mustRunNow) - assert.Equal(t, int64(9*hour+minute), nextScheduledEpoch) -} - -func TestScheduledWorkflow_GetNextScheduledEpoch_PeriodicSchedule(t *testing.T) { - - // Must run now - nowEpoch := int64(10 * hour) - pastEpoch := int64(3 * hour) - creationTimestamp := metav1.NewTime(time.Unix(9*hour, 0).UTC()) - - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - MaxConcurrency: Int64Pointer(int64(10)), - Trigger: swfapi.Trigger{ - PeriodicSchedule: &swfapi.PeriodicSchedule{ - IntervalSecond: int64(60), - }, - }, - }, - }) - nextScheduledEpoch, mustRunNow := schedule.GetNextScheduledEpoch( - int64(9) /* active workflow count */, nowEpoch) - assert.Equal(t, true, mustRunNow) - assert.Equal(t, int64(9*hour+minute), nextScheduledEpoch) - - // Must run later - nextScheduledEpoch, mustRunNow = schedule.GetNextScheduledEpoch( - int64(9) /* active workflow count */, pastEpoch) - assert.Equal(t, false, mustRunNow) - assert.Equal(t, int64(9*hour+minute), nextScheduledEpoch) - - // Cannot run because of concurrency - nextScheduledEpoch, mustRunNow = schedule.GetNextScheduledEpoch( - int64(10) /* active workflow count */, nowEpoch) - assert.Equal(t, false, mustRunNow) - assert.Equal(t, int64(9*hour+minute), 
nextScheduledEpoch) - -} - -func TestScheduledWorkflow_GetNextScheduledEpoch_UpdateStatus_NoWorkflow(t *testing.T) { - // Must run now - scheduledEpoch := int64(10 * hour) - updatedEpoch := int64(11 * hour) - creationTimestamp := metav1.NewTime(time.Unix(9*hour, 0).UTC()) - - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - MaxConcurrency: Int64Pointer(int64(10)), - Trigger: swfapi.Trigger{ - PeriodicSchedule: &swfapi.PeriodicSchedule{ - IntervalSecond: int64(60), - }, - }, - }, - }) - - status1 := createStatus("WORKFLOW1", 5) - status2 := createStatus("WORKFLOW2", 3) - status3 := createStatus("WORKFLOW3", 7) - status4 := createStatus("WORKFLOW4", 4) - - schedule.UpdateStatus( - updatedEpoch, - nil, /* no workflow created during this run */ - scheduledEpoch, - []swfapi.WorkflowStatus{*status1, *status2, *status3}, - []swfapi.WorkflowStatus{*status1, *status2, *status3, *status4}) - - expected := &swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - Labels: map[string]string{ - LabelKeyScheduledWorkflowEnabled: "true", - LabelKeyScheduledWorkflowStatus: string(swfapi.ScheduledWorkflowEnabled), - }, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - MaxConcurrency: Int64Pointer(int64(10)), - Trigger: swfapi.Trigger{ - PeriodicSchedule: &swfapi.PeriodicSchedule{ - IntervalSecond: int64(60), - }, - }, - }, - Status: swfapi.ScheduledWorkflowStatus{ - Conditions: []swfapi.ScheduledWorkflowCondition{{ - Type: swfapi.ScheduledWorkflowEnabled, - Status: core.ConditionTrue, - LastProbeTime: metav1.NewTime(time.Unix(updatedEpoch, 0).UTC()), - LastTransitionTime: metav1.NewTime(time.Unix(updatedEpoch, 0).UTC()), - Reason: string(swfapi.ScheduledWorkflowEnabled), - Message: "The schedule is enabled.", - }, - }, - WorkflowHistory: &swfapi.WorkflowHistory{ - Active: []swfapi.WorkflowStatus{*status3, *status1, *status2}, - Completed: []swfapi.WorkflowStatus{*status3, *status1, *status4, *status2}, - }, - Trigger: swfapi.TriggerStatus{ - NextTriggeredTime: Metav1TimePointer( - metav1.NewTime(time.Unix(scheduledEpoch, 0).UTC())), - }, - }, - } - - assert.Equal(t, expected, schedule.Get()) -} - -func createStatus(workflowName string, scheduledEpoch int64) *swfapi.WorkflowStatus { - return &swfapi.WorkflowStatus{ - Name: workflowName, - ScheduledAt: metav1.NewTime(time.Unix(scheduledEpoch, 0).UTC()), - } -} - -func TestScheduledWorkflow_GetNextScheduledEpoch_UpdateStatus_WithWorkflow(t *testing.T) { - // Must run now - scheduledEpoch := int64(10 * hour) - updatedEpoch := int64(11 * hour) - creationTimestamp := metav1.NewTime(time.Unix(9*hour, 0).UTC()) - - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - MaxConcurrency: Int64Pointer(int64(10)), - Trigger: swfapi.Trigger{ - PeriodicSchedule: &swfapi.PeriodicSchedule{ - IntervalSecond: int64(60), - }, - }, - }, - }) - - status1 := createStatus("WORKFLOW1", 5) - status2 := createStatus("WORKFLOW2", 3) - status3 := createStatus("WORKFLOW3", 7) - status4 := createStatus("WORKFLOW4", 4) - - workflow := NewWorkflow(&workflowapi.Workflow{}) - - schedule.UpdateStatus( - updatedEpoch, - workflow, /* no workflow created during this run */ - scheduledEpoch, - []swfapi.WorkflowStatus{*status1, *status2, *status3}, - 
[]swfapi.WorkflowStatus{*status1, *status2, *status3, *status4}) - - expected := &swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: creationTimestamp, - Labels: map[string]string{ - LabelKeyScheduledWorkflowEnabled: "true", - LabelKeyScheduledWorkflowStatus: string(swfapi.ScheduledWorkflowEnabled), - }, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - MaxConcurrency: Int64Pointer(int64(10)), - Trigger: swfapi.Trigger{ - PeriodicSchedule: &swfapi.PeriodicSchedule{ - IntervalSecond: int64(60), - }, - }, - }, - Status: swfapi.ScheduledWorkflowStatus{ - Conditions: []swfapi.ScheduledWorkflowCondition{{ - Type: swfapi.ScheduledWorkflowEnabled, - Status: core.ConditionTrue, - LastProbeTime: metav1.NewTime(time.Unix(updatedEpoch, 0).UTC()), - LastTransitionTime: metav1.NewTime(time.Unix(updatedEpoch, 0).UTC()), - Reason: string(swfapi.ScheduledWorkflowEnabled), - Message: "The schedule is enabled.", - }}, - WorkflowHistory: &swfapi.WorkflowHistory{ - Active: []swfapi.WorkflowStatus{*status3, *status1, *status2}, - Completed: []swfapi.WorkflowStatus{*status3, *status1, *status4, *status2}, - }, - Trigger: swfapi.TriggerStatus{ - LastTriggeredTime: Metav1TimePointer( - metav1.NewTime(time.Unix(scheduledEpoch, 0).UTC())), - NextTriggeredTime: Metav1TimePointer( - metav1.NewTime(time.Unix(scheduledEpoch+minute, 0).UTC())), - LastIndex: Int64Pointer(int64(1)), - }, - }, - } - - assert.Equal(t, expected, schedule.Get()) -} - -func TestScheduledWorkflow_NewWorkflow(t *testing.T) { - // Must run now - scheduledEpoch := int64(10 * hour) - nowEpoch := int64(11 * hour) - creationTimestamp := metav1.NewTime(time.Unix(9*hour, 0).UTC()) - - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "SCHEDULE1", - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - MaxConcurrency: Int64Pointer(int64(10)), - Trigger: swfapi.Trigger{ - PeriodicSchedule: &swfapi.PeriodicSchedule{ - IntervalSecond: int64(60), - }, - }, - Workflow: &swfapi.WorkflowResource{ - Parameters: []swfapi.Parameter{ - {Name: "PARAM1", Value: "NEW_VALUE1"}, - {Name: "PARAM3", Value: "NEW_VALUE3"}, - }, - Spec: workflowapi.WorkflowSpec{ - ServiceAccountName: "SERVICE_ACCOUNT", - Arguments: workflowapi.Arguments{ - Parameters: []workflowapi.Parameter{ - {Name: "PARAM1", Value: StringPointer("VALUE1")}, - {Name: "PARAM2", Value: StringPointer("VALUE2")}, - }, - }, - }, - }, - }, - }) - - result := schedule.NewWorkflow(scheduledEpoch, nowEpoch) - - expected := &workflowapi.Workflow{ - TypeMeta: metav1.TypeMeta{ - Kind: "Workflow", - APIVersion: "argoproj.io/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "SCHEDULE1-1-3321103997", - Labels: map[string]string{ - "scheduledworkflows.kubeflow.org/isOwnedByScheduledWorkflow": "true", - "scheduledworkflows.kubeflow.org/scheduledWorkflowName": "SCHEDULE1", - "scheduledworkflows.kubeflow.org/workflowEpoch": strconv.Itoa(int(scheduledEpoch)), - "scheduledworkflows.kubeflow.org/workflowIndex": "1"}, - OwnerReferences: []metav1.OwnerReference{{ - APIVersion: "kubeflow.org/v1alpha1", - Kind: "ScheduledWorkflow", - Name: "SCHEDULE1", - UID: "", - Controller: BooleanPointer(true), - BlockOwnerDeletion: BooleanPointer(true)}}, - }, - Spec: workflowapi.WorkflowSpec{ - ServiceAccountName: "SERVICE_ACCOUNT", - Arguments: workflowapi.Arguments{ - Parameters: []workflowapi.Parameter{ - {Name: "PARAM1", Value: StringPointer("NEW_VALUE1")}, - {Name: "PARAM2", Value: 
StringPointer("VALUE2")}, - }, - }, - }, - } - - assert.Equal(t, expected, result.Get()) -} - -func TestScheduledWorkflow_NewWorkflow_Parameterized(t *testing.T) { - // Must run now - scheduledEpoch := int64(10 * hour) - nowEpoch := int64(11 * hour) - creationTimestamp := metav1.NewTime(time.Unix(9*hour, 0).UTC()) - - schedule := NewScheduledWorkflow(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "SCHEDULE1", - CreationTimestamp: creationTimestamp, - }, - Spec: swfapi.ScheduledWorkflowSpec{ - Enabled: true, - MaxConcurrency: Int64Pointer(int64(10)), - Trigger: swfapi.Trigger{ - PeriodicSchedule: &swfapi.PeriodicSchedule{ - IntervalSecond: int64(60), - }, - }, - Workflow: &swfapi.WorkflowResource{ - Parameters: []swfapi.Parameter{ - {Name: "PARAM1", Value: "NEW_VALUE1_[[ScheduledTime]]"}, - {Name: "PARAM2", Value: "NEW_VALUE2_[[Index]]"}, - }, - Spec: workflowapi.WorkflowSpec{ - ServiceAccountName: "SERVICE_ACCOUNT", - Arguments: workflowapi.Arguments{ - Parameters: []workflowapi.Parameter{ - {Name: "PARAM1", Value: StringPointer("VALUE1")}, - {Name: "PARAM2", Value: StringPointer("VALUE2")}, - }, - }, - }, - }, - }, - }) - - result := schedule.NewWorkflow(scheduledEpoch, nowEpoch) - - expected := &workflowapi.Workflow{ - TypeMeta: metav1.TypeMeta{ - Kind: "Workflow", - APIVersion: "argoproj.io/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "SCHEDULE1-1-3321103997", - Labels: map[string]string{ - "scheduledworkflows.kubeflow.org/isOwnedByScheduledWorkflow": "true", - "scheduledworkflows.kubeflow.org/scheduledWorkflowName": "SCHEDULE1", - "scheduledworkflows.kubeflow.org/workflowEpoch": strconv.Itoa(int(scheduledEpoch)), - "scheduledworkflows.kubeflow.org/workflowIndex": "1"}, - OwnerReferences: []metav1.OwnerReference{{ - APIVersion: "kubeflow.org/v1alpha1", - Kind: "ScheduledWorkflow", - Name: "SCHEDULE1", - UID: "", - Controller: BooleanPointer(true), - BlockOwnerDeletion: BooleanPointer(true)}}, - }, - Spec: workflowapi.WorkflowSpec{ - ServiceAccountName: "SERVICE_ACCOUNT", - Arguments: workflowapi.Arguments{ - Parameters: []workflowapi.Parameter{ - {Name: "PARAM1", Value: StringPointer("NEW_VALUE1_19700101100000")}, - {Name: "PARAM2", Value: StringPointer("NEW_VALUE2_1")}, - }, - }, - }, - } - - assert.Equal(t, expected, result.Get()) -} diff --git a/resources/scheduledworkflow/util/time_interface.go b/resources/scheduledworkflow/util/time_interface.go deleted file mode 100644 index baa301b49b7..00000000000 --- a/resources/scheduledworkflow/util/time_interface.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "math" - "time" -) - -// TimeInterface is an interface for objects generating the current time. -type TimeInterface interface { - Now() time.Time -} - -// RealTime is an implementation of TimeInterface that generates the current time. -type RealTime struct { -} - -// NewRealTime creates an instance of RealTime. 
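The clock indirection defined in this file is what keeps the time-dependent logic above testable; a small illustration (sketch with an illustrative function name, same-package assumption, math import assumed):

func exampleClocks() {
    clock := NewFakeTimeForEpoch()
    _ = clock.Now() // 1970-01-01 00:00:01 UTC; each call advances the fake clock by one second
    _ = clock.Now() // 1970-01-01 00:00:02 UTC
    _ = FormatTimeForLogging(math.MaxInt64) // "NEVER", the value used when no further run is scheduled
}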
-func NewRealTime() TimeInterface { - return &RealTime{} -} - -// Now returns the current time. -func (r *RealTime) Now() time.Time { - return time.Now().UTC() -} - -// FakeTime is a fake implementation of TimeInterface for testing. -type FakeTime struct { - now time.Time -} - -// NewFakeTime creates an instance of FakeTime that will return a fixed time. -func NewFakeTime(now time.Time) TimeInterface { - return &FakeTime{ - now: now.UTC(), - } -} - -// NewFakeTimeForEpoch creates an instance of FakeTime that will return a fixed epoch. -func NewFakeTimeForEpoch() TimeInterface { - return &FakeTime{ - now: time.Unix(0, 0).UTC(), - } -} - -// Now returns the current (fake) time. -func (f *FakeTime) Now() time.Time { - f.now = time.Unix(f.now.Unix()+1, 0).UTC() - return f.now -} - -// FormatTimeForLogging formats an epoch for logging purposes. -func FormatTimeForLogging(epoch int64) string { - if epoch <= 0 { - return "INVALID TIME" - } else if epoch == math.MaxInt64 { - return "NEVER" - } else { - return time.Unix(epoch, 0).UTC().String() - } -} diff --git a/resources/scheduledworkflow/util/workflow.go b/resources/scheduledworkflow/util/workflow.go deleted file mode 100644 index c2bbb8f6efd..00000000000 --- a/resources/scheduledworkflow/util/workflow.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - workflowapi "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - swfregister "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow" - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// Workflow is a type to help manipulate Workflow objects. -type Workflow struct { - *workflowapi.Workflow -} - -// NewWorkflow creates an Workflow. -func NewWorkflow(workflow *workflowapi.Workflow) *Workflow { - return &Workflow{ - workflow, - } -} - -// Get converts this object to a workflowapi.Workflow. -func (w *Workflow) Get() *workflowapi.Workflow { - return w.Workflow -} - -// OverrideName sets the name of a Workflow. -func (w *Workflow) OverrideName(name string) { - w.GenerateName = "" - w.Name = name -} - -// OverrideParameters overrides some of the parameters of a Workflow. -func (w *Workflow) OverrideParameters(desiredMap map[string]string) { - desiredSlice := make([]workflowapi.Parameter, 0) - for _, currentParam := range w.Spec.Arguments.Parameters { - - var desiredValue *string = nil - if param, ok := desiredMap[currentParam.Name]; ok { - desiredValue = ¶m - } else { - desiredValue = currentParam.Value - } - desiredSlice = append(desiredSlice, workflowapi.Parameter{ - Name: currentParam.Name, - Value: desiredValue, - }) - } - - w.Spec.Arguments.Parameters = desiredSlice -} - -// SetCanonicalLabels sets the labels needed by the ScheduledWorkflow on the Workflow. 
-func (w *Workflow) SetCanonicalLabels(scheduleName string, - nextScheduledEpoch int64, index int64) { - if w.Labels == nil { - w.Labels = make(map[string]string) - } - w.Labels[LabelKeyWorkflowScheduledWorkflowName] = scheduleName - w.Labels[LabelKeyWorkflowEpoch] = formatInt64ForLabel( - nextScheduledEpoch) - w.Labels[LabelKeyWorkflowIndex] = formatInt64ForLabel(index) - w.Labels[LabelKeyWorkflowIsOwnedByScheduledWorkflow] = "true" -} - -// SetOwnerReferences sets owner references on a Workflow. -func (w *Workflow) SetOwnerReferences(schedule *swfapi.ScheduledWorkflow) { - w.OwnerReferences = []metav1.OwnerReference{ - *metav1.NewControllerRef(schedule, schema.GroupVersionKind{ - Group: swfapi.SchemeGroupVersion.Group, - Version: swfapi.SchemeGroupVersion.Version, - Kind: swfregister.Kind, - }), - } -} diff --git a/resources/scheduledworkflow/util/workflow_test.go b/resources/scheduledworkflow/util/workflow_test.go deleted file mode 100644 index 0b5d64b728a..00000000000 --- a/resources/scheduledworkflow/util/workflow_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2018 The Kubeflow Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - workflowapi "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - swfapi "github.com/kubeflow/pipelines/pkg/apis/scheduledworkflow/v1alpha1" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" -) - -func TestWorkflow_OverrideName(t *testing.T) { - workflow := NewWorkflow(&workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - }, - }) - - workflow.OverrideName("NEW_WORKFLOW_NAME") - - expected := &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "NEW_WORKFLOW_NAME", - }, - } - - assert.Equal(t, expected, workflow.Get()) -} - -func TestWorkflow_OverrideParameters(t *testing.T) { - workflow := NewWorkflow(&workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - }, - Spec: workflowapi.WorkflowSpec{ - Arguments: workflowapi.Arguments{ - Parameters: []workflowapi.Parameter{ - {Name: "PARAM1", Value: StringPointer("VALUE1")}, - {Name: "PARAM2", Value: StringPointer("VALUE2")}, - {Name: "PARAM3", Value: StringPointer("VALUE3")}, - {Name: "PARAM4", Value: StringPointer("")}, - {Name: "PARAM5", Value: StringPointer("VALUE5")}, - }, - }, - }, - }) - - workflow.OverrideParameters(map[string]string{ - "PARAM1": "NEW_VALUE1", - "PARAM3": "NEW_VALUE3", - "PARAM4": "NEW_VALUE4", - "PARAM5": "", - "PARAM9": "NEW_VALUE9", - }) - - expected := &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - }, - Spec: workflowapi.WorkflowSpec{ - Arguments: workflowapi.Arguments{ - Parameters: []workflowapi.Parameter{ - {Name: "PARAM1", Value: StringPointer("NEW_VALUE1")}, - {Name: "PARAM2", Value: StringPointer("VALUE2")}, - {Name: "PARAM3", Value: StringPointer("NEW_VALUE3")}, - {Name: "PARAM4", Value: StringPointer("NEW_VALUE4")}, - {Name: "PARAM5", Value: StringPointer("")}, - }, - }, - }, - } - 
assert.Equal(t, expected, workflow.Get()) -} - -func TestWorkflow_SetCanonicalLabels(t *testing.T) { - workflow := NewWorkflow(&workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - }, - }) - - const index = 50 - const nextScheduledEpoch = 100 - workflow.SetCanonicalLabels("SCHEDULED_WORKFLOW_NAME", nextScheduledEpoch, index) - - expected := &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - Labels: map[string]string{ - "scheduledworkflows.kubeflow.org/isOwnedByScheduledWorkflow": "true", - "scheduledworkflows.kubeflow.org/scheduledWorkflowName": "SCHEDULED_WORKFLOW_NAME", - "scheduledworkflows.kubeflow.org/workflowEpoch": "100", - "scheduledworkflows.kubeflow.org/workflowIndex": "50"}, - }, - } - - assert.Equal(t, expected, workflow.Get()) -} - -func TestWorkflow_SetOwnerReferences(t *testing.T) { - workflow := NewWorkflow(&workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - }, - }) - - workflow.SetOwnerReferences(&swfapi.ScheduledWorkflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "SCHEDULE_NAME", - }, - }) - - expected := &workflowapi.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: "WORKFLOW_NAME", - OwnerReferences: []metav1.OwnerReference{{ - APIVersion: "kubeflow.org/v1alpha1", - Kind: "ScheduledWorkflow", - Name: "SCHEDULE_NAME", - Controller: BooleanPointer(true), - BlockOwnerDeletion: BooleanPointer(true), - }}, - }, - } - - assert.Equal(t, expected, workflow.Get()) -}
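Taken together, the utilities removed in this change compose into a single reconciliation step. The following sketch is illustrative only (reconcileSketch is a hypothetical name; the real controller, its listers and the Argo client calls live outside this package and are reduced to comments here):

func reconcileSketch(swf *ScheduledWorkflow, activeCount int64,
    active, completed []swfapi.WorkflowStatus) {
    now := NewRealTime().Now().Unix()
    nextEpoch, runNow := swf.GetNextScheduledEpoch(activeCount, now)
    var created *Workflow
    if runNow {
        created = swf.NewWorkflow(nextEpoch, now)
        // ...submit created.Get() to the Argo Workflow client (not shown)...
    }
    // Passing created == nil records a "nothing launched" iteration, as in the tests above.
    swf.UpdateStatus(now, created, nextEpoch, active, completed)
}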