diff --git a/Gopkg.lock b/Gopkg.lock index c8a180a036..27e0a9bd1a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -186,49 +186,6 @@ revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82" version = "1.1.4" -[[projects]] - digest = "1:e968e93cc59d6e1d913f881809239827f45c067e5ee5025002e68e39bba7dc87" - name = "github.com/kubernetes-sigs/kube-batch" - packages = [ - "cmd/kube-batch/app", - "cmd/kube-batch/app/options", - "pkg/apis/scheduling/v1alpha1", - "pkg/apis/utils", - "pkg/client/clientset/versioned", - "pkg/client/clientset/versioned/scheme", - "pkg/client/clientset/versioned/typed/scheduling/v1alpha1", - "pkg/client/informers/externalversions", - "pkg/client/informers/externalversions/internalinterfaces", - "pkg/client/informers/externalversions/scheduling", - "pkg/client/informers/externalversions/scheduling/v1alpha1", - "pkg/client/listers/scheduling/v1alpha1", - "pkg/scheduler", - "pkg/scheduler/actions", - "pkg/scheduler/actions/allocate", - "pkg/scheduler/actions/backfill", - "pkg/scheduler/actions/preempt", - "pkg/scheduler/actions/reclaim", - "pkg/scheduler/api", - "pkg/scheduler/api/helpers", - "pkg/scheduler/cache", - "pkg/scheduler/conf", - "pkg/scheduler/framework", - "pkg/scheduler/metrics", - "pkg/scheduler/plugins", - "pkg/scheduler/plugins/conformance", - "pkg/scheduler/plugins/drf", - "pkg/scheduler/plugins/gang", - "pkg/scheduler/plugins/nodeorder", - "pkg/scheduler/plugins/predicates", - "pkg/scheduler/plugins/priority", - "pkg/scheduler/plugins/proportion", - "pkg/scheduler/util", - "pkg/version", - ] - pruneopts = "UT" - revision = "3a1f9d7d0a7f7f12be6dd52927fcd0cd9de72ec7" - version = "v0.4.1" - [[projects]] digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" @@ -1023,30 +980,20 @@ analyzer-version = 1 input-imports = [ "github.com/golang/glog", - "github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app", - "github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options", - "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1", - "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned", - "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions", - "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1", - "github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1", - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions", - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api", - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers", - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache", - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf", - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework", - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics", - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins", "github.com/onsi/ginkgo", "github.com/onsi/gomega", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promauto", + "github.com/prometheus/client_golang/prometheus/promhttp", "github.com/spf13/cobra", "github.com/spf13/pflag", "golang.org/x/crypto/ssh", + "gopkg.in/yaml.v2", "k8s.io/api/admission/v1beta1", "k8s.io/api/admissionregistration/v1beta1", "k8s.io/api/apps/v1", "k8s.io/api/core/v1", + "k8s.io/api/policy/v1beta1", "k8s.io/api/scheduling/v1beta1", "k8s.io/apimachinery/pkg/api/errors", "k8s.io/apimachinery/pkg/api/meta", @@ -1061,6 +1008,7 @@ 
"k8s.io/apimachinery/pkg/util/runtime", "k8s.io/apimachinery/pkg/util/strategicpatch", "k8s.io/apimachinery/pkg/util/uuid", + "k8s.io/apimachinery/pkg/util/validation/field", "k8s.io/apimachinery/pkg/util/wait", "k8s.io/apimachinery/pkg/watch", "k8s.io/apiserver/pkg/util/flag", @@ -1068,6 +1016,9 @@ "k8s.io/client-go/discovery/fake", "k8s.io/client-go/informers", "k8s.io/client-go/informers/core/v1", + "k8s.io/client-go/informers/policy/v1beta1", + "k8s.io/client-go/informers/scheduling/v1beta1", + "k8s.io/client-go/informers/storage/v1", "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/scheme", "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", @@ -1090,12 +1041,14 @@ "k8s.io/code-generator/cmd/lister-gen", "k8s.io/gengo/args", "k8s.io/gengo/examples/deepcopy-gen/generators", + "k8s.io/kubernetes/pkg/api/v1/pod", "k8s.io/kubernetes/pkg/apis/scheduling", "k8s.io/kubernetes/pkg/scheduler/algorithm", "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities", "k8s.io/kubernetes/pkg/scheduler/api", "k8s.io/kubernetes/pkg/scheduler/cache", + "k8s.io/kubernetes/pkg/scheduler/volumebinder", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 8d75af9c12..561dd7c3fa 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -36,9 +36,9 @@ required = [ branch = "master" name = "github.com/golang/glog" -[[constraint]] - name = "github.com/kubernetes-sigs/kube-batch" - version = "0.4.1" +#[[constraint]] +# name = "volcano.sh/volcano" +# version = "0.4.1" [[constraint]] name = "github.com/onsi/ginkgo" diff --git a/Makefile b/Makefile index 9dab543df3..2bbb454972 100644 --- a/Makefile +++ b/Makefile @@ -2,43 +2,68 @@ BIN_DIR=_output/bin CHART_DIR=_output/chart IMAGE_DIR=_output/image IMAGE=volcano -TAG=0.1 +REL_OSARCH="linux/amd64" +TAG=v0.4.2 VERSION?=${TAG} -CHART_VERSION?=${VERSION} +RELEASE_VER?=${TAG} +IMAGE_PREFIX=kubesigs/vk +LD_FLAGS=" \ + -X '${REPO_PATH}/pkg/version.GitSHA=${GitSHA}' \ + -X '${REPO_PATH}/pkg/version.Built=${Date}' \ + -X '${REPO_PATH}/pkg/version.Version=${RELEASE_VER}'" .EXPORT_ALL_VARIABLES: -all: controllers scheduler cli admission +all: kube-batch vk-controllers vk-admission vkctl -init: - mkdir -p ${BIN_DIR} - mkdir -p ${CHART_DIR} - mkdir -p ${IMAGE_DIR} - -controllers: - go build -o ${BIN_DIR}/vk-controllers ./cmd/controllers +kube-batch: init + go build -ldflags ${LD_FLAGS} -o=${BIN_DIR}/kube-batch ./cmd/kube-batch -scheduler: - go build -o ${BIN_DIR}/vk-scheduler ./cmd/scheduler +vk-controllers: init + go build -ldflags ${LD_FLAGS} -o=${BIN_DIR}/vk-controllers ./cmd/controllers -cli: - go build -o ${BIN_DIR}/vkctl ./cmd/cli +vk-admission: init + go build -ldflags ${LD_FLAGS} -o=${BIN_DIR}/vk-admission ./cmd/admission -admission: - go build -o ${BIN_DIR}/vk-admission ./cmd/admission +vkctl: init + go build -ldflags ${LD_FLAGS} -o=${BIN_DIR}/vkctl ./cmd/cli -release: - CGO_ENABLED=0 go build -o ${BIN_DIR}/rel/vk-controllers ./cmd/controllers - CGO_ENABLED=0 go build -o ${BIN_DIR}/rel/vk-scheduler ./cmd/scheduler - CGO_ENABLED=0 go build -o ${BIN_DIR}/rel/vk-admission ./cmd/admission +init: + mkdir -p ${BIN_DIR} + mkdir -p ${CHART_DIR} + mkdir -p ${IMAGE_DIR} -docker: release - for name in controllers scheduler admission; do\ - cp ${BIN_DIR}/rel/vk-$$name ./installer/dockerfile/$$name/; \ - docker build --no-cache -t $(IMAGE)-$$name:$(TAG) ./installer/dockerfile/$$name; \ - rm installer/dockerfile/$$name/vk-$$name; \ +rel_bins: + go get github.com/mitchellh/gox + #Build 
kube-batch binary + CGO_ENABLED=0 gox -osarch=${REL_OSARCH} -ldflags ${LD_FLAGS} \ + -output=${BIN_DIR}/{{.OS}}/{{.Arch}}/kube-batch ./cmd/kube-batch + #Build job controller & job admission + #TODO: Add version support in job controller and admission to make LD_FLAGS work + for name in controllers admission; do\ + CGO_ENABLED=0 gox -osarch=${REL_OSARCH} -ldflags ${LD_FLAGS} -output ${BIN_DIR}/{{.OS}}/{{.Arch}}/vk-$$name ./cmd/$$name; \ done +images: rel_bins + #Build kube-batch images + cp ${BIN_DIR}/${REL_OSARCH}/kube-batch ./deployment/images/ + docker build ./deployment/images -t kubesigs/kube-batch:${RELEASE_VER} + rm -f ./deployment/images/kube-batch + #Build job controller and admission images + for name in controllers admission; do\ + cp ${BIN_DIR}/${REL_OSARCH}/vk-$$name ./deployment/images/$$name/; \ + docker build --no-cache -t $(IMAGE_PREFIX)-$$name:$(RELEASE_VER) ./deployment/images/$$name; \ + rm deployment/images/$$name/vk-$$name; \ + done + +docker: images + +generate-deepcopy: init + go build -o ${BIN_DIR}/deepcopy-gen ./cmd/deepcopy-gen/ + ${BIN_DIR}/deepcopy-gen -i ./pkg/apis/scheduling/v1alpha1/ -O zz_generated.deepcopy --go-header-file hack/boilerplate/boilerplate.generatego.txt + ${BIN_DIR}/deepcopy-gen -i ./pkg/apis/batch/v1alpha1/ -O zz_generated.deepcopy --go-header-file hack/boilerplate/boilerplate.generatego.txt + ${BIN_DIR}/deepcopy-gen -i ./pkg/apis/bus/v1alpha1/ -O zz_generated.deepcopy --go-header-file hack/boilerplate/boilerplate.generatego.txt + generate-code: ./hack/update-gencode.sh @@ -61,11 +86,12 @@ verify: generate-code hack/verify-gencode.sh chart: init - helm package installer/chart/volcano --version=${CHART_VERSION} --destination=${CHART_DIR} + helm package ./deployment/volcano --version=${VERSION} --destination=${CHART_DIR} -package: docker chart cli - for name in controllers scheduler admission; do \ - docker save $(IMAGE)-$$name:$(TAG) > ${IMAGE_DIR}/$(IMAGE)-$$name.$(TAG).tar; \ +package: clean images chart vkctl + docker save kubesigs/kube-batch:${RELEASE_VER} > ${IMAGE_DIR}/kube-batch.$(RELEASE_VER).tar; + for name in controllers admission; do \ + docker save $(IMAGE_PREFIX)-$$name:$(RELEASE_VER) > ${IMAGE_DIR}/$(IMAGE)-$$name.$(RELEASE_VER).tar; \ done gzip ${IMAGE_DIR}/*.tar tar -zcvf _output/Volcano-package-${VERSION}.tgz -C _output/ ./bin/vkctl ./chart ./image diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options/options.go b/cmd/kube-batch/app/options/options.go similarity index 62% rename from vendor/github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options/options.go rename to cmd/kube-batch/app/options/options.go index 6fc4d636c2..b5a9cba40d 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options/options.go +++ b/cmd/kube-batch/app/options/options.go @@ -23,20 +23,31 @@ import ( "github.com/spf13/pflag" ) +const ( + defaultSchedulerName = "kube-batch" + defaultSchedulerPeriod = time.Second + defaultQueue = "default" + defaultListenAddress = ":8080" +) + // ServerOption is the main context object for the controller manager. type ServerOption struct { Master string Kubeconfig string SchedulerName string SchedulerConf string - SchedulePeriod string + SchedulePeriod time.Duration EnableLeaderElection bool LockObjectNamespace string DefaultQueue string PrintVersion bool ListenAddress string + EnablePriorityClass bool } +// ServerOpts server options +var ServerOpts *ServerOption + // NewServerOption creates a new CMServer with a default config. 
 func NewServerOption() *ServerOption {
 	s := ServerOption{}
@@ -47,26 +58,31 @@ func NewServerOption() *ServerOption {
 func (s *ServerOption) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
 	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information")
-	// kube-batch will ignore pods with scheduler names other than specified with the option
-	fs.StringVar(&s.SchedulerName, "scheduler-name", "kube-batch", "kube-batch will handle pods with the scheduler-name")
+	// volcano will ignore pods whose scheduler name differs from the one specified with this option
+	fs.StringVar(&s.SchedulerName, "scheduler-name", defaultSchedulerName, "volcano will handle pods whose .spec.schedulerName is the same as scheduler-name")
 	fs.StringVar(&s.SchedulerConf, "scheduler-conf", "", "The absolute path of scheduler configuration file")
-	fs.StringVar(&s.SchedulePeriod, "schedule-period", "1s", "The period between each scheduling cycle")
-	fs.StringVar(&s.DefaultQueue, "default-queue", "default", "The default queue name of the job")
+	fs.DurationVar(&s.SchedulePeriod, "schedule-period", defaultSchedulerPeriod, "The period between each scheduling cycle")
+	fs.StringVar(&s.DefaultQueue, "default-queue", defaultQueue, "The default queue name of the job")
 	fs.BoolVar(&s.EnableLeaderElection, "leader-elect", s.EnableLeaderElection,
 		"Start a leader election client and gain leadership before "+
-			"executing the main loop. Enable this when running replicated kube-batch for high availability")
+			"executing the main loop. Enable this when running replicated volcano for high availability")
 	fs.BoolVar(&s.PrintVersion, "version", false, "Show version and quit")
-	fs.StringVar(&s.LockObjectNamespace, "lock-object-namespace", s.LockObjectNamespace, "Define the namespace of the lock object")
-	fs.StringVar(&s.ListenAddress, "listen-address", ":8080", "The address to listen on for HTTP requests.")
+	fs.StringVar(&s.LockObjectNamespace, "lock-object-namespace", s.LockObjectNamespace, "Define the namespace of the lock object that is used for leader election")
+	fs.StringVar(&s.ListenAddress, "listen-address", defaultListenAddress, "The address to listen on for HTTP requests.")
+	fs.BoolVar(&s.EnablePriorityClass, "priority-class", true,
+		"Enable PriorityClass to support preemption at the pod group level; set to false to disable it")
 }

+// CheckOptionOrDie checks that lock-object-namespace is set when leader election is enabled
 func (s *ServerOption) CheckOptionOrDie() error {
 	if s.EnableLeaderElection && s.LockObjectNamespace == "" {
 		return fmt.Errorf("lock-object-namespace must not be nil when LeaderElection is enabled")
 	}

-	if _, err := time.ParseDuration(s.SchedulePeriod); err != nil {
-		return fmt.Errorf("failed to parse --schedule-period: %v", err)
-	}
-
 	return nil
 }
+
+// RegisterOptions makes the parsed options available globally through ServerOpts
+func (s *ServerOption) RegisterOptions() {
+	ServerOpts = s
+}
diff --git a/cmd/kube-batch/app/options/options_test.go b/cmd/kube-batch/app/options/options_test.go
new file mode 100644
index 0000000000..a1d51d154a
--- /dev/null
+++ b/cmd/kube-batch/app/options/options_test.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/spf13/pflag"
+)
+
+func TestAddFlags(t *testing.T) {
+	fs := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError)
+	s := NewServerOption()
+	s.AddFlags(fs)
+
+	args := []string{
+		"--schedule-period=5m",
+		"--priority-class=false",
+	}
+	if err := fs.Parse(args); err != nil {
+		t.Fatalf("failed to parse args: %v", err)
+	}
+
+	// This is a snapshot of expected options parsed by args.
+	expected := &ServerOption{
+		SchedulerName:  defaultSchedulerName,
+		SchedulePeriod: 5 * time.Minute,
+		DefaultQueue:   defaultQueue,
+		ListenAddress:  defaultListenAddress,
+	}
+
+	if !reflect.DeepEqual(expected, s) {
+		t.Errorf("Got different run options than expected.\nGot: %+v\nExpected: %+v\n", s, expected)
+	}
+}
diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/server.go b/cmd/kube-batch/app/server.go
similarity index 95%
rename from vendor/github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/server.go
rename to cmd/kube-batch/app/server.go
index 50f41ddb4d..ac90d1c342 100644
--- a/vendor/github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/server.go
+++ b/cmd/kube-batch/app/server.go
@@ -24,9 +24,9 @@ import (
 	"time"

 	"github.com/golang/glog"
-	"github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options"
-	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler"
-	"github.com/kubernetes-sigs/kube-batch/pkg/version"
+	"volcano.sh/volcano/cmd/kube-batch/app/options"
+	"volcano.sh/volcano/pkg/scheduler"
+	"volcano.sh/volcano/pkg/version"

 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	v1 "k8s.io/api/core/v1"
@@ -34,6 +34,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	// Register the GCP auth provider plugin
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	"k8s.io/client-go/rest"
 	restclient "k8s.io/client-go/rest"
@@ -57,6 +58,7 @@ func buildConfig(master, kubeconfig string) (*rest.Config, error) {
 	return rest.InClusterConfig()
 }

+// Run starts the kube-batch scheduler
 func Run(opt *options.ServerOption) error {
 	if opt.PrintVersion {
 		version.PrintVersionAndExit(apiVersion)
@@ -111,7 +113,7 @@ func Run(opt *options.ServerOption) error {
 	rl, err := resourcelock.New(resourcelock.ConfigMapsResourceLock,
 		opt.LockObjectNamespace,
-		"kube-batch",
+		"volcano",
 		leaderElectionClient.CoreV1(),
 		resourcelock.ResourceLockConfig{
 			Identity: id,
diff --git a/cmd/scheduler/main.go b/cmd/kube-batch/main.go
similarity index 87%
rename from cmd/scheduler/main.go
rename to cmd/kube-batch/main.go
index 79624a2d4a..fc860d4204 100644
--- a/cmd/scheduler/main.go
+++ b/cmd/kube-batch/main.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 The Vulcan Authors.
+Copyright 2017 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
+
 package main

 import (
@@ -26,11 +27,12 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/util/flag"

+	"volcano.sh/volcano/cmd/kube-batch/app"
+	"volcano.sh/volcano/cmd/kube-batch/app/options"
+
+	// Import default actions/plugins.
 	_ "volcano.sh/volcano/pkg/scheduler/actions"
 	_ "volcano.sh/volcano/pkg/scheduler/plugins"
-
-	"github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app"
-	"github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options"
 )

 var logFlushFreq = pflag.Duration("log-flush-frequency", 5*time.Second, "Maximum number of seconds between log flushes")
@@ -38,6 +40,7 @@ var logFlushFreq = pflag.Duration("log-flush-frequency", 5*time.Second, "Maximum
 func main() {
 	s := options.NewServerOption()
 	s.AddFlags(pflag.CommandLine)
+	s.RegisterOptions()
 	flag.InitFlags()

 	if err := s.CheckOptionOrDie(); err != nil {
diff --git a/deployment/images/Dockerfile b/deployment/images/Dockerfile
new file mode 100644
index 0000000000..d804c4f3e2
--- /dev/null
+++ b/deployment/images/Dockerfile
@@ -0,0 +1,5 @@
+FROM alpine:3.9
+
+ADD kube-batch /usr/local/bin
+
+ENTRYPOINT ["/usr/local/bin/kube-batch"]
diff --git a/installer/dockerfile/admission/Dockerfile b/deployment/images/admission/Dockerfile
similarity index 93%
rename from installer/dockerfile/admission/Dockerfile
rename to deployment/images/admission/Dockerfile
index 1827514ef0..abb92369b6 100644
--- a/installer/dockerfile/admission/Dockerfile
+++ b/deployment/images/admission/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright 2019 The Volcano Authors.
+# Copyright 2019 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/installer/dockerfile/controllers/Dockerfile b/deployment/images/controllers/Dockerfile
similarity index 93%
rename from installer/dockerfile/controllers/Dockerfile
rename to deployment/images/controllers/Dockerfile
index d9c7b3eef7..210854c78e 100644
--- a/installer/dockerfile/controllers/Dockerfile
+++ b/deployment/images/controllers/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright 2019 The Volcano Authors.
+# Copyright 2019 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/deployment/kube-batch/.helmignore b/deployment/kube-batch/.helmignore
new file mode 100644
index 0000000000..f0c1319444
--- /dev/null
+++ b/deployment/kube-batch/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/deployment/kube-batch/Chart.yaml b/deployment/kube-batch/Chart.yaml
new file mode 100644
index 0000000000..0f4581ec2e
--- /dev/null
+++ b/deployment/kube-batch/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1alpha1
+description: The batch scheduler of Kubernetes
+name: kube-batch
+version: 0.4.2
diff --git a/deployment/kube-batch/templates/NOTES.txt b/deployment/kube-batch/templates/NOTES.txt
new file mode 100644
index 0000000000..4f488ab2d2
--- /dev/null
+++ b/deployment/kube-batch/templates/NOTES.txt
@@ -0,0 +1 @@
+The batch scheduler of Kubernetes.
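A side note on the options change above: SchedulePeriod is now a time.Duration bound through pflag's DurationVar, so malformed values are rejected during flag parsing itself, which is why the manual time.ParseDuration check could be dropped from CheckOptionOrDie. A minimal standalone sketch of that behavior (not part of this patch; the flag-set name is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

	// DurationVar binds a time.Duration directly; values such as "5m" or
	// "1h30m" are parsed by fs.Parse itself, so no separate validation
	// step is needed after parsing.
	var period time.Duration
	fs.DurationVar(&period, "schedule-period", time.Second, "period between scheduling cycles")

	if err := fs.Parse([]string{"--schedule-period=5m"}); err != nil {
		fmt.Println("parse error:", err) // malformed values fail here, not later
		return
	}
	fmt.Println(period) // prints "5m0s", already a time.Duration
}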
diff --git a/deployment/kube-batch/templates/_helpers.tpl b/deployment/kube-batch/templates/_helpers.tpl new file mode 100644 index 0000000000..f0d83d2edb --- /dev/null +++ b/deployment/kube-batch/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/installer/chart/volcano/templates/default-queue.yaml b/deployment/kube-batch/templates/default.yaml similarity index 100% rename from installer/chart/volcano/templates/default-queue.yaml rename to deployment/kube-batch/templates/default.yaml diff --git a/deployment/kube-batch/templates/deployment.yaml b/deployment/kube-batch/templates/deployment.yaml new file mode 100644 index 0000000000..c7758f216a --- /dev/null +++ b/deployment/kube-batch/templates/deployment.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-batch + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: kube-batch + template: + metadata: + labels: + app: kube-batch + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}/kube-batch:{{ .Values.image.tag }}" + args: ["--logtostderr", "--v", "3"] + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: +{{ toYaml .Values.resources | indent 10 }} + diff --git a/installer/chart/volcano-init/templates/scheduling_v1alpha1_podgroup.yaml b/deployment/kube-batch/templates/scheduling_v1alpha1_podgroup.yaml similarity index 85% rename from installer/chart/volcano-init/templates/scheduling_v1alpha1_podgroup.yaml rename to deployment/kube-batch/templates/scheduling_v1alpha1_podgroup.yaml index 1432fc160f..adacda07d4 100644 --- a/installer/chart/volcano-init/templates/scheduling_v1alpha1_podgroup.yaml +++ b/deployment/kube-batch/templates/scheduling_v1alpha1_podgroup.yaml @@ -2,6 +2,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: podgroups.scheduling.incubator.k8s.io + annotations: + "helm.sh/hook": "crd-install" spec: group: scheduling.incubator.k8s.io names: @@ -22,6 +24,10 @@ spec: minMember: format: int32 type: integer + queue: + type: string + priorityClassName: + type: string type: object status: properties: diff --git a/installer/chart/volcano-init/templates/scheduling_v1alpha1_queue.yaml b/deployment/kube-batch/templates/scheduling_v1alpha1_queue.yaml similarity index 92% rename from installer/chart/volcano-init/templates/scheduling_v1alpha1_queue.yaml rename to deployment/kube-batch/templates/scheduling_v1alpha1_queue.yaml index df11525951..fde92db2ec 100644 --- a/installer/chart/volcano-init/templates/scheduling_v1alpha1_queue.yaml +++ b/deployment/kube-batch/templates/scheduling_v1alpha1_queue.yaml @@ -2,6 +2,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: queues.scheduling.incubator.k8s.io + annotations: + "helm.sh/hook": "crd-install" spec: group: scheduling.incubator.k8s.io names: diff --git a/deployment/kube-batch/values.yaml 
b/deployment/kube-batch/values.yaml new file mode 100644 index 0000000000..2cdde33c0f --- /dev/null +++ b/deployment/kube-batch/values.yaml @@ -0,0 +1,16 @@ +# Default values for volcano. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +replicaCount: 1 +image: + repository: kubesigs + tag: v0.4.2 + pullPolicy: IfNotPresent +resources: + limits: + cpu: 2000m + memory: 2048Mi + requests: + cpu: 2000m + memory: 2048Mi + diff --git a/installer/chart/volcano/Chart.yaml b/deployment/volcano/Chart.yaml similarity index 100% rename from installer/chart/volcano/Chart.yaml rename to deployment/volcano/Chart.yaml diff --git a/installer/chart/volcano/config/kube-batch.conf b/deployment/volcano/config/kube-batch.conf similarity index 100% rename from installer/chart/volcano/config/kube-batch.conf rename to deployment/volcano/config/kube-batch.conf diff --git a/installer/chart/volcano/plugins/gen-admission-secret/gen-admission-secret.sh b/deployment/volcano/plugins/gen-admission-secret/gen-admission-secret.sh similarity index 97% rename from installer/chart/volcano/plugins/gen-admission-secret/gen-admission-secret.sh rename to deployment/volcano/plugins/gen-admission-secret/gen-admission-secret.sh index 4d88c053b9..84bb061fb1 100644 --- a/installer/chart/volcano/plugins/gen-admission-secret/gen-admission-secret.sh +++ b/deployment/volcano/plugins/gen-admission-secret/gen-admission-secret.sh @@ -46,6 +46,7 @@ if [ -z ${service} ]; then exit 1 fi + [ -z ${secret} ] && secret=volcano-admission-secret [ -z ${namespace} ] && namespace=default @@ -107,7 +108,7 @@ done # approve and fetch the signed certificate kubectl certificate approve ${csrName} # verify certificate has been signed -for x in $(seq 10); do +for x in $(seq 20); do serverCert=$(kubectl get csr ${csrName} -o jsonpath='{.status.certificate}') if [[ ${serverCert} != '' ]]; then break @@ -115,7 +116,7 @@ for x in $(seq 10); do sleep 1 done if [[ ${serverCert} == '' ]]; then - echo "ERROR: After approving csr ${csrName}, the signed certificate did not appear on the resource. Giving up after 10 attempts." >&2 + echo "ERROR: After approving csr ${csrName}, the signed certificate did not appear on the resource. Giving up after 20 attempts." >&2 exit 1 fi echo ${serverCert} | openssl base64 -d -A -out ${tmpdir}/server-cert.pem diff --git a/installer/chart/volcano/plugins/gen-admission-secret/plugin.yaml b/deployment/volcano/plugins/gen-admission-secret/plugin.yaml similarity index 77% rename from installer/chart/volcano/plugins/gen-admission-secret/plugin.yaml rename to deployment/volcano/plugins/gen-admission-secret/plugin.yaml index 42d83866ad..eacaaaf51a 100644 --- a/installer/chart/volcano/plugins/gen-admission-secret/plugin.yaml +++ b/deployment/volcano/plugins/gen-admission-secret/plugin.yaml @@ -4,4 +4,4 @@ usage: "Generate valid cert for admission server" description: This plugin provides signed cert to admission server. 
ignoreFlags: false useTunnel: false -command: "bash $HELM_PLUGIN_DIR/gen-admission-secret.sh" +command: "bash $HELM_PLUGIN_DIR/gen-admission-secret.sh" \ No newline at end of file diff --git a/installer/chart/volcano/templates/admission-config.yaml b/deployment/volcano/templates/admission-config.yaml similarity index 93% rename from installer/chart/volcano/templates/admission-config.yaml rename to deployment/volcano/templates/admission-config.yaml index 8e4482cdb9..7ece1449d7 100644 --- a/installer/chart/volcano/templates/admission-config.yaml +++ b/deployment/volcano/templates/admission-config.yaml @@ -6,7 +6,6 @@ metadata: "helm.sh/hook": pre-install,pre-upgrade,post-delete webhooks: - clientConfig: - caBundle: "{{ .Values.basic.ca_bundle }}" service: name: {{ .Release.Name }}-admission-service namespace: {{ .Release.Namespace }} @@ -33,7 +32,6 @@ metadata: "helm.sh/hook": pre-install,pre-upgrade,post-delete webhooks: - clientConfig: - caBundle: "{{ .Values.basic.ca_bundle }}" service: name: {{ .Release.Name }}-admission-service namespace: {{ .Release.Namespace }} diff --git a/installer/chart/volcano/templates/admission.yaml b/deployment/volcano/templates/admission.yaml similarity index 93% rename from installer/chart/volcano/templates/admission.yaml rename to deployment/volcano/templates/admission.yaml index 66ccc33375..85693450ec 100644 --- a/installer/chart/volcano/templates/admission.yaml +++ b/deployment/volcano/templates/admission.yaml @@ -40,21 +40,18 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - app: admission - admission: "true" + app: volcano-admission name: {{ .Release.Name }}-admission namespace: {{ .Release.Namespace }} spec: replicas: 1 selector: matchLabels: - app: admission - admission: "true" + app: volcano-admission template: metadata: labels: - app: admission - admission: "true" + app: volcano-admission spec: serviceAccount: {{ .Release.Name }}-admission {{ if .Values.basic.image_pull_secret }} @@ -90,7 +87,7 @@ apiVersion: v1 kind: Service metadata: labels: - test: admission + app: volcano-admission name: {{ .Release.Name }}-admission-service namespace: {{ .Release.Namespace }} spec: @@ -99,5 +96,5 @@ spec: protocol: TCP targetPort: 443 selector: - admission: "true" + app: volcano-admission sessionAffinity: None diff --git a/installer/chart/volcano/templates/batch_v1alpha1_job.yaml b/deployment/volcano/templates/batch_v1alpha1_job.yaml similarity index 100% rename from installer/chart/volcano/templates/batch_v1alpha1_job.yaml rename to deployment/volcano/templates/batch_v1alpha1_job.yaml diff --git a/installer/chart/volcano/templates/bus_v1alpha1_command.yaml b/deployment/volcano/templates/bus_v1alpha1_command.yaml similarity index 100% rename from installer/chart/volcano/templates/bus_v1alpha1_command.yaml rename to deployment/volcano/templates/bus_v1alpha1_command.yaml diff --git a/installer/chart/volcano/templates/controllers.yaml b/deployment/volcano/templates/controllers.yaml similarity index 92% rename from installer/chart/volcano/templates/controllers.yaml rename to deployment/volcano/templates/controllers.yaml index 173c22ce14..4a25b183af 100644 --- a/installer/chart/volcano/templates/controllers.yaml +++ b/deployment/volcano/templates/controllers.yaml @@ -63,20 +63,22 @@ apiVersion: apps/v1 metadata: name: {{ .Release.Name }}-controllers namespace: {{ .Release.Namespace }} + labels: + app: volcano-controller spec: replicas: 1 selector: matchLabels: - vk-controllers: test + app: volcano-controller template: metadata: labels: - vk-controllers: 
test + app: volcano-controller spec: serviceAccount: {{ .Release.Name }}-controllers {{ if .Values.basic.image_pull_secret }} imagePullSecrets: - - name: {{ .Values.basic.image_pull_secret }} + - name: {{ .Values.basic.image_pull_secret }} {{ end }} containers: - name: {{ .Release.Name }}-controllers @@ -85,4 +87,4 @@ spec: - --alsologtostderr - -v=4 - 2>&1 - imagePullPolicy: "IfNotPresent" \ No newline at end of file + imagePullPolicy: "IfNotPresent" diff --git a/deployment/volcano/templates/default-queue.yaml b/deployment/volcano/templates/default-queue.yaml new file mode 100644 index 0000000000..3aa233f5db --- /dev/null +++ b/deployment/volcano/templates/default-queue.yaml @@ -0,0 +1,6 @@ +apiVersion: scheduling.incubator.k8s.io/v1alpha1 +kind: Queue +metadata: + name: default +spec: + weight: 1 diff --git a/installer/chart/volcano/templates/scheduler.yaml b/deployment/volcano/templates/scheduler.yaml similarity index 88% rename from installer/chart/volcano/templates/scheduler.yaml rename to deployment/volcano/templates/scheduler.yaml index 669236fdbc..bb66a79576 100644 --- a/installer/chart/volcano/templates/scheduler.yaml +++ b/deployment/volcano/templates/scheduler.yaml @@ -42,10 +42,7 @@ rules: resources: ["persistentvolumes"] verbs: ["list", "watch"] - apiGroups: ["scheduling.incubator.k8s.io"] - resources: ["podgroups", "queues"] - verbs: ["list", "watch", "update"] - - apiGroups: ["scheduling.k8s.io"] - resources: ["priorityclasses"] + resources: ["podgroups"] verbs: ["list", "watch", "update"] - apiGroups: [""] resources: ["namespaces"] @@ -87,21 +84,19 @@ apiVersion: apps/v1 metadata: name: {{ .Release.Name }}-scheduler namespace: {{ .Release.Namespace }} + labels: + app: volcano-scheduler spec: replicas: 1 selector: matchLabels: - vk-scheduler: test + app: volcano-scheduler template: metadata: labels: - vk-scheduler: test + app: volcano-scheduler spec: serviceAccount: {{ .Release.Name }}-scheduler - {{ if .Values.basic.image_pull_secret }} - imagePullSecrets: - - name: {{ .Values.basic.image_pull_secret }} - {{ end }} containers: - name: {{ .Release.Name }}-scheduler image: {{.Values.basic.scheduler_image_name}}:{{.Values.basic.image_tag_version}} @@ -117,4 +112,4 @@ spec: volumes: - name: scheduler-config configMap: - name: {{ .Release.Name }}-scheduler-configmap \ No newline at end of file + name: {{ .Release.Name }}-scheduler-configmap diff --git a/installer/chart/volcano/templates/scheduling_v1alpha1_podgroup.yaml b/deployment/volcano/templates/scheduling_v1alpha1_podgroup.yaml similarity index 100% rename from installer/chart/volcano/templates/scheduling_v1alpha1_podgroup.yaml rename to deployment/volcano/templates/scheduling_v1alpha1_podgroup.yaml diff --git a/installer/chart/volcano/templates/scheduling_v1alpha1_queue.yaml b/deployment/volcano/templates/scheduling_v1alpha1_queue.yaml similarity index 100% rename from installer/chart/volcano/templates/scheduling_v1alpha1_queue.yaml rename to deployment/volcano/templates/scheduling_v1alpha1_queue.yaml diff --git a/deployment/volcano/values.yaml b/deployment/volcano/values.yaml new file mode 100644 index 0000000000..4e64a0ea4e --- /dev/null +++ b/deployment/volcano/values.yaml @@ -0,0 +1,7 @@ +basic: + image_tag_version: "latest" + controller_image_name: "kubesigs/vk-controllers" + scheduler_image_name: "kubesigs/kube-batch" + admission_image_name: "kubesigs/vk-admission" + admission_secret_name: "volcano-admission-secret" + image_pull_secret: "" diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 
db54e476ad..a0bc88f70a 100644 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -15,7 +15,7 @@ # limitations under the License. # The golang package that we are building. -readonly KUBE_GO_PACKAGE=github.com/kubernetes-sigs/kube-batch +readonly KUBE_GO_PACKAGE=volcano.sh/volcano readonly KUBE_GOPATH="${GOPATH}" # The set of server targets that we are only building for Linux diff --git a/hack/run-e2e-kind.sh b/hack/run-e2e-kind.sh index 91bdf3b3dc..3c5e3f6f08 100755 --- a/hack/run-e2e-kind.sh +++ b/hack/run-e2e-kind.sh @@ -16,8 +16,8 @@ export KIND_OPT=${KIND_OPT:="--image kindest/node:v1.13.2-huawei --config ${VK_R export KIND_IMAGE=$(echo ${KIND_OPT} |grep -E -o "image \w+\/[^ ]*" | sed "s/image //") -export IMAGE=${IMAGE:-volcano} -export TAG=${TAG:-0.1} +export IMAGE_PREFIX=kubesigs/vk +export TAG=${TAG:-v0.4.2} export TILLE_IMAGE=${TILLE_IMAGE:-"gcr.io/kubernetes-helm/tiller:v2.11.0"} export WEAVE_NET_IMAGE=${WEAVE_NET_IMAGE:-"weaveworks/weave-npc:2.5.1"} export WEAVE_KUBE_IMAGE=${WEAVE_KUBE_IMAGE:-"weaveworks/weave-kube:2.5.1"} @@ -83,9 +83,9 @@ function check-all-image { check-image ${WEAVE_KUBE_IMAGE} # used for volcano install check-image ${TILLE_IMAGE} - check-image ${IMAGE}-controllers:${TAG} - check-image ${IMAGE}-scheduler:${TAG} - check-image ${IMAGE}-admission:${TAG} + check-image ${IMAGE_PREFIX}-controllers:${TAG} + check-image kubesigs/kube-batch:${TAG} + check-image ${IMAGE_PREFIX}-admission:${TAG} # used for volcano test check-image ${TEST_BUSYBOX_IMAGE} check-image ${TEST_NGINX_IMAGE} @@ -117,17 +117,17 @@ function install-volcano { helm init --skip-refresh --service-account tiller --kubeconfig ${KUBECONFIG} --wait echo "Loading docker images into kind cluster" - kind load docker-image ${IMAGE}-controllers:${TAG} ${CLUSTER_CONTEXT} - kind load docker-image ${IMAGE}-scheduler:${TAG} ${CLUSTER_CONTEXT} - kind load docker-image ${IMAGE}-admission:${TAG} ${CLUSTER_CONTEXT} + kind load docker-image ${IMAGE_PREFIX}-controllers:${TAG} ${CLUSTER_CONTEXT} + kind load docker-image kubesigs/kube-batch:${TAG} ${CLUSTER_CONTEXT} + kind load docker-image ${IMAGE_PREFIX}-admission:${TAG} ${CLUSTER_CONTEXT} echo "Install volcano plugin into cluster...." helm plugin remove gen-admission-secret - helm plugin install --kubeconfig ${KUBECONFIG} installer/chart/volcano/plugins/gen-admission-secret + helm plugin install --kubeconfig ${KUBECONFIG} deployment/volcano/plugins/gen-admission-secret helm gen-admission-secret --service integration-admission-service --namespace kube-system echo "Install volcano chart" - helm install installer/chart/volcano --namespace kube-system --name integration --kubeconfig ${KUBECONFIG} --set basic.image_tag_version=${TAG} + helm install deployment/volcano --namespace kube-system --name integration --kubeconfig ${KUBECONFIG} --set basic.image_tag_version=${TAG} echo "Load required image" kind load docker-image ${TEST_BUSYBOX_IMAGE} ${CLUSTER_CONTEXT} diff --git a/hack/update-gencode.sh b/hack/update-gencode.sh index 0aaad3a1b3..465ac691aa 100755 --- a/hack/update-gencode.sh +++ b/hack/update-gencode.sh @@ -29,7 +29,7 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge # instead of the $GOPATH directly. For normal projects this can be dropped. 
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ volcano.sh/volcano/pkg/client volcano.sh/volcano/pkg/apis \ - "batch:v1alpha1 bus:v1alpha1" \ + "batch:v1alpha1 bus:v1alpha1 scheduling:v1alpha1" \ --go-header-file ${SCRIPT_ROOT}/hack/boilerplate/boilerplate.go.txt # To use your own boilerplate text use: diff --git a/installer/chart/volcano-init/Chart.yaml b/installer/chart/volcano-init/Chart.yaml deleted file mode 100644 index 632d2ceef9..0000000000 --- a/installer/chart/volcano-init/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -name: volcano-init -version: 0.0.1 -description: volcano crds and admission-controller config -apiVersion: v1 diff --git a/installer/chart/volcano-init/templates/batch_v1alpha1_job.yaml b/installer/chart/volcano-init/templates/batch_v1alpha1_job.yaml deleted file mode 100644 index 5b8b509a55..0000000000 --- a/installer/chart/volcano-init/templates/batch_v1alpha1_job.yaml +++ /dev/null @@ -1,170 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: jobs.batch.volcano.sh -spec: - group: batch.volcano.sh - names: - kind: Job - plural: jobs - scope: Namespaced - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of the desired behavior of a cron job, including - the minAvailable - properties: - input: - description: The volume mount for input of Job - properties: - volumeClaim: - description: VolumeClaim defines the PVC used by the VolumeMount. - type: object - mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. - type: string - required: - - mountPath - type: object - minAvailable: - description: The minimal available pods to run for this Job - format: int32 - type: integer - output: - description: The volume mount for output of Job - properties: - volumeClaim: - description: VolumeClaim defines the PVC used by the VolumeMount. - type: object - mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. - type: string - required: - - mountPath - type: object - policies: - description: Specifies the default lifecycle of tasks - items: - properties: - action: - description: The action that will be taken to the PodGroup according - to Event. One of "Restart", "None". Default to None. - type: string - event: - description: The Event recorded by scheduler; the controller takes - actions according to this Event. - type: string - timeout: - description: Timeout is the grace period for controller to take - actions. Default to nil (take action immediately). - type: object - type: object - type: array - schedulerName: - description: SchedulerName is the default value of `tasks.template.spec.schedulerName`. 
- type: string - tasks: - description: Tasks specifies the task specification of Job - items: - properties: - name: - description: Name specifies the name of tasks - type: string - policies: - description: Specifies the lifecycle of task - items: - properties: - action: - description: The action that will be taken to the PodGroup - according to Event. One of "Restart", "None". Default - to None. - type: string - event: - description: The Event recorded by scheduler; the controller - takes actions according to this Event. - type: string - timeout: - description: Timeout is the grace period for controller - to take actions. Default to nil (take action immediately). - type: object - type: object - type: array - replicas: - description: Replicas specifies the replicas of this TaskSpec - in Job - format: int32 - type: integer - template: - description: Specifies the pod that will be created for this TaskSpec - when executing a Job - type: object - type: object - type: array - type: object - status: - description: Current status of Job - properties: - Succeeded: - description: The number of pods which reached phase Succeeded. - format: int32 - type: integer - failed: - description: The number of pods which reached phase Failed. - format: int32 - type: integer - minAvailable: - description: The minimal available pods to run for this Job - format: int32 - type: integer - pending: - description: The number of pending pods. - format: int32 - type: integer - running: - description: The number of running pods. - format: int32 - type: integer - version: - description: Job's current version. - type: integer - format: int32 - state: - description: Current state of Job. - properties: - message: - description: Human-readable message indicating details about last - transition. - type: string - phase: - description: The phase of Job - type: string - reason: - description: Unique, one-word, CamelCase reason for the condition's - last transition. - type: string - type: object - type: object - version: v1alpha1 - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/installer/chart/volcano-init/templates/bus_v1alpha1_command.yaml b/installer/chart/volcano-init/templates/bus_v1alpha1_command.yaml deleted file mode 100644 index e5b9b451a8..0000000000 --- a/installer/chart/volcano-init/templates/bus_v1alpha1_command.yaml +++ /dev/null @@ -1,44 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: commands.bus.volcano.sh -spec: - group: bus.volcano.sh - names: - kind: Command - plural: commands - scope: Namespaced - validation: - openAPIV3Schema: - properties: - action: - description: Action defines the action that will be took to the target object. - type: string - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - message: - description: Human-readable message indicating details of this command. 
- type: string - metadata: - type: object - reason: - description: Unique, one-word, CamelCase reason for this command. - type: string - target: - description: TargetObject defines the target object of this command. - type: object - version: v1alpha1 -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/installer/chart/volcano-init/values.yaml b/installer/chart/volcano-init/values.yaml deleted file mode 100644 index 6c605ea110..0000000000 --- a/installer/chart/volcano-init/values.yaml +++ /dev/null @@ -1,2 +0,0 @@ -basic: - namespace: default \ No newline at end of file diff --git a/installer/chart/volcano/values.yaml b/installer/chart/volcano/values.yaml deleted file mode 100644 index 9baa9b2dda..0000000000 --- a/installer/chart/volcano/values.yaml +++ /dev/null @@ -1,8 +0,0 @@ -basic: - image_tag_version: "1.0" - controller_image_name: "volcano-controllers" - scheduler_image_name: "volcano-scheduler" - admission_image_name: "volcano-admission" - admission_secret_name: "volcano-admission-secret" - ca_bundle: "" - image_pull_secret: "" diff --git a/installer/dockerfile/scheduler/Dockerfile b/installer/dockerfile/scheduler/Dockerfile deleted file mode 100644 index 8b8d6d640d..0000000000 --- a/installer/dockerfile/scheduler/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2019 The Volcano Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -FROM alpine:latest - -ADD vk-scheduler /vk-scheduler -ENTRYPOINT ["/vk-scheduler"] diff --git a/pkg/apis/batch/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/batch/v1alpha1/zz_generated.deepcopy.go index e00c862459..9259944ea4 100644 --- a/pkg/apis/batch/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/batch/v1alpha1/zz_generated.deepcopy.go @@ -32,7 +32,7 @@ func (in *Job) DeepCopyInto(out *Job) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -114,6 +114,21 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } return } @@ -147,6 +162,13 @@ func (in *JobState) DeepCopy() *JobState { func (in *JobStatus) DeepCopyInto(out *JobStatus) { *out = *in out.State = in.State + if in.ControlledResources != nil { + in, out := &in.ControlledResources, &out.ControlledResources + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/doc.go b/pkg/apis/scheduling/v1alpha1/doc.go similarity index 100% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/doc.go rename to pkg/apis/scheduling/v1alpha1/doc.go diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/labels.go b/pkg/apis/scheduling/v1alpha1/labels.go similarity index 100% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/labels.go rename to pkg/apis/scheduling/v1alpha1/labels.go diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/register.go b/pkg/apis/scheduling/v1alpha1/register.go similarity index 100% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/register.go rename to pkg/apis/scheduling/v1alpha1/register.go diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/types.go b/pkg/apis/scheduling/v1alpha1/types.go similarity index 100% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/types.go rename to pkg/apis/scheduling/v1alpha1/types.go diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go similarity index 99% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go rename to pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go index aaaa4e2c21..b5e343f981 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
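The zz_generated.deepcopy.go hunks above all follow one pattern for the new Plugins (map[string][]string) and ControlledResources (map[string]string) fields: copying the map header alone would alias the value slices between the original and the copy, so each entry is cloned element-wise. A hand-written sketch of the same idea (deepCopyPlugins is illustrative, not the generated code):

package main

import "fmt"

// deepCopyPlugins mirrors the generated DeepCopyInto logic for a
// map[string][]string field: the map and every value slice are cloned,
// so mutating the copy cannot leak back into the original.
func deepCopyPlugins(in map[string][]string) map[string][]string {
	if in == nil {
		return nil
	}
	out := make(map[string][]string, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		cp := make([]string, len(val))
		copy(cp, val)
		out[key] = cp
	}
	return out
}

func main() {
	orig := map[string][]string{"ssh": {"--no-root"}}
	clone := deepCopyPlugins(orig)
	clone["ssh"][0] = "mutated"
	fmt.Println(orig["ssh"][0]) // still "--no-root": no aliasing
}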
diff --git a/pkg/apis/utils/utils.go b/pkg/apis/utils/utils.go
new file mode 100644
index 0000000000..3cbe5bdaf4
--- /dev/null
+++ b/pkg/apis/utils/utils.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes"
+
+	vkbatchv1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1"
+	vkv1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1"
+	vkcorev1 "volcano.sh/volcano/pkg/apis/bus/v1alpha1"
+)
+
+// JobKind is the GroupVersionKind of the batch/v1alpha1 Job resource.
+var JobKind = vkbatchv1.SchemeGroupVersion.WithKind("Job")
+
+// CommandKind is the GroupVersionKind of the bus/v1alpha1 Command resource.
+var CommandKind = vkcorev1.SchemeGroupVersion.WithKind("Command")
+
+// GetController returns the UID of obj's controller, or the empty UID if obj has no controller reference.
+func GetController(obj interface{}) types.UID {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return ""
+	}
+
+	controllerRef := metav1.GetControllerOf(accessor)
+	if controllerRef != nil {
+		return controllerRef.UID
+	}
+
+	return ""
+}
+
+// ControlledBy reports whether obj is controlled by an owner of the given kind.
+func ControlledBy(obj interface{}, gvk schema.GroupVersionKind) bool {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return false
+	}
+
+	controllerRef := metav1.GetControllerOf(accessor)
+	if controllerRef != nil {
+		return controllerRef.Kind == gvk.Kind
+	}
+
+	return false
+}
+
+// CreateConfigMapIfNotExist creates the Job's ConfigMap with the given data unless it already exists.
+func CreateConfigMapIfNotExist(job *vkv1.Job, kubeClients *kubernetes.Clientset, data map[string]string, cmName string) error {
+	// If ConfigMap does not exist, create one for Job.
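+	// Only a NotFound error from the lookup below is tolerated; any other
+	// failure aborts, and an existing ConfigMap short-circuits the create.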
+	if _, err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Get(cmName, metav1.GetOptions{}); err != nil {
+		if !apierrors.IsNotFound(err) {
+			glog.V(3).Infof("Failed to get Configmap for Job <%s/%s>: %v",
+				job.Namespace, job.Name, err)
+			return err
+		}
+	} else {
+		// The ConfigMap already exists, so there is nothing to create.
+		return nil
+	}
+
+	cm := &v1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: job.Namespace,
+			Name:      cmName,
+			OwnerReferences: []metav1.OwnerReference{
+				*metav1.NewControllerRef(job, JobKind),
+			},
+		},
+		Data: data,
+	}
+
+	if _, err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Create(cm); err != nil {
+		glog.V(3).Infof("Failed to create ConfigMap for Job <%s/%s>: %v",
+			job.Namespace, job.Name, err)
+		return err
+	}
+
+	return nil
+}
+
+// DeleteConfigmap deletes the Job's ConfigMap if it still exists.
+func DeleteConfigmap(job *vkv1.Job, kubeClients *kubernetes.Clientset, cmName string) error {
+	if _, err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Get(cmName, metav1.GetOptions{}); err != nil {
+		if !apierrors.IsNotFound(err) {
+			glog.V(3).Infof("Failed to get Configmap for Job <%s/%s>: %v",
+				job.Namespace, job.Name, err)
+			return err
+		}
+		// The ConfigMap is already gone; nothing to delete.
+		return nil
+	}
+
+	if err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Delete(cmName, nil); err != nil {
+		if !apierrors.IsNotFound(err) {
+			glog.Errorf("Failed to delete Configmap of Job %v/%v: %v",
+				job.Namespace, job.Name, err)
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go
index 2de59112aa..26ed1f6748 100644
--- a/pkg/client/clientset/versioned/clientset.go
+++ b/pkg/client/clientset/versioned/clientset.go
@@ -24,6 +24,7 @@ import (
 	flowcontrol "k8s.io/client-go/util/flowcontrol"
 	batchv1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/batch/v1alpha1"
 	busv1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/bus/v1alpha1"
+	schedulingv1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/scheduling/v1alpha1"
 )

 type Interface interface {
@@ -34,14 +35,18 @@ type Interface interface {
 	BusV1alpha1() busv1alpha1.BusV1alpha1Interface
 	// Deprecated: please explicitly pick a version if possible.
 	Bus() busv1alpha1.BusV1alpha1Interface
+	SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface
 }

 // Clientset contains the clients for groups. Each group has exactly one
 // version included in a Clientset.
 type Clientset struct {
 	*discovery.DiscoveryClient
-	batchV1alpha1 *batchv1alpha1.BatchV1alpha1Client
-	busV1alpha1   *busv1alpha1.BusV1alpha1Client
+	batchV1alpha1      *batchv1alpha1.BatchV1alpha1Client
+	busV1alpha1        *busv1alpha1.BusV1alpha1Client
+	schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client
 }

 // BatchV1alpha1 retrieves the BatchV1alpha1Client
@@ -66,6 +71,17 @@ func (c *Clientset) Bus() busv1alpha1.BusV1alpha1Interface {
 	return c.busV1alpha1
 }

+// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client
+func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface {
+	return c.schedulingV1alpha1
+}
+
+// Deprecated: Scheduling retrieves the default version of SchedulingClient.
+// Please explicitly pick a version.
+func (c *Clientset) Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface { + return c.schedulingV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -90,6 +106,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -104,6 +124,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.batchV1alpha1 = batchv1alpha1.NewForConfigOrDie(c) cs.busV1alpha1 = busv1alpha1.NewForConfigOrDie(c) + cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -114,6 +135,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.batchV1alpha1 = batchv1alpha1.New(c) cs.busV1alpha1 = busv1alpha1.New(c) + cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 7884394f16..1cef3b8e41 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -29,6 +29,8 @@ import ( fakebatchv1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/batch/v1alpha1/fake" busv1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/bus/v1alpha1" fakebusv1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/bus/v1alpha1/fake" + schedulingv1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/scheduling/v1alpha1" + fakeschedulingv1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. @@ -92,3 +94,13 @@ func (c *Clientset) BusV1alpha1() busv1alpha1.BusV1alpha1Interface { func (c *Clientset) Bus() busv1alpha1.BusV1alpha1Interface { return &fakebusv1alpha1.FakeBusV1alpha1{Fake: &c.Fake} } + +// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client +func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { + return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake} +} + +// Scheduling retrieves the SchedulingV1alpha1Client +func (c *Clientset) Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface { + return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake} +} diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index 52f2825082..fd0231a6f5 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -26,6 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" batchv1alpha1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1" busv1alpha1 "volcano.sh/volcano/pkg/apis/bus/v1alpha1" + schedulingv1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" ) var scheme = runtime.NewScheme() @@ -34,6 +35,7 @@ var parameterCodec = runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ batchv1alpha1.AddToScheme, busv1alpha1.AddToScheme, + schedulingv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index 8c6f2d3d60..e057701aa8 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -26,6 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" batchv1alpha1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1" busv1alpha1 "volcano.sh/volcano/pkg/apis/bus/v1alpha1" + schedulingv1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" ) var Scheme = runtime.NewScheme() @@ -34,6 +35,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ batchv1alpha1.AddToScheme, busv1alpha1.AddToScheme, + schedulingv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go similarity index 94% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go rename to pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go index df51baa4d4..5b5f708b3a 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go similarity index 83% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/doc.go rename to pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go index 41721ca52d..8a62b8ba7b 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/doc.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,5 +16,5 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -// This package has the automatically generated clientset. -package versioned +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podgroup.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podgroup.go new file mode 100644 index 0000000000..b9500a8e62 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podgroup.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The Volcano Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" +) + +// FakePodGroups implements PodGroupInterface +type FakePodGroups struct { + Fake *FakeSchedulingV1alpha1 + ns string +} + +var podgroupsResource = schema.GroupVersionResource{Group: "scheduling", Version: "v1alpha1", Resource: "podgroups"} + +var podgroupsKind = schema.GroupVersionKind{Group: "scheduling", Version: "v1alpha1", Kind: "PodGroup"} + +// Get takes name of the podGroup, and returns the corresponding podGroup object, and an error if there is any. +func (c *FakePodGroups) Get(name string, options v1.GetOptions) (result *v1alpha1.PodGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podgroupsResource, c.ns, name), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} + +// List takes label and field selectors, and returns the list of PodGroups that match those selectors. +func (c *FakePodGroups) List(opts v1.ListOptions) (result *v1alpha1.PodGroupList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podgroupsResource, podgroupsKind, c.ns, opts), &v1alpha1.PodGroupList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PodGroupList{ListMeta: obj.(*v1alpha1.PodGroupList).ListMeta} + for _, item := range obj.(*v1alpha1.PodGroupList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podGroups. +func (c *FakePodGroups) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podgroupsResource, c.ns, opts)) + +} + +// Create takes the representation of a podGroup and creates it. Returns the server's representation of the podGroup, and an error, if there is any. +func (c *FakePodGroups) Create(podGroup *v1alpha1.PodGroup) (result *v1alpha1.PodGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podgroupsResource, c.ns, podGroup), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} + +// Update takes the representation of a podGroup and updates it. Returns the server's representation of the podGroup, and an error, if there is any. +func (c *FakePodGroups) Update(podGroup *v1alpha1.PodGroup) (result *v1alpha1.PodGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podgroupsResource, c.ns, podGroup), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} + +// UpdateStatus was generated because the type contains a Status member. 
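+// In the fake client it records an update of the "status" subresource on the object tracker.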
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePodGroups) UpdateStatus(podGroup *v1alpha1.PodGroup) (*v1alpha1.PodGroup, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podgroupsResource, "status", c.ns, podGroup), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} + +// Delete takes name of the podGroup and deletes it. Returns an error if one occurs. +func (c *FakePodGroups) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podgroupsResource, c.ns, name), &v1alpha1.PodGroup{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodGroups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podgroupsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.PodGroupList{}) + return err +} + +// Patch applies the patch and returns the patched podGroup. +func (c *FakePodGroups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podgroupsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} diff --git a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_queue.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_queue.go new file mode 100644 index 0000000000..c8733589df --- /dev/null +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_queue.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Volcano Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" +) + +// FakeQueues implements QueueInterface +type FakeQueues struct { + Fake *FakeSchedulingV1alpha1 +} + +var queuesResource = schema.GroupVersionResource{Group: "scheduling", Version: "v1alpha1", Resource: "queues"} + +var queuesKind = schema.GroupVersionKind{Group: "scheduling", Version: "v1alpha1", Kind: "Queue"} + +// Get takes name of the queue, and returns the corresponding queue object, and an error if there is any. +func (c *FakeQueues) Get(name string, options v1.GetOptions) (result *v1alpha1.Queue, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(queuesResource, name), &v1alpha1.Queue{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Queue), err +} + +// List takes label and field selectors, and returns the list of Queues that match those selectors. +func (c *FakeQueues) List(opts v1.ListOptions) (result *v1alpha1.QueueList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(queuesResource, queuesKind, opts), &v1alpha1.QueueList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.QueueList{ListMeta: obj.(*v1alpha1.QueueList).ListMeta} + for _, item := range obj.(*v1alpha1.QueueList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested queues. +func (c *FakeQueues) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(queuesResource, opts)) +} + +// Create takes the representation of a queue and creates it. Returns the server's representation of the queue, and an error, if there is any. +func (c *FakeQueues) Create(queue *v1alpha1.Queue) (result *v1alpha1.Queue, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(queuesResource, queue), &v1alpha1.Queue{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Queue), err +} + +// Update takes the representation of a queue and updates it. Returns the server's representation of the queue, and an error, if there is any. +func (c *FakeQueues) Update(queue *v1alpha1.Queue) (result *v1alpha1.Queue, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(queuesResource, queue), &v1alpha1.Queue{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Queue), err +} + +// Delete takes name of the queue and deletes it. Returns an error if one occurs. +func (c *FakeQueues) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(queuesResource, name), &v1alpha1.Queue{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeQueues) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(queuesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.QueueList{}) + return err +} + +// Patch applies the patch and returns the patched queue. +func (c *FakeQueues) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Queue, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(queuesResource, name, pt, data, subresources...), &v1alpha1.Queue{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Queue), err +} diff --git a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go new file mode 100644 index 0000000000..6e9351028b --- /dev/null +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Volcano Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha1 "volcano.sh/volcano/pkg/client/clientset/versioned/typed/scheduling/v1alpha1" +) + +type FakeSchedulingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeSchedulingV1alpha1) PodGroups(namespace string) v1alpha1.PodGroupInterface { + return &FakePodGroups{c, namespace} +} + +func (c *FakeSchedulingV1alpha1) Queues() v1alpha1.QueueInterface { + return &FakeQueues{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeSchedulingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go similarity index 94% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go rename to pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go index f195814928..5c22c074ee 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go similarity index 89% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go rename to pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go index ea0cb3628c..6f95ca01ef 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - scheme "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + scheme "volcano.sh/volcano/pkg/client/clientset/versioned/scheme" ) // PodGroupsGetter has a method to return a PodGroupInterface. @@ -76,11 +78,16 @@ func (c *podGroups) Get(name string, options v1.GetOptions) (result *v1alpha1.Po // List takes label and field selectors, and returns the list of PodGroups that match those selectors. func (c *podGroups) List(opts v1.ListOptions) (result *v1alpha1.PodGroupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PodGroupList{} err = c.client.Get(). Namespace(c.ns). Resource("podgroups"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *podGroups) List(opts v1.ListOptions) (result *v1alpha1.PodGroupList, er // Watch returns a watch.Interface that watches the requested podGroups. func (c *podGroups) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podgroups"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *podGroups) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *podGroups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podgroups"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go similarity index 87% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go rename to pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go index 77b1b40aeb..1b40817dfd 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - scheme "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + scheme "volcano.sh/volcano/pkg/client/clientset/versioned/scheme" ) // QueuesGetter has a method to return a QueueInterface. @@ -72,10 +74,15 @@ func (c *queues) Get(name string, options v1.GetOptions) (result *v1alpha1.Queue // List takes label and field selectors, and returns the list of Queues that match those selectors. func (c *queues) List(opts v1.ListOptions) (result *v1alpha1.QueueList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.QueueList{} err = c.client.Get(). Resource("queues"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *queues) List(opts v1.ListOptions) (result *v1alpha1.QueueList, err erro // Watch returns a watch.Interface that watches the requested queues. func (c *queues) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("queues"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *queues) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *queues) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("queues"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go similarity index 93% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go rename to pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go index d57f01217c..a455a668d8 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme" serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" + v1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + "volcano.sh/volcano/pkg/client/clientset/versioned/scheme" ) type SchedulingV1alpha1Interface interface { diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index da45747f4a..a164ec6a21 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -31,6 +31,7 @@ import ( batch "volcano.sh/volcano/pkg/client/informers/externalversions/batch" bus "volcano.sh/volcano/pkg/client/informers/externalversions/bus" internalinterfaces "volcano.sh/volcano/pkg/client/informers/externalversions/internalinterfaces" + scheduling "volcano.sh/volcano/pkg/client/informers/externalversions/scheduling" ) // SharedInformerOption defines the functional option type for SharedInformerFactory. @@ -175,6 +176,7 @@ type SharedInformerFactory interface { Batch() batch.Interface Bus() bus.Interface + Scheduling() scheduling.Interface } func (f *sharedInformerFactory) Batch() batch.Interface { @@ -184,3 +186,7 @@ func (f *sharedInformerFactory) Batch() batch.Interface { func (f *sharedInformerFactory) Bus() bus.Interface { return bus.New(f, f.namespace, f.tweakListOptions) } + +func (f *sharedInformerFactory) Scheduling() scheduling.Interface { + return scheduling.New(f, f.namespace, f.tweakListOptions) +} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index d2505e1822..39da2c70c0 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -25,6 +25,7 @@ import ( cache "k8s.io/client-go/tools/cache" v1alpha1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1" busv1alpha1 "volcano.sh/volcano/pkg/apis/bus/v1alpha1" + schedulingv1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other @@ -61,6 +62,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case busv1alpha1.SchemeGroupVersion.WithResource("commands"): return &genericInformer{resource: resource.GroupResource(), informer: f.Bus().V1alpha1().Commands().Informer()}, nil + // Group=scheduling, Version=v1alpha1 + case schedulingv1alpha1.SchemeGroupVersion.WithResource("podgroups"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PodGroups().Informer()}, nil + case schedulingv1alpha1.SchemeGroupVersion.WithResource("queues"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().Queues().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/interface.go b/pkg/client/informers/externalversions/scheduling/interface.go similarity index 84% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/interface.go rename to pkg/client/informers/externalversions/scheduling/interface.go index 0a73048e1d..f0e32d2dda 100644 --- 
a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/interface.go +++ b/pkg/client/informers/externalversions/scheduling/interface.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ limitations under the License. package scheduling import ( - internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1" + internalinterfaces "volcano.sh/volcano/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "volcano.sh/volcano/pkg/client/informers/externalversions/scheduling/v1alpha1" ) // Interface provides access to each of this group's versions. diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/interface.go b/pkg/client/informers/externalversions/scheduling/v1alpha1/interface.go similarity index 91% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/interface.go rename to pkg/client/informers/externalversions/scheduling/v1alpha1/interface.go index 0194ac7824..cd2c952019 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/scheduling/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces" + internalinterfaces "volcano.sh/volcano/pkg/client/informers/externalversions/internalinterfaces" ) // Interface provides access to all the informers in this group version. diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/podgroup.go b/pkg/client/informers/externalversions/scheduling/v1alpha1/podgroup.go similarity index 85% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/podgroup.go rename to pkg/client/informers/externalversions/scheduling/v1alpha1/podgroup.go index 5b967af45b..9e0179eaec 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/podgroup.go +++ b/pkg/client/informers/externalversions/scheduling/v1alpha1/podgroup.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,14 +21,14 @@ package v1alpha1 import ( time "time" - scheduling_v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" - internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" + schedulingv1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + versioned "volcano.sh/volcano/pkg/client/clientset/versioned" + internalinterfaces "volcano.sh/volcano/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "volcano.sh/volcano/pkg/client/listers/scheduling/v1alpha1" ) // PodGroupInformer provides access to a shared informer and lister for @@ -70,7 +70,7 @@ func NewFilteredPodGroupInformer(client versioned.Interface, namespace string, r return client.SchedulingV1alpha1().PodGroups(namespace).Watch(options) }, }, - &scheduling_v1alpha1.PodGroup{}, + &schedulingv1alpha1.PodGroup{}, resyncPeriod, indexers, ) @@ -81,7 +81,7 @@ func (f *podGroupInformer) defaultInformer(client versioned.Interface, resyncPer } func (f *podGroupInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&scheduling_v1alpha1.PodGroup{}, f.defaultInformer) + return f.factory.InformerFor(&schedulingv1alpha1.PodGroup{}, f.defaultInformer) } func (f *podGroupInformer) Lister() v1alpha1.PodGroupLister { diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/queue.go b/pkg/client/informers/externalversions/scheduling/v1alpha1/queue.go similarity index 85% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/queue.go rename to pkg/client/informers/externalversions/scheduling/v1alpha1/queue.go index d23433f52a..92a42d7403 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1/queue.go +++ b/pkg/client/informers/externalversions/scheduling/v1alpha1/queue.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,14 +21,14 @@ package v1alpha1 import ( time "time" - scheduling_v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" - internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" + schedulingv1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + versioned "volcano.sh/volcano/pkg/client/clientset/versioned" + internalinterfaces "volcano.sh/volcano/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "volcano.sh/volcano/pkg/client/listers/scheduling/v1alpha1" ) // QueueInformer provides access to a shared informer and lister for @@ -69,7 +69,7 @@ func NewFilteredQueueInformer(client versioned.Interface, resyncPeriod time.Dura return client.SchedulingV1alpha1().Queues().Watch(options) }, }, - &scheduling_v1alpha1.Queue{}, + &schedulingv1alpha1.Queue{}, resyncPeriod, indexers, ) @@ -80,7 +80,7 @@ func (f *queueInformer) defaultInformer(client versioned.Interface, resyncPeriod } func (f *queueInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&scheduling_v1alpha1.Queue{}, f.defaultInformer) + return f.factory.InformerFor(&schedulingv1alpha1.Queue{}, f.defaultInformer) } func (f *queueInformer) Lister() v1alpha1.QueueLister { diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/expansion_generated.go b/pkg/client/listers/scheduling/v1alpha1/expansion_generated.go similarity index 96% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/expansion_generated.go rename to pkg/client/listers/scheduling/v1alpha1/expansion_generated.go index c8f5713cfe..560863a750 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/scheduling/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/podgroup.go b/pkg/client/listers/scheduling/v1alpha1/podgroup.go similarity index 96% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/podgroup.go rename to pkg/client/listers/scheduling/v1alpha1/podgroup.go index 28ad72a068..f9cbba1046 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/podgroup.go +++ b/pkg/client/listers/scheduling/v1alpha1/podgroup.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" + v1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" ) // PodGroupLister helps list PodGroups. diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/queue.go b/pkg/client/listers/scheduling/v1alpha1/queue.go similarity index 94% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/queue.go rename to pkg/client/listers/scheduling/v1alpha1/queue.go index 7bdbdf91ca..0b5674e26b 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1/queue.go +++ b/pkg/client/listers/scheduling/v1alpha1/queue.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" + v1alpha1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" ) // QueueLister helps list Queues. diff --git a/pkg/controllers/job/job_controller.go b/pkg/controllers/job/job_controller.go index ca6e8e4377..8527f9e690 100644 --- a/pkg/controllers/job/job_controller.go +++ b/pkg/controllers/job/job_controller.go @@ -34,10 +34,10 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - kbver "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" - kbinfoext "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions" - kbinfo "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1" - kblister "github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1" + kbver "volcano.sh/volcano/pkg/client/clientset/versioned" + kbinfoext "volcano.sh/volcano/pkg/client/informers/externalversions" + kbinfo "volcano.sh/volcano/pkg/client/informers/externalversions/scheduling/v1alpha1" + kblister "volcano.sh/volcano/pkg/client/listers/scheduling/v1alpha1" v1corev1 "volcano.sh/volcano/pkg/apis/bus/v1alpha1" "volcano.sh/volcano/pkg/apis/helpers" diff --git a/pkg/controllers/job/job_controller_actions.go b/pkg/controllers/job/job_controller_actions.go index c8022f9edf..e80b32e3e1 100644 --- a/pkg/controllers/job/job_controller_actions.go +++ b/pkg/controllers/job/job_controller_actions.go @@ -21,7 +21,7 @@ import ( "sync" "github.com/golang/glog" - kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" + kbv1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/controllers/job/job_controller_handler.go b/pkg/controllers/job/job_controller_handler.go index 87f6718206..efb97c8157 100644 --- a/pkg/controllers/job/job_controller_handler.go +++ b/pkg/controllers/job/job_controller_handler.go @@ -26,7 +26,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" - kbtype "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" + kbtype "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" vkbatchv1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1" vkbusv1 
"volcano.sh/volcano/pkg/apis/bus/v1alpha1" "volcano.sh/volcano/pkg/controllers/job/apis" diff --git a/pkg/controllers/job/job_controller_util.go b/pkg/controllers/job/job_controller_util.go index 927ee180e4..e80a0a5a96 100644 --- a/pkg/controllers/job/job_controller_util.go +++ b/pkg/controllers/job/job_controller_util.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kbapi "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" + kbapi "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" admissioncontroller "volcano.sh/volcano/pkg/admission" vkv1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1" diff --git a/pkg/scheduler/README.md b/pkg/scheduler/README.md deleted file mode 100644 index 2a7bb58590..0000000000 --- a/pkg/scheduler/README.md +++ /dev/null @@ -1,4 +0,0 @@ -## Volcano Scheduler - -Currently, Volcano scheduler duplicated actions/plugins from [kube-batch](https://github.com/kubernetes-sigs/kube-batch) -which focus on batch workload in Kubernetes. Expect the framework part of kube-batch can be migrated to the upstream. diff --git a/pkg/scheduler/actions/allocate/allocate.go b/pkg/scheduler/actions/allocate/allocate.go index e6cb053469..631be567db 100644 --- a/pkg/scheduler/actions/allocate/allocate.go +++ b/pkg/scheduler/actions/allocate/allocate.go @@ -17,11 +17,12 @@ limitations under the License. package allocate import ( - "github.com/golang/glog" + "fmt" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "github.com/golang/glog" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/framework" "volcano.sh/volcano/pkg/scheduler/util" ) @@ -47,12 +48,16 @@ func (alloc *allocateAction) Execute(ssn *framework.Session) { jobsMap := map[api.QueueID]*util.PriorityQueue{} for _, job := range ssn.Jobs { - if _, found := jobsMap[job.Queue]; !found { - jobsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn) - } - if queue, found := ssn.Queues[job.Queue]; found { queues.Push(queue) + } else { + glog.Warningf("Skip adding Job <%s/%s> because its queue %s is not found", + job.Namespace, job.Name, job.Queue) + continue + } + + if _, found := jobsMap[job.Queue]; !found { + jobsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn) } glog.V(4).Infof("Added Job <%s/%s> into Queue <%s>", job.Namespace, job.Name, job.Queue) @@ -63,6 +68,24 @@ func (alloc *allocateAction) Execute(ssn *framework.Session) { pendingTasks := map[api.JobID]*util.PriorityQueue{} + allNodes := util.GetNodeList(ssn.Nodes) + + predicateFn := func(task *api.TaskInfo, node *api.NodeInfo) error { + // Check for Resource Predicate + // TODO: We could not allocate resource to task from both node.Idle and node.Releasing now, + // after it is done, we could change the following compare to: + // clonedNode := node.Idle.Clone() + // if !task.InitResreq.LessEqual(clonedNode.Add(node.Releasing)) { + // ... 
+ // } + if !task.InitResreq.LessEqual(node.Idle) && !task.InitResreq.LessEqual(node.Releasing) { + return fmt.Errorf("task <%s/%s> ResourceFit failed on node <%s>", + task.Namespace, task.Name, node.Name) + } + + return ssn.PredicateFn(task, node) + } + for { if queues.Empty() { break @@ -104,11 +127,7 @@ func (alloc *allocateAction) Execute(ssn *framework.Session) { tasks.Len(), job.Namespace, job.Name) for !tasks.Empty() { - predicateNodes := []*api.NodeInfo{} - nodeScores := map[int][]*api.NodeInfo{} - task := tasks.Pop().(*api.TaskInfo) - assigned := false glog.V(3).Infof("There are <%d> nodes for Job <%v/%v>", len(ssn.Nodes), job.Namespace, job.Name) @@ -120,67 +139,41 @@ func (alloc *allocateAction) Execute(ssn *framework.Session) { if len(job.NodesFitDelta) > 0 { job.NodesFitDelta = make(api.NodeResourceMap) } - for _, node := range ssn.Nodes { - glog.V(3).Infof("Considering Task <%v/%v> on node <%v>: <%v> vs. <%v>", - task.Namespace, task.Name, node.Name, task.Resreq, node.Idle) - - // TODO (k82cn): Enable eCache for performance improvement. - if err := ssn.PredicateFn(task, node); err != nil { - glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s>: %v", - task.Namespace, task.Name, node.Name, err) - continue - } else { - predicateNodes = append(predicateNodes, node) - } - } - for _, node := range predicateNodes { - score, err := ssn.NodeOrderFn(task, node) - if err != nil { - glog.V(3).Infof("Error in Calculating Priority for the node:%v", err) - } else { - nodeScores[score] = append(nodeScores[score], node) - } + + predicateNodes := util.PredicateNodes(task, allNodes, predicateFn) + if len(predicateNodes) == 0 { + break } - selectedNodes := util.SelectBestNode(nodeScores) - for _, node := range selectedNodes { - // Allocate idle resource to the task. - if task.Resreq.LessEqual(node.Idle) { - glog.V(3).Infof("Binding Task <%v/%v> to node <%v>", - task.Namespace, task.Name, node.Name) - if err := ssn.Allocate(task, node.Name); err != nil { - glog.Errorf("Failed to bind Task %v on %v in Session %v", - task.UID, node.Name, ssn.UID) - continue - } - assigned = true - break - } else { - //store information about missing resources - job.NodesFitDelta[node.Name] = node.Idle.Clone() - job.NodesFitDelta[node.Name].FitDelta(task.Resreq) - glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s> with limited resources", - task.Namespace, task.Name, node.Name) + + nodeScores := util.PrioritizeNodes(task, predicateNodes, ssn.NodeOrderFn) + + node := util.SelectBestNode(nodeScores) + // Allocate idle resource to the task. + if task.InitResreq.LessEqual(node.Idle) { + glog.V(3).Infof("Binding Task <%v/%v> to node <%v>", + task.Namespace, task.Name, node.Name) + if err := ssn.Allocate(task, node.Name); err != nil { + glog.Errorf("Failed to bind Task %v on %v in Session %v, err: %v", + task.UID, node.Name, ssn.UID, err) } + } else { + //store information about missing resources + job.NodesFitDelta[node.Name] = node.Idle.Clone() + job.NodesFitDelta[node.Name].FitDelta(task.InitResreq) + glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s> with limited resources", + task.Namespace, task.Name, node.Name) // Allocate releasing resource to the task if any. 
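+					// Pipelining reserves resources that are still being released, so the task can be bound once the eviction completes.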
- if task.Resreq.LessEqual(node.Releasing) { + if task.InitResreq.LessEqual(node.Releasing) { glog.V(3).Infof("Pipelining Task <%v/%v> to node <%v> for <%v> on <%v>", - task.Namespace, task.Name, node.Name, task.Resreq, node.Releasing) + task.Namespace, task.Name, node.Name, task.InitResreq, node.Releasing) if err := ssn.Pipeline(task, node.Name); err != nil { glog.Errorf("Failed to pipeline Task %v on %v in Session %v", task.UID, node.Name, ssn.UID) - continue } - - assigned = true - break } } - if !assigned { - break - } - if ssn.JobReady(job) { jobs.Push(job) break diff --git a/pkg/scheduler/actions/allocate/allocate_test.go b/pkg/scheduler/actions/allocate/allocate_test.go index a1b3304b64..5beb07b640 100644 --- a/pkg/scheduler/actions/allocate/allocate_test.go +++ b/pkg/scheduler/actions/allocate/allocate_test.go @@ -30,12 +30,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - + kbv1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/cache" + "volcano.sh/volcano/pkg/scheduler/conf" + "volcano.sh/volcano/pkg/scheduler/framework" "volcano.sh/volcano/pkg/scheduler/plugins/drf" "volcano.sh/volcano/pkg/scheduler/plugins/proportion" ) @@ -279,14 +278,19 @@ func TestAllocate(t *testing.T) { schedulerCache.AddQueue(q) } + trueValue := true ssn := framework.OpenSession(schedulerCache, []conf.Tier{ { Plugins: []conf.PluginOption{ { - Name: "drf", + Name: "drf", + EnabledPreemptable: &trueValue, + EnabledJobOrder: &trueValue, }, { - Name: "proportion", + Name: "proportion", + EnabledQueueOrder: &trueValue, + EnabledReclaimable: &trueValue, }, }, }, diff --git a/pkg/scheduler/actions/backfill/backfill.go b/pkg/scheduler/actions/backfill/backfill.go index 85aa7ce319..f56002e657 100644 --- a/pkg/scheduler/actions/backfill/backfill.go +++ b/pkg/scheduler/actions/backfill/backfill.go @@ -19,8 +19,8 @@ package backfill import ( "github.com/golang/glog" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/framework" ) type backfillAction struct { @@ -44,7 +44,7 @@ func (alloc *backfillAction) Execute(ssn *framework.Session) { // TODO (k82cn): When backfill, it's also need to balance between Queues. for _, job := range ssn.Jobs { for _, task := range job.TaskStatusIndex[api.Pending] { - if task.Resreq.IsEmpty() { + if task.InitResreq.IsEmpty() { // As task did not request resources, so it only need to meet predicates. // TODO (k82cn): need to prioritize nodes to avoid pod hole. for _, node := range ssn.Nodes { diff --git a/pkg/scheduler/actions/factory.go b/pkg/scheduler/actions/factory.go index cbc8439fdf..f77db314ad 100644 --- a/pkg/scheduler/actions/factory.go +++ b/pkg/scheduler/actions/factory.go @@ -17,7 +17,7 @@ limitations under the License. 
 package actions
 
 import (
-	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
+	"volcano.sh/volcano/pkg/scheduler/framework"
 
 	"volcano.sh/volcano/pkg/scheduler/actions/allocate"
 	"volcano.sh/volcano/pkg/scheduler/actions/backfill"
diff --git a/pkg/scheduler/actions/preempt/preempt.go b/pkg/scheduler/actions/preempt/preempt.go
index b408f61aae..b7a0ea5327 100644
--- a/pkg/scheduler/actions/preempt/preempt.go
+++ b/pkg/scheduler/actions/preempt/preempt.go
@@ -21,10 +21,9 @@ import (
 
 	"github.com/golang/glog"
 
-	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
-	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
-	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics"
-
+	"volcano.sh/volcano/pkg/scheduler/api"
+	"volcano.sh/volcano/pkg/scheduler/framework"
+	"volcano.sh/volcano/pkg/scheduler/metrics"
 	"volcano.sh/volcano/pkg/scheduler/util"
 )
 
@@ -115,15 +114,15 @@ func (alloc *preemptAction) Execute(ssn *framework.Session) {
 				assigned = true
 			}
 
-			// If job not ready, keep preempting
-			if ssn.JobReady(preemptorJob) {
+			// If the job is not pipelined, keep preempting
+			if ssn.JobPipelined(preemptorJob) {
 				stmt.Commit()
 				break
 			}
 		}
 
-		// If job not ready after try all tasks, next job.
-		if !ssn.JobReady(preemptorJob) {
+		// If the job is still not pipelined after trying all tasks, move on to the next job.
+		if !ssn.JobPipelined(preemptorJob) {
 			stmt.Discard()
 			continue
 		}
@@ -176,35 +175,22 @@ func preempt(
 	nodes map[string]*api.NodeInfo,
 	filter func(*api.TaskInfo) bool,
 ) (bool, error) {
-	predicateNodes := []*api.NodeInfo{}
-	nodeScores := map[int][]*api.NodeInfo{}
 	assigned := false
 
-	for _, node := range nodes {
-		if err := ssn.PredicateFn(preemptor, node); err != nil {
-			glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s>: %v",
-				preemptor.Namespace, preemptor.Name, node.Name, err)
-			continue
-		} else {
-			predicateNodes = append(predicateNodes, node)
-		}
-	}
-	for _, node := range predicateNodes {
-		score, err := ssn.NodeOrderFn(preemptor, node)
-		if err != nil {
-			glog.V(3).Infof("Error in Calculating Priority for the node:%v", err)
-		} else {
-			nodeScores[score] = append(nodeScores[score], node)
-		}
-	}
-	selectedNodes := util.SelectBestNode(nodeScores)
+	allNodes := util.GetNodeList(nodes)
+
+	predicateNodes := util.PredicateNodes(preemptor, allNodes, ssn.PredicateFn)
+
+	nodeScores := util.PrioritizeNodes(preemptor, predicateNodes, ssn.NodeOrderFn)
+
+	selectedNodes := util.SortNodes(nodeScores)
 	for _, node := range selectedNodes {
 		glog.V(3).Infof("Considering Task <%s/%s> on Node <%s>.",
 			preemptor.Namespace, preemptor.Name, node.Name)
 
 		var preemptees []*api.TaskInfo
 		preempted := api.EmptyResource()
-		resreq := preemptor.Resreq.Clone()
+		resreq := preemptor.InitResreq.Clone()
 
 		for _, task := range node.Tasks {
 			if filter == nil {
@@ -221,8 +207,15 @@ func preempt(
 			continue
 		}
 
-		// Preempt victims for tasks.
-		for _, preemptee := range victims {
+		victimsQueue := util.NewPriorityQueue(func(l, r interface{}) bool {
+			return !ssn.TaskOrderFn(l, r)
+		})
+		for _, victim := range victims {
+			victimsQueue.Push(victim)
+		}
+		// Preempt victims for tasks; pick the lowest-priority task first.
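+		// Inverting ssn.TaskOrderFn turns victimsQueue into a lowest-priority-first queue, so cheaper victims are evicted before higher-priority ones.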
+ for !victimsQueue.Empty() { + preemptee := victimsQueue.Pop().(*api.TaskInfo) glog.Errorf("Try to preempt Task <%s/%s> for Tasks <%s/%s>", preemptee.Namespace, preemptee.Name, preemptor.Namespace, preemptor.Name) if err := stmt.Evict(preemptee, "preempt"); err != nil { @@ -239,9 +232,9 @@ func preempt( metrics.RegisterPreemptionAttempts() glog.V(3).Infof("Preempted <%v> for task <%s/%s> requested <%v>.", - preempted, preemptor.Namespace, preemptor.Name, preemptor.Resreq) + preempted, preemptor.Namespace, preemptor.Name, preemptor.InitResreq) - if preemptor.Resreq.LessEqual(preempted) { + if preemptor.InitResreq.LessEqual(preempted) { if err := stmt.Pipeline(preemptor, node.Name); err != nil { glog.Errorf("Failed to pipline Task <%s/%s> on Node <%s>", preemptor.Namespace, preemptor.Name, node.Name) diff --git a/pkg/scheduler/actions/preempt/preempt_test.go b/pkg/scheduler/actions/preempt/preempt_test.go index f6b677ac94..d313ae1bfa 100644 --- a/pkg/scheduler/actions/preempt/preempt_test.go +++ b/pkg/scheduler/actions/preempt/preempt_test.go @@ -19,7 +19,7 @@ package preempt import ( "testing" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/framework" "volcano.sh/volcano/pkg/scheduler/plugins/drf" ) diff --git a/pkg/scheduler/actions/reclaim/reclaim.go b/pkg/scheduler/actions/reclaim/reclaim.go index 68e96c8f98..1c6b0a3f10 100644 --- a/pkg/scheduler/actions/reclaim/reclaim.go +++ b/pkg/scheduler/actions/reclaim/reclaim.go @@ -19,9 +19,8 @@ package reclaim import ( "github.com/golang/glog" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/framework" "volcano.sh/volcano/pkg/scheduler/util" ) @@ -111,14 +110,13 @@ func (alloc *reclaimAction) Execute(ssn *framework.Session) { } assigned := false - for _, n := range ssn.Nodes { // If predicates failed, next node. if err := ssn.PredicateFn(task, n); err != nil { continue } - resreq := task.Resreq.Clone() + resreq := task.InitResreq.Clone() reclaimed := api.EmptyResource() glog.V(3).Infof("Considering Task <%s/%s> on Node <%s>.", @@ -172,11 +170,11 @@ func (alloc *reclaimAction) Execute(ssn *framework.Session) { } glog.V(3).Infof("Reclaimed <%v> for task <%s/%s> requested <%v>.", - reclaimed, task.Namespace, task.Name, task.Resreq) + reclaimed, task.Namespace, task.Name, task.InitResreq) - if task.Resreq.LessEqual(reclaimed) { + if task.InitResreq.LessEqual(reclaimed) { if err := ssn.Pipeline(task, n.Name); err != nil { - glog.Errorf("Failed to pipline Task <%s/%s> on Node <%s>", + glog.Errorf("Failed to pipeline Task <%s/%s> on Node <%s>", task.Namespace, task.Name, n.Name) } diff --git a/pkg/scheduler/algorithm/factory.go b/pkg/scheduler/algorithm/factory.go deleted file mode 100644 index b68b108524..0000000000 --- a/pkg/scheduler/algorithm/factory.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2018 The Vulcan Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package algorithm - -import ( - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - // Import default actions/plugins. - _ "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions" - _ "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins" - - "volcano.sh/volcano/pkg/scheduler/algorithm/fairshare" -) - -func init() { - framework.RegisterPluginBuilder("fairshare", fairshare.New) -} diff --git a/pkg/scheduler/algorithm/fairshare/fairshare.go b/pkg/scheduler/algorithm/fairshare/fairshare.go deleted file mode 100644 index f9a566b4d8..0000000000 --- a/pkg/scheduler/algorithm/fairshare/fairshare.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2018 The Volcano Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fairshare - -import "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - -type fairsharePlugin struct { -} - -func New() framework.Plugin { - return &fairsharePlugin{} -} - -func (dp *fairsharePlugin) Name() string { - return "fairshare" -} - -func (dp *fairsharePlugin) OnSessionOpen(ssn *framework.Session) { - -} - -func (dp *fairsharePlugin) OnSessionClose(ssn *framework.Session) { - -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/cluster_info.go b/pkg/scheduler/api/cluster_info.go similarity index 99% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/cluster_info.go rename to pkg/scheduler/api/cluster_info.go index 8264f7a7cc..40f9b9f6bc 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/cluster_info.go +++ b/pkg/scheduler/api/cluster_info.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package api import "fmt" diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers.go b/pkg/scheduler/api/helpers.go similarity index 87% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers.go rename to pkg/scheduler/api/helpers.go index 90a6eec8c4..f85f57472a 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers.go +++ b/pkg/scheduler/api/helpers.go @@ -25,11 +25,11 @@ import ( // PodKey returns the string key of a pod. 
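+// It falls back to a "<namespace>/<name>" key if MetaNamespaceKeyFunc returns an error.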
func PodKey(pod *v1.Pod) TaskID {
-	if key, err := clientcache.MetaNamespaceKeyFunc(pod); err != nil {
+	key, err := clientcache.MetaNamespaceKeyFunc(pod)
+	if err != nil {
 		return TaskID(fmt.Sprintf("%v/%v", pod.Namespace, pod.Name))
-	} else {
-		return TaskID(key)
 	}
+	return TaskID(key)
 }
 
 func getTaskStatus(pod *v1.Pod) TaskStatus {
@@ -60,6 +60,7 @@ func getTaskStatus(pod *v1.Pod) TaskStatus {
 	return Unknown
 }
 
+// AllocatedStatus checks whether the task has an allocated status
 func AllocatedStatus(status TaskStatus) bool {
 	switch status {
 	case Bound, Binding, Running, Allocated:
@@ -69,6 +70,7 @@ func AllocatedStatus(status TaskStatus) bool {
 	}
 }
 
+// MergeErrors is used to merge multiple errors into a single error
 func MergeErrors(errs ...error) error {
 	msg := "errors: "
@@ -96,7 +98,7 @@ func MergeErrors(errs ...error) error {
 	return nil
 }
 
-// JobTerminated checkes whether job was terminated.
+// JobTerminated checks whether job was terminated.
 func JobTerminated(job *JobInfo) bool {
 	return job.PodGroup == nil &&
 		job.PDB == nil &&
diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers/helpers.go b/pkg/scheduler/api/helpers/helpers.go
similarity index 88%
rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers/helpers.go
rename to pkg/scheduler/api/helpers/helpers.go
index 680a5f8efd..1dfe4dfca1 100644
--- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers/helpers.go
+++ b/pkg/scheduler/api/helpers/helpers.go
@@ -19,9 +19,10 @@ package helpers
 import (
 	"math"
 
-	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
+	"volcano.sh/volcano/pkg/scheduler/api"
 )
 
+// Min is used to find the minimum of two resources in each dimension
 func Min(l, r *api.Resource) *api.Resource {
 	res := &api.Resource{}
@@ -32,6 +33,7 @@ func Min(l, r *api.Resource) *api.Resource {
 	return res
 }
 
+// Share is used to determine the share of l with respect to r
 func Share(l, r float64) float64 {
 	var share float64
 	if r == 0 {
diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/job_info.go b/pkg/scheduler/api/job_info.go
similarity index 71%
rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/job_info.go
rename to pkg/scheduler/api/job_info.go
index 297bf9df40..b8278e8328 100644
--- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/job_info.go
+++ b/pkg/scheduler/api/job_info.go
@@ -26,11 +26,13 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 
-	"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
+	"volcano.sh/volcano/pkg/apis/scheduling/v1alpha1"
 )
 
+// TaskID is the UID type for a Task
 type TaskID types.UID
 
+// TaskInfo holds all information about a task
 type TaskInfo struct {
 	UID TaskID
 	Job JobID
@@ -38,7 +40,10 @@ type TaskInfo struct {
 	Name      string
 	Namespace string
 
+	// Resreq is the resource used when the task is running.
 	Resreq *Resource
+	// InitResreq is the resource used to launch the task; it takes init containers into account.
+	InitResreq *Resource
 
 	NodeName string
 	Status   TaskStatus
@@ -60,26 +65,24 @@ func getJobID(pod *v1.Pod) JobID {
 	return ""
 }
 
+// NewTaskInfo creates a new taskInfo object for a Pod
 func NewTaskInfo(pod *v1.Pod) *TaskInfo {
-	req := EmptyResource()
-
-	// TODO(k82cn): also includes initContainers' resource.
-	for _, c := range pod.Spec.Containers {
-		req.Add(NewResource(c.Resources.Requests))
-	}
+	req := GetPodResourceWithoutInitContainers(pod)
+	initResreq := GetPodResourceRequest(pod)
 
 	jobID := getJobID(pod)
 
 	ti := &TaskInfo{
-		UID:       TaskID(pod.UID),
-		Job:       jobID,
-		Name:      pod.Name,
-		Namespace: pod.Namespace,
-		NodeName:  pod.Spec.NodeName,
-		Status:    getTaskStatus(pod),
-		Priority:  1,
-		Pod:       pod,
-		Resreq:    req,
+		UID:        TaskID(pod.UID),
+		Job:        jobID,
+		Name:       pod.Name,
+		Namespace:  pod.Namespace,
+		NodeName:   pod.Spec.NodeName,
+		Status:     getTaskStatus(pod),
+		Priority:   1,
+		Pod:        pod,
+		Resreq:     req,
+		InitResreq: initResreq,
 	}
 
 	if pod.Spec.Priority != nil {
@@ -89,6 +92,7 @@ func NewTaskInfo(pod *v1.Pod) *TaskInfo {
 	return ti
 }
 
+// Clone is used for cloning a task
 func (ti *TaskInfo) Clone() *TaskInfo {
 	return &TaskInfo{
 		UID: ti.UID,
@@ -100,10 +104,12 @@ func (ti *TaskInfo) Clone() *TaskInfo {
 		Priority:    ti.Priority,
 		Pod:         ti.Pod,
 		Resreq:      ti.Resreq.Clone(),
+		InitResreq:  ti.InitResreq.Clone(),
 		VolumeReady: ti.VolumeReady,
 	}
 }
 
+// String returns the taskInfo details in a string
 func (ti TaskInfo) String() string {
 	return fmt.Sprintf("Task (%v:%v/%v): job %v, status %v, pri %v, resreq %v",
 		ti.UID, ti.Namespace, ti.Name, ti.Job, ti.Status, ti.Priority, ti.Resreq)
@@ -114,8 +120,10 @@ type JobID types.UID
 
 type tasksMap map[TaskID]*TaskInfo
 
+// NodeResourceMap maps a node name to its resource
 type NodeResourceMap map[string]*Resource
 
+// JobInfo holds all information about a Job
 type JobInfo struct {
 	UID JobID
@@ -141,10 +149,11 @@ type JobInfo struct {
 	CreationTimestamp metav1.Time
 	PodGroup          *v1alpha1.PodGroup
 
-	// TODO(k82cn): keep backward compatbility, removed it when v1alpha1 finalized.
+	// TODO(k82cn): keep backward compatibility; remove it when v1alpha1 is finalized.
 	PDB *policyv1.PodDisruptionBudget
 }
 
+// NewJobInfo creates a new jobInfo for a set of tasks
 func NewJobInfo(uid JobID, tasks ...*TaskInfo) *JobInfo {
 	job := &JobInfo{
 		UID: uid,
@@ -166,10 +175,12 @@ func NewJobInfo(uid JobID, tasks ...*TaskInfo) *JobInfo {
 	return job
 }
 
+// UnsetPodGroup removes podGroup details from a job
 func (ji *JobInfo) UnsetPodGroup() {
 	ji.PodGroup = nil
 }
 
+// SetPodGroup sets podGroup details to a job
 func (ji *JobInfo) SetPodGroup(pg *v1alpha1.PodGroup) {
 	ji.Name = pg.Name
 	ji.Namespace = pg.Namespace
@@ -180,6 +191,7 @@ func (ji *JobInfo) SetPodGroup(pg *v1alpha1.PodGroup) {
 	ji.PodGroup = pg
 }
 
+// SetPDB sets PDB to a job
 func (ji *JobInfo) SetPDB(pdb *policyv1.PodDisruptionBudget) {
 	ji.Name = pdb.Name
 	ji.MinAvailable = pdb.Spec.MinAvailable.IntVal
@@ -189,10 +201,12 @@ func (ji *JobInfo) SetPDB(pdb *policyv1.PodDisruptionBudget) {
 	ji.PDB = pdb
 }
 
+// UnsetPDB removes PDB info of a job
 func (ji *JobInfo) UnsetPDB() {
 	ji.PDB = nil
 }
 
+// GetTasks returns all tasks with the given statuses
 func (ji *JobInfo) GetTasks(statuses ...TaskStatus) []*TaskInfo {
 	var res []*TaskInfo
@@ -215,6 +229,7 @@ func (ji *JobInfo) addTaskIndex(ti *TaskInfo) {
 	ji.TaskStatusIndex[ti.Status][ti.UID] = ti
 }
 
+// AddTaskInfo is used to add a task to a job
 func (ji *JobInfo) AddTaskInfo(ti *TaskInfo) {
 	ji.Tasks[ti.UID] = ti
 	ji.addTaskIndex(ti)
@@ -226,6 +241,7 @@ func (ji *JobInfo) AddTaskInfo(ti *TaskInfo) {
 	}
 }
 
+// UpdateTaskStatus is used to update a task's status in a job
 func (ji *JobInfo) UpdateTaskStatus(task *TaskInfo, status TaskStatus) error {
 	if err := validateStatusUpdate(task.Status, status); err != nil {
 		return err
@@ -251,6 +267,7 @@ func (ji *JobInfo) deleteTaskIndex(ti *TaskInfo) {
 	}
 }
 
+// DeleteTaskInfo is used to delete a task from a job
 func (ji *JobInfo) DeleteTaskInfo(ti *TaskInfo) error {
 	if task, found := ji.Tasks[ti.UID]; found {
 		ji.TotalRequest.Sub(task.Resreq)
@@ -269,6 +286,7 @@ func (ji *JobInfo) DeleteTaskInfo(ti *TaskInfo) error {
 		ti.Namespace, ti.Name, ji.Namespace, ji.Name)
 }
 
+// Clone is used to clone a jobInfo object
 func (ji *JobInfo) Clone() *JobInfo {
 	info := &JobInfo{
 		UID: ji.UID,
@@ -303,6 +321,7 @@ func (ji *JobInfo) Clone() *JobInfo {
 	return info
 }
 
+// String returns a jobInfo object in string format
 func (ji JobInfo) String() string {
 	res := ""
@@ -316,7 +335,7 @@ func (ji JobInfo) String() string {
 		ji.UID, ji.Namespace, ji.Queue, ji.Name, ji.MinAvailable, ji.PodGroup) + res
 }
 
-// Error returns detailed information on why a job's task failed to fit on
+// FitError returns detailed information on why a job's task failed to fit on
 // each available node
 func (ji *JobInfo) FitError() string {
 	if len(ji.NodesFitDelta) == 0 {
@@ -348,3 +367,57 @@ func (ji *JobInfo) FitError() string {
 	reasonMsg := fmt.Sprintf("0/%v nodes are available, %v.", len(ji.NodesFitDelta), strings.Join(sortReasonsHistogram(), ", "))
 	return reasonMsg
 }
+
+// ReadyTaskNum returns the number of tasks that are ready.
+func (ji *JobInfo) ReadyTaskNum() int32 {
+	occupied := 0
+	for status, tasks := range ji.TaskStatusIndex {
+		if AllocatedStatus(status) ||
+			status == Succeeded {
+			occupied = occupied + len(tasks)
+		}
+	}
+
+	return int32(occupied)
+}
+
+// WaitingTaskNum returns the number of tasks that are pipelined.
+func (ji *JobInfo) WaitingTaskNum() int32 {
+	occupied := 0
+	for status, tasks := range ji.TaskStatusIndex {
+		if status == Pipelined {
+			occupied = occupied + len(tasks)
+		}
+	}
+
+	return int32(occupied)
+}
+
+// ValidTaskNum returns the number of tasks that are valid.
+func (ji *JobInfo) ValidTaskNum() int32 {
+	occupied := 0
+	for status, tasks := range ji.TaskStatusIndex {
+		if AllocatedStatus(status) ||
+			status == Succeeded ||
+			status == Pipelined ||
+			status == Pending {
+			occupied = occupied + len(tasks)
+		}
+	}
+
+	return int32(occupied)
+}
+
+// Ready returns whether the job is ready to run
+func (ji *JobInfo) Ready() bool {
+	occupied := ji.ReadyTaskNum()
+
+	return occupied >= ji.MinAvailable
+}
+
+// Pipelined returns whether the number of ready and pipelined tasks is enough
+func (ji *JobInfo) Pipelined() bool {
+	occupied := ji.WaitingTaskNum() + ji.ReadyTaskNum()
+
+	return occupied >= ji.MinAvailable
+}
diff --git a/pkg/scheduler/api/job_info_test.go b/pkg/scheduler/api/job_info_test.go
new file mode 100644
index 0000000000..cc397e9d23
--- /dev/null
+++ b/pkg/scheduler/api/job_info_test.go
@@ -0,0 +1,197 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
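The ReadyTaskNum/WaitingTaskNum/Ready/Pipelined helpers added to job_info.go encode the gang rule: a job may run only when at least MinAvailable tasks hold an allocated or succeeded status, and it may be pipelined when waiting tasks would close the gap. A hypothetical consumer, to make the decision flow concrete (gangState and its labels are illustrative, not part of this patch):

    package actions

    import "volcano.sh/volcano/pkg/scheduler/api"

    // gangState sketches how a scheduling action might branch on the new helpers.
    func gangState(ji *api.JobInfo) string {
    	switch {
    	case ji.Ready():
    		// ReadyTaskNum() >= MinAvailable: the gang can be dispatched now.
    		return "allocate"
    	case ji.Pipelined():
    		// Ready plus pipelined tasks cover MinAvailable: bind once evictions finish.
    		return "pipeline"
    	default:
    		// The gang cannot be satisfied yet; keep the job pending.
    		return "wait"
    	}
    }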
+*/ + +package api + +import ( + "reflect" + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func jobInfoEqual(l, r *JobInfo) bool { + if !reflect.DeepEqual(l, r) { + return false + } + + return true +} + +func TestAddTaskInfo(t *testing.T) { + // case1 + case01UID := JobID("uid") + case01Ns := "c1" + case01Owner := buildOwnerReference("uid") + + case01Pod1 := buildPod(case01Ns, "p1", "", v1.PodPending, buildResourceList("1000m", "1G"), []metav1.OwnerReference{case01Owner}, make(map[string]string)) + case01Task1 := NewTaskInfo(case01Pod1) + case01Pod2 := buildPod(case01Ns, "p2", "n1", v1.PodRunning, buildResourceList("2000m", "2G"), []metav1.OwnerReference{case01Owner}, make(map[string]string)) + case01Task2 := NewTaskInfo(case01Pod2) + case01Pod3 := buildPod(case01Ns, "p3", "n1", v1.PodPending, buildResourceList("1000m", "1G"), []metav1.OwnerReference{case01Owner}, make(map[string]string)) + case01Task3 := NewTaskInfo(case01Pod3) + case01Pod4 := buildPod(case01Ns, "p4", "n1", v1.PodPending, buildResourceList("1000m", "1G"), []metav1.OwnerReference{case01Owner}, make(map[string]string)) + case01Task4 := NewTaskInfo(case01Pod4) + + tests := []struct { + name string + uid JobID + pods []*v1.Pod + expected *JobInfo + }{ + { + name: "add 1 pending owner pod, 1 running owner pod", + uid: case01UID, + pods: []*v1.Pod{case01Pod1, case01Pod2, case01Pod3, case01Pod4}, + expected: &JobInfo{ + UID: case01UID, + Allocated: buildResource("4000m", "4G"), + TotalRequest: buildResource("5000m", "5G"), + Tasks: tasksMap{ + case01Task1.UID: case01Task1, + case01Task2.UID: case01Task2, + case01Task3.UID: case01Task3, + case01Task4.UID: case01Task4, + }, + TaskStatusIndex: map[TaskStatus]tasksMap{ + Running: { + case01Task2.UID: case01Task2, + }, + Pending: { + case01Task1.UID: case01Task1, + }, + Bound: { + case01Task3.UID: case01Task3, + case01Task4.UID: case01Task4, + }, + }, + NodeSelector: make(map[string]string), + NodesFitDelta: make(NodeResourceMap), + }, + }, + } + + for i, test := range tests { + ps := NewJobInfo(test.uid) + + for _, pod := range test.pods { + pi := NewTaskInfo(pod) + ps.AddTaskInfo(pi) + } + + if !jobInfoEqual(ps, test.expected) { + t.Errorf("podset info %d: \n expected: %v, \n got: %v \n", + i, test.expected, ps) + } + } +} + +func TestDeleteTaskInfo(t *testing.T) { + // case1 + case01UID := JobID("owner1") + case01Ns := "c1" + case01Owner := buildOwnerReference(string(case01UID)) + case01Pod1 := buildPod(case01Ns, "p1", "", v1.PodPending, buildResourceList("1000m", "1G"), []metav1.OwnerReference{case01Owner}, make(map[string]string)) + case01Task1 := NewTaskInfo(case01Pod1) + case01Pod2 := buildPod(case01Ns, "p2", "n1", v1.PodRunning, buildResourceList("2000m", "2G"), []metav1.OwnerReference{case01Owner}, make(map[string]string)) + case01Pod3 := buildPod(case01Ns, "p3", "n1", v1.PodRunning, buildResourceList("3000m", "3G"), []metav1.OwnerReference{case01Owner}, make(map[string]string)) + case01Task3 := NewTaskInfo(case01Pod3) + + // case2 + case02UID := JobID("owner2") + case02Ns := "c2" + case02Owner := buildOwnerReference(string(case02UID)) + case02Pod1 := buildPod(case02Ns, "p1", "", v1.PodPending, buildResourceList("1000m", "1G"), []metav1.OwnerReference{case02Owner}, make(map[string]string)) + case02Task1 := NewTaskInfo(case02Pod1) + case02Pod2 := buildPod(case02Ns, "p2", "n1", v1.PodPending, buildResourceList("2000m", "2G"), []metav1.OwnerReference{case02Owner}, make(map[string]string)) + case02Pod3 := buildPod(case02Ns, "p3", "n1", 
v1.PodRunning, buildResourceList("3000m", "3G"), []metav1.OwnerReference{case02Owner}, make(map[string]string)) + case02Task3 := NewTaskInfo(case02Pod3) + + tests := []struct { + name string + uid JobID + pods []*v1.Pod + rmPods []*v1.Pod + expected *JobInfo + }{ + { + name: "add 1 pending owner pod, 2 running owner pod, remove 1 running owner pod", + uid: case01UID, + pods: []*v1.Pod{case01Pod1, case01Pod2, case01Pod3}, + rmPods: []*v1.Pod{case01Pod2}, + expected: &JobInfo{ + UID: case01UID, + Allocated: buildResource("3000m", "3G"), + TotalRequest: buildResource("4000m", "4G"), + Tasks: tasksMap{ + case01Task1.UID: case01Task1, + case01Task3.UID: case01Task3, + }, + TaskStatusIndex: map[TaskStatus]tasksMap{ + Pending: {case01Task1.UID: case01Task1}, + Running: {case01Task3.UID: case01Task3}, + }, + NodeSelector: make(map[string]string), + NodesFitDelta: make(NodeResourceMap), + }, + }, + { + name: "add 2 pending owner pod, 1 running owner pod, remove 1 pending owner pod", + uid: case02UID, + pods: []*v1.Pod{case02Pod1, case02Pod2, case02Pod3}, + rmPods: []*v1.Pod{case02Pod2}, + expected: &JobInfo{ + UID: case02UID, + Allocated: buildResource("3000m", "3G"), + TotalRequest: buildResource("4000m", "4G"), + Tasks: tasksMap{ + case02Task1.UID: case02Task1, + case02Task3.UID: case02Task3, + }, + TaskStatusIndex: map[TaskStatus]tasksMap{ + Pending: { + case02Task1.UID: case02Task1, + }, + Running: { + case02Task3.UID: case02Task3, + }, + }, + NodeSelector: make(map[string]string), + NodesFitDelta: make(NodeResourceMap), + }, + }, + } + + for i, test := range tests { + ps := NewJobInfo(test.uid) + + for _, pod := range test.pods { + pi := NewTaskInfo(pod) + ps.AddTaskInfo(pi) + } + + for _, pod := range test.rmPods { + pi := NewTaskInfo(pod) + ps.DeleteTaskInfo(pi) + } + + if !jobInfoEqual(ps, test.expected) { + t.Errorf("podset info %d: \n expected: %v, \n got: %v \n", + i, test.expected, ps) + } + } +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/node_info.go b/pkg/scheduler/api/node_info.go similarity index 90% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/node_info.go rename to pkg/scheduler/api/node_info.go index 6565bde8ae..c459a1eeca 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/node_info.go +++ b/pkg/scheduler/api/node_info.go @@ -41,6 +41,7 @@ type NodeInfo struct { Tasks map[TaskID]*TaskInfo } +// NewNodeInfo is used to create new nodeInfo object func NewNodeInfo(node *v1.Node) *NodeInfo { if node == nil { return &NodeInfo{ @@ -70,6 +71,7 @@ func NewNodeInfo(node *v1.Node) *NodeInfo { } } +// Clone used to clone nodeInfo Object func (ni *NodeInfo) Clone() *NodeInfo { res := NewNodeInfo(ni.Node) @@ -80,6 +82,7 @@ func (ni *NodeInfo) Clone() *NodeInfo { return res } +// SetNode sets kubernetes node object to nodeInfo object func (ni *NodeInfo) SetNode(node *v1.Node) { ni.Name = node.Name ni.Node = node @@ -98,6 +101,7 @@ func (ni *NodeInfo) SetNode(node *v1.Node) { } } +// AddTask is used to add a task in nodeInfo object func (ni *NodeInfo) AddTask(task *TaskInfo) error { key := PodKey(task.Pod) if _, found := ni.Tasks[key]; found { @@ -128,6 +132,7 @@ func (ni *NodeInfo) AddTask(task *TaskInfo) error { return nil } +// RemoveTask used to remove a task from nodeInfo object func (ni *NodeInfo) RemoveTask(ti *TaskInfo) error { key := PodKey(ti.Pod) @@ -156,6 +161,7 @@ func (ni *NodeInfo) RemoveTask(ti *TaskInfo) error { return nil } +// UpdateTask is used to update a task in nodeInfo object func (ni 
*NodeInfo) UpdateTask(ti *TaskInfo) error { if err := ni.RemoveTask(ti); err != nil { return err @@ -164,6 +170,7 @@ func (ni *NodeInfo) UpdateTask(ti *TaskInfo) error { return ni.AddTask(ti) } +// String returns nodeInfo details in string format func (ni NodeInfo) String() string { res := "" @@ -178,6 +185,7 @@ func (ni NodeInfo) String() string { } +// Pods returns all pods running in that node func (ni *NodeInfo) Pods() (pods []*v1.Pod) { for _, t := range ni.Tasks { pods = append(pods, t.Pod) diff --git a/pkg/scheduler/api/node_info_test.go b/pkg/scheduler/api/node_info_test.go new file mode 100644 index 0000000000..12b2f3404f --- /dev/null +++ b/pkg/scheduler/api/node_info_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "reflect" + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func nodeInfoEqual(l, r *NodeInfo) bool { + if !reflect.DeepEqual(l, r) { + return false + } + + return true +} + +func TestNodeInfo_AddPod(t *testing.T) { + // case1 + case01Node := buildNode("n1", buildResourceList("8000m", "10G")) + case01Pod1 := buildPod("c1", "p1", "n1", v1.PodRunning, buildResourceList("1000m", "1G"), []metav1.OwnerReference{}, make(map[string]string)) + case01Pod2 := buildPod("c1", "p2", "n1", v1.PodRunning, buildResourceList("2000m", "2G"), []metav1.OwnerReference{}, make(map[string]string)) + + tests := []struct { + name string + node *v1.Node + pods []*v1.Pod + expected *NodeInfo + }{ + { + name: "add 2 running non-owner pod", + node: case01Node, + pods: []*v1.Pod{case01Pod1, case01Pod2}, + expected: &NodeInfo{ + Name: "n1", + Node: case01Node, + Idle: buildResource("5000m", "7G"), + Used: buildResource("3000m", "3G"), + Releasing: EmptyResource(), + Allocatable: buildResource("8000m", "10G"), + Capability: buildResource("8000m", "10G"), + Tasks: map[TaskID]*TaskInfo{ + "c1/p1": NewTaskInfo(case01Pod1), + "c1/p2": NewTaskInfo(case01Pod2), + }, + }, + }, + } + + for i, test := range tests { + ni := NewNodeInfo(test.node) + + for _, pod := range test.pods { + pi := NewTaskInfo(pod) + ni.AddTask(pi) + } + + if !nodeInfoEqual(ni, test.expected) { + t.Errorf("node info %d: \n expected %v, \n got %v \n", + i, test.expected, ni) + } + } +} + +func TestNodeInfo_RemovePod(t *testing.T) { + // case1 + case01Node := buildNode("n1", buildResourceList("8000m", "10G")) + case01Pod1 := buildPod("c1", "p1", "n1", v1.PodRunning, buildResourceList("1000m", "1G"), []metav1.OwnerReference{}, make(map[string]string)) + case01Pod2 := buildPod("c1", "p2", "n1", v1.PodRunning, buildResourceList("2000m", "2G"), []metav1.OwnerReference{}, make(map[string]string)) + case01Pod3 := buildPod("c1", "p3", "n1", v1.PodRunning, buildResourceList("3000m", "3G"), []metav1.OwnerReference{}, make(map[string]string)) + + tests := []struct { + name string + node *v1.Node + pods []*v1.Pod + rmPods []*v1.Pod + expected *NodeInfo + }{ + { + name: "add 3 running non-owner pod, remove 1 running non-owner pod", 
+ node: case01Node, + pods: []*v1.Pod{case01Pod1, case01Pod2, case01Pod3}, + rmPods: []*v1.Pod{case01Pod2}, + expected: &NodeInfo{ + Name: "n1", + Node: case01Node, + Idle: buildResource("4000m", "6G"), + Used: buildResource("4000m", "4G"), + Releasing: EmptyResource(), + Allocatable: buildResource("8000m", "10G"), + Capability: buildResource("8000m", "10G"), + Tasks: map[TaskID]*TaskInfo{ + "c1/p1": NewTaskInfo(case01Pod1), + "c1/p3": NewTaskInfo(case01Pod3), + }, + }, + }, + } + + for i, test := range tests { + ni := NewNodeInfo(test.node) + + for _, pod := range test.pods { + pi := NewTaskInfo(pod) + ni.AddTask(pi) + } + + for _, pod := range test.rmPods { + pi := NewTaskInfo(pod) + ni.RemoveTask(pi) + } + + if !nodeInfoEqual(ni, test.expected) { + t.Errorf("node info %d: \n expected %v, \n got %v \n", + i, test.expected, ni) + } + } +} diff --git a/pkg/scheduler/api/pod_info.go b/pkg/scheduler/api/pod_info.go new file mode 100644 index 0000000000..045bccc9d7 --- /dev/null +++ b/pkg/scheduler/api/pod_info.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/api/core/v1" +) + +// Refer k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go#GetResourceRequest. +// +// GetResourceRequest returns a *Resource that covers the largest width in each resource dimension. +// Because init-containers run sequentially, we collect the max in each dimension iteratively. +// In contrast, we sum the resource vectors for regular containers since they run simultaneously. +// +// To be consistent with kubernetes default scheduler, it is only used for predicates of actions(e.g. +// allocate, backfill, preempt, reclaim), please use GetPodResourceWithoutInitContainers for other cases. +// +// Example: +// +// Pod: +// InitContainers +// IC1: +// CPU: 2 +// Memory: 1G +// IC2: +// CPU: 2 +// Memory: 3G +// Containers +// C1: +// CPU: 2 +// Memory: 1G +// C2: +// CPU: 1 +// Memory: 1G +// +// Result: CPU: 3, Memory: 3G + +// GetPodResourceRequest returns all the resource required for that pod +func GetPodResourceRequest(pod *v1.Pod) *Resource { + result := GetPodResourceWithoutInitContainers(pod) + + // take max_resource(sum_pod, any_init_container) + for _, container := range pod.Spec.InitContainers { + result.SetMaxResource(NewResource(container.Resources.Requests)) + } + + return result +} + +// GetPodResourceWithoutInitContainers returns Pod's resource request, it does not contain +// init containers' resource request. +func GetPodResourceWithoutInitContainers(pod *v1.Pod) *Resource { + result := EmptyResource() + for _, container := range pod.Spec.Containers { + result.Add(NewResource(container.Resources.Requests)) + } + + return result +} diff --git a/pkg/scheduler/api/pod_info_test.go b/pkg/scheduler/api/pod_info_test.go new file mode 100644 index 0000000000..873b92f2cb --- /dev/null +++ b/pkg/scheduler/api/pod_info_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Kubernetes Authors. 
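GetPodResourceRequest feeds TaskInfo.InitResreq (used for placement: predicates, preempt, reclaim), while GetPodResourceWithoutInitContainers feeds Resreq (running-state accounting). The tests that follow verify the arithmetic; here is a compact sketch of the same rule with the tests' numbers (the example function and the rl helper are illustrative, not part of the patch; the printed format comes from Resource.String in resource_info.go):

    package api

    import (
    	"fmt"

    	"k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/api/resource"
    )

    func ExamplePodResourceRequest() {
    	rl := func(cpu, mem string) v1.ResourceList {
    		return v1.ResourceList{
    			v1.ResourceCPU:    resource.MustParse(cpu),
    			v1.ResourceMemory: resource.MustParse(mem),
    		}
    	}
    	pod := &v1.Pod{Spec: v1.PodSpec{
    		InitContainers: []v1.Container{
    			{Resources: v1.ResourceRequirements{Requests: rl("2000m", "5G")}},
    		},
    		Containers: []v1.Container{
    			{Resources: v1.ResourceRequirements{Requests: rl("1000m", "1G")}},
    			{Resources: v1.ResourceRequirements{Requests: rl("2000m", "1G")}},
    		},
    	}}
    	// Containers sum to 3000m CPU / 2G memory; the init container needs
    	// 2000m / 5G, so the launch-time max is 3000m CPU / 5G memory.
    	fmt.Println(GetPodResourceWithoutInitContainers(pod)) // cpu 3000.00, memory 2000000000.00, GPU 0.00
    	fmt.Println(GetPodResourceRequest(pod))               // cpu 3000.00, memory 5000000000.00, GPU 0.00
    }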
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "reflect" + "testing" + + "k8s.io/api/core/v1" +) + +func TestGetPodResourceRequest(t *testing.T) { + tests := []struct { + name string + pod *v1.Pod + expectedResource *Resource + }{ + { + name: "get resource for pod without init containers", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("1000m", "1G"), + }, + }, + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("2000m", "1G"), + }, + }, + }, + }, + }, + expectedResource: NewResource(buildResourceList("3000m", "2G")), + }, + { + name: "get resource for pod with init containers", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + InitContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("2000m", "5G"), + }, + }, + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("2000m", "1G"), + }, + }, + }, + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("1000m", "1G"), + }, + }, + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("2000m", "1G"), + }, + }, + }, + }, + }, + expectedResource: NewResource(buildResourceList("3000m", "5G")), + }, + } + + for i, test := range tests { + req := GetPodResourceRequest(test.pod) + if !reflect.DeepEqual(req, test.expectedResource) { + t.Errorf("case %d(%s) failed: \n expected %v, \n got: %v \n", + i, test.name, test.expectedResource, req) + } + } +} + +func TestGetPodResourceWithoutInitContainers(t *testing.T) { + tests := []struct { + name string + pod *v1.Pod + expectedResource *Resource + }{ + { + name: "get resource for pod without init containers", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("1000m", "1G"), + }, + }, + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("2000m", "1G"), + }, + }, + }, + }, + }, + expectedResource: NewResource(buildResourceList("3000m", "2G")), + }, + { + name: "get resource for pod with init containers", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + InitContainers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("2000m", "5G"), + }, + }, + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("2000m", "1G"), + }, + }, + }, + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("1000m", "1G"), + }, + }, + { + Resources: v1.ResourceRequirements{ + Requests: buildResourceList("2000m", "1G"), + }, + }, + }, + }, + }, + expectedResource: NewResource(buildResourceList("3000m", "2G")), + }, + } + + for i, test := range tests { + req := GetPodResourceWithoutInitContainers(test.pod) + if !reflect.DeepEqual(req, test.expectedResource) { + t.Errorf("case %d(%s) failed: \n expected %v, \n got: %v \n", + i, test.name, test.expectedResource, req) + } + } +} diff --git 
a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/queue_info.go b/pkg/scheduler/api/queue_info.go
similarity index 80%
rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/queue_info.go
rename to pkg/scheduler/api/queue_info.go
index 869a84f204..6c4391b989 100644
--- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/queue_info.go
+++ b/pkg/scheduler/api/queue_info.go
@@ -19,11 +19,13 @@ package api
 import (
 	"k8s.io/apimachinery/pkg/types"
 
-	arbcorev1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
+	arbcorev1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1"
 )
 
+// QueueID is a UID type, serving as the unique ID for each queue
 type QueueID types.UID
 
+// QueueInfo holds all details about a queue
 type QueueInfo struct {
 	UID  QueueID
 	Name string
@@ -33,6 +35,7 @@ type QueueInfo struct {
 	Queue *arbcorev1.Queue
 }
 
+// NewQueueInfo creates a new queueInfo object
 func NewQueueInfo(queue *arbcorev1.Queue) *QueueInfo {
 	return &QueueInfo{
 		UID:  QueueID(queue.Name),
@@ -44,6 +47,7 @@ func NewQueueInfo(queue *arbcorev1.Queue) *QueueInfo {
 	}
 }
 
+// Clone is used to clone the queueInfo object
 func (q *QueueInfo) Clone() *QueueInfo {
 	return &QueueInfo{
 		UID: q.UID,
diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/resource_info.go b/pkg/scheduler/api/resource_info.go
similarity index 75%
rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/resource_info.go
rename to pkg/scheduler/api/resource_info.go
index 3640b90922..b56ae01453 100644
--- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/resource_info.go
+++ b/pkg/scheduler/api/resource_info.go
@@ -23,6 +23,7 @@ import (
 	"k8s.io/api/core/v1"
 )
 
+// Resource struct defines all the resource types
 type Resource struct {
 	MilliCPU float64
 	Memory   float64
@@ -33,14 +34,16 @@ type Resource struct {
 }
 
 const (
-	// need to follow https://github.com/NVIDIA/k8s-device-plugin/blob/66a35b71ac4b5cbfb04714678b548bd77e5ba719/server.go#L20
+	// GPUResourceName needs to follow https://github.com/NVIDIA/k8s-device-plugin/blob/66a35b71ac4b5cbfb04714678b548bd77e5ba719/server.go#L20
 	GPUResourceName = "nvidia.com/gpu"
 )
 
+// EmptyResource creates an empty resource object and returns it
 func EmptyResource() *Resource {
 	return &Resource{}
 }
 
+// Clone is used to clone a resource type
 func (r *Resource) Clone() *Resource {
 	clone := &Resource{
 		MilliCPU: r.MilliCPU,
@@ -55,6 +58,7 @@ var minMilliCPU float64 = 10
 var minMilliGPU float64 = 10
 var minMemory float64 = 10 * 1024 * 1024
 
+// NewResource creates a new resource object from a resource list
 func NewResource(rl v1.ResourceList) *Resource {
 	r := EmptyResource()
 	for rName, rQuant := range rl {
@@ -72,10 +76,12 @@ func NewResource(rl v1.ResourceList) *Resource {
 	return r
 }
 
+// IsEmpty returns true only when every resource is below its minimum possible value
 func (r *Resource) IsEmpty() bool {
 	return r.MilliCPU < minMilliCPU && r.Memory < minMemory && r.MilliGPU < minMilliGPU
}
 
+// IsZero checks whether the given resource is below its minimum possible value
 func (r *Resource) IsZero(rn v1.ResourceName) bool {
 	switch rn {
 	case v1.ResourceCPU:
@@ -89,6 +95,7 @@ func (r *Resource) IsZero(rn v1.ResourceName) bool {
 	}
 }
 
+// Add is used to add two resources
 func (r *Resource) Add(rr *Resource) *Resource {
 	r.MilliCPU += rr.MilliCPU
 	r.Memory += rr.Memory
@@ -109,7 +116,24 @@ func (r *Resource) Sub(rr *Resource) *Resource {
 		r, rr))
 }
 
-//Computes the delta between a resource oject representing available
+// SetMaxResource compares with another Resource and takes the max value for each resource type.
+func (r *Resource) SetMaxResource(rr *Resource) {
+	if r == nil || rr == nil {
+		return
+	}
+
+	if rr.MilliCPU > r.MilliCPU {
+		r.MilliCPU = rr.MilliCPU
+	}
+	if rr.Memory > r.Memory {
+		r.Memory = rr.Memory
+	}
+	if rr.MilliGPU > r.MilliGPU {
+		r.MilliGPU = rr.MilliGPU
+	}
+}
+
+// FitDelta computes the delta between a resource object representing available
 //resources an operand representing resources being requested. Any
 //field that is less than 0 after the operation represents an
 //insufficient resource.
@@ -128,6 +152,7 @@ func (r *Resource) FitDelta(rr *Resource) *Resource {
 	return r
 }
 
+// Multi multiplies the resource by the ratio provided
 func (r *Resource) Multi(ratio float64) *Resource {
 	r.MilliCPU = r.MilliCPU * ratio
 	r.Memory = r.Memory * ratio
@@ -135,21 +160,25 @@ func (r *Resource) Multi(ratio float64) *Resource {
 	return r
 }
 
+// Less checks whether a resource is less than the other
 func (r *Resource) Less(rr *Resource) bool {
 	return r.MilliCPU < rr.MilliCPU &&
 		r.Memory < rr.Memory &&
 		r.MilliGPU < rr.MilliGPU
 }
 
+// LessEqual checks whether a resource is less than or equal to the other resource
 func (r *Resource) LessEqual(rr *Resource) bool {
 	return (r.MilliCPU < rr.MilliCPU || math.Abs(rr.MilliCPU-r.MilliCPU) < minMilliCPU) &&
 		(r.Memory < rr.Memory || math.Abs(rr.Memory-r.Memory) < minMemory) &&
 		(r.MilliGPU < rr.MilliGPU || math.Abs(rr.MilliGPU-r.MilliGPU) < minMilliGPU)
 }
 
+// String returns resource details in string format
 func (r *Resource) String() string {
 	return fmt.Sprintf("cpu %0.2f, memory %0.2f, GPU %0.2f",
 		r.MilliCPU, r.Memory, r.MilliGPU)
 }
 
+// Get returns the resource value for that particular resource type
 func (r *Resource) Get(rn v1.ResourceName) float64 {
 	switch rn {
 	case v1.ResourceCPU:
@@ -163,6 +192,7 @@ func (r *Resource) Get(rn v1.ResourceName) float64 {
 	}
 }
 
+// ResourceNames returns all resource types
 func ResourceNames() []v1.ResourceName {
 	return []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, GPUResourceName}
 }
diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/test_utils.go b/pkg/scheduler/api/test_utils.go
similarity index 100%
rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/test_utils.go
rename to pkg/scheduler/api/test_utils.go
diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/types.go b/pkg/scheduler/api/types.go
similarity index 93%
rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/types.go
rename to pkg/scheduler/api/types.go
index 9fbd58c6c0..4d9b67f586 100644
--- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/types.go
+++ b/pkg/scheduler/api/types.go
@@ -88,13 +88,14 @@ type CompareFn func(interface{}, interface{}) int
 // ValidateFn is the func declaration used to check object's status.
 type ValidateFn func(interface{}) bool
 
-//
+// ValidateResult is a struct used to hold the result of a validation
 type ValidateResult struct {
 	Pass    bool
 	Reason  string
 	Message string
 }
 
+// ValidateExFn is the func declaration used to validate an object, returning a ValidateResult
 type ValidateExFn func(interface{}) *ValidateResult
 
 // PredicateFn is the func declaration used to predicate node for task.
@@ -104,4 +105,4 @@ type PredicateFn func(*TaskInfo, *NodeInfo) error
 type EvictableFn func(*TaskInfo, []*TaskInfo) []*TaskInfo
 
 // NodeOrderFn is the func declaration used to get priority score for a node for a particular task.
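The hunk that follows widens the node score from int to float64, so priority plugins can return fractional scores without lossy rounding. A hypothetical NodeOrderFn under the new contract (the scoring formula is invented for illustration):

    package plugins

    import "volcano.sh/volcano/pkg/scheduler/api"

    // leastUsedScore ranks nodes by the fraction of idle CPU, scaled to 0..10.
    // With an int return type this would collapse into a handful of buckets;
    // float64 keeps the full resolution of the ratio.
    func leastUsedScore(task *api.TaskInfo, node *api.NodeInfo) (float64, error) {
    	if node.Allocatable.MilliCPU == 0 {
    		return 0, nil
    	}
    	return 10 * node.Idle.MilliCPU / node.Allocatable.MilliCPU, nil
    }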
-type NodeOrderFn func(*TaskInfo, *NodeInfo) (int, error) +type NodeOrderFn func(*TaskInfo, *NodeInfo) (float64, error) diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/cache.go b/pkg/scheduler/cache/cache.go similarity index 88% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/cache.go rename to pkg/scheduler/cache/cache.go index df0b533bfe..fe91ea2d84 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/cache.go +++ b/pkg/scheduler/cache/cache.go @@ -44,14 +44,15 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/scheduler/volumebinder" - "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - kbver "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" - "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme" - kbschema "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme" - kbinfo "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions" - kbinfov1 "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - kbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + "volcano.sh/volcano/cmd/kube-batch/app/options" + "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + kbver "volcano.sh/volcano/pkg/client/clientset/versioned" + "volcano.sh/volcano/pkg/client/clientset/versioned/scheme" + kbschema "volcano.sh/volcano/pkg/client/clientset/versioned/scheme" + kbinfo "volcano.sh/volcano/pkg/client/informers/externalversions" + kbinfov1 "volcano.sh/volcano/pkg/client/informers/externalversions/scheduling/v1alpha1" + "volcano.sh/volcano/pkg/scheduler/api" + kbapi "volcano.sh/volcano/pkg/scheduler/api" ) func init() { @@ -67,6 +68,7 @@ func New(config *rest.Config, schedulerName string, defaultQueue string) Cache { return newSchedulerCache(config, schedulerName, defaultQueue) } +//SchedulerCache cache for the kube batch type SchedulerCache struct { sync.Mutex @@ -74,6 +76,8 @@ type SchedulerCache struct { kbclient *kbver.Clientset defaultQueue string + // schedulerName is the name for volcano scheduler + schedulerName string podInformer infov1.PodInformer nodeInformer infov1.NodeInformer @@ -108,6 +112,7 @@ type defaultBinder struct { kubeclient *kubernetes.Clientset } +//Bind will send bind request to api server func (db *defaultBinder) Bind(p *v1.Pod, hostname string) error { if err := db.kubeclient.CoreV1().Pods(p.Namespace).Bind(&v1.Binding{ ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID}, @@ -126,15 +131,11 @@ type defaultEvictor struct { kubeclient *kubernetes.Clientset } +//Evict will send delete pod request to api server func (de *defaultEvictor) Evict(p *v1.Pod) error { - // TODO (k82cn): makes grace period configurable. 
-	threeSecs := int64(3)
-
 	glog.V(3).Infof("Evicting pod %v/%v", p.Namespace, p.Name)
-
-	if err := de.kubeclient.CoreV1().Pods(p.Namespace).Delete(p.Name, &metav1.DeleteOptions{
-		GracePeriodSeconds: &threeSecs,
-	}); err != nil {
+	if err := de.kubeclient.CoreV1().Pods(p.Namespace).Delete(p.Name, nil); err != nil {
 		glog.Errorf("Failed to evict pod <%v/%v>: %#v", p.Namespace, p.Name, err)
 		return err
 	}
@@ -147,7 +148,7 @@ type defaultStatusUpdater struct {
 	kbclient   *kbver.Clientset
 }
 
-// Update pod with podCondition
+// UpdatePodCondition will update the pod with the given podCondition
 func (su *defaultStatusUpdater) UpdatePodCondition(pod *v1.Pod, condition *v1.PodCondition) (*v1.Pod, error) {
 	glog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
 	if podutil.UpdatePodCondition(&pod.Status, condition) {
@@ -156,7 +157,7 @@ func (su *defaultStatusUpdater) UpdatePodCondition(pod *v1.Pod, condition *v1.Po
 	return pod, nil
 }
 
-// Update pod with podCondition
+// UpdatePodGroup will update the podGroup
 func (su *defaultStatusUpdater) UpdatePodGroup(pg *v1alpha1.PodGroup) (*v1alpha1.PodGroup, error) {
 	return su.kbclient.SchedulingV1alpha1().PodGroups(pg.Namespace).Update(pg)
 }
@@ -165,7 +166,7 @@ type defaultVolumeBinder struct {
 	volumeBinder *volumebinder.VolumeBinder
 }
 
-// AllocateVolume allocates volume on the host to the task
+// AllocateVolumes allocates volume on the host to the task
 func (dvb *defaultVolumeBinder) AllocateVolumes(task *api.TaskInfo, hostname string) error {
 	allBound, err := dvb.volumeBinder.Binder.AssumePodVolumes(task.Pod, hostname)
 	task.VolumeReady = allBound
@@ -173,7 +174,7 @@ func (dvb *defaultVolumeBinder) AllocateVolumes(task *api.TaskInfo, hostname str
 	return err
 }
 
-// BindVolume binds volumes to the task
+// BindVolumes binds volumes to the task
 func (dvb *defaultVolumeBinder) BindVolumes(task *api.TaskInfo) error {
 	// If task's volumes are ready, did not bind them again.
 	if task.VolumeReady {
@@ -194,6 +195,7 @@ func newSchedulerCache(config *rest.Config, schedulerName string, defaultQueue s
 		kubeclient:    kubernetes.NewForConfigOrDie(config),
 		kbclient:      kbver.NewForConfigOrDie(config),
 		defaultQueue:  defaultQueue,
+		schedulerName: schedulerName,
 	}
 
 	// Prepare event clients.
@@ -297,6 +299,7 @@ func newSchedulerCache(config *rest.Config, schedulerName string, defaultQueue s
 	return sc
 }
 
+// Run starts the schedulerCache
 func (sc *SchedulerCache) Run(stopCh <-chan struct{}) {
 	go sc.pdbInformer.Informer().Run(stopCh)
 	go sc.podInformer.Informer().Run(stopCh)
@@ -306,7 +309,10 @@ func (sc *SchedulerCache) Run(stopCh <-chan struct{}) {
 	go sc.pvcInformer.Informer().Run(stopCh)
 	go sc.scInformer.Informer().Run(stopCh)
 	go sc.queueInformer.Informer().Run(stopCh)
-	go sc.pcInformer.Informer().Run(stopCh)
+
+	if options.ServerOpts.EnablePriorityClass {
+		go sc.pcInformer.Informer().Run(stopCh)
+	}
 
 	// Re-sync error tasks.
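Run now starts the PriorityClass informer only behind options.ServerOpts.EnablePriorityClass, and the WaitForCacheSync change in the next hunk gates its HasSynced func the same way. The inline closure used there is equivalent to this flattened sketch (a hypothetical helper within cache.go; only two of the mandatory informers are shown):

    // waitForCacheSyncSketch shows the gated-informer pattern: collect the
    // mandatory HasSynced funcs, append optional ones behind their feature
    // flag, then pass the whole slice to cache.WaitForCacheSync.
    func (sc *SchedulerCache) waitForCacheSyncSketch(stopCh <-chan struct{}) bool {
    	informerSynced := []cache.InformerSynced{
    		sc.podInformer.Informer().HasSynced,
    		sc.nodeInformer.Informer().HasSynced,
    		// ...remaining mandatory informers elided...
    	}
    	if options.ServerOpts.EnablePriorityClass {
    		informerSynced = append(informerSynced, sc.pcInformer.Informer().HasSynced)
    	}
    	return cache.WaitForCacheSync(stopCh, informerSynced...)
    }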
go wait.Until(sc.processResyncTask, 0, stopCh) @@ -315,21 +321,30 @@ func (sc *SchedulerCache) Run(stopCh <-chan struct{}) { go wait.Until(sc.processCleanupJob, 0, stopCh) } +// WaitForCacheSync sync the cache with the api server func (sc *SchedulerCache) WaitForCacheSync(stopCh <-chan struct{}) bool { return cache.WaitForCacheSync(stopCh, - sc.pdbInformer.Informer().HasSynced, - sc.podInformer.Informer().HasSynced, - sc.podGroupInformer.Informer().HasSynced, - sc.nodeInformer.Informer().HasSynced, - sc.pvInformer.Informer().HasSynced, - sc.pvcInformer.Informer().HasSynced, - sc.scInformer.Informer().HasSynced, - sc.queueInformer.Informer().HasSynced, - sc.pcInformer.Informer().HasSynced, + func() []cache.InformerSynced { + informerSynced := []cache.InformerSynced{ + sc.pdbInformer.Informer().HasSynced, + sc.podInformer.Informer().HasSynced, + sc.podGroupInformer.Informer().HasSynced, + sc.nodeInformer.Informer().HasSynced, + sc.pvInformer.Informer().HasSynced, + sc.pvcInformer.Informer().HasSynced, + sc.scInformer.Informer().HasSynced, + sc.queueInformer.Informer().HasSynced, + } + if options.ServerOpts.EnablePriorityClass { + informerSynced = append(informerSynced, sc.pcInformer.Informer().HasSynced) + } + return informerSynced + }()..., ) } +// findJobAndTask returns job and the task info func (sc *SchedulerCache) findJobAndTask(taskInfo *kbapi.TaskInfo) (*kbapi.JobInfo, *kbapi.TaskInfo, error) { job, found := sc.Jobs[taskInfo.Job] if !found { @@ -346,6 +361,7 @@ func (sc *SchedulerCache) findJobAndTask(taskInfo *kbapi.TaskInfo) (*kbapi.JobIn return job, task, nil } +// Evict will evict the pod func (sc *SchedulerCache) Evict(taskInfo *kbapi.TaskInfo, reason string) error { sc.Mutex.Lock() defer sc.Mutex.Unlock() @@ -423,18 +439,20 @@ func (sc *SchedulerCache) Bind(taskInfo *kbapi.TaskInfo, hostname string) error go func() { if err := sc.Binder.Bind(p, hostname); err != nil { sc.resyncTask(task) + } else { + sc.Recorder.Eventf(p, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v/%v to %v", p.Namespace, p.Name, hostname) } }() return nil } -// AllocateVolume allocates volume on the host to the task +// AllocateVolumes allocates volume on the host to the task func (sc *SchedulerCache) AllocateVolumes(task *api.TaskInfo, hostname string) error { return sc.VolumeBinder.AllocateVolumes(task, hostname) } -// BindVolume binds volumes to the task +// BindVolumes binds volumes to the task func (sc *SchedulerCache) BindVolumes(task *api.TaskInfo) error { return sc.VolumeBinder.BindVolumes(task) } @@ -510,6 +528,7 @@ func (sc *SchedulerCache) processResyncTask() { } } +// Snapshot returns the complete snapshot of the cluster from cache func (sc *SchedulerCache) Snapshot() *kbapi.ClusterInfo { sc.Mutex.Lock() defer sc.Mutex.Unlock() @@ -564,6 +583,7 @@ func (sc *SchedulerCache) Snapshot() *kbapi.ClusterInfo { return snapshot } +// String returns information about the cache in a string format func (sc *SchedulerCache) String() string { sc.Mutex.Lock() defer sc.Mutex.Unlock() diff --git a/pkg/scheduler/cache/cache_test.go b/pkg/scheduler/cache/cache_test.go new file mode 100644 index 0000000000..7efa36b2c4 --- /dev/null +++ b/pkg/scheduler/cache/cache_test.go @@ -0,0 +1,309 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "reflect" + "testing" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "volcano.sh/volcano/pkg/scheduler/api" +) + +func nodesEqual(l, r map[string]*api.NodeInfo) bool { + if len(l) != len(r) { + return false + } + + for k, n := range l { + if !reflect.DeepEqual(n, r[k]) { + return false + } + } + + return true +} + +func jobsEqual(l, r map[api.JobID]*api.JobInfo) bool { + if len(l) != len(r) { + return false + } + + for k, p := range l { + if !reflect.DeepEqual(p, r[k]) { + return false + } + } + + return true +} + +func cacheEqual(l, r *SchedulerCache) bool { + return nodesEqual(l.Nodes, r.Nodes) && + jobsEqual(l.Jobs, r.Jobs) +} + +func buildNode(name string, alloc v1.ResourceList) *v1.Node { + return &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(name), + Name: name, + }, + Status: v1.NodeStatus{ + Capacity: alloc, + Allocatable: alloc, + }, + } +} + +func buildPod(ns, n, nn string, + p v1.PodPhase, req v1.ResourceList, + owner []metav1.OwnerReference, labels map[string]string) *v1.Pod { + + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(fmt.Sprintf("%v-%v", ns, n)), + Name: n, + Namespace: ns, + OwnerReferences: owner, + Labels: labels, + }, + Status: v1.PodStatus{ + Phase: p, + }, + Spec: v1.PodSpec{ + NodeName: nn, + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: req, + }, + }, + }, + }, + } +} + +func buildResourceList(cpu string, memory string) v1.ResourceList { + return v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(cpu), + v1.ResourceMemory: resource.MustParse(memory), + } +} + +func buildResource(cpu string, memory string) *api.Resource { + return api.NewResource(v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(cpu), + v1.ResourceMemory: resource.MustParse(memory), + }) +} + +func buildOwnerReference(owner string) metav1.OwnerReference { + controller := true + return metav1.OwnerReference{ + Controller: &controller, + UID: types.UID(owner), + } +} + +func TestAddPod(t *testing.T) { + + owner := buildOwnerReference("j1") + + // case 1: + pod1 := buildPod("c1", "p1", "", v1.PodPending, buildResourceList("1000m", "1G"), + []metav1.OwnerReference{owner}, make(map[string]string)) + pi1 := api.NewTaskInfo(pod1) + pi1.Job = "j1" // The job name is set by cache. + pod2 := buildPod("c1", "p2", "n1", v1.PodRunning, buildResourceList("1000m", "1G"), + []metav1.OwnerReference{owner}, make(map[string]string)) + pi2 := api.NewTaskInfo(pod2) + pi2.Job = "j1" // The job name is set by cache. 
+ + j1 := api.NewJobInfo(api.JobID("j1"), pi1, pi2) + pg := createShadowPodGroup(pod1) + j1.SetPodGroup(pg) + + node1 := buildNode("n1", buildResourceList("2000m", "10G")) + ni1 := api.NewNodeInfo(node1) + ni1.AddTask(pi2) + + tests := []struct { + pods []*v1.Pod + nodes []*v1.Node + expected *SchedulerCache + }{ + { + pods: []*v1.Pod{pod1, pod2}, + nodes: []*v1.Node{node1}, + expected: &SchedulerCache{ + Nodes: map[string]*api.NodeInfo{ + "n1": ni1, + }, + Jobs: map[api.JobID]*api.JobInfo{ + "j1": j1, + }, + }, + }, + } + + for i, test := range tests { + cache := &SchedulerCache{ + Jobs: make(map[api.JobID]*api.JobInfo), + Nodes: make(map[string]*api.NodeInfo), + } + + for _, n := range test.nodes { + cache.AddNode(n) + } + + for _, p := range test.pods { + cache.AddPod(p) + } + + if !cacheEqual(cache, test.expected) { + t.Errorf("case %d: \n expected %v, \n got %v \n", + i, test.expected, cache) + } + } +} + +func TestAddNode(t *testing.T) { + owner1 := buildOwnerReference("j1") + owner2 := buildOwnerReference("j2") + + // case 1 + node1 := buildNode("n1", buildResourceList("2000m", "10G")) + pod1 := buildPod("c1", "p1", "", v1.PodPending, buildResourceList("1000m", "1G"), + []metav1.OwnerReference{owner1}, make(map[string]string)) + pi1 := api.NewTaskInfo(pod1) + pi1.Job = "j1" // The job name is set by cache. + + pod2 := buildPod("c1", "p2", "n1", v1.PodRunning, buildResourceList("1000m", "1G"), + []metav1.OwnerReference{owner2}, make(map[string]string)) + pi2 := api.NewTaskInfo(pod2) + pi2.Job = "j2" // The job name is set by cache. + + ni1 := api.NewNodeInfo(node1) + ni1.AddTask(pi2) + + j1 := api.NewJobInfo("j1") + pg1 := createShadowPodGroup(pod1) + j1.SetPodGroup(pg1) + + j2 := api.NewJobInfo("j2") + pg2 := createShadowPodGroup(pod2) + j2.SetPodGroup(pg2) + + j1.AddTaskInfo(pi1) + j2.AddTaskInfo(pi2) + + tests := []struct { + pods []*v1.Pod + nodes []*v1.Node + expected *SchedulerCache + }{ + { + pods: []*v1.Pod{pod1, pod2}, + nodes: []*v1.Node{node1}, + expected: &SchedulerCache{ + Nodes: map[string]*api.NodeInfo{ + "n1": ni1, + }, + Jobs: map[api.JobID]*api.JobInfo{ + "j1": j1, + "j2": j2, + }, + }, + }, + } + + for i, test := range tests { + cache := &SchedulerCache{ + Nodes: make(map[string]*api.NodeInfo), + Jobs: make(map[api.JobID]*api.JobInfo), + } + + for _, p := range test.pods { + cache.AddPod(p) + } + + for _, n := range test.nodes { + cache.AddNode(n) + } + + if !cacheEqual(cache, test.expected) { + t.Errorf("case %d: \n expected %v, \n got %v \n", + i, test.expected, cache) + } + } +} + +func TestGetOrCreateJob(t *testing.T) { + owner1 := buildOwnerReference("j1") + owner2 := buildOwnerReference("j2") + + pod1 := buildPod("c1", "p1", "n1", v1.PodRunning, buildResourceList("1000m", "1G"), + []metav1.OwnerReference{owner1}, make(map[string]string)) + pi1 := api.NewTaskInfo(pod1) + pi1.Job = "j1" // The job name is set by cache. 
+ + pod2 := buildPod("c1", "p2", "n1", v1.PodRunning, buildResourceList("1000m", "1G"), + []metav1.OwnerReference{owner2}, make(map[string]string)) + pod2.Spec.SchedulerName = "kube-batch" + pi2 := api.NewTaskInfo(pod2) + + pod3 := buildPod("c3", "p3", "n1", v1.PodRunning, buildResourceList("1000m", "1G"), + []metav1.OwnerReference{owner2}, make(map[string]string)) + pi3 := api.NewTaskInfo(pod3) + + cache := &SchedulerCache{ + Nodes: make(map[string]*api.NodeInfo), + Jobs: make(map[api.JobID]*api.JobInfo), + schedulerName: "kube-batch", + } + + tests := []struct { + task *api.TaskInfo + gotJob bool // whether getOrCreateJob will return job for corresponding task + }{ + { + task: pi1, + gotJob: true, + }, + { + task: pi2, + gotJob: true, + }, + { + task: pi3, + gotJob: false, + }, + } + for i, test := range tests { + result := cache.getOrCreateJob(test.task) != nil + if result != test.gotJob { + t.Errorf("case %d: \n expected %t, \n got %t \n", + i, test.gotJob, result) + } + } +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/event_handlers.go b/pkg/scheduler/cache/event_handlers.go similarity index 91% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/event_handlers.go rename to pkg/scheduler/cache/event_handlers.go index 72055f3b28..145c177b03 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/event_handlers.go +++ b/pkg/scheduler/cache/event_handlers.go @@ -29,17 +29,24 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" - kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/apis/utils" - kbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + kbv1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + "volcano.sh/volcano/pkg/apis/utils" + kbapi "volcano.sh/volcano/pkg/scheduler/api" ) func isTerminated(status kbapi.TaskStatus) bool { return status == kbapi.Succeeded || status == kbapi.Failed } +// getOrCreateJob will return corresponding Job for pi if it exists, or it will create a Job and return it if +// pi.Pod.Spec.SchedulerName is same as kube-batch scheduler's name, otherwise it will return nil. 
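Since getOrCreateJob can now return nil for pods owned by other schedulers, every caller must guard the result; the addTask hunk below adds exactly that guard. In effect (scheduler names are illustrative):

    // With sc.schedulerName == "kube-batch":
    //   pod A, Spec.SchedulerName = "kube-batch"        -> shadow PodGroup created, job returned
    //   pod B, Spec.SchedulerName = "default-scheduler" -> nil; the cache still tracks the pod's
    //                                                      node usage but creates no Job for it.
    if job := sc.getOrCreateJob(pi); job != nil {
    	job.AddTaskInfo(pi)
    }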
func (sc *SchedulerCache) getOrCreateJob(pi *kbapi.TaskInfo) *kbapi.JobInfo {
 	if len(pi.Job) == 0 {
+		if pi.Pod.Spec.SchedulerName != sc.schedulerName {
+			glog.V(4).Infof("Pod %s/%s will not be scheduled by %s, skip creating PodGroup and Job for it",
+				pi.Pod.Namespace, pi.Pod.Name, sc.schedulerName)
+			return nil
+		}
 		pb := createShadowPodGroup(pi.Pod)
 		pi.Job = kbapi.JobID(pb.Name)
@@ -62,7 +69,9 @@ func (sc *SchedulerCache) getOrCreateJob(pi *kbapi.TaskInfo) *kbapi.JobInfo {
 
 func (sc *SchedulerCache) addTask(pi *kbapi.TaskInfo) error {
 	job := sc.getOrCreateJob(pi)
-	job.AddTaskInfo(pi)
+	if job != nil {
+		job.AddTaskInfo(pi)
+	}
 
 	if len(pi.NodeName) != 0 {
 		if _, found := sc.Nodes[pi.NodeName]; !found {
@@ -170,6 +179,7 @@ func (sc *SchedulerCache) deletePod(pod *v1.Pod) error {
 	return nil
 }
 
+// AddPod adds a pod to the scheduler cache
 func (sc *SchedulerCache) AddPod(obj interface{}) {
 	pod, ok := obj.(*v1.Pod)
 	if !ok {
@@ -190,6 +200,7 @@ func (sc *SchedulerCache) AddPod(obj interface{}) {
 	return
 }
 
+// UpdatePod updates a pod in the scheduler cache
 func (sc *SchedulerCache) UpdatePod(oldObj, newObj interface{}) {
 	oldPod, ok := oldObj.(*v1.Pod)
 	if !ok {
@@ -216,6 +227,7 @@ func (sc *SchedulerCache) UpdatePod(oldObj, newObj interface{}) {
 	return
 }
 
+// DeletePod deletes a pod from the scheduler cache
 func (sc *SchedulerCache) DeletePod(obj interface{}) {
 	var pod *v1.Pod
 	switch t := obj.(type) {
@@ -285,6 +297,7 @@ func (sc *SchedulerCache) deleteNode(node *v1.Node) error {
 	return nil
 }
 
+// AddNode adds a node to the scheduler cache
 func (sc *SchedulerCache) AddNode(obj interface{}) {
 	node, ok := obj.(*v1.Node)
 	if !ok {
@@ -303,6 +316,7 @@ func (sc *SchedulerCache) AddNode(obj interface{}) {
 	return
 }
 
+// UpdateNode updates a node in the scheduler cache
 func (sc *SchedulerCache) UpdateNode(oldObj, newObj interface{}) {
 	oldNode, ok := oldObj.(*v1.Node)
 	if !ok {
@@ -326,6 +340,7 @@ func (sc *SchedulerCache) UpdateNode(oldObj, newObj interface{}) {
 	return
 }
 
+// DeleteNode deletes a node from the scheduler cache
 func (sc *SchedulerCache) DeleteNode(obj interface{}) {
 	var node *v1.Node
 	switch t := obj.(type) {
@@ -363,7 +378,7 @@ func (sc *SchedulerCache) setPodGroup(ss *kbv1.PodGroup) error {
 	job := getJobID(ss)
 
 	if len(job) == 0 {
-		return fmt.Errorf("the controller of PodGroup is empty")
+		return fmt.Errorf("the identity of PodGroup is empty")
 	}
 
 	if _, found := sc.Jobs[job]; !found {
@@ -402,6 +417,7 @@ func (sc *SchedulerCache) deletePodGroup(ss *kbv1.PodGroup) error {
 	return nil
 }
 
+// AddPodGroup adds a podgroup to the scheduler cache
 func (sc *SchedulerCache) AddPodGroup(obj interface{}) {
 	ss, ok := obj.(*kbv1.PodGroup)
 	if !ok {
@@ -421,6 +437,7 @@ func (sc *SchedulerCache) AddPodGroup(obj interface{}) {
 	return
 }
 
+// UpdatePodGroup updates a podgroup in the scheduler cache
 func (sc *SchedulerCache) UpdatePodGroup(oldObj, newObj interface{}) {
 	oldSS, ok := oldObj.(*kbv1.PodGroup)
 	if !ok {
@@ -444,6 +461,7 @@ func (sc *SchedulerCache) UpdatePodGroup(oldObj, newObj interface{}) {
 	return
 }
 
+// DeletePodGroup deletes a podgroup from the scheduler cache
 func (sc *SchedulerCache) DeletePodGroup(obj interface{}) {
 	var ss *kbv1.PodGroup
 	switch t := obj.(type) {
@@ -513,6 +531,7 @@ func (sc *SchedulerCache) deletePDB(pdb *policyv1.PodDisruptionBudget) error {
 	return nil
 }
 
+// AddPDB adds a pdb to the scheduler cache
 func (sc *SchedulerCache) AddPDB(obj interface{}) {
 	pdb, ok := obj.(*policyv1.PodDisruptionBudget)
 	if !ok {
@@ -531,6 +550,7 @@ func (sc *SchedulerCache) AddPDB(obj interface{}) {
 	return
 }
 
+// UpdatePDB updates a pdb in the scheduler cache
 func (sc *SchedulerCache)
UpdatePDB(oldObj, newObj interface{}) { oldPDB, ok := oldObj.(*policyv1.PodDisruptionBudget) if !ok { @@ -554,6 +574,7 @@ func (sc *SchedulerCache) UpdatePDB(oldObj, newObj interface{}) { return } +//DeletePDB delete pdb from scheduler cache func (sc *SchedulerCache) DeletePDB(obj interface{}) { var pdb *policyv1.PodDisruptionBudget switch t := obj.(type) { @@ -582,6 +603,7 @@ func (sc *SchedulerCache) DeletePDB(obj interface{}) { return } +//AddQueue add queue to scheduler cache func (sc *SchedulerCache) AddQueue(obj interface{}) { ss, ok := obj.(*kbv1.Queue) if !ok { @@ -601,6 +623,7 @@ func (sc *SchedulerCache) AddQueue(obj interface{}) { return } +//UpdateQueue update queue to scheduler cache func (sc *SchedulerCache) UpdateQueue(oldObj, newObj interface{}) { oldSS, ok := oldObj.(*kbv1.Queue) if !ok { @@ -624,6 +647,7 @@ func (sc *SchedulerCache) UpdateQueue(oldObj, newObj interface{}) { return } +//DeleteQueue delete queue from the scheduler cache func (sc *SchedulerCache) DeleteQueue(obj interface{}) { var ss *kbv1.Queue switch t := obj.(type) { @@ -673,6 +697,7 @@ func (sc *SchedulerCache) deleteQueue(queue *kbv1.Queue) error { return nil } +//DeletePriorityClass delete priorityclass from the scheduler cache func (sc *SchedulerCache) DeletePriorityClass(obj interface{}) { var ss *v1beta1.PriorityClass switch t := obj.(type) { @@ -696,6 +721,7 @@ func (sc *SchedulerCache) DeletePriorityClass(obj interface{}) { sc.deletePriorityClass(ss) } +//UpdatePriorityClass update priorityclass to scheduler cache func (sc *SchedulerCache) UpdatePriorityClass(oldObj, newObj interface{}) { oldSS, ok := oldObj.(*v1beta1.PriorityClass) if !ok { @@ -720,6 +746,7 @@ func (sc *SchedulerCache) UpdatePriorityClass(oldObj, newObj interface{}) { sc.addPriorityClass(newSS) } +//AddPriorityClass add priorityclass to scheduler cache func (sc *SchedulerCache) AddPriorityClass(obj interface{}) { var ss *v1beta1.PriorityClass switch t := obj.(type) { diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/interface.go b/pkg/scheduler/cache/interface.go similarity index 90% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/interface.go rename to pkg/scheduler/cache/interface.go index 38b991974a..689a0db73c 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/interface.go +++ b/pkg/scheduler/cache/interface.go @@ -19,8 +19,8 @@ package cache import ( "k8s.io/api/core/v1" - "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + "volcano.sh/volcano/pkg/scheduler/api" ) // Cache collects pods/nodes/queues information @@ -56,15 +56,18 @@ type Cache interface { BindVolumes(task *api.TaskInfo) error } +// VolumeBinder interface for allocate and bind volumes type VolumeBinder interface { AllocateVolumes(task *api.TaskInfo, hostname string) error BindVolumes(task *api.TaskInfo) error } +//Binder interface for binding task and hostname type Binder interface { Bind(task *v1.Pod, hostname string) error } +// Evictor interface for evict pods type Evictor interface { Evict(pod *v1.Pod) error } diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/util.go b/pkg/scheduler/cache/util.go similarity index 84% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/util.go rename to pkg/scheduler/cache/util.go index 411b47552d..06bbefc809 100644 --- 
a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache/util.go +++ b/pkg/scheduler/cache/util.go @@ -20,13 +20,13 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/apis/utils" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + "volcano.sh/volcano/pkg/apis/utils" + "volcano.sh/volcano/pkg/scheduler/api" ) const ( - shadowPodGroupKey = "kube-batch/shadow-pod-group" + shadowPodGroupKey = "volcano/shadow-pod-group" ) func shadowPodGroup(pg *v1alpha1.PodGroup) bool { diff --git a/pkg/scheduler/conf/scheduler_conf.go b/pkg/scheduler/conf/scheduler_conf.go new file mode 100644 index 0000000000..516e07a9a5 --- /dev/null +++ b/pkg/scheduler/conf/scheduler_conf.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conf + +// SchedulerConfiguration defines the configuration of scheduler. +type SchedulerConfiguration struct { + // Actions defines the actions list of scheduler in order + Actions string `yaml:"actions"` + // Tiers defines plugins in different tiers + Tiers []Tier `yaml:"tiers"` +} + +// Tier defines plugin tier +type Tier struct { + Plugins []PluginOption `yaml:"plugins"` +} + +// PluginOption defines the options of plugin +type PluginOption struct { + // The name of Plugin + Name string `yaml:"name"` + // EnabledJobOrder defines whether jobOrderFn is enabled + EnabledJobOrder *bool `yaml:"enableJobOrder"` + // EnabledJobReady defines whether jobReadyFn is enabled + EnabledJobReady *bool `yaml:"enableJobReady"` + // EnabledJobPipelined defines whether jobPipelinedFn is enabled + EnabledJobPipelined *bool `yaml:"enableJobPipelined"` + // EnabledTaskOrder defines whether taskOrderFn is enabled + EnabledTaskOrder *bool `yaml:"enableTaskOrder"` + // EnabledPreemptable defines whether preemptableFn is enabled + EnabledPreemptable *bool `yaml:"enablePreemptable"` + // EnabledReclaimable defines whether reclaimableFn is enabled + EnabledReclaimable *bool `yaml:"enableReclaimable"` + // EnabledQueueOrder defines whether queueOrderFn is enabled + EnabledQueueOrder *bool `yaml:"enableQueueOrder"` + // EnabledPredicate defines whether predicateFn is enabled + EnabledPredicate *bool `yaml:"enablePredicate"` + // EnabledNodeOrder defines whether NodeOrderFn is enabled + EnabledNodeOrder *bool `yaml:"enableNodeOrder"` + // Arguments defines the different arguments that can be given to different plugins + Arguments map[string]string `yaml:"arguments"` +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util/sort.go b/pkg/scheduler/framework/arguments.go similarity index 57% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util/sort.go rename to pkg/scheduler/framework/arguments.go index d4d808338c..d968e7ae32 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util/sort.go 
+++ b/pkg/scheduler/framework/arguments.go @@ -14,24 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package framework import ( - "sort" + "strconv" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + "github.com/golang/glog" ) -func SelectBestNode(nodeScores map[int][]*api.NodeInfo) []*api.NodeInfo { - var nodesInorder []*api.NodeInfo - var keys []int - for key := range nodeScores { - keys = append(keys, key) +// Arguments map of plugin argument names to raw string values +type Arguments map[string]string + +// GetInt parses the string value for the given key into ptr; ptr keeps its old value when the key is missing, empty, or malformed +func (a Arguments) GetInt(ptr *int, key string) { + if ptr == nil { + return } - sort.Sort(sort.Reverse(sort.IntSlice(keys))) - for _, key := range keys { - nodes := nodeScores[key] - nodesInorder = append(nodesInorder, nodes...) + + argv, ok := a[key] + if !ok || argv == "" { + return } - return nodesInorder + + value, err := strconv.Atoi(argv) + if err != nil { + glog.Warningf("Could not parse argument: %s for key %s, with err %v", argv, key, err) + return + } + + *ptr = value } diff --git a/pkg/scheduler/framework/arguments_test.go b/pkg/scheduler/framework/arguments_test.go new file mode 100644 index 0000000000..f9d5f30a03 --- /dev/null +++ b/pkg/scheduler/framework/arguments_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "testing" +) + +type GetIntTestCases struct { + arg Arguments + key string + baseValue int + expectValue int +} + +func TestArgumentsGetInt(t *testing.T) { + key1 := "intkey" + + cases := []GetIntTestCases{ + { + arg: Arguments{ + "anotherkey": "12", + }, + key: key1, + baseValue: 10, + expectValue: 10, + }, + { + arg: Arguments{ + key1: "15", + }, + key: key1, + baseValue: 10, + expectValue: 15, + }, + { + arg: Arguments{ + key1: "errorvalue", + }, + key: key1, + baseValue: 11, + expectValue: 11, + }, + { + arg: Arguments{ + key1: "", + }, + key: key1, + baseValue: 0, + expectValue: 0, + }, + } + + for index, c := range cases { + baseValue := c.baseValue + c.arg.GetInt(nil, c.key) + c.arg.GetInt(&baseValue, c.key) + if baseValue != c.expectValue { + t.Errorf("index %d, value should be %v, but got %v", index, c.expectValue, baseValue) + } + } +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/event.go b/pkg/scheduler/framework/event.go similarity index 89% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/event.go rename to pkg/scheduler/framework/event.go index b2f12729d3..d21f5e9fdb 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/event.go +++ b/pkg/scheduler/framework/event.go @@ -17,13 +17,15 @@ limitations under the License.
package framework import ( - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/api" ) +// Event structure type Event struct { Task *api.TaskInfo } +// EventHandler structure type EventHandler struct { AllocateFunc func(event *Event) DeallocateFunc func(event *Event) diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/framework.go b/pkg/scheduler/framework/framework.go similarity index 86% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/framework.go rename to pkg/scheduler/framework/framework.go index a25f1c6e0d..379b7c7b8d 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/framework.go +++ b/pkg/scheduler/framework/framework.go @@ -21,11 +21,12 @@ import ( "github.com/golang/glog" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics" + "volcano.sh/volcano/pkg/scheduler/cache" + "volcano.sh/volcano/pkg/scheduler/conf" + "volcano.sh/volcano/pkg/scheduler/metrics" ) +// OpenSession start the session func OpenSession(cache cache.Cache, tiers []conf.Tier) *Session { ssn := openSession(cache) ssn.Tiers = tiers @@ -35,7 +36,7 @@ func OpenSession(cache cache.Cache, tiers []conf.Tier) *Session { if pb, found := GetPluginBuilder(plugin.Name); !found { glog.Errorf("Failed to get plugin %s.", plugin.Name) } else { - plugin := pb() + plugin := pb(plugin.Arguments) ssn.plugins[plugin.Name()] = plugin } } @@ -50,6 +51,7 @@ func OpenSession(cache cache.Cache, tiers []conf.Tier) *Session { return ssn } +// CloseSession close the session func CloseSession(ssn *Session) { for _, plugin := range ssn.plugins { onSessionCloseStart := time.Now() diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/interface.go b/pkg/scheduler/framework/interface.go similarity index 95% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/interface.go rename to pkg/scheduler/framework/interface.go index d7abc7d6d1..78878878f8 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/interface.go +++ b/pkg/scheduler/framework/interface.go @@ -31,6 +31,7 @@ type Action interface { UnInitialize() } +// Plugin is the interface of scheduler plugin type Plugin interface { // The unique name of Plugin. 
Name() string diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/plugins.go b/pkg/scheduler/framework/plugins.go similarity index 79% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/plugins.go rename to pkg/scheduler/framework/plugins.go index 3b5a897f30..7d5c736fc0 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/plugins.go +++ b/pkg/scheduler/framework/plugins.go @@ -20,18 +20,21 @@ import "sync" var pluginMutex sync.Mutex -type PluginBuilder func() Plugin +// PluginBuilder build a plugin from the given arguments +type PluginBuilder func(Arguments) Plugin // Plugin management var pluginBuilders = map[string]PluginBuilder{} -func RegisterPluginBuilder(name string, pc func() Plugin) { +// RegisterPluginBuilder register the plugin builder +func RegisterPluginBuilder(name string, pc PluginBuilder) { pluginMutex.Lock() defer pluginMutex.Unlock() pluginBuilders[name] = pc } +// CleanupPluginBuilders cleans up all the plugin builders func CleanupPluginBuilders() { pluginMutex.Lock() defer pluginMutex.Unlock() @@ -39,6 +42,7 @@ func CleanupPluginBuilders() { pluginBuilders = map[string]PluginBuilder{} } +// GetPluginBuilder get the pluginbuilder by name func GetPluginBuilder(name string) (PluginBuilder, bool) { pluginMutex.Lock() defer pluginMutex.Unlock() @@ -50,6 +54,7 @@ func GetPluginBuilder(name string) (PluginBuilder, bool) { // Action management var actionMap = map[string]Action{} +// RegisterAction register action func RegisterAction(act Action) { pluginMutex.Lock() defer pluginMutex.Unlock() @@ -57,6 +62,7 @@ func RegisterAction(act Action) { actionMap[act.Name()] = act } +// GetAction get the action by name func GetAction(name string) (Action, bool) { pluginMutex.Lock() defer pluginMutex.Unlock() diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/session.go b/pkg/scheduler/framework/session.go similarity index 80% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/session.go rename to pkg/scheduler/framework/session.go index c8dbd6e495..bdbc5aec4c 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/session.go +++ b/pkg/scheduler/framework/session.go @@ -26,13 +26,14 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" - "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics" + "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/cache" + "volcano.sh/volcano/pkg/scheduler/conf" + "volcano.sh/volcano/pkg/scheduler/metrics" ) +// Session information for the current session type Session struct { UID types.UID @@ -44,18 +45,19 @@ type Session struct { Backlog []*api.JobInfo Tiers []conf.Tier - plugins map[string]Plugin - eventHandlers []*EventHandler - jobOrderFns map[string]api.CompareFn - queueOrderFns map[string]api.CompareFn - taskOrderFns map[string]api.CompareFn - predicateFns map[string]api.PredicateFn - nodeOrderFns map[string]api.NodeOrderFn - preemptableFns map[string]api.EvictableFn - reclaimableFns map[string]api.EvictableFn - overusedFns map[string]api.ValidateFn - jobReadyFns map[string]api.ValidateFn - jobValidFns map[string]api.ValidateExFn + plugins map[string]Plugin +
eventHandlers []*EventHandler + jobOrderFns map[string]api.CompareFn + queueOrderFns map[string]api.CompareFn + taskOrderFns map[string]api.CompareFn + predicateFns map[string]api.PredicateFn + nodeOrderFns map[string]api.NodeOrderFn + preemptableFns map[string]api.EvictableFn + reclaimableFns map[string]api.EvictableFn + overusedFns map[string]api.ValidateFn + jobReadyFns map[string]api.ValidateFn + jobPipelinedFns map[string]api.ValidateFn + jobValidFns map[string]api.ValidateExFn } func openSession(cache cache.Cache) *Session { @@ -67,17 +69,18 @@ func openSession(cache cache.Cache) *Session { Nodes: map[string]*api.NodeInfo{}, Queues: map[api.QueueID]*api.QueueInfo{}, - plugins: map[string]Plugin{}, - jobOrderFns: map[string]api.CompareFn{}, - queueOrderFns: map[string]api.CompareFn{}, - taskOrderFns: map[string]api.CompareFn{}, - predicateFns: map[string]api.PredicateFn{}, - nodeOrderFns: map[string]api.NodeOrderFn{}, - preemptableFns: map[string]api.EvictableFn{}, - reclaimableFns: map[string]api.EvictableFn{}, - overusedFns: map[string]api.ValidateFn{}, - jobReadyFns: map[string]api.ValidateFn{}, - jobValidFns: map[string]api.ValidateExFn{}, + plugins: map[string]Plugin{}, + jobOrderFns: map[string]api.CompareFn{}, + queueOrderFns: map[string]api.CompareFn{}, + taskOrderFns: map[string]api.CompareFn{}, + predicateFns: map[string]api.PredicateFn{}, + nodeOrderFns: map[string]api.NodeOrderFn{}, + preemptableFns: map[string]api.EvictableFn{}, + reclaimableFns: map[string]api.EvictableFn{}, + overusedFns: map[string]api.ValidateFn{}, + jobReadyFns: map[string]api.ValidateFn{}, + jobPipelinedFns: map[string]api.ValidateFn{}, + jobValidFns: map[string]api.ValidateExFn{}, } snapshot := cache.Snapshot() @@ -180,12 +183,14 @@ func jobStatus(ssn *Session, jobInfo *api.JobInfo) v1alpha1.PodGroupStatus { return status } +// Statement returns new statement object func (ssn *Session) Statement() *Statement { return &Statement{ ssn: ssn, } } +// Pipeline the task to the node in the session func (ssn *Session) Pipeline(task *api.TaskInfo, hostname string) error { // Only update status in session job, found := ssn.Jobs[task.Job] @@ -193,10 +198,12 @@ func (ssn *Session) Pipeline(task *api.TaskInfo, hostname string) error { if err := job.UpdateTaskStatus(task, api.Pipelined); err != nil { glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v", task.Namespace, task.Name, api.Pipelined, ssn.UID, err) + return err } } else { glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.", task.Job, ssn.UID) + return fmt.Errorf("failed to find job %s when binding", task.Job) } task.NodeName = hostname @@ -205,12 +212,14 @@ func (ssn *Session) Pipeline(task *api.TaskInfo, hostname string) error { if err := node.AddTask(task); err != nil { glog.Errorf("Failed to add task <%v/%v> to node <%v> in Session <%v>: %v", task.Namespace, task.Name, hostname, ssn.UID, err) + return err } glog.V(3).Infof("After added Task <%v/%v> to Node <%v>: idle <%v>, used <%v>, releasing <%v>", task.Namespace, task.Name, node.Name, node.Idle, node.Used, node.Releasing) } else { glog.Errorf("Failed to found Node <%s> in Session <%s> index when binding.", hostname, ssn.UID) + return fmt.Errorf("failed to find node %s", hostname) } for _, eh := range ssn.eventHandlers { @@ -224,6 +233,7 @@ func (ssn *Session) Pipeline(task *api.TaskInfo, hostname string) error { return nil } +//Allocate the task to the node in the session func (ssn *Session) Allocate(task *api.TaskInfo, hostname string) error { if 
err := ssn.cache.AllocateVolumes(task, hostname); err != nil { return err @@ -235,10 +245,12 @@ func (ssn *Session) Allocate(task *api.TaskInfo, hostname string) error { if err := job.UpdateTaskStatus(task, api.Allocated); err != nil { glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v", task.Namespace, task.Name, api.Allocated, ssn.UID, err) + return err } } else { glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.", task.Job, ssn.UID) + return fmt.Errorf("failed to find job %s", task.Job) } task.NodeName = hostname @@ -247,12 +259,14 @@ func (ssn *Session) Allocate(task *api.TaskInfo, hostname string) error { if err := node.AddTask(task); err != nil { glog.Errorf("Failed to add task <%v/%v> to node <%v> in Session <%v>: %v", task.Namespace, task.Name, hostname, ssn.UID, err) + return err } glog.V(3).Infof("After allocated Task <%v/%v> to Node <%v>: idle <%v>, used <%v>, releasing <%v>", task.Namespace, task.Name, node.Name, node.Idle, node.Used, node.Releasing) } else { glog.Errorf("Failed to found Node <%s> in Session <%s> index when binding.", hostname, ssn.UID) + return fmt.Errorf("failed to find node %s", hostname) } // Callbacks @@ -269,6 +283,7 @@ func (ssn *Session) Allocate(task *api.TaskInfo, hostname string) error { if err := ssn.dispatch(task); err != nil { glog.Errorf("Failed to dispatch task <%v/%v>: %v", task.Namespace, task.Name, err) + return err } } } @@ -290,16 +305,19 @@ func (ssn *Session) dispatch(task *api.TaskInfo) error { if err := job.UpdateTaskStatus(task, api.Binding); err != nil { glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v", task.Namespace, task.Name, api.Binding, ssn.UID, err) + return err } } else { glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.", task.Job, ssn.UID) + return fmt.Errorf("failed to find job %s", task.Job) } metrics.UpdateTaskScheduleDuration(metrics.Duration(task.Pod.CreationTimestamp.Time)) return nil } +//Evict the task in the session func (ssn *Session) Evict(reclaimee *api.TaskInfo, reason string) error { if err := ssn.cache.Evict(reclaimee, reason); err != nil { return err @@ -311,10 +329,12 @@ func (ssn *Session) Evict(reclaimee *api.TaskInfo, reason string) error { if err := job.UpdateTaskStatus(reclaimee, api.Releasing); err != nil { glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v", reclaimee.Namespace, reclaimee.Name, api.Releasing, ssn.UID, err) + return err } } else { glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.", reclaimee.Job, ssn.UID) + return fmt.Errorf("failed to find job %s", reclaimee.Job) } // Update task in node. @@ -322,6 +342,7 @@ func (ssn *Session) Evict(reclaimee *api.TaskInfo, reason string) error { if err := node.UpdateTask(reclaimee); err != nil { glog.Errorf("Failed to update task <%v/%v> in Session <%v>: %v", reclaimee.Namespace, reclaimee.Name, ssn.UID, err) + return err } } @@ -336,7 +357,7 @@ func (ssn *Session) Evict(reclaimee *api.TaskInfo, reason string) error { return nil } -// UpdateJobStatus update job condition accordingly. +// UpdateJobCondition update job condition accordingly. 
func (ssn *Session) UpdateJobCondition(jobInfo *api.JobInfo, cond *v1alpha1.PodGroupCondition) error { job, ok := ssn.Jobs[jobInfo.UID] if !ok { @@ -361,10 +382,12 @@ func (ssn *Session) UpdateJobCondition(jobInfo *api.JobInfo, cond *v1alpha1.PodG return nil } +// AddEventHandler add event handlers func (ssn *Session) AddEventHandler(eh *EventHandler) { ssn.eventHandlers = append(ssn.eventHandlers, eh) } +//String return nodes and jobs information in the session func (ssn Session) String() string { msg := fmt.Sprintf("Session %v: \n", ssn.UID) diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/session_plugins.go b/pkg/scheduler/framework/session_plugins.go similarity index 69% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/session_plugins.go rename to pkg/scheduler/framework/session_plugins.go index dfbe655c65..e368447d84 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/session_plugins.go +++ b/pkg/scheduler/framework/session_plugins.go @@ -17,56 +17,72 @@ limitations under the License. package framework import ( - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/api" ) +// AddJobOrderFn add job order function func (ssn *Session) AddJobOrderFn(name string, cf api.CompareFn) { ssn.jobOrderFns[name] = cf } +// AddQueueOrderFn add queue order function func (ssn *Session) AddQueueOrderFn(name string, qf api.CompareFn) { ssn.queueOrderFns[name] = qf } +// AddTaskOrderFn add task order function func (ssn *Session) AddTaskOrderFn(name string, cf api.CompareFn) { ssn.taskOrderFns[name] = cf } +// AddPreemptableFn add preemptable function func (ssn *Session) AddPreemptableFn(name string, cf api.EvictableFn) { ssn.preemptableFns[name] = cf } +// AddReclaimableFn add Reclaimable function func (ssn *Session) AddReclaimableFn(name string, rf api.EvictableFn) { ssn.reclaimableFns[name] = rf } +// AddJobReadyFn add JobReady function func (ssn *Session) AddJobReadyFn(name string, vf api.ValidateFn) { ssn.jobReadyFns[name] = vf } +// AddJobPipelinedFn add pipelined function +func (ssn *Session) AddJobPipelinedFn(name string, vf api.ValidateFn) { + ssn.jobPipelinedFns[name] = vf +} + +// AddPredicateFn add Predicate function func (ssn *Session) AddPredicateFn(name string, pf api.PredicateFn) { ssn.predicateFns[name] = pf } +// AddNodeOrderFn add Node order function func (ssn *Session) AddNodeOrderFn(name string, pf api.NodeOrderFn) { ssn.nodeOrderFns[name] = pf } +// AddOverusedFn add overused function func (ssn *Session) AddOverusedFn(name string, fn api.ValidateFn) { ssn.overusedFns[name] = fn } +// AddJobValidFn add jobvalid function func (ssn *Session) AddJobValidFn(name string, fn api.ValidateExFn) { ssn.jobValidFns[name] = fn } +// Reclaimable invoke reclaimable function of the plugins func (ssn *Session) Reclaimable(reclaimer *api.TaskInfo, reclaimees []*api.TaskInfo) []*api.TaskInfo { var victims []*api.TaskInfo var init bool for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { - if plugin.ReclaimableDisabled { + if !isEnabled(plugin.EnabledReclaimable) { continue } rf, found := ssn.reclaimableFns[plugin.Name] @@ -101,13 +117,14 @@ func (ssn *Session) Reclaimable(reclaimer *api.TaskInfo, reclaimees []*api.TaskI return victims } +// Preemptable invoke preemptable function of the plugins func (ssn *Session) Preemptable(preemptor *api.TaskInfo, preemptees []*api.TaskInfo) []*api.TaskInfo { var victims []*api.TaskInfo var init bool for _, tier 
:= range ssn.Tiers { for _, plugin := range tier.Plugins { - if plugin.PreemptableDisabled { + if !isEnabled(plugin.EnabledPreemptable) { continue } @@ -143,6 +160,7 @@ func (ssn *Session) Preemptable(preemptor *api.TaskInfo, preemptees []*api.TaskI return victims } +// Overused invoke overused function of the plugins func (ssn *Session) Overused(queue *api.QueueInfo) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -159,10 +177,11 @@ func (ssn *Session) Overused(queue *api.QueueInfo) bool { return false } +// JobReady invoke jobready function of the plugins func (ssn *Session) JobReady(obj interface{}) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { - if plugin.JobReadyDisabled { + if !isEnabled(plugin.EnabledJobReady) { continue } jrf, found := ssn.jobReadyFns[plugin.Name] @@ -179,6 +198,28 @@ func (ssn *Session) JobReady(obj interface{}) bool { return true } +// JobPipelined invoke pipelined function of the plugins +func (ssn *Session) JobPipelined(obj interface{}) bool { + for _, tier := range ssn.Tiers { + for _, plugin := range tier.Plugins { + if !isEnabled(plugin.EnabledJobPipelined) { + continue + } + jrf, found := ssn.jobPipelinedFns[plugin.Name] + if !found { + continue + } + + if !jrf(obj) { + return false + } + } + } + + return true +} + +// JobValid invoke jobvalid function of the plugins func (ssn *Session) JobValid(obj interface{}) *api.ValidateResult { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -197,10 +238,11 @@ func (ssn *Session) JobValid(obj interface{}) *api.ValidateResult { return nil } +// JobOrderFn invoke joborder function of the plugins func (ssn *Session) JobOrderFn(l, r interface{}) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { - if plugin.JobOrderDisabled { + if !isEnabled(plugin.EnabledJobOrder) { continue } jof, found := ssn.jobOrderFns[plugin.Name] @@ -213,21 +255,21 @@ func (ssn *Session) JobOrderFn(l, r interface{}) bool { } } - // If no job order funcs, order job by UID. + // If no job order funcs, order job by CreationTimestamp first, then by UID. lv := l.(*api.JobInfo) rv := r.(*api.JobInfo) - if lv.CreationTimestamp.Equal(&rv.CreationTimestamp) { return lv.UID < rv.UID } - return lv.CreationTimestamp.Before(&rv.CreationTimestamp) + } +// QueueOrderFn invoke queueorder function of the plugins func (ssn *Session) QueueOrderFn(l, r interface{}) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { - if plugin.QueueOrderDisabled { + if !isEnabled(plugin.EnabledQueueOrder) { continue } qof, found := ssn.queueOrderFns[plugin.Name] @@ -241,17 +283,21 @@ func (ssn *Session) QueueOrderFn(l, r interface{}) bool { } } - // If no queue order funcs, order queue by UID. + // If no queue order funcs, order queue by CreationTimestamp first, then by UID. 
lv := l.(*api.QueueInfo) rv := r.(*api.QueueInfo) + if lv.Queue.CreationTimestamp.Equal(&rv.Queue.CreationTimestamp) { + return lv.UID < rv.UID + } + return lv.Queue.CreationTimestamp.Before(&rv.Queue.CreationTimestamp) - return lv.UID < rv.UID } +// TaskCompareFns invoke taskorder function of the plugins func (ssn *Session) TaskCompareFns(l, r interface{}) int { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { - if plugin.TaskOrderDisabled { + if !isEnabled(plugin.EnabledTaskOrder) { continue } tof, found := ssn.taskOrderFns[plugin.Name] @@ -267,22 +313,27 @@ func (ssn *Session) TaskCompareFns(l, r interface{}) int { return 0 } +// TaskOrderFn invoke taskorder function of the plugins func (ssn *Session) TaskOrderFn(l, r interface{}) bool { if res := ssn.TaskCompareFns(l, r); res != 0 { return res < 0 } - // If no task order funcs, order task by UID. + // If no task order funcs, order task by CreationTimestamp first, then by UID. lv := l.(*api.TaskInfo) rv := r.(*api.TaskInfo) + if lv.Pod.CreationTimestamp.Equal(&rv.Pod.CreationTimestamp) { + return lv.UID < rv.UID + } + return lv.Pod.CreationTimestamp.Before(&rv.Pod.CreationTimestamp) - return lv.UID < rv.UID } +// PredicateFn invoke predicate function of the plugins func (ssn *Session) PredicateFn(task *api.TaskInfo, node *api.NodeInfo) error { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { - if plugin.PredicateDisabled { + if !isEnabled(plugin.EnabledPredicate) { continue } pfn, found := ssn.predicateFns[plugin.Name] @@ -298,11 +349,12 @@ func (ssn *Session) PredicateFn(task *api.TaskInfo, node *api.NodeInfo) error { return nil } -func (ssn *Session) NodeOrderFn(task *api.TaskInfo, node *api.NodeInfo) (int, error) { - priorityScore := 0 +// NodeOrderFn invoke node order function of the plugins +func (ssn *Session) NodeOrderFn(task *api.TaskInfo, node *api.NodeInfo) (float64, error) { + priorityScore := 0.0 for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { - if plugin.NodeOrderDisabled { + if !isEnabled(plugin.EnabledNodeOrder) { continue } pfn, found := ssn.nodeOrderFns[plugin.Name] @@ -312,10 +364,14 @@ func (ssn *Session) NodeOrderFn(task *api.TaskInfo, node *api.NodeInfo) (int, er score, err := pfn(task, node) if err != nil { return 0, err - } else { - priorityScore = priorityScore + score } + priorityScore = priorityScore + score + } } return priorityScore, nil } + +func isEnabled(enabled *bool) bool { + return enabled != nil && *enabled +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/statement.go b/pkg/scheduler/framework/statement.go similarity index 96% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/statement.go rename to pkg/scheduler/framework/statement.go index 9ed6ea1fdd..1545412e0f 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework/statement.go +++ b/pkg/scheduler/framework/statement.go @@ -19,9 +19,10 @@ package framework import ( "github.com/golang/glog" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/api" ) +// Statement structure type Statement struct { operations []operation ssn *Session @@ -32,6 +33,7 @@ type operation struct { args []interface{} } +//Evict the pod func (s *Statement) Evict(reclaimee *api.TaskInfo, reason string) error { // Update status in session job, found := s.ssn.Jobs[reclaimee.Job] @@ -107,6 +109,7 @@ func (s *Statement) unevict(reclaimee *api.TaskInfo, reason string) error { return 
nil } +// Pipeline the task for the node func (s *Statement) Pipeline(task *api.TaskInfo, hostname string) error { // Only update status in session job, found := s.ssn.Jobs[task.Job] @@ -191,6 +194,7 @@ func (s *Statement) unpipeline(task *api.TaskInfo) error { return nil } +// Discard operation for evict and pipeline func (s *Statement) Discard() { glog.V(3).Info("Discarding operations ...") for i := len(s.operations) - 1; i >= 0; i-- { @@ -204,6 +208,7 @@ func (s *Statement) Discard() { } } +// Commit operation for evict and pipeline func (s *Statement) Commit() { glog.V(3).Info("Committing operations ...") for _, op := range s.operations { diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics/metrics.go b/pkg/scheduler/metrics/metrics.go similarity index 92% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics/metrics.go rename to pkg/scheduler/metrics/metrics.go index 4e634c65f6..db61c49f20 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics/metrics.go +++ b/pkg/scheduler/metrics/metrics.go @@ -24,8 +24,8 @@ import ( ) const ( - // KubeBatchNamespace - namespace in prometheus used by kube-batch - KubeBatchNamespace = "kube_batch" + // VolcanoNamespace - namespace in prometheus used by volcano + VolcanoNamespace = "volcano" // OnSessionOpen label OnSessionOpen = "OnSessionOpen" @@ -37,7 +37,7 @@ const ( var ( e2eSchedulingLatency = promauto.NewHistogram( prometheus.HistogramOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "e2e_scheduling_latency_milliseconds", Help: "E2e scheduling latency in milliseconds (scheduling algorithm + binding)", Buckets: prometheus.ExponentialBuckets(5, 2, 10), @@ -46,7 +46,7 @@ var ( pluginSchedulingLatency = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "plugin_scheduling_latency_microseconds", Help: "Plugin scheduling latency in microseconds", Buckets: prometheus.ExponentialBuckets(5, 2, 10), @@ -55,7 +55,7 @@ var ( actionSchedulingLatency = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "action_scheduling_latency_microseconds", Help: "Action scheduling latency in microseconds", Buckets: prometheus.ExponentialBuckets(5, 2, 10), @@ -64,7 +64,7 @@ var ( taskSchedulingLatency = promauto.NewHistogram( prometheus.HistogramOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "task_scheduling_latency_microseconds", Help: "Task scheduling latency in microseconds", Buckets: prometheus.ExponentialBuckets(5, 2, 10), @@ -73,7 +73,7 @@ var ( scheduleAttempts = promauto.NewCounterVec( prometheus.CounterOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "schedule_attempts_total", Help: "Number of attempts to schedule pods, by the result. 
'unschedulable' means a pod could not be scheduled, while 'error' means an internal scheduler problem.", }, []string{"result"}, @@ -81,7 +81,7 @@ var ( preemptionVictims = promauto.NewGauge( prometheus.GaugeOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "pod_preemption_victims", Help: "Number of selected preemption victims", }, @@ -89,7 +89,7 @@ var ( preemptionAttempts = promauto.NewCounter( prometheus.CounterOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "total_preemption_attempts", Help: "Total preemption attempts in the cluster till now", }, @@ -97,7 +97,7 @@ var ( unscheduleTaskCount = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "unschedule_task_count", Help: "Number of tasks could not be scheduled", }, []string{"job_id"}, @@ -105,7 +105,7 @@ var ( unscheduleJobCount = promauto.NewGauge( prometheus.GaugeOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "unschedule_job_count", Help: "Number of jobs could not be scheduled", }, @@ -113,7 +113,7 @@ var ( jobRetryCount = promauto.NewCounterVec( prometheus.CounterOpts{ - Subsystem: KubeBatchNamespace, + Subsystem: VolcanoNamespace, Name: "job_retry_counts", Help: "Number of retry counts for one job", }, []string{"job_id"}, diff --git a/pkg/scheduler/plugins/conformance/conformance.go b/pkg/scheduler/plugins/conformance/conformance.go index 779c3f79a9..980870d7c6 100644 --- a/pkg/scheduler/plugins/conformance/conformance.go +++ b/pkg/scheduler/plugins/conformance/conformance.go @@ -17,18 +17,21 @@ limitations under the License. package conformance import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/apis/scheduling" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/framework" ) type conformancePlugin struct { + // Arguments given for the plugin + pluginArguments framework.Arguments } -func New() framework.Plugin { - return &conformancePlugin{} +// New return conformance plugin +func New(arguments framework.Arguments) framework.Plugin { + return &conformancePlugin{pluginArguments: arguments} } func (pp *conformancePlugin) Name() string { diff --git a/pkg/scheduler/plugins/defaults.go b/pkg/scheduler/plugins/defaults.go new file mode 100644 index 0000000000..303e2ee858 --- /dev/null +++ b/pkg/scheduler/plugins/defaults.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugins + +import "volcano.sh/volcano/pkg/scheduler/conf" + +// ApplyPluginConfDefaults sets each unset field of option to its default value +func ApplyPluginConfDefaults(option *conf.PluginOption) { + t := true + + if option.EnabledJobOrder == nil { + option.EnabledJobOrder = &t + } + if option.EnabledJobReady == nil { + option.EnabledJobReady = &t + } + if option.EnabledJobPipelined == nil { + option.EnabledJobPipelined = &t + } + if option.EnabledTaskOrder == nil { + option.EnabledTaskOrder = &t + } + if option.EnabledPreemptable == nil { + option.EnabledPreemptable = &t + } + if option.EnabledReclaimable == nil { + option.EnabledReclaimable = &t + } + if option.EnabledQueueOrder == nil { + option.EnabledQueueOrder = &t + } + if option.EnabledPredicate == nil { + option.EnabledPredicate = &t + } + if option.EnabledNodeOrder == nil { + option.EnabledNodeOrder = &t + } +} diff --git a/pkg/scheduler/plugins/drf/drf.go b/pkg/scheduler/plugins/drf/drf.go index 246c23e6fe..ee154210c1 100644 --- a/pkg/scheduler/plugins/drf/drf.go +++ b/pkg/scheduler/plugins/drf/drf.go @@ -21,9 +21,9 @@ import ( "github.com/golang/glog" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/api/helpers" + "volcano.sh/volcano/pkg/scheduler/framework" ) var shareDelta = 0.000001 @@ -39,12 +39,17 @@ type drfPlugin struct { // Key is Job ID jobOpts map[api.JobID]*drfAttr + + // Arguments given for the plugin + pluginArguments framework.Arguments } -func New() framework.Plugin { +// New return drf plugin +func New(arguments framework.Arguments) framework.Plugin { return &drfPlugin{ - totalResource: api.EmptyResource(), - jobOpts: map[api.JobID]*drfAttr{}, + totalResource: api.EmptyResource(), + jobOpts: map[api.JobID]*drfAttr{}, + pluginArguments: arguments, } } @@ -110,8 +115,8 @@ func (drf *drfPlugin) OnSessionOpen(ssn *framework.Session) { lv := l.(*api.JobInfo) rv := r.(*api.JobInfo) - glog.V(4).Infof("DRF JobOrderFn: <%v/%v> is ready: %d, <%v/%v> is ready: %d", - lv.Namespace, lv.Name, lv.Priority, rv.Namespace, rv.Name, rv.Priority) + glog.V(4).Infof("DRF JobOrderFn: <%v/%v> share state: %v, <%v/%v> share state: %v", + lv.Namespace, lv.Name, drf.jobOpts[lv.UID].share, rv.Namespace, rv.Name, drf.jobOpts[rv.UID].share) if drf.jobOpts[lv.UID].share == drf.jobOpts[rv.UID].share { return 0 diff --git a/pkg/scheduler/plugins/factory.go b/pkg/scheduler/plugins/factory.go index 193a42e034..e4b8419486 100644 --- a/pkg/scheduler/plugins/factory.go +++ b/pkg/scheduler/plugins/factory.go @@ -17,7 +17,7 @@ limitations under the License.
package plugins import ( - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/framework" "volcano.sh/volcano/pkg/scheduler/plugins/conformance" "volcano.sh/volcano/pkg/scheduler/plugins/drf" diff --git a/pkg/scheduler/plugins/gang/gang.go b/pkg/scheduler/plugins/gang/gang.go index 7c85f9aefa..88d13503ce 100644 --- a/pkg/scheduler/plugins/gang/gang.go +++ b/pkg/scheduler/plugins/gang/gang.go @@ -24,60 +24,26 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics" + "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/metrics" ) type gangPlugin struct { + // Arguments given for the plugin + pluginArguments framework.Arguments } -func New() framework.Plugin { - return &gangPlugin{} +// New return gang plugin +func New(arguments framework.Arguments) framework.Plugin { + return &gangPlugin{pluginArguments: arguments} } func (gp *gangPlugin) Name() string { return "gang" } -// readyTaskNum return the number of tasks that are ready to run. -func readyTaskNum(job *api.JobInfo) int32 { - occupid := 0 - for status, tasks := range job.TaskStatusIndex { - if api.AllocatedStatus(status) || - status == api.Succeeded || - status == api.Pipelined { - occupid = occupid + len(tasks) - } - } - - return int32(occupid) -} - -// validTaskNum return the number of tasks that are valid. -func validTaskNum(job *api.JobInfo) int32 { - occupied := 0 - for status, tasks := range job.TaskStatusIndex { - if api.AllocatedStatus(status) || - status == api.Succeeded || - status == api.Pipelined || - status == api.Pending { - occupied = occupied + len(tasks) - } - } - - return int32(occupied) -} - -func jobReady(obj interface{}) bool { - job := obj.(*api.JobInfo) - - occupied := readyTaskNum(job) - - return occupied >= job.MinAvailable -} - func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) { validJobFn := func(obj interface{}) *api.ValidateResult { job, ok := obj.(*api.JobInfo) @@ -88,7 +54,7 @@ func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) { } } - vtn := validTaskNum(job) + vtn := job.ValidTaskNum() if vtn < job.MinAvailable { return &api.ValidateResult{ Pass: false, @@ -107,7 +73,7 @@ func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) { for _, preemptee := range preemptees { job := ssn.Jobs[preemptee.Job] - occupid := readyTaskNum(job) + occupid := job.ReadyTaskNum() preemptable := job.MinAvailable <= occupid-1 || job.MinAvailable == 1 if !preemptable { @@ -131,8 +97,8 @@ func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) { lv := l.(*api.JobInfo) rv := r.(*api.JobInfo) - lReady := jobReady(lv) - rReady := jobReady(rv) + lReady := lv.Ready() + rReady := rv.Ready() glog.V(4).Infof("Gang JobOrderFn: <%v/%v> is ready: %t, <%v/%v> is ready: %t", lv.Namespace, lv.Name, lReady, rv.Namespace, rv.Name, rReady) @@ -153,19 +119,26 @@ func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) { } ssn.AddJobOrderFn(gp.Name(), jobOrderFn) - ssn.AddJobReadyFn(gp.Name(), jobReady) + ssn.AddJobReadyFn(gp.Name(), func(obj interface{}) bool { + ji := obj.(*api.JobInfo) + return ji.Ready() + }) + ssn.AddJobPipelinedFn(gp.Name(), func(obj interface{}) 
bool { + ji := obj.(*api.JobInfo) + return ji.Pipelined() + }) } func (gp *gangPlugin) OnSessionClose(ssn *framework.Session) { var unreadyTaskCount int32 var unScheduleJobCount int for _, job := range ssn.Jobs { - if !jobReady(job) { - unreadyTaskCount = job.MinAvailable - readyTaskNum(job) + if !job.Ready() { + unreadyTaskCount = job.MinAvailable - job.ReadyTaskNum() msg := fmt.Sprintf("%v/%v tasks in gang unschedulable: %v", - job.MinAvailable-readyTaskNum(job), len(job.Tasks), job.FitError()) + job.MinAvailable-job.ReadyTaskNum(), len(job.Tasks), job.FitError()) - unScheduleJobCount += 1 + unScheduleJobCount++ metrics.UpdateUnscheduleTaskCount(job.Name, int(unreadyTaskCount)) metrics.RegisterJobRetries(job.Name) diff --git a/pkg/scheduler/plugins/nodeorder/nodeorder.go b/pkg/scheduler/plugins/nodeorder/nodeorder.go index d2b40f6f1c..f6b5c06264 100644 --- a/pkg/scheduler/plugins/nodeorder/nodeorder.go +++ b/pkg/scheduler/plugins/nodeorder/nodeorder.go @@ -21,18 +21,31 @@ import ( "github.com/golang/glog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/framework" +) + +const ( + // NodeAffinityWeight is the key for providing Node Affinity Priority Weight in YAML + NodeAffinityWeight = "nodeaffinity.weight" + // PodAffinityWeight is the key for providing Pod Affinity Priority Weight in YAML + PodAffinityWeight = "podaffinity.weight" + // LeastRequestedWeight is the key for providing Least Requested Priority Weight in YAML + LeastRequestedWeight = "leastrequested.weight" + // BalancedResourceWeight is the key for providing Balanced Resource Priority Weight in YAML + BalancedResourceWeight = "balancedresource.weight" ) type nodeOrderPlugin struct { + // Arguments given for the plugin + pluginArguments framework.Arguments } func getInterPodAffinityScore(name string, interPodAffinityScore schedulerapi.HostPriorityList) int { @@ -145,16 +158,71 @@ func (nl *nodeLister) List() ([]*v1.Node, error) { } //New function returns prioritizePlugin object -func New() framework.Plugin { - return &nodeOrderPlugin{} +func New(arguments framework.Arguments) framework.Plugin { + return &nodeOrderPlugin{pluginArguments: arguments} } func (pp *nodeOrderPlugin) Name() string { return "nodeorder" } +type priorityWeight struct { + leastReqWeight int + nodeAffinityWeight int + podAffinityWeight int + balancedResourceWeight int +} + +func calculateWeight(args framework.Arguments) priorityWeight { + /* + Users should give priorityWeight in this format (nodeaffinity.weight, podaffinity.weight, leastrequested.weight, balancedresource.weight). + Currently supported only for nodeaffinity, podaffinity, leastrequested, balancedresource priorities. + + actions: "reclaim, allocate, backfill, preempt" + tiers: + - plugins: + - name: priority + - name: gang + - name: conformance + - plugins: + - name: drf + - name: predicates + - name: proportion + - name: nodeorder + arguments: + nodeaffinity.weight: 2 + podaffinity.weight: 2 + leastrequested.weight: 2 + balancedresource.weight: 2 + */ + + // Values are initialized to 1.
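+ // Arguments.GetInt leaves these defaults untouched when a key is absent, empty, or fails to parse as an integer.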
+ weight := priorityWeight{ + leastReqWeight: 1, + nodeAffinityWeight: 1, + podAffinityWeight: 1, + balancedResourceWeight: 1, + } + + // Checks whether nodeaffinity.weight is provided or not, if given, modifies the value in weight struct. + args.GetInt(&weight.nodeAffinityWeight, NodeAffinityWeight) + + // Checks whether podaffinity.weight is provided or not, if given, modifies the value in weight struct. + args.GetInt(&weight.podAffinityWeight, PodAffinityWeight) + + // Checks whether leastrequested.weight is provided or not, if given, modifies the value in weight struct. + args.GetInt(&weight.leastReqWeight, LeastRequestedWeight) + + // Checks whether balancedresource.weight is provided or not, if given, modifies the value in weight struct. + args.GetInt(&weight.balancedResourceWeight, BalancedResourceWeight) + + return weight +} + func (pp *nodeOrderPlugin) OnSessionOpen(ssn *framework.Session) { - nodeOrderFn := func(task *api.TaskInfo, node *api.NodeInfo) (int, error) { + nodeOrderFn := func(task *api.TaskInfo, node *api.NodeInfo) (float64, error) { + + weight := calculateWeight(pp.pluginArguments) pl := &podLister{ session: ssn, @@ -176,7 +244,7 @@ func (pp *nodeOrderPlugin) OnSessionOpen(ssn *framework.Session) { nodeInfo := cache.NewNodeInfo(node.Pods()...) nodeInfo.SetNode(node.Node) - var score = 0 + var score = 0.0 //TODO: Add ImageLocalityPriority Function once priorityMetadata is published //Issue: #74132 in kubernetes ( https://github.com/kubernetes/kubernetes/issues/74132 ) @@ -186,14 +254,24 @@ glog.Warningf("Least Requested Priority Failed because of Error: %v", err) return 0, err } - score = score + host.Score + // If leastReqWeight is provided, host.Score is multiplied by that weight; if not, host.Score is added to the total score as-is. + score = score + float64(host.Score*weight.leastReqWeight) + + host, err = priorities.BalancedResourceAllocationMap(task.Pod, nil, nodeInfo) + if err != nil { + glog.Warningf("Balanced Resource Allocation Priority Failed because of Error: %v", err) + return 0, err + } + // If balancedResourceWeight is provided, host.Score is multiplied by that weight; if not, host.Score is added to the total score as-is. + score = score + float64(host.Score*weight.balancedResourceWeight) host, err = priorities.CalculateNodeAffinityPriorityMap(task.Pod, nil, nodeInfo) if err != nil { glog.Warningf("Calculate Node Affinity Priority Failed because of Error: %v", err) return 0, err } - score = score + host.Score + // If nodeAffinityWeight is provided, host.Score is multiplied by that weight; if not, host.Score is added to the total score as-is. + score = score + float64(host.Score*weight.nodeAffinityWeight) mapFn := priorities.NewInterPodAffinityPriority(cn, nl, pl, v1.DefaultHardPodAffinitySymmetricWeight) interPodAffinityScore, err = mapFn(task.Pod, nodeMap, nodeSlice) @@ -202,7 +280,8 @@ func (pp *nodeOrderPlugin) OnSessionOpen(ssn *framework.Session) { return 0, err } hostScore := getInterPodAffinityScore(node.Name, interPodAffinityScore) - score = score + hostScore + // If podAffinityWeight is provided, host.Score is multiplied by that weight; if not, host.Score is added to the total score as-is.
+ score = score + float64(hostScore*weight.podAffinityWeight) - glog.V(4).Infof("Total Score for that node is: %d", score) + glog.V(4).Infof("Total Score for that node is: %f", score) return score, nil diff --git a/pkg/scheduler/plugins/predicates/predicates.go b/pkg/scheduler/plugins/predicates/predicates.go index d190904234..64a0572486 100644 --- a/pkg/scheduler/plugins/predicates/predicates.go +++ b/pkg/scheduler/plugins/predicates/predicates.go @@ -21,21 +21,24 @@ import ( "github.com/golang/glog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/framework" ) type predicatesPlugin struct { + // Arguments given for the plugin + pluginArguments framework.Arguments } -func New() framework.Plugin { - return &predicatesPlugin{} +// New return predicate plugin +func New(arguments framework.Arguments) framework.Plugin { + return &predicatesPlugin{pluginArguments: arguments} } func (pp *predicatesPlugin) Name() string { @@ -101,7 +104,7 @@ func (c *cachedNodeInfo) GetNodeInfo(name string) (*v1.Node, error) { return node.Node, nil } -// Check to see if node spec is set to Schedulable or not +// CheckNodeUnschedulable checks to see if the node spec is set to unschedulable or not func CheckNodeUnschedulable(pod *v1.Pod, nodeInfo *cache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if nodeInfo.Node().Spec.Unschedulable { return false, []algorithm.PredicateFailureReason{predicates.ErrNodeUnschedulable}, nil @@ -123,10 +126,10 @@ func (pp *predicatesPlugin) OnSessionOpen(ssn *framework.Session) { nodeInfo.SetNode(node.Node) if node.Allocatable.MaxTaskNum <= len(nodeInfo.Pods()) { - return fmt.Errorf("Node <%s> can not allow more task running on it.", node.Name) + return fmt.Errorf("node <%s> cannot allow more tasks running on it", node.Name) } - // NodeSeletor Predicate + // NodeSelector Predicate fit, _, err := predicates.PodMatchNodeSelector(task.Pod, nil, nodeInfo) if err != nil { return err diff --git a/pkg/scheduler/plugins/priority/priority.go b/pkg/scheduler/plugins/priority/priority.go index c1a422d9dd..5ecd6ed444 100644 --- a/pkg/scheduler/plugins/priority/priority.go +++ b/pkg/scheduler/plugins/priority/priority.go @@ -18,16 +18,18 @@ package priority import ( "github.com/golang/glog" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/framework" ) type priorityPlugin struct { + // Arguments given for the plugin + pluginArguments framework.Arguments } -func New() framework.Plugin { - return &priorityPlugin{} +// New return priority plugin +func New(arguments framework.Arguments) framework.Plugin { + return &priorityPlugin{pluginArguments: arguments} } func (pp *priorityPlugin) Name() string { @@ -39,7 +41,7 @@ func (pp *priorityPlugin) OnSessionOpen(ssn *framework.Session) { lv := l.(*api.TaskInfo) rv := r.(*api.TaskInfo) - glog.V(4).Infof("Priority TaskOrder: <%v/%v> prority is %v, <%v/%v> priority is %v", + glog.V(4).Infof("Priority TaskOrder: <%v/%v> priority is %v, <%v/%v> priority is %v", lv.Namespace, lv.Name, lv.Priority, rv.Namespace, rv.Name, rv.Priority) if lv.Priority == rv.Priority { @@ -60,7 +62,7 @@ func (pp
*priorityPlugin) OnSessionOpen(ssn *framework.Session) { lv := l.(*api.JobInfo) rv := r.(*api.JobInfo) - glog.V(4).Infof("Priority JobOrderFn: <%v/%v> is ready: %d, <%v/%v> is ready: %d", + glog.V(4).Infof("Priority JobOrderFn: <%v/%v> priority: %d, <%v/%v> priority: %d", lv.Namespace, lv.Name, lv.Priority, rv.Namespace, rv.Name, rv.Priority) if lv.Priority > rv.Priority { diff --git a/pkg/scheduler/plugins/proportion/proportion.go b/pkg/scheduler/plugins/proportion/proportion.go index 582adf3f4e..ef3543c42c 100644 --- a/pkg/scheduler/plugins/proportion/proportion.go +++ b/pkg/scheduler/plugins/proportion/proportion.go @@ -19,14 +19,16 @@ package proportion import ( "github.com/golang/glog" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/api/helpers" + "volcano.sh/volcano/pkg/scheduler/framework" ) type proportionPlugin struct { totalResource *api.Resource queueOpts map[api.QueueID]*queueAttr + // Arguments given for the plugin + pluginArguments framework.Arguments } type queueAttr struct { @@ -40,10 +42,12 @@ type queueAttr struct { request *api.Resource } -func New() framework.Plugin { +// New return proportion plugin +func New(arguments framework.Arguments) framework.Plugin { return &proportionPlugin{ - totalResource: api.EmptyResource(), - queueOpts: map[api.QueueID]*queueAttr{}, + totalResource: api.EmptyResource(), + queueOpts: map[api.QueueID]*queueAttr{}, + pluginArguments: arguments, } } @@ -119,6 +123,7 @@ func (pp *proportionPlugin) OnSessionOpen(ssn *framework.Session) { continue } + oldDeserved := attr.deserved.Clone() attr.deserved.Add(remaining.Clone().Multi(float64(attr.weight) / float64(totalWeight))) if !attr.deserved.LessEqual(attr.request) { attr.deserved = helpers.Min(attr.deserved, attr.request) @@ -129,7 +134,7 @@ glog.V(4).Infof("The attributes of queue <%s> in proportion: deserved <%v>, allocate <%v>, request <%v>, share <%0.2f>", attr.name, attr.deserved, attr.allocated, attr.request, attr.share) - deserved.Add(attr.deserved) + deserved.Add(attr.deserved.Clone().Sub(oldDeserved)) } remaining.Sub(deserved) @@ -166,7 +171,7 @@ } allocated := allocations[job.Queue] if allocated.Less(reclaimee.Resreq) { - glog.Errorf("Failed to calculate the allocation of Task <%s/%s> in Queue <%s>.", + glog.Errorf("Failed to allocate resource for Task <%s/%s> in Queue <%s>, not enough resource.", reclaimee.Namespace, reclaimee.Name, job.Queue) continue } @@ -226,7 +231,7 @@ func (pp *proportionPlugin) OnSessionClose(ssn *framework.Session) { func (pp *proportionPlugin) updateShare(attr *queueAttr) { res := float64(0) - // TODO(k82cn): how to handle fragement issues? + // TODO(k82cn): how to handle fragment issues?
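+ // The queue's share becomes the largest allocated/deserved ratio across all resource types.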
for _, rn := range api.ResourceNames() { share := helpers.Share(attr.allocated.Get(rn), attr.deserved.Get(rn)) if share > res { diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go similarity index 84% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/scheduler.go rename to pkg/scheduler/scheduler.go index d85c5c7f1a..e42036f537 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -17,7 +17,6 @@ limitations under the License. package scheduler import ( - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf" "time" "github.com/golang/glog" @@ -25,11 +24,14 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" - schedcache "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics" + schedcache "volcano.sh/volcano/pkg/scheduler/cache" + "volcano.sh/volcano/pkg/scheduler/conf" + "volcano.sh/volcano/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/metrics" ) +// Scheduler watches for new unscheduled pods for volcano. It attempts to find +// nodes that they fit on and writes bindings back to the api server. type Scheduler struct { cache schedcache.Cache config *rest.Config @@ -39,24 +41,25 @@ type Scheduler struct { schedulePeriod time.Duration } +// NewScheduler returns a scheduler func NewScheduler( config *rest.Config, schedulerName string, conf string, - period string, + period time.Duration, defaultQueue string, ) (*Scheduler, error) { - sp, _ := time.ParseDuration(period) scheduler := &Scheduler{ config: config, schedulerConf: conf, cache: schedcache.New(config, schedulerName, defaultQueue), - schedulePeriod: sp, + schedulePeriod: period, } return scheduler, nil } +// Run runs the Scheduler func (pc *Scheduler) Run(stopCh <-chan struct{}) { var err error diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util.go b/pkg/scheduler/util.go similarity index 83% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util.go rename to pkg/scheduler/util.go index 4b55f9b9f6..b6a280f7e8 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util.go +++ b/pkg/scheduler/util.go @@ -23,8 +23,9 @@ import ( "gopkg.in/yaml.v2" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/conf" + "volcano.sh/volcano/pkg/scheduler/framework" + "volcano.sh/volcano/pkg/scheduler/plugins" ) var defaultSchedulerConf = ` @@ -51,6 +52,14 @@ func loadSchedulerConf(confStr string) ([]framework.Action, []conf.Tier, error) if err := yaml.Unmarshal(buf, schedulerConf); err != nil { return nil, nil, err } + + // Set default settings for each plugin if not set + for i, tier := range schedulerConf.Tiers { + for j := range tier.Plugins { + plugins.ApplyPluginConfDefaults(&schedulerConf.Tiers[i].Plugins[j]) + } + } + actionNames := strings.Split(schedulerConf.Actions, ",") for _, actionName := range actionNames { if action, found := framework.GetAction(strings.TrimSpace(actionName)); found { diff --git a/pkg/scheduler/util/priority_queue.go b/pkg/scheduler/util/priority_queue.go index 91b3d72003..eb10111a70 100644 --- a/pkg/scheduler/util/priority_queue.go +++ b/pkg/scheduler/util/priority_queue.go @@ -19,9 +19,10 @@ package util import ( "container/heap", -
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + "volcano.sh/volcano/pkg/scheduler/api" ) +//PriorityQueue implements a scheduling queue. type PriorityQueue struct { queue priorityQueue } @@ -31,6 +32,7 @@ type priorityQueue struct { lessFn api.LessFn } +// NewPriorityQueue returns a PriorityQueue func NewPriorityQueue(lessFn api.LessFn) *PriorityQueue { return &PriorityQueue{ queue: priorityQueue{ @@ -40,10 +42,12 @@ func NewPriorityQueue(lessFn api.LessFn) *PriorityQueue { } } +// Push pushes element in the priority Queue func (q *PriorityQueue) Push(it interface{}) { heap.Push(&q.queue, it) } +// Pop pops element in the priority Queue func (q *PriorityQueue) Pop() interface{} { if q.Len() == 0 { return nil @@ -52,10 +56,12 @@ func (q *PriorityQueue) Pop() interface{} { return heap.Pop(&q.queue) } +// Empty check if queue is empty func (q *PriorityQueue) Empty() bool { return q.queue.Len() == 0 } +// Len returns Len of the priority queue func (q *PriorityQueue) Len() int { return q.queue.Len() } diff --git a/pkg/scheduler/util/scheduler_helper.go b/pkg/scheduler/util/scheduler_helper.go new file mode 100644 index 0000000000..1440b55440 --- /dev/null +++ b/pkg/scheduler/util/scheduler_helper.go @@ -0,0 +1,114 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "math/rand" + "sort" + "sync" + + "github.com/golang/glog" + "k8s.io/client-go/util/workqueue" + + "volcano.sh/volcano/pkg/scheduler/api" +) + +// PredicateNodes returns nodes that fit task +func PredicateNodes(task *api.TaskInfo, nodes []*api.NodeInfo, fn api.PredicateFn) []*api.NodeInfo { + var predicateNodes []*api.NodeInfo + + var workerLock sync.Mutex + checkNode := func(index int) { + node := nodes[index] + glog.V(3).Infof("Considering Task <%v/%v> on node <%v>: <%v> vs. <%v>", + task.Namespace, task.Name, node.Name, task.Resreq, node.Idle) + + // TODO (k82cn): Enable eCache for performance improvement. 
diff --git a/pkg/scheduler/util/scheduler_helper.go b/pkg/scheduler/util/scheduler_helper.go
new file mode 100644
index 0000000000..1440b55440
--- /dev/null
+++ b/pkg/scheduler/util/scheduler_helper.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"context"
+	"math/rand"
+	"sort"
+	"sync"
+
+	"github.com/golang/glog"
+	"k8s.io/client-go/util/workqueue"
+
+	"volcano.sh/volcano/pkg/scheduler/api"
+)
+
+// PredicateNodes returns the nodes that fit the task
+func PredicateNodes(task *api.TaskInfo, nodes []*api.NodeInfo, fn api.PredicateFn) []*api.NodeInfo {
+	var predicateNodes []*api.NodeInfo
+
+	var workerLock sync.Mutex
+	checkNode := func(index int) {
+		node := nodes[index]
+		glog.V(3).Infof("Considering Task <%v/%v> on node <%v>: <%v> vs. <%v>",
+			task.Namespace, task.Name, node.Name, task.Resreq, node.Idle)
+
+		// TODO (k82cn): Enable eCache for performance improvement.
+		if err := fn(task, node); err != nil {
+			glog.Errorf("Predicates failed for task <%s/%s> on node <%s>: %v",
+				task.Namespace, task.Name, node.Name, err)
+			return
+		}
+
+		workerLock.Lock()
+		predicateNodes = append(predicateNodes, node)
+		workerLock.Unlock()
+	}
+
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), checkNode)
+	return predicateNodes
+}
+
+// PrioritizeNodes returns a map from score to the nodes that received that score
+func PrioritizeNodes(task *api.TaskInfo, nodes []*api.NodeInfo, fn api.NodeOrderFn) map[float64][]*api.NodeInfo {
+	nodeScores := map[float64][]*api.NodeInfo{}
+
+	var workerLock sync.Mutex
+	scoreNode := func(index int) {
+		node := nodes[index]
+		score, err := fn(task, node)
+		if err != nil {
+			glog.Errorf("Error in calculating priority for the node: %v", err)
+			return
+		}
+
+		workerLock.Lock()
+		nodeScores[score] = append(nodeScores[score], node)
+		workerLock.Unlock()
+	}
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), scoreNode)
+	return nodeScores
+}
+
+// SortNodes returns the nodes sorted by score in descending order
+func SortNodes(nodeScores map[float64][]*api.NodeInfo) []*api.NodeInfo {
+	var nodesInorder []*api.NodeInfo
+	var keys []float64
+	for key := range nodeScores {
+		keys = append(keys, key)
+	}
+	sort.Sort(sort.Reverse(sort.Float64Slice(keys)))
+	for _, key := range keys {
+		nodes := nodeScores[key]
+		nodesInorder = append(nodesInorder, nodes...)
+	}
+	return nodesInorder
+}
+
+// SelectBestNode returns the node with the highest score, picking one at
+// random when several nodes share that score.
+func SelectBestNode(nodeScores map[float64][]*api.NodeInfo) *api.NodeInfo {
+	var bestNodes []*api.NodeInfo
+	maxScore := -1.0
+	for score, nodes := range nodeScores {
+		if score > maxScore {
+			maxScore = score
+			bestNodes = nodes
+		}
+	}
+
+	// Guard against an empty score map; rand.Intn panics when its bound is 0.
+	if len(bestNodes) == 0 {
+		return nil
+	}
+	return bestNodes[rand.Intn(len(bestNodes))]
+}
+
+// GetNodeList returns the values of the map 'nodes' as a slice
+func GetNodeList(nodes map[string]*api.NodeInfo) []*api.NodeInfo {
+	result := make([]*api.NodeInfo, 0, len(nodes))
+	for _, v := range nodes {
+		result = append(result, v)
+	}
+	return result
+}
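// Taken together, the helpers above form a small node-selection pipeline. A
// sketch of one plausible call sequence inside a scheduling action, where ssn
// is a *framework.Session as used elsewhere in this diff (the exact wiring in
// the rewritten actions is not shown here):
//
//	feasibleNodes := util.PredicateNodes(task, util.GetNodeList(ssn.Nodes), ssn.PredicateFn)
//	nodeScores := util.PrioritizeNodes(task, feasibleNodes, ssn.NodeOrderFn)
//	if best := util.SelectBestNode(nodeScores); best != nil {
//		// Bind the task, e.g. via ssn.Allocate(task, best.Name).
//	}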
diff --git a/pkg/scheduler/util/scheduler_helper_test.go b/pkg/scheduler/util/scheduler_helper_test.go
new file mode 100644
index 0000000000..53e136f9c3
--- /dev/null
+++ b/pkg/scheduler/util/scheduler_helper_test.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"reflect"
+	"testing"
+
+	"volcano.sh/volcano/pkg/scheduler/api"
+)
+
+func TestSelectBestNode(t *testing.T) {
+	cases := []struct {
+		NodeScores map[float64][]*api.NodeInfo
+		// Expected node is one of ExpectedNodes
+		ExpectedNodes []*api.NodeInfo
+	}{
+		{
+			NodeScores: map[float64][]*api.NodeInfo{
+				1.0: {&api.NodeInfo{Name: "node1"}, &api.NodeInfo{Name: "node2"}},
+				2.0: {&api.NodeInfo{Name: "node3"}, &api.NodeInfo{Name: "node4"}},
+			},
+			ExpectedNodes: []*api.NodeInfo{{Name: "node3"}, {Name: "node4"}},
+		},
+		{
+			NodeScores: map[float64][]*api.NodeInfo{
+				1.0: {&api.NodeInfo{Name: "node1"}, &api.NodeInfo{Name: "node2"}},
+				3.0: {&api.NodeInfo{Name: "node3"}},
+				2.0: {&api.NodeInfo{Name: "node4"}, &api.NodeInfo{Name: "node5"}},
+			},
+			ExpectedNodes: []*api.NodeInfo{{Name: "node3"}},
+		},
+	}
+
+	oneOf := func(node *api.NodeInfo, nodes []*api.NodeInfo) bool {
+		for _, v := range nodes {
+			if reflect.DeepEqual(node, v) {
+				return true
+			}
+		}
+		return false
+	}
+	for i, test := range cases {
+		result := SelectBestNode(test.NodeScores)
+		if !oneOf(result, test.ExpectedNodes) {
+			t.Errorf("Failed test case #%d, expected: %#v, got %#v", i, test.ExpectedNodes, result)
+		}
+	}
+}
diff --git a/pkg/scheduler/util/sort.go b/pkg/scheduler/util/sort.go
deleted file mode 100644
index d4d808338c..0000000000
--- a/pkg/scheduler/util/sort.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-	"sort"
-
-	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
-)
-
-func SelectBestNode(nodeScores map[int][]*api.NodeInfo) []*api.NodeInfo {
-	var nodesInorder []*api.NodeInfo
-	var keys []int
-	for key := range nodeScores {
-		keys = append(keys, key)
-	}
-	sort.Sort(sort.Reverse(sort.IntSlice(keys)))
-	for _, key := range keys {
-		nodes := nodeScores[key]
-		nodesInorder = append(nodesInorder, nodes...)
-	}
-	return nodesInorder
-}
diff --git a/pkg/scheduler/util_test.go b/pkg/scheduler/util_test.go
new file mode 100644
index 0000000000..f127d36828
--- /dev/null
+++ b/pkg/scheduler/util_test.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package scheduler + +import ( + "reflect" + "testing" + + _ "volcano.sh/volcano/pkg/scheduler/actions" + "volcano.sh/volcano/pkg/scheduler/conf" +) + +func TestLoadSchedulerConf(t *testing.T) { + configuration := ` +actions: "allocate, backfill" +tiers: +- plugins: + - name: priority + - name: gang + - name: conformance +- plugins: + - name: drf + - name: predicates + - name: proportion + - name: nodeorder +` + + trueValue := true + expectedTiers := []conf.Tier{ + { + Plugins: []conf.PluginOption{ + { + Name: "priority", + EnabledJobOrder: &trueValue, + EnabledJobReady: &trueValue, + EnabledJobPipelined: &trueValue, + EnabledTaskOrder: &trueValue, + EnabledPreemptable: &trueValue, + EnabledReclaimable: &trueValue, + EnabledQueueOrder: &trueValue, + EnabledPredicate: &trueValue, + EnabledNodeOrder: &trueValue, + }, + { + Name: "gang", + EnabledJobOrder: &trueValue, + EnabledJobReady: &trueValue, + EnabledJobPipelined: &trueValue, + EnabledTaskOrder: &trueValue, + EnabledPreemptable: &trueValue, + EnabledReclaimable: &trueValue, + EnabledQueueOrder: &trueValue, + EnabledPredicate: &trueValue, + EnabledNodeOrder: &trueValue, + }, + { + Name: "conformance", + EnabledJobOrder: &trueValue, + EnabledJobReady: &trueValue, + EnabledJobPipelined: &trueValue, + EnabledTaskOrder: &trueValue, + EnabledPreemptable: &trueValue, + EnabledReclaimable: &trueValue, + EnabledQueueOrder: &trueValue, + EnabledPredicate: &trueValue, + EnabledNodeOrder: &trueValue, + }, + }, + }, + { + Plugins: []conf.PluginOption{ + { + Name: "drf", + EnabledJobOrder: &trueValue, + EnabledJobReady: &trueValue, + EnabledJobPipelined: &trueValue, + EnabledTaskOrder: &trueValue, + EnabledPreemptable: &trueValue, + EnabledReclaimable: &trueValue, + EnabledQueueOrder: &trueValue, + EnabledPredicate: &trueValue, + EnabledNodeOrder: &trueValue, + }, + { + Name: "predicates", + EnabledJobOrder: &trueValue, + EnabledJobReady: &trueValue, + EnabledJobPipelined: &trueValue, + EnabledTaskOrder: &trueValue, + EnabledPreemptable: &trueValue, + EnabledReclaimable: &trueValue, + EnabledQueueOrder: &trueValue, + EnabledPredicate: &trueValue, + EnabledNodeOrder: &trueValue, + }, + { + Name: "proportion", + EnabledJobOrder: &trueValue, + EnabledJobReady: &trueValue, + EnabledJobPipelined: &trueValue, + EnabledTaskOrder: &trueValue, + EnabledPreemptable: &trueValue, + EnabledReclaimable: &trueValue, + EnabledQueueOrder: &trueValue, + EnabledPredicate: &trueValue, + EnabledNodeOrder: &trueValue, + }, + { + Name: "nodeorder", + EnabledJobOrder: &trueValue, + EnabledJobReady: &trueValue, + EnabledJobPipelined: &trueValue, + EnabledTaskOrder: &trueValue, + EnabledPreemptable: &trueValue, + EnabledReclaimable: &trueValue, + EnabledQueueOrder: &trueValue, + EnabledPredicate: &trueValue, + EnabledNodeOrder: &trueValue, + }, + }, + }, + } + + _, tiers, err := loadSchedulerConf(configuration) + if err != nil { + t.Errorf("Failed to load scheduler configuration: %v", err) + } + if !reflect.DeepEqual(tiers, expectedTiers) { + t.Errorf("Failed to set default settings for plugins, expected: %+v, got %+v", + expectedTiers, tiers) + } +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/version/version.go b/pkg/version/version.go similarity index 93% rename from vendor/github.com/kubernetes-sigs/kube-batch/pkg/version/version.go rename to pkg/version/version.go index 6db1560c25..ffef611a04 100644 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/version/version.go +++ b/pkg/version/version.go @@ -23,9 +23,9 @@ import ( ) var ( - // Version 
shows the version of kube-batch.
+	// Version shows the version of Volcano.
 	Version = "Not provided."
-	// GitSHA shoows the git commit id of kube-batch.
+	// GitSHA shows the git commit id of Volcano.
 	GitSHA = "Not provided."
 	// Built shows the built time of the binary.
 	Built = "Not provided."
diff --git a/test/e2e/job_error_handling.go b/test/e2e/job_error_handling.go
index b35a60bd3f..067217fb55 100644
--- a/test/e2e/job_error_handling.go
+++ b/test/e2e/job_error_handling.go
@@ -355,6 +355,7 @@ var _ = Describe("Job Error Handling", func() {
 			}
 			err = taintAllNodes(context, taints)
 			Expect(err).NotTo(HaveOccurred())
+			defer removeTaintsFromAllNodes(context, taints)
 
 			podName := jobhelpers.MakePodName(job.Name, "test", 0)
 			By("Kill one of the pod in order to trigger unschedulable status")
diff --git a/test/e2e/util.go b/test/e2e/util.go
index 383c5c6be0..9523dc02a1 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -39,9 +39,9 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
 
-	kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
-	kbver "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
-	kbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
+	kbv1 "volcano.sh/volcano/pkg/apis/scheduling/v1alpha1"
+	kbver "volcano.sh/volcano/pkg/client/clientset/versioned"
+	kbapi "volcano.sh/volcano/pkg/scheduler/api"
 
 	vkv1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1"
 	vkver "volcano.sh/volcano/pkg/client/clientset/versioned"
diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/LICENSE b/vendor/github.com/kubernetes-sigs/kube-batch/LICENSE
deleted file mode 100644
index 8dada3edaf..0000000000
--- a/vendor/github.com/kubernetes-sigs/kube-batch/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/utils/utils.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/utils/utils.go deleted file mode 100644 index f7561b8425..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/utils/utils.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -func GetController(obj interface{}) types.UID { - accessor, err := meta.Accessor(obj) - if err != nil { - return "" - } - - controllerRef := metav1.GetControllerOf(accessor) - if controllerRef != nil { - return controllerRef.UID - } - - return "" -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/clientset.go deleted file mode 100644 index c1ddfe07ca..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/clientset.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package versioned - -import ( - schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1" - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client -} - -// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client -func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { - return c.schedulingV1alpha1 -} - -// Deprecated: Scheduling retrieves the default version of SchedulingClient. -// Please explicitly pick a version. -func (c *Clientset) Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface { - return c.schedulingV1alpha1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. 
-func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/doc.go deleted file mode 100644 index 7dc3756168..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/register.go deleted file mode 100644 index e08eecc63e..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package scheme - -import ( - schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - - corev1 "k8s.io/api/core/v1" - //PDB defintions - policyv1beta1 "k8s.io/api/policy/v1beta1" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - AddToScheme(Scheme) -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -func AddToScheme(scheme *runtime.Scheme) { - schedulingv1alpha1.AddToScheme(scheme) - corev1.AddToScheme(scheme) - policyv1beta1.AddToScheme(scheme) -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/factory.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/factory.go deleted file mode 100644 index badcefe205..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/factory.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - reflect "reflect" - sync "sync" - time "time" - - versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" - internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces" - scheduling "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" -) - -// SharedInformerOption defines the functional option type for SharedInformerFactory. -type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory - -type sharedInformerFactory struct { - client versioned.Interface - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc - lock sync.Mutex - defaultResync time.Duration - customResync map[reflect.Type]time.Duration - - informers map[reflect.Type]cache.SharedIndexInformer - // startedInformers is used for tracking which informers have been started. 
- // This allows Start() to be called multiple times safely. - startedInformers map[reflect.Type]bool -} - -// WithCustomResyncConfig sets a custom resync period for the specified informer types. -func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - for k, v := range resyncConfig { - factory.customResync[reflect.TypeOf(k)] = v - } - return factory - } -} - -// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. -func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.tweakListOptions = tweakListOptions - return factory - } -} - -// WithNamespace limits the SharedInformerFactory to the specified namespace. -func WithNamespace(namespace string) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.namespace = namespace - return factory - } -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. -func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync) -} - -// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. -// Listers obtained via this SharedInformerFactory will be subject to the same filters -// as specified here. -// Deprecated: Please use NewSharedInformerFactoryWithOptions instead -func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) -} - -// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. -func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { - factory := &sharedInformerFactory{ - client: client, - namespace: v1.NamespaceAll, - defaultResync: defaultResync, - informers: make(map[reflect.Type]cache.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - customResync: make(map[reflect.Type]time.Duration), - } - - // Apply all options - for _, opt := range options { - factory = opt(factory) - } - - return factory -} - -// Start initializes all requested informers. -func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - go informer.Run(stopCh) - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. 
-func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - informers := func() map[reflect.Type]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[reflect.Type]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer - } - } - return informers - }() - - res := map[reflect.Type]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -// InternalInformerFor returns the SharedIndexInformer for obj using an internal -// client. -func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(obj) - informer, exists := f.informers[informerType] - if exists { - return informer - } - - resyncPeriod, exists := f.customResync[informerType] - if !exists { - resyncPeriod = f.defaultResync - } - - informer = newFunc(f.client, resyncPeriod) - f.informers[informerType] = informer - - return informer -} - -// SharedInformerFactory provides shared informers for resources in all known -// API group versions. -type SharedInformerFactory interface { - internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - - Scheduling() scheduling.Interface -} - -func (f *sharedInformerFactory) Scheduling() scheduling.Interface { - return scheduling.New(f, f.namespace, f.tweakListOptions) -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/generic.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/generic.go deleted file mode 100644 index 08d1bfc5d3..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/generic.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - "fmt" - - v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" -) - -// GenericInformer is type of SharedIndexInformer which will locate and delegate to other -// sharedInformers based on type -type GenericInformer interface { - Informer() cache.SharedIndexInformer - Lister() cache.GenericLister -} - -type genericInformer struct { - informer cache.SharedIndexInformer - resource schema.GroupResource -} - -// Informer returns the SharedIndexInformer. -func (f *genericInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -// Lister returns the GenericLister. 
-func (f *genericInformer) Lister() cache.GenericLister { - return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) -} - -// ForResource gives generic access to a shared informer of the matching type -// TODO extend this to unknown resources with a client pool -func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { - switch resource { - // Group=scheduling, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("podgroups"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PodGroups().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("queues"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().Queues().Informer()}, nil - - } - - return nil, fmt.Errorf("no informer found for %v", resource) -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go deleted file mode 100644 index 85b532cafc..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalinterfaces - -import ( - time "time" - - versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - cache "k8s.io/client-go/tools/cache" -) - -type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer - -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle -type SharedInformerFactory interface { - Start(stopCh <-chan struct{}) - InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer -} - -type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/allocate/allocate.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/allocate/allocate.go deleted file mode 100644 index 753ad8bbb2..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/allocate/allocate.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package allocate - -import ( - "github.com/golang/glog" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util" -) - -type allocateAction struct { - ssn *framework.Session -} - -func New() *allocateAction { - return &allocateAction{} -} - -func (alloc *allocateAction) Name() string { - return "allocate" -} - -func (alloc *allocateAction) Initialize() {} - -func (alloc *allocateAction) Execute(ssn *framework.Session) { - glog.V(3).Infof("Enter Allocate ...") - defer glog.V(3).Infof("Leaving Allocate ...") - - queues := util.NewPriorityQueue(ssn.QueueOrderFn) - jobsMap := map[api.QueueID]*util.PriorityQueue{} - - for _, job := range ssn.Jobs { - if _, found := jobsMap[job.Queue]; !found { - jobsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn) - } - - if queue, found := ssn.Queues[job.Queue]; found { - queues.Push(queue) - } - - glog.V(4).Infof("Added Job <%s/%s> into Queue <%s>", job.Namespace, job.Name, job.Queue) - jobsMap[job.Queue].Push(job) - } - - glog.V(3).Infof("Try to allocate resource to %d Queues", len(jobsMap)) - - pendingTasks := map[api.JobID]*util.PriorityQueue{} - - for { - if queues.Empty() { - break - } - - queue := queues.Pop().(*api.QueueInfo) - if ssn.Overused(queue) { - glog.V(3).Infof("Queue <%s> is overused, ignore it.", queue.Name) - continue - } - - jobs, found := jobsMap[queue.UID] - - glog.V(3).Infof("Try to allocate resource to Jobs in Queue <%v>", queue.Name) - - if !found || jobs.Empty() { - glog.V(4).Infof("Can not find jobs for queue %s.", queue.Name) - continue - } - - job := jobs.Pop().(*api.JobInfo) - if _, found := pendingTasks[job.UID]; !found { - tasks := util.NewPriorityQueue(ssn.TaskOrderFn) - for _, task := range job.TaskStatusIndex[api.Pending] { - // Skip BestEffort task in 'allocate' action. - if task.Resreq.IsEmpty() { - glog.V(4).Infof("Task <%v/%v> is BestEffort task, skip it.", - task.Namespace, task.Name) - continue - } - - tasks.Push(task) - } - pendingTasks[job.UID] = tasks - } - tasks := pendingTasks[job.UID] - - glog.V(3).Infof("Try to allocate resource to %d tasks of Job <%v/%v>", - tasks.Len(), job.Namespace, job.Name) - - for !tasks.Empty() { - predicateNodes := []*api.NodeInfo{} - nodeScores := map[int][]*api.NodeInfo{} - - task := tasks.Pop().(*api.TaskInfo) - assigned := false - - glog.V(3).Infof("There are <%d> nodes for Job <%v/%v>", - len(ssn.Nodes), job.Namespace, job.Name) - - //any task that doesn't fit will be the last processed - //within this loop context so any existing contents of - //NodesFitDelta are for tasks that eventually did fit on a - //node - if len(job.NodesFitDelta) > 0 { - job.NodesFitDelta = make(api.NodeResourceMap) - } - for _, node := range ssn.Nodes { - glog.V(3).Infof("Considering Task <%v/%v> on node <%v>: <%v> vs. <%v>", - task.Namespace, task.Name, node.Name, task.Resreq, node.Idle) - - // TODO (k82cn): Enable eCache for performance improvement. 
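// Two distinct binding modes appear in the node loop below. A condensed
// sketch of the decision the deleted code makes for each candidate node,
// with error handling elided and names as in the surrounding code:
//
//	if task.Resreq.LessEqual(node.Idle) {
//		// Resources are free right now: bind the task immediately.
//		ssn.Allocate(task, node.Name)
//	} else if task.Resreq.LessEqual(node.Releasing) {
//		// The task fits only within resources still being released: pipeline
//		// it, so it is bound once those resources are actually freed.
//		ssn.Pipeline(task, node.Name)
//	}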
- if err := ssn.PredicateFn(task, node); err != nil { - glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s>: %v", - task.Namespace, task.Name, node.Name, err) - continue - } else { - predicateNodes = append(predicateNodes, node) - } - } - for _, node := range predicateNodes { - score, err := ssn.NodeOrderFn(task, node) - if err != nil { - glog.V(3).Infof("Error in Calculating Priority for the node:%v", err) - } else { - nodeScores[score] = append(nodeScores[score], node) - } - } - selectedNodes := util.SelectBestNode(nodeScores) - for _, node := range selectedNodes { - // Allocate idle resource to the task. - if task.Resreq.LessEqual(node.Idle) { - glog.V(3).Infof("Binding Task <%v/%v> to node <%v>", - task.Namespace, task.Name, node.Name) - if err := ssn.Allocate(task, node.Name); err != nil { - glog.Errorf("Failed to bind Task %v on %v in Session %v", - task.UID, node.Name, ssn.UID) - continue - } - assigned = true - break - } else { - //store information about missing resources - job.NodesFitDelta[node.Name] = node.Idle.Clone() - job.NodesFitDelta[node.Name].FitDelta(task.Resreq) - glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s> with limited resources", - task.Namespace, task.Name, node.Name) - } - - // Allocate releasing resource to the task if any. - if task.Resreq.LessEqual(node.Releasing) { - glog.V(3).Infof("Pipelining Task <%v/%v> to node <%v> for <%v> on <%v>", - task.Namespace, task.Name, node.Name, task.Resreq, node.Releasing) - if err := ssn.Pipeline(task, node.Name); err != nil { - glog.Errorf("Failed to pipeline Task %v on %v in Session %v", - task.UID, node.Name, ssn.UID) - continue - } - - assigned = true - break - } - } - - if !assigned { - break - } - - if ssn.JobReady(job) { - jobs.Push(job) - break - } - } - - // Added Queue back until no job in Queue. - queues.Push(queue) - } -} - -func (alloc *allocateAction) UnInitialize() {} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/backfill/backfill.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/backfill/backfill.go deleted file mode 100644 index 85aa7ce319..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/backfill/backfill.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package backfill - -import ( - "github.com/golang/glog" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" -) - -type backfillAction struct { - ssn *framework.Session -} - -func New() *backfillAction { - return &backfillAction{} -} - -func (alloc *backfillAction) Name() string { - return "backfill" -} - -func (alloc *backfillAction) Initialize() {} - -func (alloc *backfillAction) Execute(ssn *framework.Session) { - glog.V(3).Infof("Enter Backfill ...") - defer glog.V(3).Infof("Leaving Backfill ...") - - // TODO (k82cn): When backfill, it's also need to balance between Queues. 
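// Note the division of labor between the two deleted actions: allocate
// (above) explicitly skips tasks whose Resreq is empty ("BestEffort"), and
// backfill is where those tasks are placed, gated only by predicates. A
// BestEffort task is simply one whose pod declares no resource requests;
// illustratively, with v1 = "k8s.io/api/core/v1":
//
//	pod := &v1.Pod{
//		Spec: v1.PodSpec{
//			Containers: []v1.Container{
//				{Name: "sidecar", Image: "busybox"}, // no Resources.Requests
//			},
//		},
//	}
//	// Such a pod yields task.Resreq.IsEmpty() == true and is handled by the
//	// loop below rather than by the allocate action.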
- for _, job := range ssn.Jobs { - for _, task := range job.TaskStatusIndex[api.Pending] { - if task.Resreq.IsEmpty() { - // As task did not request resources, so it only need to meet predicates. - // TODO (k82cn): need to prioritize nodes to avoid pod hole. - for _, node := range ssn.Nodes { - // TODO (k82cn): predicates did not consider pod number for now, there'll - // be ping-pong case here. - if err := ssn.PredicateFn(task, node); err != nil { - glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s>: %v", - task.Namespace, task.Name, node.Name, err) - continue - } - - glog.V(3).Infof("Binding Task <%v/%v> to node <%v>", task.Namespace, task.Name, node.Name) - if err := ssn.Allocate(task, node.Name); err != nil { - glog.Errorf("Failed to bind Task %v on %v in Session %v", task.UID, node.Name, ssn.UID) - continue - } - break - } - } else { - // TODO (k82cn): backfill for other case. - } - } - } -} - -func (alloc *backfillAction) UnInitialize() {} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/factory.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/factory.go deleted file mode 100644 index 827533b226..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/factory.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package actions - -import ( - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/allocate" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/backfill" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/preempt" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/reclaim" -) - -func init() { - framework.RegisterAction(reclaim.New()) - framework.RegisterAction(allocate.New()) - framework.RegisterAction(backfill.New()) - framework.RegisterAction(preempt.New()) -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/preempt/preempt.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/preempt/preempt.go deleted file mode 100644 index 9b21325490..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/preempt/preempt.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package preempt - -import ( - "fmt" - - "github.com/golang/glog" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util" -) - -type preemptAction struct { - ssn *framework.Session -} - -func New() *preemptAction { - return &preemptAction{} -} - -func (alloc *preemptAction) Name() string { - return "preempt" -} - -func (alloc *preemptAction) Initialize() {} - -func (alloc *preemptAction) Execute(ssn *framework.Session) { - glog.V(3).Infof("Enter Preempt ...") - defer glog.V(3).Infof("Leaving Preempt ...") - - preemptorsMap := map[api.QueueID]*util.PriorityQueue{} - preemptorTasks := map[api.JobID]*util.PriorityQueue{} - - var underRequest []*api.JobInfo - queues := map[api.QueueID]*api.QueueInfo{} - - for _, job := range ssn.Jobs { - if queue, found := ssn.Queues[job.Queue]; !found { - continue - } else if _, existed := queues[queue.UID]; !existed { - glog.V(3).Infof("Added Queue <%s> for Job <%s/%s>", - queue.Name, job.Namespace, job.Name) - queues[queue.UID] = queue - } - - if len(job.TaskStatusIndex[api.Pending]) != 0 { - if _, found := preemptorsMap[job.Queue]; !found { - preemptorsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn) - } - preemptorsMap[job.Queue].Push(job) - underRequest = append(underRequest, job) - preemptorTasks[job.UID] = util.NewPriorityQueue(ssn.TaskOrderFn) - for _, task := range job.TaskStatusIndex[api.Pending] { - preemptorTasks[job.UID].Push(task) - } - } - } - - // Preemption between Jobs within Queue. - for _, queue := range queues { - for { - preemptors := preemptorsMap[queue.UID] - - // If no preemptors, no preemption. - if preemptors == nil || preemptors.Empty() { - glog.V(4).Infof("No preemptors in Queue <%s>, break.", queue.Name) - break - } - - preemptorJob := preemptors.Pop().(*api.JobInfo) - - stmt := ssn.Statement() - assigned := false - for { - // If not preemptor tasks, next job. - if preemptorTasks[preemptorJob.UID].Empty() { - glog.V(3).Infof("No preemptor task in job <%s/%s>.", - preemptorJob.Namespace, preemptorJob.Name) - break - } - - preemptor := preemptorTasks[preemptorJob.UID].Pop().(*api.TaskInfo) - - if preempted, _ := preempt(ssn, stmt, preemptor, ssn.Nodes, func(task *api.TaskInfo) bool { - // Ignore non running task. - if task.Status != api.Running { - return false - } - - job, found := ssn.Jobs[task.Job] - if !found { - return false - } - // Preempt other jobs within queue - return job.Queue == preemptorJob.Queue && preemptor.Job != task.Job - }); preempted { - assigned = true - } - - // If job not ready, keep preempting - if ssn.JobReady(preemptorJob) { - stmt.Commit() - break - } - } - - // If job not ready after try all tasks, next job. - if !ssn.JobReady(preemptorJob) { - stmt.Discard() - continue - } - - if assigned { - preemptors.Push(preemptorJob) - } - } - - // Preemption between Task within Job. - for _, job := range underRequest { - for { - if _, found := preemptorTasks[job.UID]; !found { - break - } - - if preemptorTasks[job.UID].Empty() { - break - } - - preemptor := preemptorTasks[job.UID].Pop().(*api.TaskInfo) - - stmt := ssn.Statement() - assigned, _ := preempt(ssn, stmt, preemptor, ssn.Nodes, func(task *api.TaskInfo) bool { - // Ignore non running task. - if task.Status != api.Running { - return false - } - - // Preempt tasks within job. 
- return preemptor.Job == task.Job - }) - stmt.Commit() - - // If no preemption, next job. - if !assigned { - break - } - } - } - } -} - -func (alloc *preemptAction) UnInitialize() {} - -func preempt( - ssn *framework.Session, - stmt *framework.Statement, - preemptor *api.TaskInfo, - nodes map[string]*api.NodeInfo, - filter func(*api.TaskInfo) bool, -) (bool, error) { - predicateNodes := []*api.NodeInfo{} - nodeScores := map[int][]*api.NodeInfo{} - assigned := false - - for _, node := range nodes { - if err := ssn.PredicateFn(preemptor, node); err != nil { - glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s>: %v", - preemptor.Namespace, preemptor.Name, node.Name, err) - continue - } else { - predicateNodes = append(predicateNodes, node) - } - } - for _, node := range predicateNodes { - score, err := ssn.NodeOrderFn(preemptor, node) - if err != nil { - glog.V(3).Infof("Error in calculating priority for the node: %v", err) - } else { - nodeScores[score] = append(nodeScores[score], node) - } - } - selectedNodes := util.SelectBestNode(nodeScores) - for _, node := range selectedNodes { - glog.V(3).Infof("Considering Task <%s/%s> on Node <%s>.", - preemptor.Namespace, preemptor.Name, node.Name) - - var preemptees []*api.TaskInfo - preempted := api.EmptyResource() - resreq := preemptor.Resreq.Clone() - - for _, task := range node.Tasks { - if filter == nil { - preemptees = append(preemptees, task.Clone()) - } else if filter(task) { - preemptees = append(preemptees, task.Clone()) - } - } - victims := ssn.Preemptable(preemptor, preemptees) - metrics.UpdatePreemptionVictimsCount(len(victims)) - - if err := validateVictims(victims, resreq); err != nil { - glog.V(3).Infof("No validated victims on Node <%s>: %v", node.Name, err) - continue - } - - // Preempt victims for tasks. - for _, preemptee := range victims { - glog.Errorf("Try to preempt Task <%s/%s> for Tasks <%s/%s>", - preemptee.Namespace, preemptee.Name, preemptor.Namespace, preemptor.Name) - if err := stmt.Evict(preemptee, "preempt"); err != nil { - glog.Errorf("Failed to preempt Task <%s/%s> for Tasks <%s/%s>: %v", - preemptee.Namespace, preemptee.Name, preemptor.Namespace, preemptor.Name, err) - continue - } - preempted.Add(preemptee.Resreq) - // If enough resources have been preempted, break the loop to avoid a Sub panic. - if resreq.LessEqual(preemptee.Resreq) { - break - } - resreq.Sub(preemptee.Resreq) - } - - metrics.RegisterPreemptionAttempts() - glog.V(3).Infof("Preempted <%v> for task <%s/%s> requested <%v>.", - preempted, preemptor.Namespace, preemptor.Name, preemptor.Resreq) - - if preemptor.Resreq.LessEqual(preempted) { - if err := stmt.Pipeline(preemptor, node.Name); err != nil { - glog.Errorf("Failed to pipeline Task <%s/%s> on Node <%s>", - preemptor.Namespace, preemptor.Name, node.Name) - } - - // Ignore the pipeline error; it will be corrected in the next scheduling loop.
- assigned = true - - break - } - } - - return assigned, nil -} - -func validateVictims(victims []*api.TaskInfo, resreq *api.Resource) error { - if len(victims) == 0 { - return fmt.Errorf("no victims") - } - - // If there are not enough resources among the victims, report it. - allRes := api.EmptyResource() - for _, v := range victims { - allRes.Add(v.Resreq) - } - if allRes.Less(resreq) { - return fmt.Errorf("not enough resources") - } - - return nil -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/reclaim/reclaim.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/reclaim/reclaim.go deleted file mode 100644 index e113a65a99..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/reclaim/reclaim.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package reclaim - -import ( - "github.com/golang/glog" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util" -) - -type reclaimAction struct { - ssn *framework.Session -} - -func New() *reclaimAction { - return &reclaimAction{} -} - -func (alloc *reclaimAction) Name() string { - return "reclaim" -} - -func (alloc *reclaimAction) Initialize() {} - -func (alloc *reclaimAction) Execute(ssn *framework.Session) { - glog.V(3).Infof("Enter Reclaim ...") - defer glog.V(3).Infof("Leaving Reclaim ...") - - queues := util.NewPriorityQueue(ssn.QueueOrderFn) - queueMap := map[api.QueueID]*api.QueueInfo{} - - preemptorsMap := map[api.QueueID]*util.PriorityQueue{} - preemptorTasks := map[api.JobID]*util.PriorityQueue{} - - glog.V(3).Infof("There are <%d> Jobs and <%d> Queues in total for scheduling.", - len(ssn.Jobs), len(ssn.Queues)) - - var underRequest []*api.JobInfo - for _, job := range ssn.Jobs { - if queue, found := ssn.Queues[job.Queue]; !found { - glog.Errorf("Failed to find Queue <%s> for Job <%s/%s>", - job.Queue, job.Namespace, job.Name) - continue - } else { - if _, existed := queueMap[queue.UID]; !existed { - glog.V(4).Infof("Added Queue <%s> for Job <%s/%s>", - queue.Name, job.Namespace, job.Name) - - queueMap[queue.UID] = queue - queues.Push(queue) - } - } - - if len(job.TaskStatusIndex[api.Pending]) != 0 { - if _, found := preemptorsMap[job.Queue]; !found { - preemptorsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn) - } - preemptorsMap[job.Queue].Push(job) - underRequest = append(underRequest, job) - preemptorTasks[job.UID] = util.NewPriorityQueue(ssn.TaskOrderFn) - for _, task := range job.TaskStatusIndex[api.Pending] { - preemptorTasks[job.UID].Push(task) - } - } - } - - for { - // If no queues, break - if queues.Empty() { - break - } - - var job *api.JobInfo - var task *api.TaskInfo - - queue := queues.Pop().(*api.QueueInfo) - if ssn.Overused(queue) { - glog.V(3).Infof("Queue <%s> is overused, ignore it.", queue.Name) - continue - } - - // Found "high" priority job - if jobs, found :=
preemptorsMap[queue.UID]; !found || jobs.Empty() { - continue - } else { - job = jobs.Pop().(*api.JobInfo) - } - - // Found "high" priority task to reclaim others - if tasks, found := preemptorTasks[job.UID]; !found || tasks.Empty() { - continue - } else { - task = tasks.Pop().(*api.TaskInfo) - } - - assigned := false - - for _, n := range ssn.Nodes { - // If predicates failed, next node. - if err := ssn.PredicateFn(task, n); err != nil { - continue - } - - resreq := task.Resreq.Clone() - reclaimed := api.EmptyResource() - - glog.V(3).Infof("Considering Task <%s/%s> on Node <%s>.", - task.Namespace, task.Name, n.Name) - - var reclaimees []*api.TaskInfo - for _, task := range n.Tasks { - // Ignore non-running tasks. - if task.Status != api.Running { - continue - } - - if j, found := ssn.Jobs[task.Job]; !found { - continue - } else if j.Queue != job.Queue { - // Clone the task to avoid modifying the Task's status on the node. - reclaimees = append(reclaimees, task.Clone()) - } - } - victims := ssn.Reclaimable(task, reclaimees) - - if len(victims) == 0 { - glog.V(3).Infof("No victims on Node <%s>.", n.Name) - continue - } - - // If there are not enough resources, continue - allRes := api.EmptyResource() - for _, v := range victims { - allRes.Add(v.Resreq) - } - if allRes.Less(resreq) { - glog.V(3).Infof("Not enough resource from victims on Node <%s>.", n.Name) - continue - } - - // Reclaim victims for tasks. - for _, reclaimee := range victims { - glog.Errorf("Try to reclaim Task <%s/%s> for Tasks <%s/%s>", - reclaimee.Namespace, reclaimee.Name, task.Namespace, task.Name) - if err := ssn.Evict(reclaimee, "reclaim"); err != nil { - glog.Errorf("Failed to reclaim Task <%s/%s> for Tasks <%s/%s>: %v", - reclaimee.Namespace, reclaimee.Name, task.Namespace, task.Name, err) - continue - } - reclaimed.Add(reclaimee.Resreq) - // If reclaimed enough resources, break loop to avoid Sub panic. - if resreq.LessEqual(reclaimee.Resreq) { - break - } - resreq.Sub(reclaimee.Resreq) - } - - glog.V(3).Infof("Reclaimed <%v> for task <%s/%s> requested <%v>.", - reclaimed, task.Namespace, task.Name, task.Resreq) - - if task.Resreq.LessEqual(reclaimed) { - if err := ssn.Pipeline(task, n.Name); err != nil { - glog.Errorf("Failed to pipeline Task <%s/%s> on Node <%s>", - task.Namespace, task.Name, n.Name) - } - - // Ignore pipeline errors; they will be corrected in the next scheduling loop. - assigned = true - - break - } - } - - if assigned { - queues.Push(queue) - } - } - -} - -func (ra *reclaimAction) UnInitialize() { -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf/scheduler_conf.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf/scheduler_conf.go deleted file mode 100644 index fdcbdff902..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf/scheduler_conf.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conf - -// SchedulerConfiguration defines the configuration of scheduler.
-type SchedulerConfiguration struct { - // Actions defines the actions list of scheduler in order - Actions string `yaml:"actions"` - // Tiers defines plugins in different tiers - Tiers []Tier `yaml:"tiers"` -} - -// Tier defines plugin tier -type Tier struct { - Plugins []PluginOption `yaml:"plugins"` -} - -// PluginOption defines the options of plugin -type PluginOption struct { - // The name of Plugin - Name string `yaml:"name"` - // JobOrderDisabled defines whether jobOrderFn is disabled - JobOrderDisabled bool `yaml:"disableJobOrder"` - // JobReadyDisabled defines whether jobReadyFn is disabled - JobReadyDisabled bool `yaml:"disableJobReady"` - // TaskOrderDisabled defines whether taskOrderFn is disabled - TaskOrderDisabled bool `yaml:"disableTaskOrder"` - // PreemptableDisabled defines whether preemptableFn is disabled - PreemptableDisabled bool `yaml:"disablePreemptable"` - // ReclaimableDisabled defines whether reclaimableFn is disabled - ReclaimableDisabled bool `yaml:"disableReclaimable"` - // QueueOrderDisabled defines whether queueOrderFn is disabled - QueueOrderDisabled bool `yaml:"disableQueueOrder"` - // PredicateDisabled defines whether predicateFn is disabled - PredicateDisabled bool `yaml:"disablePredicate"` - // NodeOrderDisabled defines whether NodeOrderFn is disabled - NodeOrderDisabled bool `yaml:"disableNodeOrder"` -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/conformance/conformance.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/conformance/conformance.go deleted file mode 100644 index 779c3f79a9..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/conformance/conformance.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conformance - -import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/scheduling" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" -) - -type conformancePlugin struct { -} - -func New() framework.Plugin { - return &conformancePlugin{} -} - -func (pp *conformancePlugin) Name() string { - return "conformance" -} - -func (pp *conformancePlugin) OnSessionOpen(ssn *framework.Session) { - evictableFn := func(evictor *api.TaskInfo, evictees []*api.TaskInfo) []*api.TaskInfo { - var victims []*api.TaskInfo - - for _, evictee := range evictees { - className := evictee.Pod.Spec.PriorityClassName - // Skip critical pod. 
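The SchedulerConfiguration, Tier, and PluginOption structs deleted above define the shape of the scheduler's YAML configuration. A sketch of loading such a config with gopkg.in/yaml.v2 (which Gopkg.lock shows is a dependency); the struct copy here is trimmed to a few fields, and the sample action/plugin values are illustrative, drawn from the names registered elsewhere in this diff rather than a shipped default.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Trimmed mirror of the deleted conf structs.
type SchedulerConfiguration struct {
	Actions string `yaml:"actions"`
	Tiers   []Tier `yaml:"tiers"`
}

type Tier struct {
	Plugins []PluginOption `yaml:"plugins"`
}

type PluginOption struct {
	Name                string `yaml:"name"`
	JobOrderDisabled    bool   `yaml:"disableJobOrder"`
	PreemptableDisabled bool   `yaml:"disablePreemptable"`
}

const sample = `
actions: "reclaim, allocate, backfill, preempt"
tiers:
- plugins:
  - name: priority
  - name: gang
  - name: conformance
- plugins:
  - name: drf
  - name: predicates
  - name: proportion
    disablePreemptable: true
`

func main() {
	var sc SchedulerConfiguration
	if err := yaml.Unmarshal([]byte(sample), &sc); err != nil {
		panic(err)
	}
	fmt.Printf("actions: %s, tiers: %d\n", sc.Actions, len(sc.Tiers))
}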
- if className == scheduling.SystemClusterCritical || - className == scheduling.SystemNodeCritical || - evictee.Namespace == v1.NamespaceSystem { - - continue - } - - victims = append(victims, evictee) - } - - return victims - } - - ssn.AddPreemptableFn(pp.Name(), evictableFn) - ssn.AddReclaimableFn(pp.Name(), evictableFn) -} - -func (pp *conformancePlugin) OnSessionClose(ssn *framework.Session) {} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/drf/drf.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/drf/drf.go deleted file mode 100644 index 246c23e6fe..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/drf/drf.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package drf - -import ( - "math" - - "github.com/golang/glog" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" -) - -var shareDelta = 0.000001 - -type drfAttr struct { - share float64 - dominantResource string - allocated *api.Resource -} - -type drfPlugin struct { - totalResource *api.Resource - - // Key is Job ID - jobOpts map[api.JobID]*drfAttr -} - -func New() framework.Plugin { - return &drfPlugin{ - totalResource: api.EmptyResource(), - jobOpts: map[api.JobID]*drfAttr{}, - } -} - -func (drf *drfPlugin) Name() string { - return "drf" -} - -func (drf *drfPlugin) OnSessionOpen(ssn *framework.Session) { - // Prepare scheduling data for this session. 
- for _, n := range ssn.Nodes { - drf.totalResource.Add(n.Allocatable) - } - - for _, job := range ssn.Jobs { - attr := &drfAttr{ - allocated: api.EmptyResource(), - } - - for status, tasks := range job.TaskStatusIndex { - if api.AllocatedStatus(status) { - for _, t := range tasks { - attr.allocated.Add(t.Resreq) - } - } - } - - // Calculate the initial share of the Job - drf.updateShare(attr) - - drf.jobOpts[job.UID] = attr - } - - preemptableFn := func(preemptor *api.TaskInfo, preemptees []*api.TaskInfo) []*api.TaskInfo { - var victims []*api.TaskInfo - - latt := drf.jobOpts[preemptor.Job] - lalloc := latt.allocated.Clone().Add(preemptor.Resreq) - ls := drf.calculateShare(lalloc, drf.totalResource) - - allocations := map[api.JobID]*api.Resource{} - - for _, preemptee := range preemptees { - if _, found := allocations[preemptee.Job]; !found { - ratt := drf.jobOpts[preemptee.Job] - allocations[preemptee.Job] = ratt.allocated.Clone() - } - ralloc := allocations[preemptee.Job].Sub(preemptee.Resreq) - rs := drf.calculateShare(ralloc, drf.totalResource) - - if ls < rs || math.Abs(ls-rs) <= shareDelta { - victims = append(victims, preemptee) - } - } - - glog.V(4).Infof("Victims from DRF plugins are %+v", victims) - - return victims - } - - ssn.AddPreemptableFn(drf.Name(), preemptableFn) - - jobOrderFn := func(l interface{}, r interface{}) int { - lv := l.(*api.JobInfo) - rv := r.(*api.JobInfo) - - glog.V(4).Infof("DRF JobOrderFn: <%v/%v> priority: %d, <%v/%v> priority: %d", - lv.Namespace, lv.Name, lv.Priority, rv.Namespace, rv.Name, rv.Priority) - - if drf.jobOpts[lv.UID].share == drf.jobOpts[rv.UID].share { - return 0 - } - - if drf.jobOpts[lv.UID].share < drf.jobOpts[rv.UID].share { - return -1 - } - - return 1 - } - - ssn.AddJobOrderFn(drf.Name(), jobOrderFn) - - // Register event handlers. - ssn.AddEventHandler(&framework.EventHandler{ - AllocateFunc: func(event *framework.Event) { - attr := drf.jobOpts[event.Task.Job] - attr.allocated.Add(event.Task.Resreq) - - drf.updateShare(attr) - - glog.V(4).Infof("DRF AllocateFunc: task <%v/%v>, resreq <%v>, share <%v>", - event.Task.Namespace, event.Task.Name, event.Task.Resreq, attr.share) - }, - DeallocateFunc: func(event *framework.Event) { - attr := drf.jobOpts[event.Task.Job] - attr.allocated.Sub(event.Task.Resreq) - - drf.updateShare(attr) - - glog.V(4).Infof("DRF EvictFunc: task <%v/%v>, resreq <%v>, share <%v>", - event.Task.Namespace, event.Task.Name, event.Task.Resreq, attr.share) - }, - }) -} - -func (drf *drfPlugin) updateShare(attr *drfAttr) { - attr.share = drf.calculateShare(attr.allocated, drf.totalResource) -} - -func (drf *drfPlugin) calculateShare(allocated, totalResource *api.Resource) float64 { - res := float64(0) - for _, rn := range api.ResourceNames() { - share := helpers.Share(allocated.Get(rn), totalResource.Get(rn)) - if share > res { - res = share - } - } - - return res -} - -func (drf *drfPlugin) OnSessionClose(session *framework.Session) { - // Clean up scheduling data. - drf.totalResource = api.EmptyResource() - drf.jobOpts = map[api.JobID]*drfAttr{} -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/factory.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/factory.go deleted file mode 100644 index 331b9db09f..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/factory.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors.
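The drf plugin deleted above scores a job by its dominant share: the largest fraction of the cluster total that the job's allocation occupies in any single resource dimension, as computed by calculateShare. A small sketch of that calculation; the resource maps and sample values are illustrative, not kube-batch types.

package main

import "fmt"

// share mirrors helpers.Share: the fraction of the cluster total that
// an allocation of one resource represents.
func share(alloc, total float64) float64 {
	if total == 0 {
		return 0
	}
	return alloc / total
}

// dominantShare mirrors calculateShare above: the max share across all
// resource dimensions. Jobs with a lower dominant share are ordered
// first and are protected from preemption by jobs with a higher one.
func dominantShare(allocated, total map[string]float64) float64 {
	res := 0.0
	for name, t := range total {
		if s := share(allocated[name], t); s > res {
			res = s
		}
	}
	return res
}

func main() {
	total := map[string]float64{"cpu": 64, "memory": 256e9}
	jobA := map[string]float64{"cpu": 8, "memory": 16e9} // dominant: cpu, 0.125
	jobB := map[string]float64{"cpu": 2, "memory": 64e9} // dominant: memory, 0.25
	fmt.Printf("%.3f %.3f\n", dominantShare(jobA, total), dominantShare(jobB, total))
}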
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package plugins - -import ( - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/conformance" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/drf" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/gang" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/nodeorder" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/predicates" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/priority" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/proportion" -) - -func init() { - // Plugins for Jobs - framework.RegisterPluginBuilder("drf", drf.New) - framework.RegisterPluginBuilder("gang", gang.New) - framework.RegisterPluginBuilder("predicates", predicates.New) - framework.RegisterPluginBuilder("priority", priority.New) - framework.RegisterPluginBuilder("nodeorder", nodeorder.New) - framework.RegisterPluginBuilder("conformance", conformance.New) - - // Plugins for Queues - framework.RegisterPluginBuilder("proportion", proportion.New) -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/gang/gang.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/gang/gang.go deleted file mode 100644 index 7c85f9aefa..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/gang/gang.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gang - -import ( - "fmt" - - "github.com/golang/glog" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics" -) - -type gangPlugin struct { -} - -func New() framework.Plugin { - return &gangPlugin{} -} - -func (gp *gangPlugin) Name() string { - return "gang" -} - -// readyTaskNum returns the number of tasks that are ready to run. -func readyTaskNum(job *api.JobInfo) int32 { - occupied := 0 - for status, tasks := range job.TaskStatusIndex { - if api.AllocatedStatus(status) || - status == api.Succeeded || - status == api.Pipelined { - occupied = occupied + len(tasks) - } - } - - return int32(occupied) -} - -// validTaskNum returns the number of tasks that are valid.
-func validTaskNum(job *api.JobInfo) int32 { - occupied := 0 - for status, tasks := range job.TaskStatusIndex { - if api.AllocatedStatus(status) || - status == api.Succeeded || - status == api.Pipelined || - status == api.Pending { - occupied = occupied + len(tasks) - } - } - - return int32(occupied) -} - -func jobReady(obj interface{}) bool { - job := obj.(*api.JobInfo) - - occupied := readyTaskNum(job) - - return occupied >= job.MinAvailable -} - -func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) { - validJobFn := func(obj interface{}) *api.ValidateResult { - job, ok := obj.(*api.JobInfo) - if !ok { - return &api.ValidateResult{ - Pass: false, - Message: fmt.Sprintf("Failed to convert <%v> to *JobInfo", obj), - } - } - - vtn := validTaskNum(job) - if vtn < job.MinAvailable { - return &api.ValidateResult{ - Pass: false, - Reason: v1alpha1.NotEnoughPodsReason, - Message: fmt.Sprintf("Not enough valid tasks for gang-scheduling, valid: %d, min: %d", - vtn, job.MinAvailable), - } - } - return nil - } - - ssn.AddJobValidFn(gp.Name(), validJobFn) - - preemptableFn := func(preemptor *api.TaskInfo, preemptees []*api.TaskInfo) []*api.TaskInfo { - var victims []*api.TaskInfo - - for _, preemptee := range preemptees { - job := ssn.Jobs[preemptee.Job] - occupied := readyTaskNum(job) - preemptable := job.MinAvailable <= occupied-1 || job.MinAvailable == 1 - - if !preemptable { - glog.V(3).Infof("Cannot preempt task <%v/%v> because of gang-scheduling", - preemptee.Namespace, preemptee.Name) - } else { - victims = append(victims, preemptee) - } - } - - glog.V(3).Infof("Victims from Gang plugins are %+v", victims) - - return victims - } - - // TODO(k82cn): Support preempt/reclaim batch job. - ssn.AddReclaimableFn(gp.Name(), preemptableFn) - ssn.AddPreemptableFn(gp.Name(), preemptableFn) - - jobOrderFn := func(l, r interface{}) int { - lv := l.(*api.JobInfo) - rv := r.(*api.JobInfo) - - lReady := jobReady(lv) - rReady := jobReady(rv) - - glog.V(4).Infof("Gang JobOrderFn: <%v/%v> is ready: %t, <%v/%v> is ready: %t", - lv.Namespace, lv.Name, lReady, rv.Namespace, rv.Name, rReady) - - if lReady && rReady { - return 0 - } - - if lReady { - return 1 - } - - if rReady { - return -1 - } - - return 0 - } - - ssn.AddJobOrderFn(gp.Name(), jobOrderFn) - ssn.AddJobReadyFn(gp.Name(), jobReady) -} - -func (gp *gangPlugin) OnSessionClose(ssn *framework.Session) { - var unreadyTaskCount int32 - var unScheduleJobCount int - for _, job := range ssn.Jobs { - if !jobReady(job) { - unreadyTaskCount = job.MinAvailable - readyTaskNum(job) - msg := fmt.Sprintf("%v/%v tasks in gang unschedulable: %v", - job.MinAvailable-readyTaskNum(job), len(job.Tasks), job.FitError()) - - unScheduleJobCount += 1 - metrics.UpdateUnscheduleTaskCount(job.Name, int(unreadyTaskCount)) - metrics.RegisterJobRetries(job.Name) - - jc := &v1alpha1.PodGroupCondition{ - Type: v1alpha1.PodGroupUnschedulableType, - Status: v1.ConditionTrue, - LastTransitionTime: metav1.Now(), - TransitionID: string(ssn.UID), - Reason: v1alpha1.NotEnoughResourcesReason, - Message: msg, - } - - if err := ssn.UpdateJobCondition(job, jc); err != nil { - glog.Errorf("Failed to update job <%s/%s> condition: %v", - job.Namespace, job.Name, err) - } - } - } - - metrics.UpdateUnscheduleJobCount(unScheduleJobCount) -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/nodeorder/nodeorder.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/nodeorder/nodeorder.go deleted file mode 100644 index d2b40f6f1c..0000000000 ---
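The gang plugin deleted above gates both job start and preemption on minAvailable: a job is ready only once enough of its tasks can be placed, and a victim may be evicted only if the eviction does not push a running gang below its minimum. A compact sketch of those two rules; the plain int32 counters stand in for the JobInfo bookkeeping done by readyTaskNum.

package main

import "fmt"

// gangReady mirrors jobReady above: a job may start only when at least
// minAvailable of its tasks are ready.
func gangReady(readyTasks, minAvailable int32) bool {
	return readyTasks >= minAvailable
}

// canPreempt mirrors the gang preemptableFn guard: evicting one task
// must not break a gang that is exactly at its minimum.
func canPreempt(readyTasks, minAvailable int32) bool {
	return minAvailable <= readyTasks-1 || minAvailable == 1
}

func main() {
	fmt.Println(gangReady(3, 4))  // false: gang of 4 with only 3 ready
	fmt.Println(canPreempt(4, 4)) // false: eviction would break the gang
	fmt.Println(canPreempt(5, 4)) // true: one task is surplus
}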
a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/nodeorder/nodeorder.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodeorder - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/scheduler/algorithm" - "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - "k8s.io/kubernetes/pkg/scheduler/cache" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" -) - -type nodeOrderPlugin struct { -} - -func getInterPodAffinityScore(name string, interPodAffinityScore schedulerapi.HostPriorityList) int { - for _, hostPriority := range interPodAffinityScore { - if hostPriority.Host == name { - return hostPriority.Score - } - } - return 0 -} - -func generateNodeMapAndSlice(nodes map[string]*api.NodeInfo) (map[string]*cache.NodeInfo, []*v1.Node) { - var nodeMap map[string]*cache.NodeInfo - var nodeSlice []*v1.Node - nodeMap = make(map[string]*cache.NodeInfo) - for _, node := range nodes { - nodeInfo := cache.NewNodeInfo(node.Pods()...) - nodeInfo.SetNode(node.Node) - nodeMap[node.Name] = nodeInfo - nodeSlice = append(nodeSlice, node.Node) - } - return nodeMap, nodeSlice -} - -type cachedNodeInfo struct { - session *framework.Session -} - -func (c *cachedNodeInfo) GetNodeInfo(name string) (*v1.Node, error) { - node, found := c.session.Nodes[name] - if !found { - for _, cacheNode := range c.session.Nodes { - pods := cacheNode.Pods() - for _, pod := range pods { - if pod.Spec.NodeName == "" { - return cacheNode.Node, nil - } - } - } - return nil, fmt.Errorf("failed to find node <%s>", name) - } - - return node.Node, nil -} - -type podLister struct { - session *framework.Session -} - -func (pl *podLister) List(selector labels.Selector) ([]*v1.Pod, error) { - var pods []*v1.Pod - for _, job := range pl.session.Jobs { - for status, tasks := range job.TaskStatusIndex { - if !api.AllocatedStatus(status) { - continue - } - - for _, task := range tasks { - if selector.Matches(labels.Set(task.Pod.Labels)) { - if task.NodeName != task.Pod.Spec.NodeName { - pod := task.Pod.DeepCopy() - pod.Spec.NodeName = task.NodeName - pods = append(pods, pod) - } else { - pods = append(pods, task.Pod) - } - } - } - } - } - - return pods, nil -} - -func (pl *podLister) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { - var pods []*v1.Pod - for _, job := range pl.session.Jobs { - for status, tasks := range job.TaskStatusIndex { - if !api.AllocatedStatus(status) { - continue - } - - for _, task := range tasks { - if podFilter(task.Pod) && selector.Matches(labels.Set(task.Pod.Labels)) { - if task.NodeName != task.Pod.Spec.NodeName { - pod := task.Pod.DeepCopy() - pod.Spec.NodeName = task.NodeName - pods = append(pods, pod) - } else { - pods = append(pods, task.Pod) - } - } - } - } - } - - return pods, 
nil -} - -type nodeLister struct { - session *framework.Session -} - -func (nl *nodeLister) List() ([]*v1.Node, error) { - var nodes []*v1.Node - for _, node := range nl.session.Nodes { - nodes = append(nodes, node.Node) - } - return nodes, nil -} - -// New returns a nodeOrderPlugin object -func New() framework.Plugin { - return &nodeOrderPlugin{} -} - -func (pp *nodeOrderPlugin) Name() string { - return "nodeorder" -} - -func (pp *nodeOrderPlugin) OnSessionOpen(ssn *framework.Session) { - nodeOrderFn := func(task *api.TaskInfo, node *api.NodeInfo) (int, error) { - - pl := &podLister{ - session: ssn, - } - - nl := &nodeLister{ - session: ssn, - } - - cn := &cachedNodeInfo{ - session: ssn, - } - - var nodeMap map[string]*cache.NodeInfo - var nodeSlice []*v1.Node - var interPodAffinityScore schedulerapi.HostPriorityList - - nodeMap, nodeSlice = generateNodeMapAndSlice(ssn.Nodes) - - nodeInfo := cache.NewNodeInfo(node.Pods()...) - nodeInfo.SetNode(node.Node) - var score = 0 - - //TODO: Add ImageLocalityPriority Function once priorityMetadata is published - //Issue: #74132 in kubernetes ( https://github.com/kubernetes/kubernetes/issues/74132 ) - - host, err := priorities.LeastRequestedPriorityMap(task.Pod, nil, nodeInfo) - if err != nil { - glog.Warningf("Least Requested Priority Failed because of Error: %v", err) - return 0, err - } - score = score + host.Score - - host, err = priorities.CalculateNodeAffinityPriorityMap(task.Pod, nil, nodeInfo) - if err != nil { - glog.Warningf("Calculate Node Affinity Priority Failed because of Error: %v", err) - return 0, err - } - score = score + host.Score - - mapFn := priorities.NewInterPodAffinityPriority(cn, nl, pl, v1.DefaultHardPodAffinitySymmetricWeight) - interPodAffinityScore, err = mapFn(task.Pod, nodeMap, nodeSlice) - if err != nil { - glog.Warningf("Calculate Inter Pod Affinity Priority Failed because of Error: %v", err) - return 0, err - } - hostScore := getInterPodAffinityScore(node.Name, interPodAffinityScore) - score = score + hostScore - - glog.V(4).Infof("Total Score for that node is: %d", score) - return score, nil - } - ssn.AddNodeOrderFn(pp.Name(), nodeOrderFn) -} - -func (pp *nodeOrderPlugin) OnSessionClose(ssn *framework.Session) { -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/predicates/predicates.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/predicates/predicates.go deleted file mode 100644 index d190904234..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/predicates/predicates.go +++ /dev/null @@ -1,204 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
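The nodeorder plugin deleted above totals the scores of several priority functions per node (least-requested, node affinity, inter-pod affinity), and the preempt action then feeds such totals into util.SelectBestNode. A sketch of the aggregation idea only; the scoreFn type and the sample scores are hypothetical stand-ins, not the kubernetes priorities API.

package main

import "fmt"

// scoreFn stands in for one priority function evaluated on a node.
type scoreFn func(node string) int

// bestNodes totals the per-function scores for each node and keeps the
// nodes with the highest sum, mirroring the score accumulation above.
func bestNodes(nodes []string, fns []scoreFn) []string {
	best, top := []string{}, -1
	for _, n := range nodes {
		total := 0
		for _, f := range fns {
			total += f(n)
		}
		if total > top {
			best, top = []string{n}, total
		} else if total == top {
			best = append(best, n)
		}
	}
	return best
}

func main() {
	leastRequested := func(n string) int { return map[string]int{"node-a": 5, "node-b": 7}[n] }
	nodeAffinity := func(n string) int { return map[string]int{"node-a": 3, "node-b": 0}[n] }
	fmt.Println(bestNodes([]string{"node-a", "node-b"}, []scoreFn{leastRequested, nodeAffinity}))
	// Output: [node-a]
}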
-*/ - -package predicates - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/scheduler/algorithm" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/pkg/scheduler/cache" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" -) - -type predicatesPlugin struct { -} - -func New() framework.Plugin { - return &predicatesPlugin{} -} - -func (pp *predicatesPlugin) Name() string { - return "predicates" -} - -type podLister struct { - session *framework.Session -} - -func (pl *podLister) List(selector labels.Selector) ([]*v1.Pod, error) { - var pods []*v1.Pod - for _, job := range pl.session.Jobs { - for status, tasks := range job.TaskStatusIndex { - if !api.AllocatedStatus(status) { - continue - } - - for _, task := range tasks { - if selector.Matches(labels.Set(task.Pod.Labels)) { - pod := task.Pod.DeepCopy() - pod.Spec.NodeName = task.NodeName - pods = append(pods, pod) - } - } - } - } - - return pods, nil -} - -func (pl *podLister) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { - var pods []*v1.Pod - for _, job := range pl.session.Jobs { - for status, tasks := range job.TaskStatusIndex { - if !api.AllocatedStatus(status) { - continue - } - - for _, task := range tasks { - if podFilter(task.Pod) && selector.Matches(labels.Set(task.Pod.Labels)) { - pod := task.Pod.DeepCopy() - pod.Spec.NodeName = task.NodeName - pods = append(pods, pod) - } - } - } - } - - return pods, nil -} - -type cachedNodeInfo struct { - session *framework.Session -} - -func (c *cachedNodeInfo) GetNodeInfo(name string) (*v1.Node, error) { - node, found := c.session.Nodes[name] - if !found { - return nil, fmt.Errorf("failed to find node <%s>", name) - } - - return node.Node, nil -} - -// Check to see if node spec is set to Schedulable or not -func CheckNodeUnschedulable(pod *v1.Pod, nodeInfo *cache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { - if nodeInfo.Node().Spec.Unschedulable { - return false, []algorithm.PredicateFailureReason{predicates.ErrNodeUnschedulable}, nil - } - return true, nil, nil -} - -func (pp *predicatesPlugin) OnSessionOpen(ssn *framework.Session) { - pl := &podLister{ - session: ssn, - } - - ni := &cachedNodeInfo{ - session: ssn, - } - - ssn.AddPredicateFn(pp.Name(), func(task *api.TaskInfo, node *api.NodeInfo) error { - nodeInfo := cache.NewNodeInfo(node.Pods()...) 
- nodeInfo.SetNode(node.Node) - - if node.Allocatable.MaxTaskNum <= len(nodeInfo.Pods()) { - return fmt.Errorf("node <%s> cannot allow more tasks running on it", node.Name) - } - - // NodeSelector Predicate - fit, _, err := predicates.PodMatchNodeSelector(task.Pod, nil, nodeInfo) - if err != nil { - return err - } - - glog.V(4).Infof("NodeSelector predicates Task <%s/%s> on Node <%s>: fit %t, err %v", - task.Namespace, task.Name, node.Name, fit, err) - - if !fit { - return fmt.Errorf("node <%s> didn't match task <%s/%s> node selector", - node.Name, task.Namespace, task.Name) - } - - // HostPorts Predicate - fit, _, err = predicates.PodFitsHostPorts(task.Pod, nil, nodeInfo) - if err != nil { - return err - } - - glog.V(4).Infof("HostPorts predicates Task <%s/%s> on Node <%s>: fit %t, err %v", - task.Namespace, task.Name, node.Name, fit, err) - - if !fit { - return fmt.Errorf("node <%s> didn't have available host ports for task <%s/%s>", - node.Name, task.Namespace, task.Name) - } - - // Check to see if node.Spec.Unschedulable is set - fit, _, err = CheckNodeUnschedulable(task.Pod, nodeInfo) - if err != nil { - return err - } - - glog.V(4).Infof("Check Unschedulable Task <%s/%s> on Node <%s>: fit %t, err %v", - task.Namespace, task.Name, node.Name, fit, err) - - if !fit { - return fmt.Errorf("task <%s/%s> node <%s> set to unschedulable", - task.Namespace, task.Name, node.Name) - } - - // Toleration/Taint Predicate - fit, _, err = predicates.PodToleratesNodeTaints(task.Pod, nil, nodeInfo) - if err != nil { - return err - } - - glog.V(4).Infof("Toleration/Taint predicates Task <%s/%s> on Node <%s>: fit %t, err %v", - task.Namespace, task.Name, node.Name, fit, err) - - if !fit { - return fmt.Errorf("task <%s/%s> does not tolerate node <%s> taints", - task.Namespace, task.Name, node.Name) - } - - // Pod Affinity/Anti-Affinity Predicate - podAffinityPredicate := predicates.NewPodAffinityPredicate(ni, pl) - fit, _, err = podAffinityPredicate(task.Pod, nil, nodeInfo) - if err != nil { - return err - } - - glog.V(4).Infof("Pod Affinity/Anti-Affinity predicates Task <%s/%s> on Node <%s>: fit %t, err %v", - task.Namespace, task.Name, node.Name, fit, err) - - if !fit { - return fmt.Errorf("task <%s/%s> affinity/anti-affinity failed on node <%s>", - task.Namespace, task.Name, node.Name) - } - - return nil - }) -} - -func (pp *predicatesPlugin) OnSessionClose(ssn *framework.Session) {} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/priority/priority.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/priority/priority.go deleted file mode 100644 index edb4ac4756..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/priority/priority.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
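The predicates plugin deleted above runs its checks strictly in sequence and rejects a node on the first failure. A minimal sketch of that short-circuit chain; the predicate type and the sample checks are illustrative, not the kubernetes predicates API.

package main

import (
	"errors"
	"fmt"
)

// predicate stands in for one per-check function in the chain above;
// it returns nil when the task fits the node.
type predicate func(task, node string) error

// runPredicates mirrors the deleted structure: checks run in order and
// the first failure rejects the node.
func runPredicates(task, node string, preds []predicate) error {
	for _, p := range preds {
		if err := p(task, node); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	nodeSelector := func(task, node string) error { return nil }
	hostPorts := func(task, node string) error {
		return errors.New("no free host ports")
	}
	err := runPredicates("default/task-0", "node-a", []predicate{nodeSelector, hostPorts})
	fmt.Println(err) // no free host ports
}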
-*/ - -package priority - -import ( - "github.com/golang/glog" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" -) - -type priorityPlugin struct { -} - -func New() framework.Plugin { - return &priorityPlugin{} -} - -func (pp *priorityPlugin) Name() string { - return "priority" -} - -func (pp *priorityPlugin) OnSessionOpen(ssn *framework.Session) { - taskOrderFn := func(l interface{}, r interface{}) int { - lv := l.(*api.TaskInfo) - rv := r.(*api.TaskInfo) - - glog.V(4).Infof("Priority TaskOrder: <%v/%v> priority is %v, <%v/%v> priority is %v", - lv.Namespace, lv.Name, lv.Priority, rv.Namespace, rv.Name, rv.Priority) - - if lv.Priority == rv.Priority { - return 0 - } - - if lv.Priority > rv.Priority { - return -1 - } - - return 1 - } - - // Add Task Order function - ssn.AddTaskOrderFn(pp.Name(), taskOrderFn) - - jobOrderFn := func(l, r interface{}) int { - lv := l.(*api.JobInfo) - rv := r.(*api.JobInfo) - - glog.V(4).Infof("Priority JobOrderFn: <%v/%v> priority is %v, <%v/%v> priority is %v", - lv.Namespace, lv.Name, lv.Priority, rv.Namespace, rv.Name, rv.Priority) - - if lv.Priority > rv.Priority { - return -1 - } - - if lv.Priority < rv.Priority { - return 1 - } - - return 0 - } - - ssn.AddJobOrderFn(pp.Name(), jobOrderFn) -} - -func (pp *priorityPlugin) OnSessionClose(ssn *framework.Session) {} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/proportion/proportion.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/proportion/proportion.go deleted file mode 100644 index 582adf3f4e..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/proportion/proportion.go +++ /dev/null @@ -1,238 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proportion - -import ( - "github.com/golang/glog" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers" - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework" -) - -type proportionPlugin struct { - totalResource *api.Resource - queueOpts map[api.QueueID]*queueAttr -} - -type queueAttr struct { - queueID api.QueueID - name string - weight int32 - share float64 - - deserved *api.Resource - allocated *api.Resource - request *api.Resource -} - -func New() framework.Plugin { - return &proportionPlugin{ - totalResource: api.EmptyResource(), - queueOpts: map[api.QueueID]*queueAttr{}, - } -} - -func (pp *proportionPlugin) Name() string { - return "proportion" -} - -func (pp *proportionPlugin) OnSessionOpen(ssn *framework.Session) { - // Prepare scheduling data for this session. - for _, n := range ssn.Nodes { - pp.totalResource.Add(n.Allocatable) - } - - glog.V(4).Infof("The total resource is <%v>", pp.totalResource) - - // Build attributes for Queues.
- for _, job := range ssn.Jobs { - glog.V(4).Infof("Considering Job <%s/%s>.", job.Namespace, job.Name) - - if _, found := pp.queueOpts[job.Queue]; !found { - queue := ssn.Queues[job.Queue] - attr := &queueAttr{ - queueID: queue.UID, - name: queue.Name, - weight: queue.Weight, - - deserved: api.EmptyResource(), - allocated: api.EmptyResource(), - request: api.EmptyResource(), - } - pp.queueOpts[job.Queue] = attr - glog.V(4).Infof("Added Queue <%s> attributes.", job.Queue) - } - - for status, tasks := range job.TaskStatusIndex { - if api.AllocatedStatus(status) { - for _, t := range tasks { - attr := pp.queueOpts[job.Queue] - attr.allocated.Add(t.Resreq) - attr.request.Add(t.Resreq) - } - } else if status == api.Pending { - for _, t := range tasks { - attr := pp.queueOpts[job.Queue] - attr.request.Add(t.Resreq) - } - } - } - } - - remaining := pp.totalResource.Clone() - meet := map[api.QueueID]struct{}{} - for { - totalWeight := int32(0) - for _, attr := range pp.queueOpts { - if _, found := meet[attr.queueID]; found { - continue - } - totalWeight += attr.weight - } - - // If no queues, break - if totalWeight == 0 { - break - } - - // Calculates the deserved of each Queue. - deserved := api.EmptyResource() - for _, attr := range pp.queueOpts { - glog.V(4).Infof("Considering Queue <%s>: weight <%d>, total weight <%d>.", - attr.name, attr.weight, totalWeight) - if _, found := meet[attr.queueID]; found { - continue - } - - attr.deserved.Add(remaining.Clone().Multi(float64(attr.weight) / float64(totalWeight))) - if !attr.deserved.LessEqual(attr.request) { - attr.deserved = helpers.Min(attr.deserved, attr.request) - meet[attr.queueID] = struct{}{} - } - pp.updateShare(attr) - - glog.V(4).Infof("The attributes of queue <%s> in proportion: deserved <%v>, allocate <%v>, request <%v>, share <%0.2f>", - attr.name, attr.deserved, attr.allocated, attr.request, attr.share) - - deserved.Add(attr.deserved) - } - - remaining.Sub(deserved) - if remaining.IsEmpty() { - break - } - } - - ssn.AddQueueOrderFn(pp.Name(), func(l, r interface{}) int { - lv := l.(*api.QueueInfo) - rv := r.(*api.QueueInfo) - - if pp.queueOpts[lv.UID].share == pp.queueOpts[rv.UID].share { - return 0 - } - - if pp.queueOpts[lv.UID].share < pp.queueOpts[rv.UID].share { - return -1 - } - - return 1 - }) - - ssn.AddReclaimableFn(pp.Name(), func(reclaimer *api.TaskInfo, reclaimees []*api.TaskInfo) []*api.TaskInfo { - var victims []*api.TaskInfo - allocations := map[api.QueueID]*api.Resource{} - - for _, reclaimee := range reclaimees { - job := ssn.Jobs[reclaimee.Job] - attr := pp.queueOpts[job.Queue] - - if _, found := allocations[job.Queue]; !found { - allocations[job.Queue] = attr.allocated.Clone() - } - allocated := allocations[job.Queue] - if allocated.Less(reclaimee.Resreq) { - glog.Errorf("Failed to calculate the allocation of Task <%s/%s> in Queue <%s>.", - reclaimee.Namespace, reclaimee.Name, job.Queue) - continue - } - - allocated.Sub(reclaimee.Resreq) - if attr.deserved.LessEqual(allocated) { - victims = append(victims, reclaimee) - } - } - - return victims - }) - - ssn.AddOverusedFn(pp.Name(), func(obj interface{}) bool { - queue := obj.(*api.QueueInfo) - attr := pp.queueOpts[queue.UID] - - overused := attr.deserved.LessEqual(attr.allocated) - if overused { - glog.V(3).Infof("Queue <%v>: deserved <%v>, allocated <%v>, share <%v>", - queue.Name, attr.deserved, attr.allocated, attr.share) - } - - return overused - }) - - // Register event handlers. 
- ssn.AddEventHandler(&framework.EventHandler{ - AllocateFunc: func(event *framework.Event) { - job := ssn.Jobs[event.Task.Job] - attr := pp.queueOpts[job.Queue] - attr.allocated.Add(event.Task.Resreq) - - pp.updateShare(attr) - - glog.V(4).Infof("Proportion AllocateFunc: task <%v/%v>, resreq <%v>, share <%v>", - event.Task.Namespace, event.Task.Name, event.Task.Resreq, attr.share) - }, - DeallocateFunc: func(event *framework.Event) { - job := ssn.Jobs[event.Task.Job] - attr := pp.queueOpts[job.Queue] - attr.allocated.Sub(event.Task.Resreq) - - pp.updateShare(attr) - - glog.V(4).Infof("Proportion EvictFunc: task <%v/%v>, resreq <%v>, share <%v>", - event.Task.Namespace, event.Task.Name, event.Task.Resreq, attr.share) - }, - }) -} - -func (pp *proportionPlugin) OnSessionClose(ssn *framework.Session) { - pp.totalResource = nil - pp.queueOpts = nil -} - -func (pp *proportionPlugin) updateShare(attr *queueAttr) { - res := float64(0) - - // TODO(k82cn): how to handle fragment issues? - for _, rn := range api.ResourceNames() { - share := helpers.Share(attr.allocated.Get(rn), attr.deserved.Get(rn)) - if share > res { - res = share - } - } - - attr.share = res -} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util/priority_queue.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util/priority_queue.go deleted file mode 100644 index 91b3d72003..0000000000 --- a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util/priority_queue.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "container/heap" - - "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" -) - -type PriorityQueue struct { - queue priorityQueue -} - -type priorityQueue struct { - items []interface{} - lessFn api.LessFn -} - -func NewPriorityQueue(lessFn api.LessFn) *PriorityQueue { - return &PriorityQueue{ - queue: priorityQueue{ - items: make([]interface{}, 0), - lessFn: lessFn, - }, - } -} - -func (q *PriorityQueue) Push(it interface{}) { - heap.Push(&q.queue, it) -} - -func (q *PriorityQueue) Pop() interface{} { - if q.Len() == 0 { - return nil - } - - return heap.Pop(&q.queue) -} - -func (q *PriorityQueue) Empty() bool { - return q.queue.Len() == 0 -} - -func (q *PriorityQueue) Len() int { - return q.queue.Len() -} - -func (pq *priorityQueue) Len() int { return len(pq.items) } - -func (pq *priorityQueue) Less(i, j int) bool { - if pq.lessFn == nil { - return i < j - } - - // We want Pop to give us the highest, not lowest, priority so we use greater than here.
- return pq.lessFn(pq.items[i], pq.items[j]) -} - -func (pq priorityQueue) Swap(i, j int) { - pq.items[i], pq.items[j] = pq.items[j], pq.items[i] -} - -func (pq *priorityQueue) Push(x interface{}) { - (*pq).items = append((*pq).items, x) -} - -func (pq *priorityQueue) Pop() interface{} { - old := (*pq).items - n := len(old) - item := old[n-1] - (*pq).items = old[0 : n-1] - return item -} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar old mode 100644 new mode 100755 diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto old mode 100644 new mode 100755 diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh old mode 100644 new mode 100755 diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh old mode 100644 new mode 100755 diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh old mode 100644 new mode 100755
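For reference after all these deletions, the removed util.PriorityQueue is a thin wrapper over container/heap with a pluggable less function, where "greater than" ordering makes Pop return the highest-priority item first. A self-contained sketch of the same pattern; the lessFn type stands in for api.LessFn and the int payload is illustrative.

package main

import (
	"container/heap"
	"fmt"
)

// lessFn plays the role of api.LessFn in the deleted wrapper.
type lessFn func(l, r interface{}) bool

// pq implements heap.Interface around a comparator, like priorityQueue above.
type pq struct {
	items []interface{}
	less  lessFn
}

func (q *pq) Len() int           { return len(q.items) }
func (q *pq) Less(i, j int) bool { return q.less(q.items[i], q.items[j]) }
func (q *pq) Swap(i, j int)      { q.items[i], q.items[j] = q.items[j], q.items[i] }
func (q *pq) Push(x interface{}) { q.items = append(q.items, x) }
func (q *pq) Pop() interface{} {
	old := q.items
	n := len(old)
	it := old[n-1]
	q.items = old[:n-1]
	return it
}

func main() {
	// "Greater than" in the comparator makes this a max-heap, so the
	// highest priority pops first, matching the deleted comment above.
	q := &pq{less: func(l, r interface{}) bool { return l.(int) > r.(int) }}
	for _, p := range []int{1, 5, 3} {
		heap.Push(q, p)
	}
	for q.Len() > 0 {
		fmt.Println(heap.Pop(q)) // 5, then 3, then 1
	}
}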