From 6fdaa3ee18e09cf996cd1020d530c0bde4c92ad1 Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Wed, 16 Sep 2020 19:52:11 +0800 Subject: [PATCH 1/7] add UT for dmcluster configurations and dmapi --- .../v1alpha1/defaulting/dmcluster_test.go | 59 ++++ pkg/apis/pingcap/v1alpha1/dm_config_test.go | 98 ++++++ pkg/apis/pingcap/v1alpha1/dmcluster_test.go | 314 ++++++++++++++++++ pkg/discovery/server/server_test.go | 93 ++++++ pkg/dmapi/dmapi.go | 8 +- pkg/dmapi/dmapi_test.go | 210 ++++++++++++ 6 files changed, 778 insertions(+), 4 deletions(-) create mode 100644 pkg/apis/pingcap/v1alpha1/defaulting/dmcluster_test.go create mode 100644 pkg/apis/pingcap/v1alpha1/dm_config_test.go create mode 100644 pkg/apis/pingcap/v1alpha1/dmcluster_test.go create mode 100644 pkg/dmapi/dmapi_test.go diff --git a/pkg/apis/pingcap/v1alpha1/defaulting/dmcluster_test.go b/pkg/apis/pingcap/v1alpha1/defaulting/dmcluster_test.go new file mode 100644 index 00000000000..1d06cb0bab1 --- /dev/null +++ b/pkg/apis/pingcap/v1alpha1/defaulting/dmcluster_test.go @@ -0,0 +1,59 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaulting + +import ( + "testing" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" +) + +func TestSetDMSpecDefault(t *testing.T) { + g := NewGomegaWithT(t) + + dc := newDMCluster() + SetDMClusterDefault(dc) + g.Expect(dc.Spec.Master.Config).Should(BeNil()) + + dc = newDMCluster() + rpcTimeoutStr := "40s" + dc.Spec.Master.Config = &v1alpha1.MasterConfig{ + RPCTimeoutStr: &rpcTimeoutStr, + } + SetDMClusterDefault(dc) + g.Expect(*dc.Spec.Master.Config.RPCTimeoutStr).Should(Equal(rpcTimeoutStr)) + + dc = newDMCluster() + dc.Spec.Version = "v2.0.0-rc.2" + keepAliveTTL := int64(15) + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + KeepAliveTTL: &keepAliveTTL, + } + SetDMClusterDefault(dc) + g.Expect(*dc.Spec.Worker.Config.KeepAliveTTL).Should(Equal(keepAliveTTL)) + g.Expect(*dc.Spec.Master.MaxFailoverCount).Should(Equal(int32(3))) + g.Expect(dc.Spec.Master.BaseImage).Should(Equal(defaultMasterImage)) + g.Expect(*dc.Spec.Worker.MaxFailoverCount).Should(Equal(int32(3))) + g.Expect(dc.Spec.Worker.BaseImage).Should(Equal(defaultWorkerImage)) +} + +func newDMCluster() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + } +} diff --git a/pkg/apis/pingcap/v1alpha1/dm_config_test.go b/pkg/apis/pingcap/v1alpha1/dm_config_test.go new file mode 100644 index 00000000000..86a0bb1fedc --- /dev/null +++ b/pkg/apis/pingcap/v1alpha1/dm_config_test.go @@ -0,0 +1,98 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/BurntSushi/toml" + . "github.com/onsi/gomega" + "k8s.io/utils/pointer" +) + +func TestDMMasterConfig(t *testing.T) { + g := NewGomegaWithT(t) + c := &MasterConfig{ + RPCTimeoutStr: pointer.StringPtr("40s"), + RPCRateLimit: pointer.Float64Ptr(15), + DMSecurityConfig: DMSecurityConfig{ + SSLCA: pointer.StringPtr("/var/lib/dm-master-tls/ca.crt"), + SSLCert: pointer.StringPtr("/var/lib/dm-master-tls/tls.crt"), + SSLKey: pointer.StringPtr("/var/lib/dm-master-tls/tls.key"), + }, + } + jsonStr, err := json.Marshal(c) + g.Expect(err).To(Succeed()) + g.Expect(jsonStr).To(ContainSubstring("rpc-rate-limit")) + g.Expect(jsonStr).To(ContainSubstring("40s")) + g.Expect(jsonStr).NotTo(ContainSubstring("rpc-rate-burst"), "Expected empty fields to be omitted") + var jsonUnmarshaled MasterConfig + err = json.Unmarshal(jsonStr, &jsonUnmarshaled) + g.Expect(err).To(Succeed()) + g.Expect(&jsonUnmarshaled).To(Equal(c)) + + buff := new(bytes.Buffer) + encoder := toml.NewEncoder(buff) + err = encoder.Encode(c) + g.Expect(err).To(Succeed()) + tStr := buff.String() + g.Expect(tStr).To((Equal(`rpc-timeout = "40s" +rpc-rate-limit = 15.0 +ssl-ca = "/var/lib/dm-master-tls/ca.crt" +ssl-cert = "/var/lib/dm-master-tls/tls.crt" +ssl-key = "/var/lib/dm-master-tls/tls.key" +`))) + + var tUnmarshaled MasterConfig + err = toml.Unmarshal([]byte(tStr), &tUnmarshaled) + g.Expect(err).To(Succeed()) + g.Expect(&tUnmarshaled).To(Equal(c)) +} + +func TestDMWorkerConfig(t *testing.T) { + g := NewGomegaWithT(t) + c := &WorkerConfig{ + KeepAliveTTL: pointer.Int64Ptr(15), + DMSecurityConfig: DMSecurityConfig{ + SSLCA: pointer.StringPtr("/var/lib/dm-worker-tls/ca.crt"), + SSLCert: pointer.StringPtr("/var/lib/dm-worker-tls/tls.crt"), + SSLKey: pointer.StringPtr("/var/lib/dm-worker-tls/tls.key"), + }, + } + jsonStr, err := json.Marshal(c) + g.Expect(err).To(Succeed()) + g.Expect(jsonStr).NotTo(ContainSubstring("log-file"), "Expected empty fields to be omitted") + var jsonUnmarshaled WorkerConfig + err = json.Unmarshal(jsonStr, &jsonUnmarshaled) + g.Expect(err).To(Succeed()) + g.Expect(&jsonUnmarshaled).To(Equal(c)) + + buff := new(bytes.Buffer) + encoder := toml.NewEncoder(buff) + err = encoder.Encode(c) + g.Expect(err).To(Succeed()) + tStr := buff.String() + g.Expect(tStr).To((Equal(`keepalive-ttl = 15 +ssl-ca = "/var/lib/dm-worker-tls/ca.crt" +ssl-cert = "/var/lib/dm-worker-tls/tls.crt" +ssl-key = "/var/lib/dm-worker-tls/tls.key" +`))) + + var tUnmarshaled WorkerConfig + err = toml.Unmarshal([]byte(tStr), &tUnmarshaled) + g.Expect(err).To(Succeed()) + g.Expect(&tUnmarshaled).To(Equal(c)) +} diff --git a/pkg/apis/pingcap/v1alpha1/dmcluster_test.go b/pkg/apis/pingcap/v1alpha1/dmcluster_test.go new file mode 100644 index 00000000000..570f8dc0df1 --- /dev/null +++ b/pkg/apis/pingcap/v1alpha1/dmcluster_test.go @@ -0,0 +1,314 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "testing" + + . "github.com/onsi/gomega" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" +) + +func TestDMMasterIsAvailable(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + update func(*DMCluster) + expectFn func(*GomegaWithT, bool) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMCluster() + test.update(dc) + test.expectFn(g, dc.MasterIsAvailable()) + } + tests := []testcase{ + { + name: "dm-master members count is 1", + update: func(dc *DMCluster) { + dc.Status.Master.Members = map[string]MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + } + }, + expectFn: func(g *GomegaWithT, b bool) { + g.Expect(b).To(BeFalse()) + }, + }, + { + name: "dm-master members count is 2, but health count is 1", + update: func(dc *DMCluster) { + dc.Status.Master.Members = map[string]MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: false}, + } + }, + expectFn: func(g *GomegaWithT, b bool) { + g.Expect(b).To(BeFalse()) + }, + }, + { + name: "dm-master members count is 3, health count is 3, but ready replicas is 1", + update: func(dc *DMCluster) { + dc.Status.Master.Members = map[string]MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: true}, + "dm-master-2": {Name: "dm-master-2", Health: true}, + } + dc.Status.Master.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 1} + }, + expectFn: func(g *GomegaWithT, b bool) { + g.Expect(b).To(BeFalse()) + }, + }, + { + name: "dm-master is available", + update: func(dc *DMCluster) { + dc.Status.Master.Members = map[string]MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: true}, + "dm-master-2": {Name: "dm-master-2", Health: true}, + } + dc.Status.Master.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + }, + expectFn: func(g *GomegaWithT, b bool) { + g.Expect(b).To(BeTrue()) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestDMComponentAccessor(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + cluster *DMClusterSpec + component *ComponentSpec + expectFn func(*GomegaWithT, ComponentAccessor) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + accessor := buildDMClusterComponentAccessor(test.cluster, test.component) + test.expectFn(g, accessor) + } + affinity := &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{ + TopologyKey: "rack", + }}, + }, + } + toleration1 := corev1.Toleration{ + Key: "k1", + } + toleration2 := corev1.Toleration{ + Key: "k2", + } + tests := []testcase{ + { + name: "use cluster-level defaults", + cluster: &DMClusterSpec{ + ImagePullPolicy: corev1.PullNever, + HostNetwork: pointer.BoolPtr(true), + Affinity: affinity, + PriorityClassName: pointer.StringPtr("test"), + SchedulerName: "test", + }, + 
component: &ComponentSpec{}, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.ImagePullPolicy()).Should(Equal(corev1.PullNever)) + g.Expect(a.HostNetwork()).Should(Equal(true)) + g.Expect(a.Affinity()).Should(Equal(affinity)) + g.Expect(*a.PriorityClassName()).Should(Equal("test")) + g.Expect(a.SchedulerName()).Should(Equal("test")) + }, + }, + { + name: "override at component-level", + cluster: &DMClusterSpec{ + ImagePullPolicy: corev1.PullNever, + HostNetwork: pointer.BoolPtr(true), + Affinity: nil, + PriorityClassName: pointer.StringPtr("test"), + SchedulerName: "test", + }, + component: &ComponentSpec{ + ImagePullPolicy: func() *corev1.PullPolicy { a := corev1.PullAlways; return &a }(), + HostNetwork: func() *bool { a := false; return &a }(), + Affinity: affinity, + PriorityClassName: pointer.StringPtr("override"), + SchedulerName: pointer.StringPtr("override"), + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.ImagePullPolicy()).Should(Equal(corev1.PullAlways)) + g.Expect(a.HostNetwork()).Should(Equal(false)) + g.Expect(a.Affinity()).Should(Equal(affinity)) + g.Expect(*a.PriorityClassName()).Should(Equal("override")) + g.Expect(a.SchedulerName()).Should(Equal("override")) + }, + }, + { + name: "node selector merge", + cluster: &DMClusterSpec{ + NodeSelector: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + NodeSelector: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.NodeSelector()).Should(Equal(map[string]string{ + "k1": "v2", + "k3": "v3", + })) + }, + }, + { + name: "annotations merge", + cluster: &DMClusterSpec{ + Annotations: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + Annotations: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Annotations()).Should(Equal(map[string]string{ + "k1": "v2", + "k3": "v3", + })) + }, + }, + { + name: "annotations merge", + cluster: &DMClusterSpec{ + Annotations: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + Annotations: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Annotations()).Should(Equal(map[string]string{ + "k1": "v2", + "k3": "v3", + })) + }, + }, + { + name: "tolerations merge", + cluster: &DMClusterSpec{ + Tolerations: []corev1.Toleration{toleration1}, + }, + component: &ComponentSpec{ + Tolerations: []corev1.Toleration{toleration2}, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Tolerations()).Should(ConsistOf(toleration2)) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestMasterVersion(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + update func(*DMCluster) + expectFn func(*GomegaWithT, *DMCluster) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMCluster() + test.update(dc) + test.expectFn(g, dc) + } + tests := []testcase{ + { + name: "has tag", + update: func(dc *DMCluster) { + dc.Spec.Master.BaseImage = "pingcap/dm:v2.0.0-rc.2" + }, + expectFn: func(g *GomegaWithT, dc *DMCluster) { + g.Expect(dc.MasterVersion()).To(Equal("v2.0.0-rc.2")) + }, + }, + { + name: "don't have tag", + update: func(dc *DMCluster) { + dc.Spec.Master.BaseImage = "pingcap/pd" + }, + expectFn: func(g *GomegaWithT, dc *DMCluster) { + 
g.Expect(dc.MasterVersion()).To(Equal("latest")) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func newDMCluster() *DMCluster { + return &DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dm-master", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: DMClusterSpec{ + Master: MasterSpec{ + Replicas: 3, + StorageSize: "10G", + }, + Worker: &WorkerSpec{ + Replicas: 3, + StorageSize: "10G", + }, + }, + } +} diff --git a/pkg/discovery/server/server_test.go b/pkg/discovery/server/server_test.go index 8c2f5d51712..dec2e060d8c 100644 --- a/pkg/discovery/server/server_test.go +++ b/pkg/discovery/server/server_test.go @@ -49,6 +49,17 @@ var ( PD: &v1alpha1.PDSpec{Replicas: 3}, }, } + dc = &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{Kind: "DMCluster", APIVersion: "v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + ResourceVersion: "1", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{Replicas: 3}, + }, + } ) func TestServer(t *testing.T) { @@ -134,3 +145,85 @@ func TestServer(t *testing.T) { t.Errorf("join expects 2, got %d", join) } } + +func TestDMServer(t *testing.T) { + os.Setenv("MY_POD_NAMESPACE", "default") + cli := fake.NewSimpleClientset() + kubeCli := kubefake.NewSimpleClientset() + fakePDControl := pdapi.NewFakePDControl(kubeCli) + faleMasterControl := dmapi.NewFakeMasterControl(kubeCli) + masterClient := dmapi.NewFakeMasterClient() + s := NewServer(fakePDControl, faleMasterControl, cli, kubeCli) + httpServer := httptest.NewServer(s.(*server).container.ServeMux) + defer httpServer.Close() + + var lock sync.RWMutex + masterMemberInfos := make([]*dmapi.MastersInfo, 0) + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + lock.RLock() + defer lock.RUnlock() + if len(masterMemberInfos) <= 0 { + return nil, fmt.Errorf("no members yet") + } + // as masterMemberInfos.Members maybe modified, we must return a copy + ret := append([]*dmapi.MastersInfo{}, masterMemberInfos...) 
+ return ret, nil + }) + cli.PingcapV1alpha1().DMClusters(dc.Namespace).Create(dc) + faleMasterControl.SetMasterClient(dc.Namespace, dc.Name, masterClient) + + var ( + initial int32 + join int32 + ) + + errg, _ := errgroup.WithContext(context.Background()) + + for i := 0; i < 3; i++ { + i := i + errg.Go(func() error { + for { + svc := fmt.Sprintf(`foo-dm-master-%d.foo-dm-master-peer:2380`, i) + url := httpServer.URL + fmt.Sprintf("/new/%s/dm", base64.StdEncoding.EncodeToString([]byte(svc))) + resp, err := http.Get(url) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + time.Sleep(time.Millisecond * 100) + continue + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + lock.Lock() + masterMemberInfos = append(masterMemberInfos, &dmapi.MastersInfo{ + Name: svc, + PeerURLs: []string{ + svc, + }, + }) + lock.Unlock() + if strings.HasPrefix(string(data), "--join=") { + atomic.AddInt32(&join, 1) + } else if strings.HasPrefix(string(data), "--initial-cluster=") { + atomic.AddInt32(&initial, 1) + } + return nil + } + }) + } + + err := errg.Wait() + if err != nil { + t.Errorf("get dm-master info failed: %v", err) + } + + if initial != 1 { + t.Errorf("initial expects 1, got %d", initial) + } + if join != 2 { + t.Errorf("join expects 2, got %d", join) + } +} diff --git a/pkg/dmapi/dmapi.go b/pkg/dmapi/dmapi.go index 983d9fb84b5..1ab964e7701 100644 --- a/pkg/dmapi/dmapi.go +++ b/pkg/dmapi/dmapi.go @@ -203,13 +203,13 @@ func (mc *masterClient) deleteMember(query string) error { if err != nil { return err } - deleteMemeberResp := &RespHeader{} - err = json.Unmarshal(body, deleteMemeberResp) + deleteMemberResp := &RespHeader{} + err = json.Unmarshal(body, deleteMemberResp) if err != nil { return fmt.Errorf("unable to unmarshal delete member resp: %s, query: %s, err: %s", body, query, err) } - if !deleteMemeberResp.Result { - return fmt.Errorf("unable to delete member, query: %s, err: %s", query, deleteMemeberResp.Msg) + if !deleteMemberResp.Result { + return fmt.Errorf("unable to delete member, query: %s, err: %s", query, deleteMemberResp.Msg) } return nil diff --git a/pkg/dmapi/dmapi_test.go b/pkg/dmapi/dmapi_test.go new file mode 100644 index 00000000000..aaddd3e7d05 --- /dev/null +++ b/pkg/dmapi/dmapi_test.go @@ -0,0 +1,210 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package dmapi + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + . 
"github.com/onsi/gomega" +) + +const ( + ContentTypeJSON string = "application/json" +) + +func getClientServer(h func(http.ResponseWriter, *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(h)) +} + +func TestGetMembers(t *testing.T) { + g := NewGomegaWithT(t) + masters := []*MastersInfo{ + {Name: "dm-master1", MemberID: "1", Alive: false}, + {Name: "dm-master2", MemberID: "2", Alive: true}, + {Name: "dm-master3", MemberID: "3", Alive: true}, + } + masterResp := MastersResp{ + RespHeader: RespHeader{Result: true, Msg: ""}, + ListMemberResp: []*ListMemberMaster{ + {MembersMaster{ + Msg: "", + Masters: masters, + }}, + }, + } + masterBytes, err := json.Marshal(masterResp) + g.Expect(err).NotTo(HaveOccurred()) + + workers := []*WorkersInfo{ + {Name: "dm-worker1", Addr: "127.0.0.1:8262", Stage: "free"}, + {Name: "dm-worker2", Addr: "127.0.0.1:8263", Stage: "bound", Source: "mysql-replica-01"}, + {Name: "dm-worker3", Addr: "127.0.0.1:8264", Stage: "offline"}, + } + workerResp := WorkerResp{ + RespHeader: RespHeader{Result: true, Msg: ""}, + ListMemberResp: []*ListMemberWorker{ + {MembersWorker{ + Msg: "", + Workers: workers, + }}, + }, + } + workerBytes, err := json.Marshal(workerResp) + g.Expect(err).NotTo(HaveOccurred()) + + leader := MembersLeader{ + Msg: "", + Name: "dm-master2", + Addr: "127.0.0.1:8361", + } + leaderResp := LeaderResp{ + RespHeader: RespHeader{Result: true, Msg: ""}, + ListMemberResp: []*ListMemberLeader{ + {leader}}, + } + leaderBytes, err := json.Marshal(leaderResp) + g.Expect(err).NotTo(HaveOccurred()) + + tcs := []struct { + caseName string + path string + method string + getType string + resp []byte + want interface{} + }{{ + caseName: "GetMasters", + path: fmt.Sprintf("/%s", membersPrefix), + method: "GET", + resp: masterBytes, + want: masters, + getType: "master", + }, { + caseName: "GetWorkers", + path: fmt.Sprintf("/%s", membersPrefix), + method: "GET", + resp: workerBytes, + want: workers, + getType: "worker", + }, { + caseName: "GetLeader", + path: fmt.Sprintf("/%s", membersPrefix), + method: "GET", + resp: leaderBytes, + want: leader, + getType: "leader", + }} + + for _, tc := range tcs { + svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + g.Expect(request.Method).To(Equal(tc.method), "check method") + g.Expect(request.URL.Path).To(Equal(tc.path), "check url") + g.Expect(request.FormValue(tc.getType)).To(Equal("true"), "check form value") + + w.Header().Set("Content-Type", ContentTypeJSON) + w.Write(tc.resp) + }) + defer svc.Close() + + var ( + result interface{} + err error + ) + masterClient := NewMasterClient(svc.URL, DefaultTimeout, &tls.Config{}, false) + switch tc.getType { + case "master": + result, err = masterClient.GetMasters() + case "worker": + result, err = masterClient.GetWorkers() + case "leader": + result, err = masterClient.GetLeader() + } + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(result).To(Equal(tc.want)) + } +} + +func TestEvictLeader(t *testing.T) { + g := NewGomegaWithT(t) + evictLeaderResp := RespHeader{Result: true, Msg: ""} + evictLeaderBytes, err := json.Marshal(evictLeaderResp) + g.Expect(err).NotTo(HaveOccurred()) + + svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + g.Expect(request.Method).To(Equal("PUT"), "check method") + g.Expect(request.URL.Path).To(Equal(fmt.Sprintf("/%s/1", leaderPrefix)), "check url") + + w.Header().Set("Content-Type", ContentTypeJSON) + w.Write(evictLeaderBytes) + }) + + masterClient := NewMasterClient(svc.URL, 
DefaultTimeout, &tls.Config{}, false) + err = masterClient.EvictLeader() + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestDeleteMember(t *testing.T) { + g := NewGomegaWithT(t) + deleteMemberResp := RespHeader{Result: true, Msg: ""} + deleteMemberBytes, err := json.Marshal(deleteMemberResp) + g.Expect(err).NotTo(HaveOccurred()) + + tcs := []struct { + caseName string + path string + method string + resp []byte + delType string + name string + }{{ + caseName: "DeleteMaster", + path: fmt.Sprintf("/%s", membersPrefix), + method: "DELETE", + resp: deleteMemberBytes, + delType: "master", + name: "dm-master-1", + }, { + caseName: "DeleteWorker", + path: fmt.Sprintf("/%s", membersPrefix), + method: "DELETE", + resp: deleteMemberBytes, + delType: "worker", + name: "dm-worker-1", + }} + + for _, tc := range tcs { + svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + g.Expect(request.Method).To(Equal(tc.method), "check method") + g.Expect(request.URL.Path).To(Equal(fmt.Sprintf("%s/%s/%s", tc.path, tc.delType, tc.name)), "check url") + + w.Header().Set("Content-Type", ContentTypeJSON) + w.Write(tc.resp) + }) + defer svc.Close() + + masterClient := NewMasterClient(svc.URL, DefaultTimeout, &tls.Config{}, false) + switch tc.delType { + case "master": + err = masterClient.DeleteMaster(tc.name) + case "worker": + err = masterClient.DeleteWorker(tc.name) + } + g.Expect(err).NotTo(HaveOccurred()) + } +} From 6c2b1becb3e3dcd61038c20cdc906063deff449a Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Fri, 18 Sep 2020 20:28:10 +0800 Subject: [PATCH 2/7] add some unit tests --- .../pingcap/v1alpha1/validation/validation.go | 24 ++ .../v1alpha1/validation/validation_test.go | 212 +++++++++++ pkg/controller/controller_utils_test.go | 52 +++ .../dm_cluster_condition_updater_test.go | 151 ++++++++ .../dmcluster/dm_cluster_control.go | 23 ++ .../dmcluster/dm_cluster_control_test.go | 304 ++++++++++++++++ .../dmcluster/dm_cluster_controller.go | 23 +- .../dmcluster/dm_cluster_controller_test.go | 344 ++++++++++++++++++ pkg/controller/dmcluster_control.go | 34 ++ pkg/controller/dmcluster_control_test.go | 66 ++++ .../tidbcluster/tidb_cluster_controller.go | 6 +- .../tidb_cluster_controller_test.go | 20 +- pkg/controller/tidbcluster_control_test.go | 1 - pkg/label/label_test.go | 42 +++ .../member/dm_master_member_manager.go | 19 + .../member/dm_master_member_manager_test.go | 14 + .../member/dm_worker_member_manager.go | 19 + pkg/manager/meta/reclaim_policy_manager.go | 4 + 18 files changed, 1324 insertions(+), 34 deletions(-) create mode 100644 pkg/controller/dmcluster/dm_cluster_condition_updater_test.go create mode 100644 pkg/controller/dmcluster/dm_cluster_control_test.go create mode 100644 pkg/controller/dmcluster/dm_cluster_controller_test.go create mode 100644 pkg/controller/dmcluster_control_test.go create mode 100644 pkg/manager/member/dm_master_member_manager_test.go diff --git a/pkg/apis/pingcap/v1alpha1/validation/validation.go b/pkg/apis/pingcap/v1alpha1/validation/validation.go index 37086176eb1..f5069961676 100644 --- a/pkg/apis/pingcap/v1alpha1/validation/validation.go +++ b/pkg/apis/pingcap/v1alpha1/validation/validation.go @@ -22,6 +22,7 @@ import ( "reflect" "strings" + "github.com/Masterminds/semver" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/label" corev1 "k8s.io/api/core/v1" @@ -222,6 +223,15 @@ func validatePumpSpec(spec *v1alpha1.PumpSpec, fldPath *field.Path) field.ErrorL func validateDMClusterSpec(spec 
*v1alpha1.DMClusterSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} + if spec.Version != "" { + clusterVersionLT2, _ := clusterVersionLessThan2(spec.Version) + if clusterVersionLT2 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("version"), spec.Version, "dm cluster version can't set to v1.x.y")) + } + } + if spec.Discovery.Address == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("discovery.address"), "discovery.address must not be empty")) + } allErrs = append(allErrs, validateMasterSpec(&spec.Master, fldPath.Child("master"))...) if spec.Worker != nil { allErrs = append(allErrs, validateWorkerSpec(spec.Worker, fldPath.Child("worker"))...) @@ -232,6 +242,10 @@ func validateDMClusterSpec(spec *v1alpha1.DMClusterSpec, fldPath *field.Path) fi func validateMasterSpec(spec *v1alpha1.MasterSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, validateComponentSpec(&spec.ComponentSpec, fldPath)...) + // make sure that storageSize for dm-master is assigned + if spec.Replicas > 0 && spec.StorageSize == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("storageSize"), "storageSize must not be empty")) + } return allErrs } @@ -510,3 +524,13 @@ func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.Error } return allErrs } + +// clusterVersionLessThan2 makes sure that deployed dm cluster version not to be v1.0.x +func clusterVersionLessThan2(version string) (bool, error) { + v, err := semver.NewVersion(version) + if err != nil { + return false, err + } + + return v.Major() < 2, nil +} diff --git a/pkg/apis/pingcap/v1alpha1/validation/validation_test.go b/pkg/apis/pingcap/v1alpha1/validation/validation_test.go index fbb441711fe..57f6780d244 100644 --- a/pkg/apis/pingcap/v1alpha1/validation/validation_test.go +++ b/pkg/apis/pingcap/v1alpha1/validation/validation_test.go @@ -189,6 +189,153 @@ func TestValidateAnnotations(t *testing.T) { } } +func TestValidateDMAnnotations(t *testing.T) { + successCases := []struct { + name string + dc v1alpha1.DMCluster + }{ + { + name: "all-fields-valid", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + label.AnnDMMasterDeleteSlots: "[1,2]", + label.AnnDMWorkerDeleteSlots: "[1]", + }, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.1", + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + }, + }, + }, + }, + { + name: "no delete slots", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{}, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.1", + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + }, + }, + }, + }, + // TODO: more cases + } + + for _, v := range successCases { + if errs := validateAnnotations(v.dc.ObjectMeta.Annotations, field.NewPath("metadata", "annotations")); len(errs) != 0 { + t.Errorf("[%s]: unexpected error: %v", v.name, errs) + } + } + + errorCases := []struct { + name string + dc v1alpha1.DMCluster + errs []field.Error + }{ + { + name: "delete slots empty string", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + label.AnnDMMasterDeleteSlots: 
"", + label.AnnDMWorkerDeleteSlots: "", + }, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.1", + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + }, + }, + }, + errs: []field.Error{ + { + Type: field.ErrorTypeInvalid, + Detail: `value of "dm-master.tidb.pingcap.com/delete-slots" annotation must be a JSON list of int32`, + }, + { + Type: field.ErrorTypeInvalid, + Detail: `value of "dm-worker.tidb.pingcap.com/delete-slots" annotation must be a JSON list of int32`, + }, + }, + }, + { + name: "delete slots invalid format", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + label.AnnDMWorkerDeleteSlots: "1,2,3", + }, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.1", + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + }, + }, + }, + errs: []field.Error{ + { + Type: field.ErrorTypeInvalid, + Detail: `value of "dm-worker.tidb.pingcap.com/delete-slots" annotation must be a JSON list of int32`, + }, + }, + }, + } + + for _, v := range errorCases { + errs := validateDMAnnotations(v.dc.ObjectMeta.Annotations, field.NewPath("metadata", "annotations")) + if len(errs) != len(v.errs) { + t.Errorf("[%s]: expected %d failures, got %d failures: %v", v.name, len(v.errs), len(errs), errs) + continue + } + for i := range errs { + if errs[i].Type != v.errs[i].Type { + t.Errorf("[%s]: expected error type %q, got %q", v.name, v.errs[i].Type, errs[i].Type) + } + if !strings.Contains(errs[i].Detail, v.errs[i].Detail) { + t.Errorf("[%s]: expected error errs[i].Detail %q, got %q", v.name, v.errs[i].Detail, errs[i].Detail) + } + if len(v.errs[i].Field) > 0 { + if errs[i].Field != v.errs[i].Field { + t.Errorf("[%s]: expected error field %q, got %q", v.name, v.errs[i].Field, errs[i].Field) + } + } + } + } +} + func TestValidateRequestsStorage(t *testing.T) { g := NewGomegaWithT(t) tests := []struct { @@ -300,6 +447,58 @@ func TestValidateTidbMonitor(t *testing.T) { } } +func TestValidateDMCluster(t *testing.T) { + g := NewGomegaWithT(t) + tests := []struct { + name string + version string + discoveryAddr string + masterReplicas int32 + masterStorageSize string + expectedError string + }{ + { + name: "invalid version", + version: "v1.0.6", + discoveryAddr: "http://basic-discovery.demo:10261", + expectedError: "dm cluster version can't set to v1.x.y", + }, + { + name: "empty discovery address", + expectedError: "discovery.address must not be empty", + }, + { + name: "dm-master storageSize not given", + version: "v2.0.0-rc.2", + discoveryAddr: "http://basic-discovery.demo:10261", + masterReplicas: 3, + expectedError: "storageSize must not be empty", + }, + { + name: "correct configuration", + version: "nightly", + discoveryAddr: "http://basic-discovery.demo:10261", + masterReplicas: 3, + masterStorageSize: "10Gi", + expectedError: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dc := newDMCluster() + dc.Spec.Version = tt.version + dc.Spec.Discovery.Address = tt.discoveryAddr + dc.Spec.Master.Replicas = tt.masterReplicas + dc.Spec.Master.StorageSize = tt.masterStorageSize + err := ValidateDMCluster(dc) + if tt.expectedError != "" { + g.Expect(len(err)).Should(Equal(1)) + 
g.Expect(err[0].Detail).To(ContainSubstring(tt.expectedError)) + } + }) + } +} + func newTidbCluster() *v1alpha1.TidbCluster { tc := &v1alpha1.TidbCluster{ Spec: v1alpha1.TidbClusterSpec{ @@ -329,6 +528,19 @@ func newTidbMonitor() *v1alpha1.TidbMonitor { return monitor } +func newDMCluster() *v1alpha1.DMCluster { + dc := &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Discovery: v1alpha1.DMDiscoverySpec{}, + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + } + dc.Name = "test-validate-dm-cluster" + dc.Namespace = "default" + return dc +} + func TestValidateLocalDescendingPath(t *testing.T) { successCases := []string{ "data", diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index e4b709120d3..5211c4e6369 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -54,6 +54,20 @@ func TestGetOwnerRef(t *testing.T) { g.Expect(*ref.BlockOwnerDeletion).To(BeTrue()) } +func TestGetDMOwnerRef(t *testing.T) { + g := NewGomegaWithT(t) + + dc := newDMCluster() + dc.UID = types.UID("demo-uid") + ref := GetDMOwnerRef(dc) + g.Expect(ref.APIVersion).To(Equal(DMControllerKind.GroupVersion().String())) + g.Expect(ref.Kind).To(Equal(DMControllerKind.Kind)) + g.Expect(ref.Name).To(Equal(dc.GetName())) + g.Expect(ref.UID).To(Equal(types.UID("demo-uid"))) + g.Expect(*ref.Controller).To(BeTrue()) + g.Expect(*ref.BlockOwnerDeletion).To(BeTrue()) +} + func TestGetServiceType(t *testing.T) { g := NewGomegaWithT(t) @@ -184,6 +198,26 @@ func TestDiscoveryMemberName(t *testing.T) { g.Expect(DiscoveryMemberName("demo")).To(Equal("demo-discovery")) } +func TestDMMasterMemberName(t *testing.T) { + g := NewGomegaWithT(t) + g.Expect(DMMasterMemberName("demo")).To(Equal("demo-dm-master")) +} + +func TestDMMasterPeerMemberName(t *testing.T) { + g := NewGomegaWithT(t) + g.Expect(DMMasterPeerMemberName("demo")).To(Equal("demo-dm-master-peer")) +} + +func TestDMWorkerMemberName(t *testing.T) { + g := NewGomegaWithT(t) + g.Expect(DMWorkerMemberName("demo")).To(Equal("demo-dm-worker")) +} + +func TestDMWorkerPeerMemberName(t *testing.T) { + g := NewGomegaWithT(t) + g.Expect(DMWorkerPeerMemberName("demo")).To(Equal("demo-dm-worker-peer")) +} + func TestAnnProm(t *testing.T) { g := NewGomegaWithT(t) @@ -357,6 +391,24 @@ func newTidbCluster() *v1alpha1.TidbCluster { return tc } +func newDMCluster() *v1alpha1.DMCluster { + retainPVP := corev1.PersistentVolumeReclaimRetain + dc := &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "demo", + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Discovery: v1alpha1.DMDiscoverySpec{Address: "http://basic-discovery.demo:10261"}, + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + PVReclaimPolicy: &retainPVP, + }, + } + return dc +} + func newService(tc *v1alpha1.TidbCluster, _ string) *corev1.Service { svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/dmcluster/dm_cluster_condition_updater_test.go b/pkg/controller/dmcluster/dm_cluster_condition_updater_test.go new file mode 100644 index 00000000000..7a692f6e881 --- /dev/null +++ b/pkg/controller/dmcluster/dm_cluster_condition_updater_test.go @@ -0,0 +1,151 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package dmcluster + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + utildmcluster "github.com/pingcap/tidb-operator/pkg/util/dmcluster" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" +) + +func TestDMClusterConditionUpdater_Ready(t *testing.T) { + tests := []struct { + name string + dc *v1alpha1.DMCluster + wantStatus v1.ConditionStatus + wantReason string + wantMessage string + }{ + { + name: "statfulset(s) not up to date", + dc: &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "1", + UpdateRevision: "2", + }, + }, + Worker: v1alpha1.WorkerStatus{ + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "1", + UpdateRevision: "2", + }, + }, + }, + }, + wantStatus: v1.ConditionFalse, + wantReason: utildmcluster.StatfulSetNotUpToDate, + wantMessage: "Statefulset(s) are in progress", + }, + { + name: "dm-master(s) not healthy", + dc: &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 1, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "dm-master-1": { + Health: false, + }, + }, + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "2", + UpdateRevision: "2", + }, + }, + Worker: v1alpha1.WorkerStatus{ + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "2", + UpdateRevision: "2", + }, + }, + }, + }, + wantStatus: v1.ConditionFalse, + wantReason: utildmcluster.MasterUnhealthy, + wantMessage: "dm-master(s) are not healthy", + }, + { + name: "all ready", + dc: &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 1, + }, + Worker: &v1alpha1.WorkerSpec{ + Replicas: 1, + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "dm-master-0": { + Health: true, + }, + }, + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "2", + UpdateRevision: "2", + }, + }, + Worker: v1alpha1.WorkerStatus{ + Members: map[string]v1alpha1.WorkerMember{ + "dm-worker-0": { + Stage: "free", + }, + }, + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "2", + UpdateRevision: "2", + }, + }, + }, + }, + wantStatus: v1.ConditionTrue, + wantReason: utildmcluster.Ready, + wantMessage: "DM cluster is fully up and running", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + conditionUpdater := &dmClusterConditionUpdater{} + conditionUpdater.Update(tt.dc) + cond := utildmcluster.GetDMClusterCondition(tt.dc.Status, v1alpha1.DMClusterReady) + if diff := cmp.Diff(tt.wantStatus, cond.Status); diff != "" { + t.Errorf("unexpected status (-want, +got): %s", diff) + } + if diff := cmp.Diff(tt.wantReason, cond.Reason); diff != "" { + t.Errorf("unexpected reason (-want, +got): %s", diff) + } + if diff := cmp.Diff(tt.wantMessage, cond.Message); diff != "" { + t.Errorf("unexpected 
message (-want, +got): %s", diff) + } + }) + } +} diff --git a/pkg/controller/dmcluster/dm_cluster_control.go b/pkg/controller/dmcluster/dm_cluster_control.go index 70e271b3194..9c21955783e 100644 --- a/pkg/controller/dmcluster/dm_cluster_control.go +++ b/pkg/controller/dmcluster/dm_cluster_control.go @@ -199,3 +199,26 @@ func (dcc *defaultDMClusterControl) updateDMCluster(dc *v1alpha1.DMCluster) erro } return errorutils.NewAggregate(errs) } + +var _ ControlInterface = &defaultDMClusterControl{} + +type FakeDMClusterControlInterface struct { + err error +} + +func NewFakeDMClusterControlInterface() *FakeDMClusterControlInterface { + return &FakeDMClusterControlInterface{} +} + +func (ftcc *FakeDMClusterControlInterface) SetUpdateDCError(err error) { + ftcc.err = err +} + +func (ftcc *FakeDMClusterControlInterface) UpdateDMCluster(_ *v1alpha1.DMCluster) error { + if ftcc.err != nil { + return ftcc.err + } + return nil +} + +var _ ControlInterface = &FakeDMClusterControlInterface{} diff --git a/pkg/controller/dmcluster/dm_cluster_control_test.go b/pkg/controller/dmcluster/dm_cluster_control_test.go new file mode 100644 index 00000000000..5634a883f9e --- /dev/null +++ b/pkg/controller/dmcluster/dm_cluster_control_test.go @@ -0,0 +1,304 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package dmcluster + +import ( + "fmt" + "strings" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" + informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" + "github.com/pingcap/tidb-operator/pkg/controller" + mm "github.com/pingcap/tidb-operator/pkg/manager/member" + "github.com/pingcap/tidb-operator/pkg/manager/meta" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" +) + +func TestTidbClusterControlUpdateTidbCluster(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + update func(cluster *v1alpha1.DMCluster) + syncReclaimPolicyErr bool + orphanPodCleanerErr bool + syncMasterMemberManagerErr bool + syncWorkerMemberManagerErr bool + pvcCleanerErr bool + updateDCStatusErr bool + errExpectFn func(*GomegaWithT, error) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMClusterForDMClusterControl() + if test.update != nil { + test.update(dc) + } + control, reclaimPolicyManager, orphanPodCleaner, masterMemberManager, workerMemberManager, pvcCleaner, dcControl := newFakeDMClusterControl() + + if test.syncReclaimPolicyErr { + reclaimPolicyManager.SetSyncError(fmt.Errorf("reclaim policy sync error")) + } + if test.orphanPodCleanerErr { + orphanPodCleaner.SetnOrphanPodCleanerError(fmt.Errorf("clean orphan pod error")) + } + if test.syncMasterMemberManagerErr { + masterMemberManager.SetSyncError(fmt.Errorf("dm-master member manager sync error")) + } + if test.syncWorkerMemberManagerErr { + workerMemberManager.SetSyncError(fmt.Errorf("dm-worker member manager sync error")) + } + if test.pvcCleanerErr { + pvcCleaner.SetPVCCleanerError(fmt.Errorf("clean PVC error")) + } + + if test.updateDCStatusErr { + dcControl.SetUpdateDMClusterError(fmt.Errorf("update dmcluster status error"), 0) + } + + err := control.UpdateDMCluster(dc) + if test.errExpectFn != nil { + test.errExpectFn(g, err) + } + } + tests := []testcase{ + { + name: "reclaim policy sync error", + update: nil, + syncReclaimPolicyErr: true, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "reclaim policy sync error")).To(Equal(true)) + }, + }, + { + name: "clean orphan pod error", + update: nil, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: true, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "clean orphan pod error")).To(Equal(true)) + }, + }, + { + name: "dm-master member manager sync error", + update: nil, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: true, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "dm-master member manager sync error")).To(Equal(true)) + }, + }, + { + name: "dm-worker member manager sync error", + update: nil, + syncReclaimPolicyErr: false, + 
orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: true, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "dm-worker member manager sync error")).To(Equal(true)) + }, + }, + { + name: "clean PVC error", + update: nil, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: true, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "clean PVC error")).To(Equal(true)) + }, + }, + { + name: "dmcluster status is not updated", + update: nil, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + { + name: "dmcluster status update failed", + update: func(cluster *v1alpha1.DMCluster) { + cluster.Status.Master.Members = map[string]v1alpha1.MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: true}, + "dm-master-2": {Name: "dm-master-2", Health: true}, + } + cluster.Status.Master.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + cluster.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "dm-worker-0": {Name: "dm-worker-0", Stage: v1alpha1.DMWorkerStateFree}, + "dm-worker-1": {Name: "dm-worker-1", Stage: v1alpha1.DMWorkerStateFree}, + "dm-worker-2": {Name: "dm-worker-2", Stage: v1alpha1.DMWorkerStateFree}, + } + cluster.Status.Worker.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + }, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: true, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "update dmcluster status error")).To(Equal(true)) + }, + }, + { + name: "normal", + update: func(cluster *v1alpha1.DMCluster) { + cluster.Status.Master.Members = map[string]v1alpha1.MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: true}, + "dm-master-2": {Name: "dm-master-2", Health: true}, + } + cluster.Status.Master.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + cluster.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "dm-worker-0": {Name: "dm-worker-0", Stage: v1alpha1.DMWorkerStateFree}, + "dm-worker-1": {Name: "dm-worker-1", Stage: v1alpha1.DMWorkerStateFree}, + "dm-worker-2": {Name: "dm-worker-2", Stage: v1alpha1.DMWorkerStateFree}, + } + cluster.Status.Worker.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + }, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestDMClusterStatusEquality(t *testing.T) { + g := NewGomegaWithT(t) + dcStatus := v1alpha1.DMClusterStatus{} + + tcStatusCopy := dcStatus.DeepCopy() + tcStatusCopy.Master = v1alpha1.MasterStatus{} + 
g.Expect(apiequality.Semantic.DeepEqual(&dcStatus, tcStatusCopy)).To(Equal(true)) + + tcStatusCopy = dcStatus.DeepCopy() + tcStatusCopy.Master.Phase = v1alpha1.NormalPhase + g.Expect(apiequality.Semantic.DeepEqual(&dcStatus, tcStatusCopy)).To(Equal(false)) +} + +func newFakeDMClusterControl() ( + ControlInterface, + *meta.FakeReclaimPolicyManager, + *mm.FakeOrphanPodsCleaner, + *mm.FakeMasterMemberManager, + *mm.FakeWorkerMemberManager, + *mm.FakePVCCleaner, + *controller.FakeDMClusterControl) { + cli := fake.NewSimpleClientset() + dcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().DMClusters() + recorder := record.NewFakeRecorder(10) + + dcControl := controller.NewFakeDMClusterControl(dcInformer) + masterMemberManager := mm.NewFakeMasterMemberManager() + workerMemberManager := mm.NewFakeWorkerMemberManager() + reclaimPolicyManager := meta.NewFakeReclaimPolicyManager() + orphanPodCleaner := mm.NewFakeOrphanPodsCleaner() + pvcCleaner := mm.NewFakePVCCleaner() + podRestarter := mm.NewFakePodRestarter() + pvcResizer := mm.NewFakePVCResizer() + control := NewDefaultDMClusterControl( + dcControl, + masterMemberManager, + workerMemberManager, + reclaimPolicyManager, + orphanPodCleaner, + pvcCleaner, + pvcResizer, + podRestarter, + &dmClusterConditionUpdater{}, + recorder, + ) + + return control, reclaimPolicyManager, orphanPodCleaner, masterMemberManager, workerMemberManager, pvcCleaner, dcControl +} + +func newDMClusterForDMClusterControl() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dm-master", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Discovery: v1alpha1.DMDiscoverySpec{Address: "http://basic-discovery.demo:10261"}, + Master: v1alpha1.MasterSpec{ + Replicas: 3, + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + StorageSize: "10Gi", + }, + Worker: &v1alpha1.WorkerSpec{ + Replicas: 3, + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + StorageSize: "10Gi", + }, + }, + } +} diff --git a/pkg/controller/dmcluster/dm_cluster_controller.go b/pkg/controller/dmcluster/dm_cluster_controller.go index 3f0ab76daa9..5e8ea4e9e55 100644 --- a/pkg/controller/dmcluster/dm_cluster_controller.go +++ b/pkg/controller/dmcluster/dm_cluster_controller.go @@ -17,7 +17,6 @@ import ( "fmt" "time" - "github.com/Masterminds/semver" perrors "github.com/pingcap/errors" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" @@ -197,7 +196,7 @@ func NewController( setInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: dcc.addStatefulSet, UpdateFunc: func(old, cur interface{}) { - dcc.updateStatefuSet(old, cur) + dcc.updateStatefulSet(old, cur) }, DeleteFunc: dcc.deleteStatefulSet, }) @@ -268,13 +267,6 @@ func (dcc *Controller) sync(key string) error { if err != nil { return err } - clusterVersionLT2, err := clusterVersionLessThan2(dc.MasterVersion()) - if err != nil { - klog.V(4).Infof("cluster version: %s is not semantic versioning compatible", dc.MasterVersion()) - } else if clusterVersionLT2 { - klog.Errorf("dm version %s not supported, only support to deploy dm from v2.0", dc.MasterVersion()) - return nil - } return dcc.syncDMCluster(dc.DeepCopy()) } @@ -315,8 +307,8 @@ func (dcc *Controller) addStatefulSet(obj interface{}) { 
dcc.enqueueDMCluster(dc) } -// updateStatefuSet adds the dmcluster for the current and old statefulsets to the sync queue. -func (dcc *Controller) updateStatefuSet(old, cur interface{}) { +// updateStatefulSet adds the dmcluster for the current and old statefulsets to the sync queue. +func (dcc *Controller) updateStatefulSet(old, cur interface{}) { curSet := cur.(*apps.StatefulSet) oldSet := old.(*apps.StatefulSet) ns := curSet.GetNamespace() @@ -392,12 +384,3 @@ func (dcc *Controller) resolveDMClusterFromSet(namespace string, set *apps.State } return dc } - -func clusterVersionLessThan2(version string) (bool, error) { - v, err := semver.NewVersion(version) - if err != nil { - return true, err - } - - return v.Major() < 2, nil -} diff --git a/pkg/controller/dmcluster/dm_cluster_controller_test.go b/pkg/controller/dmcluster/dm_cluster_controller_test.go new file mode 100644 index 00000000000..966e79e4b3b --- /dev/null +++ b/pkg/controller/dmcluster/dm_cluster_controller_test.go @@ -0,0 +1,344 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package dmcluster + +import ( + "fmt" + "strings" + "testing" + "time" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" + informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" + "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/scheme" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + controllerfake "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestDMClusterControllerEnqueueDMCluster(t *testing.T) { + g := NewGomegaWithT(t) + dc := newDMCluster() + dcc, _, _ := newFakeDMClusterController() + + dcc.enqueueDMCluster(dc) + g.Expect(dcc.queue.Len()).To(Equal(1)) +} + +func TestDMClusterControllerEnqueueDMClusterFailed(t *testing.T) { + g := NewGomegaWithT(t) + dcc, _, _ := newFakeDMClusterController() + + dcc.enqueueDMCluster(struct{}{}) + g.Expect(dcc.queue.Len()).To(Equal(0)) +} + +func TestDMClusterControllerAddStatefulSet(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modifySet func(*v1alpha1.DMCluster) *apps.StatefulSet + addDMClusterToIndexer bool + expectedLen int + } + + testFn := func(test *testcase, t *testing.T) { + t.Log("test: ", test.name) + + dc := newDMCluster() + set := test.modifySet(dc) + + dcc, dcIndexer, _ := newFakeDMClusterController() + + if test.addDMClusterToIndexer { + err := dcIndexer.Add(dc) + g.Expect(err).NotTo(HaveOccurred()) + } + dcc.addStatefulSet(set) + g.Expect(dcc.queue.Len()).To(Equal(test.expectedLen)) + } + + tests := []testcase{ + { + name: "normal", + modifySet: func(dc *v1alpha1.DMCluster) *apps.StatefulSet { + return newStatefulSet(dc) + }, + addDMClusterToIndexer: true, + expectedLen: 1, + }, + { + 
name: "have deletionTimestamp", + modifySet: func(dc *v1alpha1.DMCluster) *apps.StatefulSet { + set := newStatefulSet(dc) + set.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(30 * time.Second)} + return set + }, + addDMClusterToIndexer: true, + expectedLen: 1, + }, + { + name: "without controllerRef", + modifySet: func(dc *v1alpha1.DMCluster) *apps.StatefulSet { + set := newStatefulSet(dc) + set.OwnerReferences = nil + return set + }, + addDMClusterToIndexer: true, + expectedLen: 0, + }, + { + name: "without dmcluster", + modifySet: func(dc *v1alpha1.DMCluster) *apps.StatefulSet { + return newStatefulSet(dc) + }, + addDMClusterToIndexer: false, + expectedLen: 0, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestDMClusterControllerUpdateStatefulSet(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + updateSet func(*apps.StatefulSet) *apps.StatefulSet + addDMClusterToIndexer bool + expectedLen int + } + + testFn := func(test *testcase, t *testing.T) { + t.Log("test: ", test.name) + + dc := newDMCluster() + set1 := newStatefulSet(dc) + set2 := test.updateSet(set1) + + dcc, dcIndexer, _ := newFakeDMClusterController() + + if test.addDMClusterToIndexer { + err := dcIndexer.Add(dc) + g.Expect(err).NotTo(HaveOccurred()) + } + dcc.updateStatefulSet(set1, set2) + g.Expect(dcc.queue.Len()).To(Equal(test.expectedLen)) + } + + tests := []testcase{ + { + name: "normal", + updateSet: func(set1 *apps.StatefulSet) *apps.StatefulSet { + set2 := *set1 + set2.ResourceVersion = "1000" + return &set2 + }, + addDMClusterToIndexer: true, + expectedLen: 1, + }, + { + name: "same resouceVersion", + updateSet: func(set1 *apps.StatefulSet) *apps.StatefulSet { + set2 := *set1 + return &set2 + }, + addDMClusterToIndexer: true, + expectedLen: 0, + }, + { + name: "without controllerRef", + updateSet: func(set1 *apps.StatefulSet) *apps.StatefulSet { + set2 := *set1 + set2.ResourceVersion = "1000" + set2.OwnerReferences = nil + return &set2 + }, + addDMClusterToIndexer: true, + expectedLen: 0, + }, + { + name: "without dmcluster", + updateSet: func(set1 *apps.StatefulSet) *apps.StatefulSet { + set2 := *set1 + set2.ResourceVersion = "1000" + return &set2 + }, + addDMClusterToIndexer: false, + expectedLen: 0, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestDMClusterControllerSync(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + addDcToIndexer bool + errWhenUpdateDMCluster bool + errExpectFn func(*GomegaWithT, error) + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMCluster() + dcc, dcIndexer, dcControl := newFakeDMClusterController() + + if test.addDcToIndexer { + err := dcIndexer.Add(dc) + g.Expect(err).NotTo(HaveOccurred()) + } + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(dc) + g.Expect(err).NotTo(HaveOccurred()) + + if test.errWhenUpdateDMCluster { + dcControl.SetUpdateDCError(fmt.Errorf("update dm cluster failed")) + } + + err = dcc.sync(key) + + if test.errExpectFn != nil { + test.errExpectFn(g, err) + } + } + + tests := []testcase{ + { + name: "normal", + addDcToIndexer: true, + errWhenUpdateDMCluster: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + { + name: "can't found dm cluster", + addDcToIndexer: false, + errWhenUpdateDMCluster: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + { + name: "update dm cluster failed", + 
addDcToIndexer: true, + errWhenUpdateDMCluster: true, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "update dm cluster failed")).To(Equal(true)) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } + +} + +func alwaysReady() bool { return true } + +func newFakeDMClusterController() (*Controller, cache.Indexer, *FakeDMClusterControlInterface) { + cli := fake.NewSimpleClientset() + kubeCli := kubefake.NewSimpleClientset() + genericCli := controllerfake.NewFakeClientWithScheme(scheme.Scheme) + informerFactory := informers.NewSharedInformerFactory(cli, 0) + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeCli, 0) + + dcInformer := informerFactory.Pingcap().V1alpha1().DMClusters() + autoFailover := true + dcControl := NewFakeDMClusterControlInterface() + + dcc := NewController( + kubeCli, + cli, + genericCli, + informerFactory, + kubeInformerFactory, + autoFailover, + 5*time.Minute, + 5*time.Minute, + ) + dcc.dcListerSynced = alwaysReady + dcc.setListerSynced = alwaysReady + + dcc.control = dcControl + return dcc, dcInformer.Informer().GetIndexer(), dcControl +} + +func newDMCluster() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dm-master", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Discovery: v1alpha1.DMDiscoverySpec{Address: "http://basic-discovery.demo:10261"}, + Master: v1alpha1.MasterSpec{ + Replicas: 3, + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + StorageSize: "10Gi", + }, + Worker: &v1alpha1.WorkerSpec{ + Replicas: 3, + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + StorageSize: "10Gi", + }, + }, + } +} + +func newStatefulSet(dc *v1alpha1.DMCluster) *apps.StatefulSet { + return &apps.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulset", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(dc, controller.DMControllerKind), + }, + ResourceVersion: "1", + }, + Spec: apps.StatefulSetSpec{ + Replicas: &dc.Spec.Master.Replicas, + }, + } +} diff --git a/pkg/controller/dmcluster_control.go b/pkg/controller/dmcluster_control.go index 1901d51d871..01d0b59dd7a 100644 --- a/pkg/controller/dmcluster_control.go +++ b/pkg/controller/dmcluster_control.go @@ -18,8 +18,10 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" + tcinformers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions/pingcap/v1alpha1" listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/klog" @@ -79,3 +81,35 @@ func (rdc *realDMClusterControl) UpdateDMCluster(dc *v1alpha1.DMCluster, newStat } return updateDC, err } + +// FakeDMClusterControl is a fake DMClusterControlInterface +type FakeDMClusterControl struct { + DcLister listers.DMClusterLister + DcIndexer cache.Indexer + updateDMClusterTracker RequestTracker +} + +// NewFakeDMClusterControl returns a FakeDMClusterControl +func
NewFakeDMClusterControl(dcInformer tcinformers.DMClusterInformer) *FakeDMClusterControl { + return &FakeDMClusterControl{ + dcInformer.Lister(), + dcInformer.Informer().GetIndexer(), + RequestTracker{}, + } +} + +// SetUpdateDMClusterError sets the error attributes of updateDMClusterTracker +func (ssc *FakeDMClusterControl) SetUpdateDMClusterError(err error, after int) { + ssc.updateDMClusterTracker.SetError(err).SetAfter(after) +} + +// UpdateDMCluster updates the DMCluster +func (ssc *FakeDMClusterControl) UpdateDMCluster(dc *v1alpha1.DMCluster, _ *v1alpha1.DMClusterStatus, _ *v1alpha1.DMClusterStatus) (*v1alpha1.DMCluster, error) { + defer ssc.updateDMClusterTracker.Inc() + if ssc.updateDMClusterTracker.ErrorReady() { + defer ssc.updateDMClusterTracker.Reset() + return dc, ssc.updateDMClusterTracker.GetError() + } + + return dc, ssc.DcIndexer.Update(dc) +} diff --git a/pkg/controller/dmcluster_control_test.go b/pkg/controller/dmcluster_control_test.go new file mode 100644 index 00000000000..6dc08f588a8 --- /dev/null +++ b/pkg/controller/dmcluster_control_test.go @@ -0,0 +1,66 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "errors" + "testing" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" + listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + core "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" +) + +func TestDMClusterControlUpdateDMCluster(t *testing.T) { + g := NewGomegaWithT(t) + recorder := record.NewFakeRecorder(10) + dc := newDMCluster() + dc.Spec.Master.Replicas = int32(5) + fakeClient := &fake.Clientset{} + control := NewRealDMClusterControl(fakeClient, nil, recorder) + fakeClient.AddReactor("update", "dmclusters", func(action core.Action) (bool, runtime.Object, error) { + update := action.(core.UpdateAction) + return true, update.GetObject(), nil + }) + updateDC, err := control.UpdateDMCluster(dc, &v1alpha1.DMClusterStatus{}, &v1alpha1.DMClusterStatus{}) + g.Expect(err).To(Succeed()) + g.Expect(updateDC.Spec.Master.Replicas).To(Equal(int32(5))) +} + +func TestDMClusterControlUpdateDMClusterConflictSuccess(t *testing.T) { + g := NewGomegaWithT(t) + recorder := record.NewFakeRecorder(10) + dc := newDMCluster() + fakeClient := &fake.Clientset{} + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + dcLister := listers.NewDMClusterLister(indexer) + control := NewRealDMClusterControl(fakeClient, dcLister, recorder) + conflict := false + fakeClient.AddReactor("update", "dmclusters", func(action core.Action) (bool, runtime.Object, error) { + update := action.(core.UpdateAction) + if !conflict { + conflict = true + return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), dc.Name, 
errors.New("conflict")) + } + return true, update.GetObject(), nil + }) + _, err := control.UpdateDMCluster(dc, &v1alpha1.DMClusterStatus{}, &v1alpha1.DMClusterStatus{}) + g.Expect(err).To(Succeed()) +} diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index b7c2f9e28ce..88289189616 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -259,7 +259,7 @@ func NewController( setInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: tcc.addStatefulSet, UpdateFunc: func(old, cur interface{}) { - tcc.updateStatefuSet(old, cur) + tcc.updateStatefulSet(old, cur) }, DeleteFunc: tcc.deleteStatefulSet, }) @@ -370,8 +370,8 @@ func (tcc *Controller) addStatefulSet(obj interface{}) { tcc.enqueueTidbCluster(tc) } -// updateStatefuSet adds the tidbcluster for the current and old statefulsets to the sync queue. -func (tcc *Controller) updateStatefuSet(old, cur interface{}) { +// updateStatefulSet adds the tidbcluster for the current and old statefulsets to the sync queue. +func (tcc *Controller) updateStatefulSet(old, cur interface{}) { curSet := cur.(*apps.StatefulSet) oldSet := old.(*apps.StatefulSet) ns := curSet.GetNamespace() diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller_test.go b/pkg/controller/tidbcluster/tidb_cluster_controller_test.go index 79e13a95aab..68e4397bbd3 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller_test.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller_test.go @@ -53,7 +53,7 @@ func TestTidbClusterControllerEnqueueTidbClusterFailed(t *testing.T) { g.Expect(tcc.queue.Len()).To(Equal(0)) } -func TestTidbClusterControllerAddStatefuSet(t *testing.T) { +func TestTidbClusterControllerAddStatefulSet(t *testing.T) { g := NewGomegaWithT(t) type testcase struct { name string @@ -82,7 +82,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { { name: "normal", modifySet: func(tc *v1alpha1.TidbCluster) *apps.StatefulSet { - return newStatefuSet(tc) + return newStatefulSet(tc) }, addTidbClusterToIndexer: true, expectedLen: 1, @@ -90,7 +90,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { { name: "have deletionTimestamp", modifySet: func(tc *v1alpha1.TidbCluster) *apps.StatefulSet { - set := newStatefuSet(tc) + set := newStatefulSet(tc) set.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(30 * time.Second)} return set }, @@ -100,7 +100,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { { name: "without controllerRef", modifySet: func(tc *v1alpha1.TidbCluster) *apps.StatefulSet { - set := newStatefuSet(tc) + set := newStatefulSet(tc) set.OwnerReferences = nil return set }, @@ -110,7 +110,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { { name: "without tidbcluster", modifySet: func(tc *v1alpha1.TidbCluster) *apps.StatefulSet { - return newStatefuSet(tc) + return newStatefulSet(tc) }, addTidbClusterToIndexer: false, expectedLen: 0, @@ -122,7 +122,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { } } -func TestTidbClusterControllerUpdateStatefuSet(t *testing.T) { +func TestTidbClusterControllerUpdateStatefulSet(t *testing.T) { g := NewGomegaWithT(t) type testcase struct { name string @@ -135,7 +135,7 @@ func TestTidbClusterControllerUpdateStatefuSet(t *testing.T) { t.Log("test: ", test.name) tc := newTidbCluster() - set1 := newStatefuSet(tc) + set1 := newStatefulSet(tc) set2 := test.updateSet(set1) tcc, tcIndexer, 
_ := newFakeTidbClusterController() @@ -144,7 +144,7 @@ func TestTidbClusterControllerUpdateStatefuSet(t *testing.T) { err := tcIndexer.Add(tc) g.Expect(err).NotTo(HaveOccurred()) } - tcc.updateStatefuSet(set1, set2) + tcc.updateStatefulSet(set1, set2) g.Expect(tcc.queue.Len()).To(Equal(test.expectedLen)) } @@ -336,14 +336,14 @@ func newTidbCluster() *v1alpha1.TidbCluster { } } -func newStatefuSet(tc *v1alpha1.TidbCluster) *apps.StatefulSet { +func newStatefulSet(tc *v1alpha1.TidbCluster) *apps.StatefulSet { return &apps.StatefulSet{ TypeMeta: metav1.TypeMeta{ Kind: "StatefulSet", APIVersion: "apps/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-statefuset", + Name: "test-statefulset", Namespace: corev1.NamespaceDefault, UID: types.UID("test"), OwnerReferences: []metav1.OwnerReference{ diff --git a/pkg/controller/tidbcluster_control_test.go b/pkg/controller/tidbcluster_control_test.go index f07a985984b..8d219fdc1b4 100644 --- a/pkg/controller/tidbcluster_control_test.go +++ b/pkg/controller/tidbcluster_control_test.go @@ -16,7 +16,6 @@ package controller import ( "errors" "testing" - "time" . "github.com/onsi/gomega" diff --git a/pkg/label/label_test.go b/pkg/label/label_test.go index f79ae5cf8e4..5204f17d221 100644 --- a/pkg/label/label_test.go +++ b/pkg/label/label_test.go @@ -28,6 +28,14 @@ func TestLabelNew(t *testing.T) { g.Expect(l[ManagedByLabelKey]).To(Equal("tidb-operator")) } +func TestLabelNewDM(t *testing.T) { + g := NewGomegaWithT(t) + + l := NewDM() + g.Expect(l[NameLabelKey]).To(Equal("dm-cluster")) + g.Expect(l[ManagedByLabelKey]).To(Equal("tidb-operator")) +} + func TestLabelInstance(t *testing.T) { g := NewGomegaWithT(t) @@ -76,6 +84,22 @@ func TestLabelTiKV(t *testing.T) { g.Expect(l.IsTiKV()).To(BeTrue()) } +func TestLabelDMMaster(t *testing.T) { + g := NewGomegaWithT(t) + + l := NewDM() + l.DMMaster() + g.Expect(l.IsDMMaster()).To(BeTrue()) +} + +func TestLabelDMWorker(t *testing.T) { + g := NewGomegaWithT(t) + + l := NewDM() + l.DMWorker() + g.Expect(l.IsDMWorker()).To(BeTrue()) +} + func TestLabelSelector(t *testing.T) { g := NewGomegaWithT(t) @@ -130,3 +154,21 @@ func TestLabelLabels(t *testing.T) { } g.Expect(ls).To(Equal(m)) } + +func TestDMLabelLabels(t *testing.T) { + g := NewGomegaWithT(t) + + l := NewDM() + l.DMMaster() + l.Instance("demo") + l.Namespace("ns-1") + ls := l.Labels() + m := map[string]string{ + NameLabelKey: "dm-cluster", + ManagedByLabelKey: "tidb-operator", + ComponentLabelKey: "dm-master", + InstanceLabelKey: "demo", + NamespaceLabelKey: "ns-1", + } + g.Expect(ls).To(Equal(m)) +} diff --git a/pkg/manager/member/dm_master_member_manager.go b/pkg/manager/member/dm_master_member_manager.go index 14d4a2eafb1..e9f222ce811 100644 --- a/pkg/manager/member/dm_master_member_manager.go +++ b/pkg/manager/member/dm_master_member_manager.go @@ -807,3 +807,22 @@ func (mmm *masterMemberManager) collectUnjoinedMembers(dc *v1alpha1.DMCluster, s } return nil } + +type FakeMasterMemberManager struct { + err error +} + +func NewFakeMasterMemberManager() *FakeMasterMemberManager { + return &FakeMasterMemberManager{} +} + +func (fpmm *FakeMasterMemberManager) SetSyncError(err error) { + fpmm.err = err +} + +func (fpmm *FakeMasterMemberManager) SyncDM(dc *v1alpha1.DMCluster) error { + if fpmm.err != nil { + return fpmm.err + } + return nil +} diff --git a/pkg/manager/member/dm_master_member_manager_test.go b/pkg/manager/member/dm_master_member_manager_test.go new file mode 100644 index 00000000000..897a0cbc54d --- /dev/null +++ 
b/pkg/manager/member/dm_master_member_manager_test.go @@ -0,0 +1,14 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member diff --git a/pkg/manager/member/dm_worker_member_manager.go b/pkg/manager/member/dm_worker_member_manager.go index 2ecb675a9f9..cc86f4fc8ed 100644 --- a/pkg/manager/member/dm_worker_member_manager.go +++ b/pkg/manager/member/dm_worker_member_manager.go @@ -584,3 +584,22 @@ func isWorkerPodDesired(dc *v1alpha1.DMCluster, podName string) bool { } return ordinals.Has(ordinal) } + +type FakeWorkerMemberManager struct { + err error +} + +func NewFakeWorkerMemberManager() *FakeWorkerMemberManager { + return &FakeWorkerMemberManager{} +} + +func (ftmm *FakeWorkerMemberManager) SetSyncError(err error) { + ftmm.err = err +} + +func (ftmm *FakeWorkerMemberManager) SyncDM(dc *v1alpha1.DMCluster) error { + if ftmm.err != nil { + return ftmm.err + } + return nil +} diff --git a/pkg/manager/meta/reclaim_policy_manager.go b/pkg/manager/meta/reclaim_policy_manager.go index d0315a99ce1..8e7e0ce830e 100644 --- a/pkg/manager/meta/reclaim_policy_manager.go +++ b/pkg/manager/meta/reclaim_policy_manager.go @@ -166,3 +166,7 @@ func (frpm *FakeReclaimPolicyManager) SetSyncError(err error) { func (frpm *FakeReclaimPolicyManager) Sync(_ *v1alpha1.TidbCluster) error { return frpm.err } + +func (frpm *FakeReclaimPolicyManager) SyncDM(_ *v1alpha1.DMCluster) error { + return frpm.err +} From 4080edb73ad516e605ae05a85e53f6bf7f3fc229 Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Thu, 24 Sep 2020 11:44:44 +0800 Subject: [PATCH 3/7] add uts for dm-master and dm-worker member manager --- pkg/controller/service_control.go | 8 +- pkg/controller/stateful_set_control.go | 8 +- pkg/manager/member/dm_master_failover.go | 17 + .../member/dm_master_member_manager.go | 31 +- .../member/dm_master_member_manager_test.go | 2047 +++++++++++++++++ pkg/manager/member/dm_master_scaler.go | 30 + pkg/manager/member/dm_worker_failover.go | 17 + .../member/dm_worker_member_manager.go | 10 +- .../member/dm_worker_member_manager_test.go | 1213 ++++++++++ pkg/manager/member/dm_worker_scaler.go | 30 + pkg/manager/member/pd_member_manager_test.go | 18 +- .../member/pump_member_manager_test.go | 4 +- .../member/tidb_member_manager_test.go | 4 +- .../member/tiflash_member_manager_test.go | 8 +- .../member/tikv_member_manager_test.go | 8 +- 15 files changed, 3396 insertions(+), 57 deletions(-) create mode 100644 pkg/manager/member/dm_worker_member_manager_test.go diff --git a/pkg/controller/service_control.go b/pkg/controller/service_control.go index d8ca9e54ebb..bd061a74145 100644 --- a/pkg/controller/service_control.go +++ b/pkg/controller/service_control.go @@ -17,8 +17,6 @@ import ( "fmt" "strings" - tcinformers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions/pingcap/v1alpha1" - v1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -138,21 +136,17 @@ type FakeServiceControl struct { 
SvcLister corelisters.ServiceLister SvcIndexer cache.Indexer EpsIndexer cache.Indexer - TcLister v1listers.TidbClusterLister - TcIndexer cache.Indexer createServiceTracker RequestTracker updateServiceTracker RequestTracker deleteStatefulSetTracker RequestTracker } // NewFakeServiceControl returns a FakeServiceControl -func NewFakeServiceControl(svcInformer coreinformers.ServiceInformer, epsInformer coreinformers.EndpointsInformer, tcInformer tcinformers.TidbClusterInformer) *FakeServiceControl { +func NewFakeServiceControl(svcInformer coreinformers.ServiceInformer, epsInformer coreinformers.EndpointsInformer) *FakeServiceControl { return &FakeServiceControl{ svcInformer.Lister(), svcInformer.Informer().GetIndexer(), epsInformer.Informer().GetIndexer(), - tcInformer.Lister(), - tcInformer.Informer().GetIndexer(), RequestTracker{}, RequestTracker{}, RequestTracker{}, diff --git a/pkg/controller/stateful_set_control.go b/pkg/controller/stateful_set_control.go index a1fbdd95a2a..157d8b0c8e5 100644 --- a/pkg/controller/stateful_set_control.go +++ b/pkg/controller/stateful_set_control.go @@ -17,8 +17,6 @@ import ( "fmt" "strings" - tcinformers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions/pingcap/v1alpha1" - v1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -144,8 +142,6 @@ var _ StatefulSetControlInterface = &realStatefulSetControl{} type FakeStatefulSetControl struct { SetLister appslisters.StatefulSetLister SetIndexer cache.Indexer - TcLister v1listers.TidbClusterLister - TcIndexer cache.Indexer createStatefulSetTracker RequestTracker updateStatefulSetTracker RequestTracker deleteStatefulSetTracker RequestTracker @@ -153,12 +149,10 @@ type FakeStatefulSetControl struct { } // NewFakeStatefulSetControl returns a FakeStatefulSetControl -func NewFakeStatefulSetControl(setInformer appsinformers.StatefulSetInformer, tcInformer tcinformers.TidbClusterInformer) *FakeStatefulSetControl { +func NewFakeStatefulSetControl(setInformer appsinformers.StatefulSetInformer) *FakeStatefulSetControl { return &FakeStatefulSetControl{ setInformer.Lister(), setInformer.Informer().GetIndexer(), - tcInformer.Lister(), - tcInformer.Informer().GetIndexer(), RequestTracker{}, RequestTracker{}, RequestTracker{}, diff --git a/pkg/manager/member/dm_master_failover.go b/pkg/manager/member/dm_master_failover.go index 4dee6bd15d7..9d2c154ea27 100644 --- a/pkg/manager/member/dm_master_failover.go +++ b/pkg/manager/member/dm_master_failover.go @@ -259,3 +259,20 @@ func setDMMemberDeleted(dc *v1alpha1.DMCluster, podName string) { dc.Status.Master.FailureMembers[podName] = failureMember klog.Infof("dm-master failover: set dm-master member: %s/%s deleted", dc.GetName(), podName) } + +type fakeMasterFailover struct{} + +// NewFakeMasterFailover returns a fake Failover +func NewFakeMasterFailover() DMFailover { + return &fakeMasterFailover{} +} + +func (fmf *fakeMasterFailover) Failover(_ *v1alpha1.DMCluster) error { + return nil +} + +func (fmf *fakeMasterFailover) Recover(_ *v1alpha1.DMCluster) { +} + +func (fmf *fakeMasterFailover) RemoveUndesiredFailures(_ *v1alpha1.DMCluster) { +} diff --git a/pkg/manager/member/dm_master_member_manager.go b/pkg/manager/member/dm_master_member_manager.go index e9f222ce811..e4dc97ba559 100644 --- a/pkg/manager/member/dm_master_member_manager.go +++ b/pkg/manager/member/dm_master_member_manager.go @@ -236,16 +236,6 @@ func (mmm 
*masterMemberManager) syncMasterStatefulSetForDMCluster(dc *v1alpha1.D return controller.RequeueErrorf("DMCluster: [%s/%s], waiting for dm-master cluster running", ns, dcName) } - if !dc.Status.Master.Synced { - force := NeedForceUpgrade(dc.Annotations) - if force { - dc.Status.Master.Phase = v1alpha1.UpgradePhase - setUpgradePartition(newMasterSet, 0) - errSTS := updateStatefulSet(mmm.setControl, dc, newMasterSet, oldMasterSet) - return controller.RequeueErrorf("dmcluster: [%s/%s]'s dm-master needs force upgrade, %v", ns, dcName, errSTS) - } - } - // Scaling takes precedence over upgrading because: // - if a dm-master fails in the upgrading, users may want to delete it or add // new replicas @@ -268,6 +258,16 @@ func (mmm *masterMemberManager) syncMasterStatefulSetForDMCluster(dc *v1alpha1.D } } + if !dc.Status.Master.Synced { + force := NeedForceUpgrade(dc.Annotations) + if force { + dc.Status.Master.Phase = v1alpha1.UpgradePhase + setUpgradePartition(newMasterSet, 0) + errSTS := updateStatefulSet(mmm.setControl, dc, newMasterSet, oldMasterSet) + return controller.RequeueErrorf("dmcluster: [%s/%s]'s dm-master needs force upgrade, %v", ns, dcName, errSTS) + } + } + if !templateEqual(newMasterSet, oldMasterSet) || dc.Status.Master.Phase == v1alpha1.UpgradePhase { if err := mmm.masterUpgrader.Upgrade(dc, oldMasterSet, newMasterSet); err != nil { return err @@ -461,6 +461,9 @@ func (mmm *masterMemberManager) getNewMasterServiceForDMCluster(dc *v1alpha1.DMC if svcSpec.ClusterIP != nil { masterSvc.Spec.ClusterIP = *svcSpec.ClusterIP } + if svcSpec.PortName != nil { + masterSvc.Spec.Ports[0].Name = *svcSpec.PortName + } } return masterSvc } @@ -539,7 +542,10 @@ func getNewMasterSetForDMCluster(dc *v1alpha1.DMCluster, cm *corev1.ConfigMap) ( dcName := dc.Name baseMasterSpec := dc.BaseMasterSpec() instanceName := dc.GetInstanceName() - masterConfigMap := cm.Name + masterConfigMap := "" + if cm != nil { + masterConfigMap = cm.Name + } annMount, annVolume := annotationsMountVolume() volMounts := []corev1.VolumeMount{ @@ -716,10 +722,9 @@ func getNewMasterSetForDMCluster(dc *v1alpha1.DMCluster, cm *corev1.ConfigMap) ( } func getMasterConfigMap(dc *v1alpha1.DMCluster) (*corev1.ConfigMap, error) { - // For backward compatibility, only sync dm configmap when .master.config is non-nil config := dc.Spec.Master.Config if config == nil { - return nil, nil + config = &v1alpha1.MasterConfig{} } // override CA if tls enabled diff --git a/pkg/manager/member/dm_master_member_manager_test.go b/pkg/manager/member/dm_master_member_manager_test.go index 897a0cbc54d..a5d056aebfe 100644 --- a/pkg/manager/member/dm_master_member_manager_test.go +++ b/pkg/manager/member/dm_master_member_manager_test.go @@ -12,3 +12,2050 @@ // limitations under the License. package member + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + . 
"github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/dmapi" + "github.com/pingcap/tidb-operator/pkg/label" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "k8s.io/utils/pointer" +) + +func TestMasterMemberManagerSyncCreate(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + prepare func(cluster *v1alpha1.DMCluster) + errWhenCreateStatefulSet bool + errWhenCreateMasterService bool + errWhenCreateMasterPeerService bool + errExpectFn func(*GomegaWithT, error) + masterSvcCreated bool + masterPeerSvcCreated bool + setCreated bool + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + oldSpec := dc.Spec + if test.prepare != nil { + test.prepare(dc) + } + + mmm, fakeSetControl, fakeSvcControl, _, _, _, _ := newFakeMasterMemberManager() + + if test.errWhenCreateStatefulSet { + fakeSetControl.SetCreateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errWhenCreateMasterService { + fakeSvcControl.SetCreateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errWhenCreateMasterPeerService { + fakeSvcControl.SetCreateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 1) + } + + err := mmm.SyncDM(dc) + test.errExpectFn(g, err) + g.Expect(dc.Spec).To(Equal(oldSpec)) + + svc1, err := mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + eps1, eperr := mmm.epsLister.Endpoints(ns).Get(controller.DMMasterMemberName(dcName)) + if test.masterSvcCreated { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(svc1).NotTo(Equal(nil)) + g.Expect(eperr).NotTo(HaveOccurred()) + g.Expect(eps1).NotTo(Equal(nil)) + } else { + expectErrIsNotFound(g, err) + expectErrIsNotFound(g, eperr) + } + + svc2, err := mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + eps2, eperr := mmm.epsLister.Endpoints(ns).Get(controller.DMMasterPeerMemberName(dcName)) + if test.masterPeerSvcCreated { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(svc2).NotTo(Equal(nil)) + g.Expect(eperr).NotTo(HaveOccurred()) + g.Expect(eps2).NotTo(Equal(nil)) + } else { + expectErrIsNotFound(g, err) + expectErrIsNotFound(g, eperr) + } + + dc1, err := mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + if test.setCreated { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(dc1).NotTo(Equal(nil)) + } else { + expectErrIsNotFound(g, err) + } + } + + tests := []testcase{ + { + name: "normal", + prepare: nil, + errWhenCreateStatefulSet: false, + errWhenCreateMasterService: false, + errWhenCreateMasterPeerService: false, + errExpectFn: errExpectRequeue, + masterSvcCreated: true, + masterPeerSvcCreated: true, + setCreated: true, + }, + { + name: "error when create statefulset", + prepare: nil, + errWhenCreateStatefulSet: true, + errWhenCreateMasterService: false, + errWhenCreateMasterPeerService: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + 
g.Expect(strings.Contains(err.Error(), "API server failed")).To(BeTrue()) + }, + masterSvcCreated: true, + masterPeerSvcCreated: true, + setCreated: false, + }, + { + name: "error when create dm-master service", + prepare: nil, + errWhenCreateStatefulSet: false, + errWhenCreateMasterService: true, + errWhenCreateMasterPeerService: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "API server failed")).To(BeTrue()) + }, + masterSvcCreated: false, + masterPeerSvcCreated: false, + setCreated: false, + }, + { + name: "error when create dm-master peer service", + prepare: nil, + errWhenCreateStatefulSet: false, + errWhenCreateMasterService: false, + errWhenCreateMasterPeerService: true, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "API server failed")).To(BeTrue()) + }, + masterSvcCreated: true, + masterPeerSvcCreated: false, + setCreated: false, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestMasterMemberManagerSyncUpdate(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + errWhenUpdateStatefulSet bool + errWhenUpdateMasterService bool + errWhenUpdateMasterPeerService bool + errWhenGetLeader bool + errWhenGetMasterInfos bool + statusChange func(*apps.StatefulSet) + err bool + expectMasterServiceFn func(*GomegaWithT, *corev1.Service, error) + expectMasterPeerServiceFn func(*GomegaWithT, *corev1.Service, error) + expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + + mmm, fakeSetControl, fakeSvcControl, fakeMasterControl, _, _, _ := newFakeMasterMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + if test.errWhenGetMasterInfos { + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, fmt.Errorf("failed to get master infos of dm-master cluster") + }) + } else { + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.masterInfos, nil + }) + } + if test.errWhenGetLeader { + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, fmt.Errorf("failed to get leader info of dm-master cluster") + }) + } else { + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return test.leaderInfo, nil + }) + } + + if test.statusChange == nil { + fakeSetControl.SetStatusChange(func(set *apps.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-master-1" + set.Status.UpdateRevision = "dm-master-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }) + } else { + fakeSetControl.SetStatusChange(test.statusChange) + } + + err := mmm.SyncDM(dc) + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.epsLister.Endpoints(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + _, err = 
mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.epsLister.Endpoints(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + _, err = mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + dc1 := dc.DeepCopy() + test.modify(dc1) + + if test.errWhenUpdateMasterService { + fakeSvcControl.SetUpdateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errWhenUpdateStatefulSet { + fakeSetControl.SetUpdateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + err = mmm.SyncDM(dc1) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + + if test.expectMasterServiceFn != nil { + svc, err := mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + test.expectMasterServiceFn(g, svc, err) + } + if test.expectMasterPeerServiceFn != nil { + svc, err := mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + test.expectMasterPeerServiceFn(g, svc, err) + } + if test.expectStatefulSetFn != nil { + set, err := mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + test.expectStatefulSetFn(g, set, err) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc1) + } + } + + tests := []testcase{ + { + name: "normal", + modify: func(dc *v1alpha1.DMCluster) { + dc.Spec.Master.Replicas = 5 + masterNodePort := 30160 + dc.Spec.Master.Service = &v1alpha1.MasterServiceSpec{MasterNodePort: &masterNodePort} + dc.Spec.Master.Service.Type = corev1.ServiceTypeNodePort + }, + leaderInfo: dmapi.MembersLeader{ + Name: "master1", + Addr: "http://master1:2379", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:2379"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:2379"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:2379"}, Alive: false}, + }, + errWhenUpdateStatefulSet: false, + errWhenUpdateMasterService: false, + errWhenGetLeader: false, + errWhenGetMasterInfos: false, + err: false, + expectMasterServiceFn: func(g *GomegaWithT, svc *corev1.Service, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(svc.Spec.Type).To(Equal(corev1.ServiceTypeNodePort)) + }, + expectMasterPeerServiceFn: func(g *GomegaWithT, svc *corev1.Service, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.ScalePhase)) + g.Expect(dc.Status.Master.StatefulSet.ObservedGeneration).To(Equal(int64(1))) + g.Expect(len(dc.Status.Master.Members)).To(Equal(3)) + g.Expect(dc.Status.Master.Members["master1"].Health).To(Equal(true)) + g.Expect(dc.Status.Master.Members["master2"].Health).To(Equal(true)) + g.Expect(dc.Status.Master.Members["master3"].Health).To(Equal(false)) + }, + }, + { + name: "error when update dm-master service", + modify: func(dc *v1alpha1.DMCluster) { + masterNodePort := 30160 + dc.Spec.Master.Service = &v1alpha1.MasterServiceSpec{MasterNodePort: &masterNodePort} + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:2379"}, Alive: 
true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:2379"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:2379"}, Alive: false}, + }, + errWhenUpdateStatefulSet: false, + errWhenUpdateMasterService: true, + errWhenGetLeader: false, + errWhenGetMasterInfos: false, + err: true, + expectMasterServiceFn: nil, + expectMasterPeerServiceFn: nil, + expectStatefulSetFn: nil, + }, + { + name: "error when update statefulset", + modify: func(dc *v1alpha1.DMCluster) { + dc.Spec.Master.Replicas = 5 + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:2379"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:2379"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:2379"}, Alive: false}, + }, + errWhenUpdateStatefulSet: true, + errWhenUpdateMasterService: false, + errWhenGetLeader: false, + errWhenGetMasterInfos: false, + err: true, + expectMasterServiceFn: nil, + expectMasterPeerServiceFn: nil, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + { + name: "error when get dm-master leader", + modify: func(dc *v1alpha1.DMCluster) { + dc.Spec.Master.Replicas = 5 + }, + errWhenUpdateStatefulSet: false, + errWhenUpdateMasterService: false, + errWhenGetLeader: true, + errWhenGetMasterInfos: false, + err: false, + expectMasterServiceFn: nil, + expectMasterPeerServiceFn: nil, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Synced).To(BeFalse()) + g.Expect(dc.Status.Master.Members).To(BeNil()) + }, + }, + { + name: "error when sync dm-master infos", + modify: func(dc *v1alpha1.DMCluster) { + dc.Spec.Master.Replicas = 5 + }, + errWhenUpdateStatefulSet: false, + errWhenUpdateMasterService: false, + errWhenGetLeader: false, + errWhenGetMasterInfos: true, + err: false, + expectMasterServiceFn: nil, + expectMasterPeerServiceFn: nil, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Synced).To(BeFalse()) + g.Expect(dc.Status.Master.Members).To(BeNil()) + }, + }, + } + + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func TestMasterMemberManagerMasterStatefulSetIsUpgrading(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + setUpdate func(*apps.StatefulSet) + hasPod bool + updatePod func(*corev1.Pod) + errExpectFn func(*GomegaWithT, error) + expectUpgrading bool + } + testFn := func(test *testcase, t *testing.T) { + mmm, _, _, _, podIndexer, _, _ := newFakeMasterMemberManager() + dc := newDMClusterForMaster() + dc.Status.Master.StatefulSet = &apps.StatefulSetStatus{ + UpdateRevision: "v3", + } + + set := &apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: metav1.NamespaceDefault, + }, + } + if test.setUpdate != nil { + test.setUpdate(set) + } + + if test.hasPod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 0), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: 
label.NewDM().Instance(dc.GetInstanceName()).DMMaster().Labels(), + }, + } + if test.updatePod != nil { + test.updatePod(pod) + } + podIndexer.Add(pod) + } + b, err := mmm.masterStatefulSetIsUpgrading(set, dc) + if test.errExpectFn != nil { + test.errExpectFn(g, err) + } + if test.expectUpgrading { + g.Expect(b).To(BeTrue()) + } else { + g.Expect(b).NotTo(BeTrue()) + } + } + tests := []testcase{ + { + name: "stateful set is upgrading", + setUpdate: func(set *apps.StatefulSet) { + set.Status.CurrentRevision = "v1" + set.Status.UpdateRevision = "v2" + set.Status.ObservedGeneration = 1000 + }, + hasPod: false, + updatePod: nil, + errExpectFn: nil, + expectUpgrading: true, + }, + { + name: "pod don't have revision hash", + setUpdate: nil, + hasPod: true, + updatePod: nil, + errExpectFn: nil, + expectUpgrading: false, + }, + { + name: "pod have revision hash, not equal statefulset's", + setUpdate: nil, + hasPod: true, + updatePod: func(pod *corev1.Pod) { + pod.Labels[apps.ControllerRevisionHashLabelKey] = "v2" + }, + errExpectFn: nil, + expectUpgrading: true, + }, + { + name: "pod have revision hash, equal statefulset's", + setUpdate: nil, + hasPod: true, + updatePod: func(pod *corev1.Pod) { + pod.Labels[apps.ControllerRevisionHashLabelKey] = "v3" + }, + errExpectFn: nil, + expectUpgrading: false, + }, + } + + for i := range tests { + t.Logf(tests[i].name) + testFn(&tests[i], t) + } +} + +func TestMasterMemberManagerUpgrade(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + err bool + statusChange func(*apps.StatefulSet) + expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + + mmm, fakeSetControl, _, fakeMasterControl, _, _, _ := newFakeMasterMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.masterInfos, nil + }) + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return test.leaderInfo, nil + }) + + fakeSetControl.SetStatusChange(test.statusChange) + + err := mmm.SyncDM(dc) + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + dc1 := dc.DeepCopy() + test.modify(dc1) + + err = mmm.SyncDM(dc1) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + + if test.expectStatefulSetFn != nil { + set, err := mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + test.expectStatefulSetFn(g, set, err) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc1) + } + } + tests := []testcase{ + { + name: "upgrade successful", + modify: func(cluster *v1alpha1.DMCluster) { + cluster.Spec.Master.BaseImage = "dm-test-image-2" + }, + leaderInfo: dmapi.MembersLeader{ + Name: "master1", + Addr: "http://master1:8261", + 
}, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:8261"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:8261"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:8261"}, Alive: false}, + }, + err: false, + statusChange: func(set *apps.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-master-1" + set.Status.UpdateRevision = "dm-master-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(set.Spec.Template.Spec.Containers[0].Image).To(Equal("dm-test-image-2:v2.0.0-rc.2")) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(len(dc.Status.Master.Members)).To(Equal(3)) + g.Expect(dc.Status.Master.Members["master1"].Health).To(Equal(true)) + g.Expect(dc.Status.Master.Members["master2"].Health).To(Equal(true)) + g.Expect(dc.Status.Master.Members["master3"].Health).To(Equal(false)) + }, + }, + } + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func TestMasterMemberManagerSyncMasterSts(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + err bool + statusChange func(*apps.StatefulSet) + expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + + mmm, fakeSetControl, _, fakeMasterControl, _, _, _ := newFakeMasterMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.masterInfos, nil + }) + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return test.leaderInfo, nil + }) + + fakeSetControl.SetStatusChange(test.statusChange) + + err := mmm.SyncDM(dc) + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + test.modify(dc) + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, fmt.Errorf("cannot get leader") + }) + err = mmm.syncMasterStatefulSetForDMCluster(dc) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + + if test.expectStatefulSetFn != nil { + set, err := mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + test.expectStatefulSetFn(g, set, err) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc) + } + } + tests := []testcase{ + { + name: "force upgrade", + modify: func(cluster *v1alpha1.DMCluster) { + 
cluster.Spec.Master.BaseImage = "dm-test-image-2" + cluster.Spec.Master.Replicas = 1 + cluster.ObjectMeta.Annotations = make(map[string]string) + cluster.ObjectMeta.Annotations["tidb.pingcap.com/force-upgrade"] = "true" + }, + leaderInfo: dmapi.MembersLeader{ + Name: "master1", + Addr: "http://master1:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:8261"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:8261"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:8261"}, Alive: false}, + }, + err: true, + statusChange: func(set *apps.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-master-1" + set.Status.UpdateRevision = "dm-master-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(set.Spec.Template.Spec.Containers[0].Image).To(Equal("dm-test-image-2:v2.0.0-rc.2")) + // scale in one pd from 3 -> 2 + g.Expect(*set.Spec.Replicas).To(Equal(int32(2))) + g.Expect(*set.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(int32(0))) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + }, + }, + { + name: "non force upgrade", + modify: func(cluster *v1alpha1.DMCluster) { + cluster.Spec.Master.BaseImage = "dm-test-image-2" + cluster.Spec.Master.Replicas = 1 + }, + leaderInfo: dmapi.MembersLeader{ + Name: "master1", + Addr: "http://master1:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:8261"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:8261"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:8261"}, Alive: false}, + }, + err: true, + statusChange: func(set *apps.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-master-1" + set.Status.UpdateRevision = "dm-master-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(set.Spec.Template.Spec.Containers[0].Image).To(Equal("dm-test-image:v2.0.0-rc.2")) + g.Expect(*set.Spec.Replicas).To(Equal(int32(3))) + g.Expect(*set.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(int32(3))) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.ScalePhase)) + }, + }, + } + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func newFakeMasterMemberManager() (*masterMemberManager, *controller.FakeStatefulSetControl, *controller.FakeServiceControl, *dmapi.FakeMasterControl, cache.Indexer, cache.Indexer, *controller.FakePodControl) { + kubeCli := kubefake.NewSimpleClientset() + setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() + svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() + podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() + epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 
0).Core().V1().Endpoints() + pvcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().PersistentVolumeClaims() + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) + podControl := controller.NewFakePodControl(podInformer) + masterControl := dmapi.NewFakeMasterControl(kubeCli) + masterScaler := NewFakeMasterScaler() + autoFailover := true + masterFailover := NewFakeMasterFailover() + masterUpgrader := NewFakeMasterUpgrader() + genericControll := controller.NewFakeGenericControl() + + return &masterMemberManager{ + masterControl, + setControl, + svcControl, + controller.NewTypedControl(genericControll), + setInformer.Lister(), + svcInformer.Lister(), + podInformer.Lister(), + epsInformer.Lister(), + pvcInformer.Lister(), + masterScaler, + masterUpgrader, + autoFailover, + masterFailover, + }, setControl, svcControl, masterControl, podInformer.Informer().GetIndexer(), pvcInformer.Informer().GetIndexer(), podControl +} + +func newDMClusterForMaster() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Discovery: v1alpha1.DMDiscoverySpec{Address: "http://basic-discovery.demo:10261"}, + Master: v1alpha1.MasterSpec{ + BaseImage: "dm-test-image", + StorageSize: "100Gi", + Replicas: 3, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "dm-test-image", + StorageSize: "100Gi", + Replicas: 3, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + }, + } +} + +func TestGetNewMasterHeadlessServiceForDMCluster(t *testing.T) { + tests := []struct { + name string + dc v1alpha1.DMCluster + expected corev1.Service + }{ + { + name: "basic", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master-peer", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "peer", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: "dm-master-peer", + Port: 8291, + TargetPort: intstr.FromInt(8291), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + PublishNotReadyAddresses: true, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + svc := getNewMasterHeadlessServiceForDMCluster(&tt.dc) + if diff := cmp.Diff(tt.expected, *svc); diff != "" { + t.Errorf("unexpected Service (-want, +got): %s", diff) + } + }) + } +} + +func TestGetNewMasterSetForDMCluster(t *testing.T) { + enable := true + tests := []struct { + 
name string + dc v1alpha1.DMCluster + wantErr bool + testSts func(sts *apps.StatefulSet) + }{ + { + name: "dm-master network is not host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testHostNetwork(t, false, ""), + }, + { + name: "dm-master network is host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testHostNetwork(t, true, v1.DNSClusterFirstWithHostNet), + }, + { + name: "dm-master network is not host when dm-worker is host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, + }, + }, + Master: v1alpha1.MasterSpec{}, + }, + }, + testSts: testHostNetwork(t, false, ""), + }, + { + name: "dm-master should respect resources config", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + }, + StorageSize: "100Gi", + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: func(sts *apps.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(sts.Spec.VolumeClaimTemplates[0].Spec.Resources).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + })) + nameToContainer := MapContainers(&sts.Spec.Template.Spec) + masterContainer := nameToContainer[v1alpha1.DMMasterMemberType.String()] + g.Expect(masterContainer.Resources).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + })) + }, + }, + { + name: "set custom env", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + Env: []corev1.EnvVar{ + { + Name: "SOURCE1", + Value: "mysql_replica1", + }, + { + Name: "TZ", + Value: "ignored", + }, + }, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testContainerEnv(t, []corev1.EnvVar{ + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "PEER_SERVICE_NAME", + Value: "dc-dm-master-peer", + }, + { + 
Name: "SERVICE_NAME", + Value: "dc-dm-master", + }, + { + Name: "SET_NAME", + Value: "dc-dm-master", + }, + { + Name: "TZ", + Value: "UTC", + }, + { + Name: "SOURCE1", + Value: "mysql_replica1", + }, + }, + v1alpha1.DMMasterMemberType, + ), + }, + { + name: "dm version nightly, dm cluster tls is enabled", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-nightly", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + }, + Worker: &v1alpha1.WorkerSpec{}, + Version: "nightly", + TLSCluster: &v1alpha1.TLSCluster{Enabled: true}, + }, + }, + testSts: func(sts *apps.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(hasClusterTLSVol(sts, "dm-master-tls")).To(BeTrue()) + g.Expect(hasClusterVolMount(sts, v1alpha1.DMMasterMemberType)).To(BeTrue()) + }, + }, + { + name: "dmcluster with failureMember nonDeleted", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Replicas: 3, + }, + Worker: &v1alpha1.WorkerSpec{}, + Version: "nightly", + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "test": { + MemberDeleted: false, + }, + }, + }, + }, + }, + testSts: func(sts *apps.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(*sts.Spec.Replicas).To(Equal(int32(3))) + }, + }, + { + name: "dmcluster with failureMember Deleted", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Replicas: 3, + }, + Worker: &v1alpha1.WorkerSpec{}, + Version: "nightly", + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "test": { + MemberDeleted: true, + }, + }, + }, + }, + }, + testSts: func(sts *apps.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(*sts.Spec.Replicas).To(Equal(int32(4))) + }, + }, + { + name: "dm-master additional containers", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + AdditionalContainers: []corev1.Container{customSideCarContainers[0]}, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testAdditionalContainers(t, []corev1.Container{customSideCarContainers[0]}), + }, + { + name: "dm-master additional volumes", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + AdditionalVolumes: []corev1.Volume{{Name: "test", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testAdditionalVolumes(t, []corev1.Volume{{Name: "test", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}), + }, + // TODO add more tests + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sts, err := getNewMasterSetForDMCluster(&tt.dc, nil) + if (err != nil) != tt.wantErr { + t.Fatalf("error %v, wantErr %v", err, tt.wantErr) + } + tt.testSts(sts) + }) + } +} + +func TestGetMasterConfigMap(t *testing.T) { + g := NewGomegaWithT(t) + testCases := []struct { + name string + 
dc v1alpha1.DMCluster + expected *corev1.ConfigMap + }{ + { + name: "dm-master config is nil", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "startup-script": "", + "config-file": "", + }, + }, + }, + { + name: "basic", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Config: &v1alpha1.MasterConfig{ + LogLevel: pointer.StringPtr("debug"), + RPCTimeoutStr: pointer.StringPtr("40s"), + RPCRateLimit: pointer.Float64Ptr(15), + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "startup-script": "", + "config-file": `log-level = "debug" +rpc-timeout = "40s" +rpc-rate-limit = 15.0 +`, + }, + }, + }, + { + name: "dm version v2.0.0-rc.2, dm cluster tls is enabled", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-v2", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + }, + Worker: &v1alpha1.WorkerSpec{}, + TLSCluster: &v1alpha1.TLSCluster{Enabled: true}, + Version: "v2.0.0-rc.2", + }, + }, + expected: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-v2-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "tls-v2", + "app.kubernetes.io/component": "dm-master", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "tls-v2", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "startup-script": "", + "config-file": `ssl-ca = "/var/lib/dm-master-tls/ca.crt" +ssl-cert = "/var/lib/dm-master-tls/tls.crt" +ssl-key = "/var/lib/dm-master-tls/tls.key" +`, + }, + }, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + cm, err := getMasterConfigMap(&tt.dc) + g.Expect(err).To(Succeed()) + // startup-script is better to be tested in e2e + tt.expected.Data["startup-script"] = 
cm.Data["startup-script"] + g.Expect(AddConfigMapDigestSuffix(tt.expected)).To(Succeed()) + if diff := cmp.Diff(*tt.expected, *cm); diff != "" { + t.Errorf("unexpected plugin configuration (-want, +got): %s", diff) + } + }) + } +} + +func TestGetNewMasterServiceForDMCluster(t *testing.T) { + tests := []struct { + name string + dc v1alpha1.DMCluster + expected corev1.Service + }{ + { + name: "basic", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + { + Name: "dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + { + name: "basic and specify ClusterIP type,clusterIP", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Service: &v1alpha1.MasterServiceSpec{ServiceSpec: v1alpha1.ServiceSpec{ClusterIP: pointer.StringPtr("172.20.10.1")}}, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.20.10.1", + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + { + Name: "dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + { + name: "basic and specify LoadBalancerIP type, LoadBalancerType", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Service: &v1alpha1.MasterServiceSpec{ + ServiceSpec: v1alpha1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerIP: 
pointer.StringPtr("172.20.10.1"), + }}, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + LoadBalancerIP: "172.20.10.1", + Type: corev1.ServiceTypeLoadBalancer, + Ports: []corev1.ServicePort{ + { + Name: "dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + { + name: "basic and specify dm-master service NodePort", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Service: &v1alpha1.MasterServiceSpec{ + ServiceSpec: v1alpha1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + ClusterIP: pointer.StringPtr("172.20.10.1"), + }, + MasterNodePort: intPtr(30020), + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.20.10.1", + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + NodePort: 30020, + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + { + name: "basic and specify dm-master service portname", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Service: &v1alpha1.MasterServiceSpec{ + ServiceSpec: v1alpha1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: pointer.StringPtr("172.20.10.1"), + PortName: pointer.StringPtr("http-dm-master"), + }, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + 
"app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.20.10.1", + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + { + Name: "http-dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mmm, _, _, _, _, _, _ := newFakeMasterMemberManager() + svc := mmm.getNewMasterServiceForDMCluster(&tt.dc) + if diff := cmp.Diff(tt.expected, *svc); diff != "" { + t.Errorf("unexpected Service (-want, +got): %s", diff) + } + }) + } +} + +func TestMasterMemberManagerSyncMasterStsWhenMasterNotJoinCluster(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster, podIndexer cache.Indexer, pvcIndexer cache.Indexer) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + dcStatusChange func(cluster *v1alpha1.DMCluster) + err bool + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + + mmm, _, _, fakeMasterControl, podIndexer, pvcIndexer, _ := newFakeMasterMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.masterInfos, nil + }) + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return test.leaderInfo, nil + }) + + err := mmm.SyncDM(dc) + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + if test.dcStatusChange != nil { + test.dcStatusChange(dc) + } + test.modify(dc, podIndexer, pvcIndexer) + err = mmm.syncMasterStatefulSetForDMCluster(dc) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc) + } + } + tests := []testcase{ + { + name: "add dm-master unjoin cluster member info", + modify: func(cluster *v1alpha1.DMCluster, podIndexer cache.Indexer, pvcIndexer cache.Indexer) { + for ordinal := 0; ordinal < 3; ordinal++ { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(v1alpha1.DMMasterMemberType, cluster.GetName(), int32(ordinal)), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(cluster.GetInstanceName()).DMMaster().Labels(), + }, + } + podIndexer.Add(pod) + } + for ordinal := 0; ordinal < 3; ordinal++ { + pvc := 
&corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPVCName(v1alpha1.DMMasterMemberType, controller.DMMasterMemberName(cluster.GetName()), int32(ordinal)), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(cluster.GetInstanceName()).DMMaster().Labels(), + }, + } + pvcIndexer.Add(pvc) + } + + }, + leaderInfo: dmapi.MembersLeader{ + Name: "test-dm-master-0", + Addr: "http://test-dm-master-0:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "test-dm-master-0", MemberID: "1", ClientURLs: []string{"http://test-dm-master-0:8261"}, Alive: false}, + {Name: "test-dm-master-1", MemberID: "2", ClientURLs: []string{"http://test-dm-master-1:8261"}, Alive: false}, + }, + err: false, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.UnjoinedMembers["test-dm-master-2"]).NotTo(BeNil()) + }, + }, + { + name: "clear unjoin cluster member info when the member join the cluster", + dcStatusChange: func(cluster *v1alpha1.DMCluster) { + cluster.Status.Master.UnjoinedMembers = map[string]v1alpha1.UnjoinedMember{ + "test-dm-master-0": { + PodName: "test-dm-master-0", + CreatedAt: metav1.Now(), + }, + } + }, + modify: func(cluster *v1alpha1.DMCluster, podIndexer cache.Indexer, pvcIndexer cache.Indexer) { + for ordinal := 0; ordinal < 3; ordinal++ { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(v1alpha1.DMMasterMemberType, cluster.GetName(), int32(ordinal)), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(cluster.GetInstanceName()).DMMaster().Labels(), + }, + } + podIndexer.Add(pod) + } + for ordinal := 0; ordinal < 3; ordinal++ { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPVCName(v1alpha1.DMMasterMemberType, controller.DMMasterMemberName(cluster.GetName()), int32(ordinal)), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(cluster.GetInstanceName()).DMMaster().Labels(), + }, + } + pvcIndexer.Add(pvc) + } + }, + leaderInfo: dmapi.MembersLeader{ + Name: "test-dm-master-0", + Addr: "http://test-dm-master-0:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "test-dm-master-0", MemberID: "1", ClientURLs: []string{"http://test-dm-master-0:8261"}, Alive: false}, + {Name: "test-dm-master-1", MemberID: "2", ClientURLs: []string{"http://test-dm-master-1:8261"}, Alive: false}, + {Name: "test-dm-master-2", MemberID: "3", ClientURLs: []string{"http://test-dm-master-2:8261"}, Alive: false}, + }, + err: false, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.UnjoinedMembers).To(BeEmpty()) + }, + }, + } + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func TestMasterShouldRecover(t *testing.T) { + pods := []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "failover-dm-master-0", + Namespace: v1.NamespaceDefault, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "failover-dm-master-1", + Namespace: v1.NamespaceDefault, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + } + podsWithFailover := append(pods, &v1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "failover-dm-master-2", + Namespace: v1.NamespaceDefault, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + }, + }, + }, + }) + tests := []struct { + name string + dc *v1alpha1.DMCluster + pods []*v1.Pod + want bool + }{ + { + name: "should not recover if no failure members", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover", + Namespace: v1.NamespaceDefault, + }, + Status: v1alpha1.DMClusterStatus{}, + }, + pods: pods, + want: false, + }, + { + name: "should not recover if a member is not healthy", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover", + Namespace: v1.NamespaceDefault, + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 2, + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "failover-dm-master-0": { + Name: "failover-dm-master-0", + Health: false, + }, + "failover-dm-master-1": { + Name: "failover-dm-master-1", + Health: true, + }, + }, + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "failover-dm-master-0": { + PodName: "failover-dm-master-0", + }, + }, + }, + }, + }, + pods: pods, + want: false, + }, + { + name: "should recover if all members are ready and healthy", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover", + Namespace: v1.NamespaceDefault, + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 2, + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "failover-dm-master-0": { + Name: "failover-dm-master-0", + Health: true, + }, + "failover-dm-master-1": { + Name: "failover-dm-master-1", + Health: true, + }, + }, + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "failover-dm-master-0": { + PodName: "failover-dm-master-0", + }, + }, + }, + }, + }, + pods: pods, + want: true, + }, + { + name: "should recover if all members are ready and healthy (ignore auto-created failover pods)", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover", + Namespace: v1.NamespaceDefault, + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 2, + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "failover-dm-master-0": { + Name: "failover-dm-master-0", + Health: true, + }, + "failover-dm-master-1": { + Name: "failover-dm-master-1", + Health: true, + }, + "failover-dm-master-2": { + Name: "failover-dm-master-1", + Health: false, + }, + }, + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "failover-dm-master-0": { + PodName: "failover-dm-master-0", + }, + }, + }, + }, + }, + pods: podsWithFailover, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + client := kubefake.NewSimpleClientset() + for _, pod := range tt.pods { + client.CoreV1().Pods(pod.Namespace).Create(pod) + } + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(client, 0) + podLister := kubeInformerFactory.Core().V1().Pods().Lister() + kubeInformerFactory.Start(ctx.Done()) + kubeInformerFactory.WaitForCacheSync(ctx.Done()) + masterMemberManager := &masterMemberManager{podLister: podLister} + got := masterMemberManager.shouldRecover(tt.dc) + if got != tt.want { + 
t.Fatalf("wants %v, got %v", tt.want, got) + } + }) + } +} + +func intPtr(i int) *int { + return &i +} + +func hasClusterTLSVol(sts *apps.StatefulSet, volName string) bool { + for _, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == volName { + return true + } + } + return false +} + +func hasClusterVolMount(sts *apps.StatefulSet, memberType v1alpha1.MemberType) bool { + var vmName string + switch memberType { + case v1alpha1.DMMasterMemberType: + vmName = "dm-master-tls" + case v1alpha1.DMWorkerMemberType: + vmName = "dm-worker-tls" + default: + return false + } + for _, container := range sts.Spec.Template.Spec.Containers { + if container.Name == memberType.String() { + for _, vm := range container.VolumeMounts { + if vm.Name == vmName { + return true + } + } + } + } + return false +} diff --git a/pkg/manager/member/dm_master_scaler.go b/pkg/manager/member/dm_master_scaler.go index a371c880889..db47735bb4c 100644 --- a/pkg/manager/member/dm_master_scaler.go +++ b/pkg/manager/member/dm_master_scaler.go @@ -177,3 +177,33 @@ func (msd *masterScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, n func (msd *masterScaler) SyncAutoScalerAnn(meta metav1.Object, oldSet *apps.StatefulSet) error { return nil } + +type fakeMasterScaler struct{} + +// NewFakeMasterScaler returns a fake Scaler +func NewFakeMasterScaler() Scaler { + return &fakeMasterScaler{} +} + +func (fms *fakeMasterScaler) Scale(meta metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + if *newSet.Spec.Replicas > *oldSet.Spec.Replicas { + return fms.ScaleOut(meta, oldSet, newSet) + } else if *newSet.Spec.Replicas < *oldSet.Spec.Replicas { + return fms.ScaleIn(meta, oldSet, newSet) + } + return nil +} + +func (fms *fakeMasterScaler) ScaleOut(_ metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas+1, nil) + return nil +} + +func (fms *fakeMasterScaler) ScaleIn(_ metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas-1, nil) + return nil +} + +func (fms *fakeMasterScaler) SyncAutoScalerAnn(dc metav1.Object, actual *apps.StatefulSet) error { + return nil +} diff --git a/pkg/manager/member/dm_worker_failover.go b/pkg/manager/member/dm_worker_failover.go index 5d4da53c172..a05cb522b56 100644 --- a/pkg/manager/member/dm_worker_failover.go +++ b/pkg/manager/member/dm_worker_failover.go @@ -97,3 +97,20 @@ func (wf *workerFailover) RemoveUndesiredFailures(dc *v1alpha1.DMCluster) { } } } + +type fakeWorkerFailover struct{} + +// NewFakeMasterFailover returns a fake Failover +func NewFakeWorkerFailover() DMFailover { + return &fakeWorkerFailover{} +} + +func (fwf *fakeWorkerFailover) Failover(_ *v1alpha1.DMCluster) error { + return nil +} + +func (fwf *fakeWorkerFailover) Recover(_ *v1alpha1.DMCluster) { +} + +func (fwf *fakeWorkerFailover) RemoveUndesiredFailures(_ *v1alpha1.DMCluster) { +} diff --git a/pkg/manager/member/dm_worker_member_manager.go b/pkg/manager/member/dm_worker_member_manager.go index cc86f4fc8ed..a51733bcd1b 100644 --- a/pkg/manager/member/dm_worker_member_manager.go +++ b/pkg/manager/member/dm_worker_member_manager.go @@ -272,7 +272,7 @@ func (wmm *workerMemberManager) syncDMClusterStatus(dc *v1alpha1.DMCluster, set workersInfo, err := dmClient.GetWorkers() if err != nil { - dc.Status.Master.Synced = false + dc.Status.Worker.Synced = false return err } @@ -360,7 +360,10 @@ func getNewWorkerSetForDMCluster(dc 
*v1alpha1.DMCluster, cm *corev1.ConfigMap) ( dcName := dc.Name baseWorkerSpec := dc.BaseWorkerSpec() instanceName := dc.GetInstanceName() - workerConfigMap := cm.Name + workerConfigMap := "" + if cm != nil { + workerConfigMap = cm.Name + } annMount, annVolume := annotationsMountVolume() volMounts := []corev1.VolumeMount{ @@ -529,10 +532,9 @@ func getNewWorkerSetForDMCluster(dc *v1alpha1.DMCluster, cm *corev1.ConfigMap) ( } func getWorkerConfigMap(dc *v1alpha1.DMCluster) (*corev1.ConfigMap, error) { - // For backward compatibility, only sync dm configmap when .worker.config is non-nil config := dc.Spec.Worker.Config if config == nil { - return nil, nil + config = &v1alpha1.WorkerConfig{} } // override CA if tls enabled diff --git a/pkg/manager/member/dm_worker_member_manager_test.go b/pkg/manager/member/dm_worker_member_manager_test.go new file mode 100644 index 00000000000..e537796e4b8 --- /dev/null +++ b/pkg/manager/member/dm_worker_member_manager_test.go @@ -0,0 +1,1213 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/pingcap/tidb-operator/pkg/label" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/intstr" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/dmapi" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestWorkerMemberManagerSyncCreate(t *testing.T) { + g := NewGomegaWithT(t) + + type result struct { + sync error + svc *corev1.Service + getSvc error + set *appsv1.StatefulSet + getSet error + cm *corev1.ConfigMap + getCm error + } + + type testcase struct { + name string + prepare func(cluster *v1alpha1.DMCluster) + errOnCreateSet bool + errOnCreateCm bool + errOnCreateSvc bool + expectFn func(*GomegaWithT, *result) + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + dc := newDMClusterForWorker() + ns := dc.Namespace + dcName := dc.Name + if test.prepare != nil { + test.prepare(dc) + } + + wmm, ctls, _, _ := newFakeWorkerMemberManager() + + if test.errOnCreateSet { + ctls.set.SetCreateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errOnCreateSvc { + ctls.svc.SetCreateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errOnCreateCm { + ctls.generic.SetCreateOrUpdateError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + syncErr := wmm.SyncDM(dc) + svc, getSvcErr := wmm.svcLister.Services(ns).Get(controller.DMWorkerPeerMemberName(dcName)) + set, getStsErr 
:= wmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + + cmName := controller.DMWorkerMemberName(dcName) + if dc.Spec.Worker != nil { + cmGen, err := getWorkerConfigMap(dc) + g.Expect(err).To(Succeed()) + cmName = cmGen.Name + g.Expect(strings.HasPrefix(cmName, controller.DMWorkerMemberName(dcName))).To(BeTrue()) + } + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: cmName}} + key, err := client.ObjectKeyFromObject(cm) + g.Expect(err).To(Succeed()) + getCmErr := ctls.generic.FakeCli.Get(context.TODO(), key, cm) + result := result{syncErr, svc, getSvcErr, set, getStsErr, cm, getCmErr} + test.expectFn(g, &result) + } + + tests := []*testcase{ + { + name: "basic", + prepare: nil, + errOnCreateSet: false, + errOnCreateCm: false, + errOnCreateSvc: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).To(Succeed()) + g.Expect(r.getCm).To(Succeed()) + g.Expect(r.getSet).To(Succeed()) + g.Expect(r.getSvc).To(Succeed()) + }, + }, + { + name: "do not sync if dm-worker spec is nil", + prepare: func(dc *v1alpha1.DMCluster) { + dc.Spec.Worker = nil + }, + errOnCreateSet: false, + errOnCreateCm: false, + errOnCreateSvc: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).To(Succeed()) + g.Expect(r.getCm).NotTo(Succeed()) + g.Expect(r.getSet).NotTo(Succeed()) + g.Expect(r.getSvc).NotTo(Succeed()) + }, + }, + { + name: "error when create dm-worker statefulset", + prepare: nil, + errOnCreateSet: true, + errOnCreateCm: false, + errOnCreateSvc: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.getSet).NotTo(Succeed()) + g.Expect(r.getCm).To(Succeed()) + g.Expect(r.getSvc).To(Succeed()) + }, + }, + { + name: "error when create dm-worker peer service", + prepare: nil, + errOnCreateSet: false, + errOnCreateCm: false, + errOnCreateSvc: true, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.getSet).NotTo(Succeed()) + g.Expect(r.getCm).NotTo(Succeed()) + g.Expect(r.getSvc).NotTo(Succeed()) + }, + }, + { + name: "error when create dm-worker configmap", + prepare: nil, + errOnCreateSet: false, + errOnCreateCm: true, + errOnCreateSvc: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.getSet).NotTo(Succeed()) + g.Expect(r.getCm).NotTo(Succeed()) + g.Expect(r.getSvc).To(Succeed()) + }, + }, + } + + for _, tt := range tests { + testFn(tt, t) + } +} + +func TestWorkerMemberManagerSyncUpdate(t *testing.T) { + g := NewGomegaWithT(t) + + type result struct { + sync error + oldSvc *corev1.Service + svc *corev1.Service + getSvc error + oldSet *appsv1.StatefulSet + set *appsv1.StatefulSet + getSet error + oldCm *corev1.ConfigMap + cm *corev1.ConfigMap + getCm error + } + type testcase struct { + name string + prepare func(*v1alpha1.DMCluster, *workerFakeIndexers) + errOnUpdateSet bool + errOnUpdateCm bool + errOnUpdateSvc bool + expectFn func(*GomegaWithT, *result) + workerInfos []*dmapi.WorkersInfo + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMClusterForWorker() + ns := dc.Namespace + dcName := dc.Name + + mmm, ctls, indexers, fakeMasterControl := newFakeWorkerMemberManager() + + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetWorkersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.workerInfos, nil + }) + + if test.errOnUpdateSet { + 
ctls.set.SetUpdateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errOnUpdateSvc { + ctls.svc.SetUpdateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errOnUpdateCm { + ctls.generic.SetCreateOrUpdateError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + oldCm, err := getWorkerConfigMap(dc) + g.Expect(err).To(Succeed()) + oldSvc := getNewWorkerHeadlessServiceForDMCluster(dc) + oldSvc.Spec.Ports[0].Port = 8888 + oldSet, err := getNewWorkerSetForDMCluster(dc, oldCm) + g.Expect(err).To(Succeed()) + + g.Expect(indexers.set.Add(oldSet)).To(Succeed()) + g.Expect(indexers.svc.Add(oldSvc)).To(Succeed()) + + g.Expect(ctls.generic.AddObject(oldCm)).To(Succeed()) + + if test.prepare != nil { + test.prepare(dc, indexers) + } + + syncErr := mmm.SyncDM(dc) + svc, getSvcErr := mmm.svcLister.Services(ns).Get(controller.DMWorkerPeerMemberName(dcName)) + set, getStsErr := mmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + + cmName := controller.DMWorkerMemberName(dcName) + if dc.Spec.Worker != nil { + cmGen, err := getWorkerConfigMap(dc) + g.Expect(err).To(Succeed()) + cmName = cmGen.Name + g.Expect(strings.HasPrefix(cmName, controller.DMWorkerMemberName(dcName))).To(BeTrue()) + } + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: cmName}} + key, err := client.ObjectKeyFromObject(cm) + g.Expect(err).To(Succeed()) + getCmErr := ctls.generic.FakeCli.Get(context.TODO(), key, cm) + result := result{syncErr, oldSvc, svc, getSvcErr, oldSet, set, getStsErr, oldCm, cm, getCmErr} + test.expectFn(g, &result) + } + + tests := []*testcase{ + { + name: "basic", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + dc.Spec.Worker.Replicas = 4 + }, + errOnUpdateCm: false, + errOnUpdateSvc: false, + errOnUpdateSet: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).To(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).To(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(4))) + }, + workerInfos: nil, + }, + { + name: "error on update configmap", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + dc.Spec.Worker.Replicas = 4 + }, + errOnUpdateCm: true, + errOnUpdateSvc: false, + errOnUpdateSet: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).NotTo(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + }, + }, + { + name: "error on update service", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + dc.Spec.Worker.Replicas = 4 + }, + errOnUpdateCm: false, + 
errOnUpdateSvc: true, + errOnUpdateSet: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).To(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).NotTo(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + }, + }, + { + name: "error on update statefulset", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + dc.Spec.Worker.Replicas = 4 + }, + errOnUpdateCm: false, + errOnUpdateSvc: false, + errOnUpdateSet: true, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).To(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + }, + }, + } + + for _, tt := range tests { + testFn(tt, t) + } +} + +func TestWorkerMemberManagerWorkerStatefulSetIsUpgrading(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + setUpdate func(*appsv1.StatefulSet) + hasPod bool + updatePod func(*corev1.Pod) + errExpectFn func(*GomegaWithT, error) + expectUpgrading bool + } + testFn := func(test *testcase, t *testing.T) { + mmm, _, indexers, _ := newFakeWorkerMemberManager() + dc := newDMClusterForWorker() + dc.Status.Worker.StatefulSet = &appsv1.StatefulSetStatus{ + UpdateRevision: "v3", + } + + set := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: metav1.NamespaceDefault, + }, + } + if test.setUpdate != nil { + test.setUpdate(set) + } + + if test.hasPod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(v1alpha1.DMWorkerMemberType, dc.GetName(), 0), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(dc.GetInstanceName()).DMWorker().Labels(), + }, + } + if test.updatePod != nil { + test.updatePod(pod) + } + indexers.pod.Add(pod) + } + b, err := mmm.workerStatefulSetIsUpgrading(set, dc) + if test.errExpectFn != nil { + test.errExpectFn(g, err) + } + if test.expectUpgrading { + g.Expect(b).To(BeTrue()) + } else { + g.Expect(b).NotTo(BeTrue()) + } + } + tests := []testcase{ + { + name: "stateful set is upgrading", + setUpdate: func(set *appsv1.StatefulSet) { + set.Status.CurrentRevision = "v1" + set.Status.UpdateRevision = "v2" + set.Status.ObservedGeneration = 1000 + }, + hasPod: false, + updatePod: nil, + errExpectFn: nil, + expectUpgrading: true, + }, + { + name: "pod don't have revision hash", + setUpdate: nil, + hasPod: true, + updatePod: nil, + errExpectFn: nil, + expectUpgrading: false, + }, + { + name: "pod have revision hash, not equal statefulset's", + setUpdate: nil, + hasPod: true, + updatePod: func(pod *corev1.Pod) { + 
pod.Labels[appsv1.ControllerRevisionHashLabelKey] = "v2" + }, + errExpectFn: nil, + expectUpgrading: true, + }, + { + name: "pod have revision hash, equal statefulset's", + setUpdate: nil, + hasPod: true, + updatePod: func(pod *corev1.Pod) { + pod.Labels[appsv1.ControllerRevisionHashLabelKey] = "v3" + }, + errExpectFn: nil, + expectUpgrading: false, + }, + } + + for i := range tests { + t.Logf(tests[i].name) + testFn(&tests[i], t) + } +} + +func TestWorkerMemberManagerUpgrade(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster) + workerInfos []*dmapi.WorkersInfo + err bool + statusChange func(*appsv1.StatefulSet) + expectStatefulSetFn func(*GomegaWithT, *appsv1.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForWorker() + ns := dc.Namespace + dcName := dc.Name + + wmm, ctls, _, fakeMasterControl := newFakeWorkerMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetWorkersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.workerInfos, nil + }) + + ctls.set.SetStatusChange(test.statusChange) + + err := wmm.SyncDM(dc) + g.Expect(err).To(Succeed()) + + _, err = wmm.svcLister.Services(ns).Get(controller.DMWorkerPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = wmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + dc1 := dc.DeepCopy() + test.modify(dc1) + + err = wmm.SyncDM(dc1) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + + if test.expectStatefulSetFn != nil { + set, err := wmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + test.expectStatefulSetFn(g, set, err) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc1) + } + } + tests := []testcase{ + { + name: "upgrade successful", + modify: func(cluster *v1alpha1.DMCluster) { + cluster.Spec.Worker.BaseImage = "dm-test-image-2" + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateBound, Source: "mysql1"}, + }, + err: false, + statusChange: func(set *appsv1.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-worker-1" + set.Status.UpdateRevision = "dm-worker-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }, + expectStatefulSetFn: func(g *GomegaWithT, set *appsv1.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(set.Spec.Template.Spec.Containers[0].Image).To(Equal("dm-test-image-2:v2.0.0-rc.2")) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(len(dc.Status.Worker.Members)).To(Equal(3)) + g.Expect(dc.Status.Worker.Members["worker1"].Stage).To(Equal(v1alpha1.DMWorkerStateFree)) + g.Expect(dc.Status.Worker.Members["worker2"].Stage).To(Equal(v1alpha1.DMWorkerStateFree)) + g.Expect(dc.Status.Worker.Members["worker3"].Stage).To(Equal(v1alpha1.DMWorkerStateBound)) + }, + }, + } + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func 
TestWorkerSyncConfigUpdate(t *testing.T) {
+	g := NewGomegaWithT(t)
+
+	type result struct {
+		sync   error
+		oldSet *appsv1.StatefulSet
+		set    *appsv1.StatefulSet
+		getSet error
+		oldCm  *corev1.ConfigMap
+		cms    []corev1.ConfigMap
+		listCm error
+	}
+	type testcase struct {
+		name        string
+		prepare     func(*v1alpha1.DMCluster, *workerFakeIndexers)
+		expectFn    func(*GomegaWithT, *result)
+		workerInfos []*dmapi.WorkersInfo
+	}
+
+	testFn := func(test *testcase, t *testing.T) {
+		t.Log(test.name)
+
+		dc := newDMClusterForWorker()
+		ns := dc.Namespace
+		dcName := dc.Name
+
+		mmm, controls, indexers, fakeMasterControl := newFakeWorkerMemberManager()
+		masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc)
+		masterClient.AddReaction(dmapi.GetWorkersActionType, func(action *dmapi.Action) (interface{}, error) {
+			return test.workerInfos, nil
+		})
+
+		oldCm, err := getWorkerConfigMap(dc)
+		g.Expect(err).To(Succeed())
+		oldSvc := getNewWorkerHeadlessServiceForDMCluster(dc)
+		oldSvc.Spec.Ports[0].Port = 8888
+		oldSet, err := getNewWorkerSetForDMCluster(dc, oldCm)
+		g.Expect(err).To(Succeed())
+
+		g.Expect(indexers.set.Add(oldSet)).To(Succeed())
+		g.Expect(indexers.svc.Add(oldSvc)).To(Succeed())
+		g.Expect(controls.generic.AddObject(oldCm)).To(Succeed())
+
+		if test.prepare != nil {
+			test.prepare(dc, indexers)
+		}
+
+		syncErr := mmm.SyncDM(dc)
+		set, getStsErr := mmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName))
+		cmList := &corev1.ConfigMapList{}
+		g.Expect(err).To(Succeed())
+		listCmErr := controls.generic.FakeCli.List(context.TODO(), cmList)
+		result := result{syncErr, oldSet, set, getStsErr, oldCm, cmList.Items, listCmErr}
+		test.expectFn(g, &result)
+	}
+
+	tests := []*testcase{
+		{
+			name: "basic",
+			prepare: func(tc *v1alpha1.DMCluster, _ *workerFakeIndexers) {
+				tc.Spec.Worker.Config = &v1alpha1.WorkerConfig{
+					LogLevel:     pointer.StringPtr("info"),
+					KeepAliveTTL: pointer.Int64Ptr(25),
+				}
+			},
+			expectFn: func(g *GomegaWithT, r *result) {
+				g.Expect(r.sync).To(Succeed())
+				g.Expect(r.listCm).To(Succeed())
+				g.Expect(r.cms).To(HaveLen(2))
+				g.Expect(r.getSet).To(Succeed())
+				using := FindConfigMapVolume(&r.set.Spec.Template.Spec, func(name string) bool {
+					return strings.HasPrefix(name, controller.DMWorkerMemberName("test"))
+				})
+				g.Expect(using).NotTo(BeEmpty())
+				// index into the slice so usingCm does not alias the loop variable
+				var usingCm *corev1.ConfigMap
+				for i := range r.cms {
+					if r.cms[i].Name == using {
+						usingCm = &r.cms[i]
+					}
+				}
+				g.Expect(usingCm).NotTo(BeNil(), "The configmap used by statefulset must be created")
+				g.Expect(usingCm.Data["config-file"]).To(ContainSubstring("keepalive-ttl"),
+					"The configmap used by statefulset should be the latest one")
+			},
+			workerInfos: []*dmapi.WorkersInfo{
+				{Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree},
+				{Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree},
+				{Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		testFn(tt, t)
+	}
+}
+
+type workerFakeIndexers struct {
+	svc cache.Indexer
+	set cache.Indexer
+	pod cache.Indexer
+}
+
+type workerFakeControls struct {
+	svc     *controller.FakeServiceControl
+	set     *controller.FakeStatefulSetControl
+	generic *controller.FakeGenericControl
+}
+
+func newFakeWorkerMemberManager() (*workerMemberManager, *workerFakeControls, *workerFakeIndexers, *dmapi.FakeMasterControl) {
+	// cli := fake.NewSimpleClientset()
+	kubeCli := kubefake.NewSimpleClientset()
+	setInformer :=
kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() + // dcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().DMClusters() + svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() + epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() + podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) + genericControl := controller.NewFakeGenericControl() + masterControl := dmapi.NewFakeMasterControl(kubeCli) + workerScaler := NewFakeWorkerScaler() + autoFailover := true + workerFailover := NewFakeWorkerFailover() + pmm := &workerMemberManager{ + masterControl, + setControl, + svcControl, + controller.NewTypedControl(genericControl), + setInformer.Lister(), + svcInformer.Lister(), + podInformer.Lister(), + workerScaler, + autoFailover, + workerFailover, + } + controls := &workerFakeControls{ + svc: svcControl, + set: setControl, + generic: genericControl, + } + indexers := &workerFakeIndexers{ + svc: svcInformer.Informer().GetIndexer(), + set: setInformer.Informer().GetIndexer(), + pod: podInformer.Informer().GetIndexer(), + } + + return pmm, controls, indexers, masterControl +} + +func newDMClusterForWorker() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Master: v1alpha1.MasterSpec{ + BaseImage: "dm-test-image", + Replicas: 1, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "dm-test-image", + Replicas: 3, + Config: &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("debug"), + KeepAliveTTL: pointer.Int64Ptr(15), + }, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + }, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Synced: true, + Members: map[string]v1alpha1.MasterMember{"test-dm-master-0": { + Name: "test-dm-master-0", + Health: true, + }}, + StatefulSet: &appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, + }, + } +} + +func TestGetNewWorkerHeadlessService(t *testing.T) { + tests := []struct { + name string + dc v1alpha1.DMCluster + expected corev1.Service + }{ + { + name: "basic", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-worker-peer", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-worker", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b 
bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: "dm-worker", + Port: 8262, + TargetPort: intstr.FromInt(8262), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-worker", + }, + PublishNotReadyAddresses: true, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + svc := getNewWorkerHeadlessServiceForDMCluster(&tt.dc) + if diff := cmp.Diff(tt.expected, *svc); diff != "" { + t.Errorf("unexpected Service (-want, +got): %s", diff) + } + }) + } +} + +func TestGetNewWorkerSetForDMCluster(t *testing.T) { + enable := true + tests := []struct { + name string + dc v1alpha1.DMCluster + wantErr bool + testSts func(sts *appsv1.StatefulSet) + }{ + { + name: "dm-worker network is not host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testHostNetwork(t, false, ""), + }, + { + name: "dm-worker network is host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, + }, + }, + }, + }, + testSts: testHostNetwork(t, true, v1.DNSClusterFirstWithHostNet), + }, + { + name: "dm-worker network is not host when dm-master is host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testHostNetwork(t, false, ""), + }, + { + name: "dm-worker should respect resources config", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + }, + StorageSize: "100Gi", + }, + Master: v1alpha1.MasterSpec{}, + }, + }, + testSts: func(sts *appsv1.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(sts.Spec.VolumeClaimTemplates[0].Spec.Resources).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + })) + nameToContainer := MapContainers(&sts.Spec.Template.Spec) + masterContainer := nameToContainer[v1alpha1.DMWorkerMemberType.String()] + g.Expect(masterContainer.Resources).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + Limits: 
corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + })) + }, + }, + { + name: "set custom env", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + Env: []corev1.EnvVar{ + { + Name: "SOURCE1", + Value: "mysql_replica1", + }, + { + Name: "TZ", + Value: "ignored", + }, + }, + }, + }, + Master: v1alpha1.MasterSpec{}, + }, + }, + testSts: testContainerEnv(t, []corev1.EnvVar{ + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "CLUSTER_NAME", + Value: "dc", + }, + { + Name: "HEADLESS_SERVICE_NAME", + Value: "dc-dm-worker-peer", + }, + { + Name: "SET_NAME", + Value: "dc-dm-worker", + }, + { + Name: "TZ", + Value: "UTC", + }, + { + Name: "SOURCE1", + Value: "mysql_replica1", + }, + }, + v1alpha1.DMWorkerMemberType, + ), + }, + { + name: "dm version nightly, dm cluster tls is enabled", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-nightly", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + }, + Version: "nightly", + TLSCluster: &v1alpha1.TLSCluster{Enabled: true}, + }, + }, + testSts: func(sts *appsv1.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(hasClusterTLSVol(sts, "dm-worker-tls")).To(BeTrue()) + g.Expect(hasClusterVolMount(sts, v1alpha1.DMWorkerMemberType)).To(BeTrue()) + }, + }, + { + name: "dmcluster worker with failureMember", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Replicas: 3, + }, + Version: "nightly", + }, + Status: v1alpha1.DMClusterStatus{ + Worker: v1alpha1.WorkerStatus{ + FailureMembers: map[string]v1alpha1.WorkerFailureMember{ + "test": { + PodName: "test", + }, + }, + }, + }, + }, + testSts: func(sts *appsv1.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(*sts.Spec.Replicas).To(Equal(int32(4))) + }, + }, + { + name: "dm-worker additional containers", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + AdditionalContainers: []corev1.Container{customSideCarContainers[0]}, + }, + }, + }, + }, + testSts: testAdditionalContainers(t, []corev1.Container{customSideCarContainers[0]}), + }, + { + name: "dm-worker additional volumes", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + AdditionalVolumes: []corev1.Volume{{Name: "test", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + }, + }, + }, + }, + testSts: testAdditionalVolumes(t, []corev1.Volume{{Name: "test", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}), + }, + // TODO add more tests + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sts, err := 
getNewWorkerSetForDMCluster(&tt.dc, nil) + if (err != nil) != tt.wantErr { + t.Fatalf("error %v, wantErr %v", err, tt.wantErr) + } + tt.testSts(sts) + }) + } +} + +func TestGetNewWorkerConfigMap(t *testing.T) { + g := NewGomegaWithT(t) + + tests := []struct { + name string + dc v1alpha1.DMCluster + expected corev1.ConfigMap + }{ + { + name: "empty config", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + Config: nil, + }, + }, + }, + expected: corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-worker", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-worker", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "config-file": "", + "startup-script": "", + }, + }, + }, + { + name: "rolling update", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + Config: &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + }, + }, + }, + }, + expected: corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-worker", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-worker", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "config-file": `log-level = "info" +keepalive-ttl = 25 +`, + "startup-script": "", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cm, err := getWorkerConfigMap(&tt.dc) + g.Expect(err).To(Succeed()) + g.Expect(strings.HasPrefix(cm.Name, "foo-dm-worker")).To(BeTrue()) + tt.expected.Name = cm.Name + // startup-script is better to be validated in e2e test + cm.Data["startup-script"] = "" + if diff := cmp.Diff(tt.expected, *cm); diff != "" { + t.Errorf("unexpected ConfigMap (-want, +got): %s", diff) + } + }) + } +} diff --git a/pkg/manager/member/dm_worker_scaler.go b/pkg/manager/member/dm_worker_scaler.go index 3fa3dda3c2f..91d9961b140 100644 --- a/pkg/manager/member/dm_worker_scaler.go +++ b/pkg/manager/member/dm_worker_scaler.go @@ -132,3 +132,33 @@ func (wsd *workerScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, n func (wsd *workerScaler) SyncAutoScalerAnn(meta metav1.Object, oldSet *apps.StatefulSet) error { return nil } + +type fakeWorkerScaler struct{} + +// NewFakeWorkerScaler returns a fake Scaler +func NewFakeWorkerScaler() Scaler { + return &fakeWorkerScaler{} +} + +func (fws *fakeWorkerScaler) Scale(meta metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + if *newSet.Spec.Replicas > *oldSet.Spec.Replicas { + return fws.ScaleOut(meta, oldSet, newSet) + } else if 
*newSet.Spec.Replicas < *oldSet.Spec.Replicas { + return fws.ScaleIn(meta, oldSet, newSet) + } + return nil +} + +func (fws *fakeWorkerScaler) ScaleOut(_ metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas+1, nil) + return nil +} + +func (fws *fakeWorkerScaler) ScaleIn(_ metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas-1, nil) + return nil +} + +func (fws *fakeWorkerScaler) SyncAutoScalerAnn(dc metav1.Object, actual *apps.StatefulSet) error { + return nil +} diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go index eab5ef822f6..7a91a8e86ae 100644 --- a/pkg/manager/member/pd_member_manager_test.go +++ b/pkg/manager/member/pd_member_manager_test.go @@ -23,8 +23,6 @@ import ( . "github.com/onsi/gomega" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" - informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/pdapi" @@ -740,16 +738,14 @@ func TestPDMemberManagerSyncPDSts(t *testing.T) { } func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetControl, *controller.FakeServiceControl, *pdapi.FakePDControl, cache.Indexer, cache.Indexer, *controller.FakePodControl) { - cli := fake.NewSimpleClientset() kubeCli := kubefake.NewSimpleClientset() setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() pvcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().PersistentVolumeClaims() - tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) podControl := controller.NewFakePodControl(podInformer) pdControl := pdapi.NewFakePDControl(kubeCli) pdScaler := NewFakePDScaler() @@ -917,11 +913,11 @@ func testAnnotations(t *testing.T, annotations map[string]string) func(sts *apps } } -func testPDContainerEnv(t *testing.T, env []corev1.EnvVar) func(sts *apps.StatefulSet) { +func testContainerEnv(t *testing.T, env []corev1.EnvVar, memberType v1alpha1.MemberType) func(sts *apps.StatefulSet) { return func(sts *apps.StatefulSet) { got := []corev1.EnvVar{} for _, c := range sts.Spec.Template.Spec.Containers { - if c.Name == v1alpha1.PDMemberType.String() { + if c.Name == memberType.String() { got = c.Env } } @@ -1112,7 +1108,7 @@ func TestGetNewPDSetForTidbCluster(t *testing.T) { TiDB: &v1alpha1.TiDBSpec{}, }, }, - testSts: testPDContainerEnv(t, []corev1.EnvVar{ + testSts: testContainerEnv(t, []corev1.EnvVar{ { Name: "NAMESPACE", ValueFrom: &corev1.EnvVarSource{ @@ -1147,7 +1143,9 @@ func TestGetNewPDSetForTidbCluster(t *testing.T) { }, }, }, - }), + }, + 
v1alpha1.PDMemberType, + ), }, { name: "tidb version v3.1.0, tidb client tls is enabled", diff --git a/pkg/manager/member/pump_member_manager_test.go b/pkg/manager/member/pump_member_manager_test.go index e1b081273d4..bf80529a1bf 100644 --- a/pkg/manager/member/pump_member_manager_test.go +++ b/pkg/manager/member/pump_member_manager_test.go @@ -444,8 +444,8 @@ func newFakePumpMemberManager() (*pumpMemberManager, *pumpFakeControls, *pumpFak epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() cmInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps() podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) cmControl := controller.NewFakeConfigMapControl(cmInformer) genericControl := controller.NewFakeGenericControl() pmm := &pumpMemberManager{ diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index 9e10de2d820..5cfacd45527 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -802,8 +802,8 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() secretInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Secrets() cmInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) genericControl := controller.NewFakeGenericControl() tidbUpgrader := NewFakeTiDBUpgrader() tidbFailover := NewFakeTiDBFailover() diff --git a/pkg/manager/member/tiflash_member_manager_test.go b/pkg/manager/member/tiflash_member_manager_test.go index a784bc1fe7e..802ca51e02e 100644 --- a/pkg/manager/member/tiflash_member_manager_test.go +++ b/pkg/manager/member/tiflash_member_manager_test.go @@ -23,8 +23,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" - informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/pdapi" @@ -1127,16 +1125,14 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { func newFakeTiFlashMemberManager(tc *v1alpha1.TidbCluster) ( *tiflashMemberManager, *controller.FakeStatefulSetControl, *controller.FakeServiceControl, *pdapi.FakePDClient, cache.Indexer, cache.Indexer) { - cli := fake.NewSimpleClientset() kubeCli := kubefake.NewSimpleClientset() pdControl := pdapi.NewFakePDControl(kubeCli) pdClient := controller.NewFakePDClient(pdControl, tc) setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() - tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() nodeInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Nodes() tiflashScaler := NewFakeTiFlashScaler() diff --git a/pkg/manager/member/tikv_member_manager_test.go b/pkg/manager/member/tikv_member_manager_test.go index 3ce97dcd514..3e1fa36f24d 100644 --- a/pkg/manager/member/tikv_member_manager_test.go +++ b/pkg/manager/member/tikv_member_manager_test.go @@ -23,8 +23,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" - informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/pdapi" @@ -1481,16 +1479,14 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { func newFakeTiKVMemberManager(tc *v1alpha1.TidbCluster) ( *tikvMemberManager, *controller.FakeStatefulSetControl, *controller.FakeServiceControl, *pdapi.FakePDClient, cache.Indexer, cache.Indexer) { - cli := fake.NewSimpleClientset() kubeCli := kubefake.NewSimpleClientset() pdControl := pdapi.NewFakePDControl(kubeCli) pdClient := controller.NewFakePDClient(pdControl, tc) setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() - tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() nodeInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Nodes() tikvScaler := NewFakeTiKVScaler() From 22358e8c25d505db41ea1386babde816f0ede143 Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Thu, 24 Sep 2020 13:17:02 +0800 Subject: [PATCH 4/7] remove pod restarted --- pkg/controller/dmcluster/dm_cluster_control_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/controller/dmcluster/dm_cluster_control_test.go b/pkg/controller/dmcluster/dm_cluster_control_test.go index 5634a883f9e..18fb9317708 100644 --- a/pkg/controller/dmcluster/dm_cluster_control_test.go +++ b/pkg/controller/dmcluster/dm_cluster_control_test.go @@ -255,7 +255,6 @@ func newFakeDMClusterControl() ( reclaimPolicyManager := meta.NewFakeReclaimPolicyManager() orphanPodCleaner := mm.NewFakeOrphanPodsCleaner() pvcCleaner := mm.NewFakePVCCleaner() - podRestarter := mm.NewFakePodRestarter() pvcResizer := mm.NewFakePVCResizer() control := NewDefaultDMClusterControl( dcControl, @@ -265,7 +264,6 @@ func newFakeDMClusterControl() ( orphanPodCleaner, pvcCleaner, pvcResizer, - podRestarter, &dmClusterConditionUpdater{}, recorder, ) From a47253de31535d3ef3c6126583673abd73c0d125 Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Thu, 24 Sep 2020 13:51:46 +0800 Subject: [PATCH 5/7] fix lint --- .../member/dm_master_member_manager_test.go | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/pkg/manager/member/dm_master_member_manager_test.go b/pkg/manager/member/dm_master_member_manager_test.go index a5d056aebfe..ee6907055ff 100644 --- a/pkg/manager/member/dm_master_member_manager_test.go +++ b/pkg/manager/member/dm_master_member_manager_test.go @@ -176,21 +176,20 @@ func TestMasterMemberManagerSyncCreate(t *testing.T) { func TestMasterMemberManagerSyncUpdate(t *testing.T) { g := NewGomegaWithT(t) type testcase struct { - name string - modify func(cluster 
*v1alpha1.DMCluster) - leaderInfo dmapi.MembersLeader - masterInfos []*dmapi.MastersInfo - errWhenUpdateStatefulSet bool - errWhenUpdateMasterService bool - errWhenUpdateMasterPeerService bool - errWhenGetLeader bool - errWhenGetMasterInfos bool - statusChange func(*apps.StatefulSet) - err bool - expectMasterServiceFn func(*GomegaWithT, *corev1.Service, error) - expectMasterPeerServiceFn func(*GomegaWithT, *corev1.Service, error) - expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) - expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + name string + modify func(cluster *v1alpha1.DMCluster) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + errWhenUpdateStatefulSet bool + errWhenUpdateMasterService bool + errWhenGetLeader bool + errWhenGetMasterInfos bool + statusChange func(*apps.StatefulSet) + err bool + expectMasterServiceFn func(*GomegaWithT, *corev1.Service, error) + expectMasterPeerServiceFn func(*GomegaWithT, *corev1.Service, error) + expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) } testFn := func(test *testcase, t *testing.T) { From 3cd855702820602e74d62c48417694b6c552c6f2 Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Sun, 27 Sep 2020 14:48:02 +0800 Subject: [PATCH 6/7] add unit tests for dm-master/worker failover/scaler/upgrader and some other components --- pkg/controller/dmmaster_control.go | 6 + pkg/dmapi/master_control.go | 24 +- pkg/manager/member/dm_master_failover.go | 2 +- pkg/manager/member/dm_master_failover_test.go | 707 ++++++++++++++++++ pkg/manager/member/dm_master_scaler_test.go | 418 +++++++++++ pkg/manager/member/dm_master_upgrader_test.go | 396 ++++++++++ pkg/manager/member/dm_worker_failover_test.go | 295 ++++++++ .../member/dm_worker_member_manager_test.go | 57 +- pkg/manager/member/dm_worker_scaler_test.go | 276 +++++++ pkg/manager/member/orphan_pods_cleaner.go | 2 +- .../member/orphan_pods_cleaner_test.go | 55 +- pkg/manager/member/pd_scaler_test.go | 16 +- pkg/manager/member/pvc_resizer.go | 2 +- pkg/manager/member/pvc_resizer_test.go | 121 ++- .../meta/reclaim_policy_manager_test.go | 65 +- 15 files changed, 2408 insertions(+), 34 deletions(-) create mode 100644 pkg/manager/member/dm_master_failover_test.go create mode 100644 pkg/manager/member/dm_master_scaler_test.go create mode 100644 pkg/manager/member/dm_master_upgrader_test.go create mode 100644 pkg/manager/member/dm_worker_failover_test.go create mode 100644 pkg/manager/member/dm_worker_scaler_test.go diff --git a/pkg/controller/dmmaster_control.go b/pkg/controller/dmmaster_control.go index e2c6ef1e214..914b3cffeea 100644 --- a/pkg/controller/dmmaster_control.go +++ b/pkg/controller/dmmaster_control.go @@ -34,3 +34,9 @@ func NewFakeMasterClient(dmControl *dmapi.FakeMasterControl, dc *v1alpha1.DMClus dmControl.SetMasterClient(dc.GetNamespace(), dc.GetName(), masterClient) return masterClient } + +func NewFakeMasterPeerClient(dmControl *dmapi.FakeMasterControl, dc *v1alpha1.DMCluster, podName string) *dmapi.FakeMasterClient { + masterClient := dmapi.NewFakeMasterClient() + dmControl.SetMasterPeerClient(dc.GetNamespace(), dc.GetName(), podName, masterClient) + return masterClient +} diff --git a/pkg/dmapi/master_control.go b/pkg/dmapi/master_control.go index 032fa23826d..496237c8b57 100644 --- a/pkg/dmapi/master_control.go +++ b/pkg/dmapi/master_control.go @@ -98,6 +98,10 @@ func masterClientKey(scheme, namespace, clusterName string) string { return fmt.Sprintf("%s.%s.%s", scheme, 
clusterName, namespace) } +func masterPeerClientKey(schema, namespace, clusterName, podName string) string { + return fmt.Sprintf("%s.%s.%s.%s", schema, clusterName, namespace, podName) +} + // MasterClientURL builds the url of master client func MasterClientURL(namespace, clusterName, scheme string) string { return fmt.Sprintf("%s://%s-dm-master.%s:8261", scheme, clusterName, namespace) @@ -111,14 +115,28 @@ func MasterPeerClientURL(namespace, clusterName, podName, scheme string) string // FakeMasterControl implements a fake version of MasterControlInterface. type FakeMasterControl struct { defaultMasterControl + masterPeerClients map[string]MasterClient } func NewFakeMasterControl(kubeCli kubernetes.Interface) *FakeMasterControl { return &FakeMasterControl{ - defaultMasterControl{kubeCli: kubeCli, masterClients: map[string]MasterClient{}}, + defaultMasterControl: defaultMasterControl{kubeCli: kubeCli, masterClients: map[string]MasterClient{}}, + masterPeerClients: map[string]MasterClient{}, } } -func (fmc *FakeMasterControl) SetMasterClient(namespace string, tcName string, masterClient MasterClient) { - fmc.defaultMasterControl.masterClients[masterClientKey("http", namespace, tcName)] = masterClient +func (fmc *FakeMasterControl) SetMasterClient(namespace, dcName string, masterClient MasterClient) { + fmc.defaultMasterControl.masterClients[masterClientKey("http", namespace, dcName)] = masterClient +} + +func (fmc *FakeMasterControl) SetMasterPeerClient(namespace, dcName, podName string, masterPeerClient MasterClient) { + fmc.masterPeerClients[masterPeerClientKey("http", namespace, dcName, podName)] = masterPeerClient +} + +func (fmc *FakeMasterControl) GetMasterClient(namespace string, dcName string, tlsEnabled bool) MasterClient { + return fmc.defaultMasterControl.GetMasterClient(namespace, dcName, tlsEnabled) +} + +func (fmc *FakeMasterControl) GetMasterPeerClient(namespace, dcName, podName string, tlsEnabled bool) MasterClient { + return fmc.masterPeerClients[masterPeerClientKey("http", namespace, dcName, podName)] } diff --git a/pkg/manager/member/dm_master_failover.go b/pkg/manager/member/dm_master_failover.go index 9d2c154ea27..3bf5772cffb 100644 --- a/pkg/manager/member/dm_master_failover.go +++ b/pkg/manager/member/dm_master_failover.go @@ -93,7 +93,7 @@ func (mf *masterFailover) Failover(dc *v1alpha1.DMCluster) error { } inQuorum := healthCount > len(dc.Status.Master.Members)/2 if !inQuorum { - return fmt.Errorf("DMCluster: %s/%s's dm-master cluster is not health: %d/%d, "+ + return fmt.Errorf("DMCluster: %s/%s's dm-master cluster is not healthy: %d/%d, "+ "replicas: %d, failureCount: %d, can't failover", ns, dcName, healthCount, dc.MasterStsDesiredReplicas(), dc.Spec.Master.Replicas, len(dc.Status.Master.FailureMembers)) } diff --git a/pkg/manager/member/dm_master_failover_test.go b/pkg/manager/member/dm_master_failover_test.go new file mode 100644 index 00000000000..dc8e98b69e4 --- /dev/null +++ b/pkg/manager/member/dm_master_failover_test.go @@ -0,0 +1,707 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package member + +import ( + "fmt" + "sort" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/pkg/dmapi" + + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" + "github.com/pingcap/tidb-operator/pkg/controller" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" +) + +func TestMasterFailoverFailover(t *testing.T) { + g := NewGomegaWithT(t) + + recorder := record.NewFakeRecorder(100) + type testcase struct { + name string + update func(*v1alpha1.DMCluster) + maxFailoverCount int32 + hasPVC bool + hasPod bool + podWithDeletionTimestamp bool + pvcWithDeletionTimestamp bool + delMemberFailed bool + delPodFailed bool + delPVCFailed bool + statusSyncFailed bool + errExpectFn func(*GomegaWithT, error) + expectFn func(*v1alpha1.DMCluster, *masterFailover) + } + + tests := []testcase{ + { + name: "all dm-master members are ready", + update: allMasterMembersReady, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: errExpectNil, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(0)) + }, + }, + { + name: "dm-master status sync failed", + update: allMasterMembersReady, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: true, + errExpectFn: errExpectNotNil, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(0)) + }, + }, + { + name: "two dm-master members are not ready, not in quorum", + update: twoMasterMembersNotReady, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "dm-master cluster is not healthy")).To(Equal(true)) + }, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0)) + events := collectEvents(recorder.Events) + sort.Strings(events) + g.Expect(events).To(HaveLen(2)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-0(0) is unhealthy")) + g.Expect(events[1]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + }, + }, + { + name: "two dm-master members are ready and a failure dm-master member", + update: oneFailureMasterMember, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + errExpectFn: errExpectNil, + expectFn: func(dc 
*v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(1)) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master1, ok := dc.Status.Master.FailureMembers[master1Name] + g.Expect(ok).To(Equal(true)) + g.Expect(master1.MemberDeleted).To(Equal(true)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(1)) + g.Expect(events[0]).To(ContainSubstring("[default/test-dm-master-1] deleted from dmcluster")) + }, + }, + { + name: "has one not ready dm-master member, but not exceed deadline", + update: func(dc *v1alpha1.DMCluster) { + oneNotReadyMasterMember(dc) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master1 := dc.Status.Master.Members[master1Name] + master1.LastTransitionTime = metav1.Time{Time: time.Now().Add(-2 * time.Minute)} + dc.Status.Master.Members[master1Name] = master1 + }, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: errExpectNil, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(1)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + }, + }, + { + name: "has one not ready dm-master member, and exceed deadline, lastTransitionTime is zero", + update: func(dc *v1alpha1.DMCluster) { + oneNotReadyMasterMember(dc) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master1 := dc.Status.Master.Members[master1Name] + master1.LastTransitionTime = metav1.Time{} + dc.Status.Master.Members[master1Name] = master1 + }, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: errExpectNil, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(1)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + }, + }, + { + name: "has one not ready dm-master member, don't have pvc", + update: oneNotReadyMasterMember, + maxFailoverCount: 3, + hasPVC: false, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "persistentvolumeclaim \"dm-master-test-dm-master-1\" not found")).To(Equal(true)) + }, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(1)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + }, + }, + { + name: "has one not ready dm-master member", + update: oneNotReadyMasterMember, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + 
podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "marking Pod: default/test-dm-master-1 dm-master member: test-dm-master-1 as failure")).To(Equal(true)) + }, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(1)) + failureMembers := dc.Status.Master.FailureMembers["test-dm-master-1"] + g.Expect(failureMembers.PodName).To(Equal("test-dm-master-1")) + g.Expect(failureMembers.MemberID).To(Equal("12891273174085095651")) + g.Expect(string(failureMembers.PVCUID)).To(Equal("pvc-1-uid")) + g.Expect(failureMembers.MemberDeleted).To(BeFalse()) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(2)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + g.Expect(events[1]).To(ContainSubstring("Unhealthy dm-master pod[test-dm-master-1] is unhealthy, msg:dm-master member[12891273174085095651] is unhealthy")) + }, + }, + { + name: "has one not ready dm-master member but maxFailoverCount is 0", + update: oneNotReadyMasterMember, + maxFailoverCount: 0, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: errExpectNil, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(1)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + }, + }, + { + name: "has one not ready dm-master member, and exceed deadline, don't have PVC, has Pod, delete pod success", + update: oneNotReadyMasterMemberAndAFailureMasterMember, + maxFailoverCount: 3, + hasPVC: false, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: errExpectNil, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master1, ok := dc.Status.Master.FailureMembers[master1Name] + g.Expect(ok).To(Equal(true)) + g.Expect(master1.MemberDeleted).To(Equal(true)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(2)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + g.Expect(events[1]).To(ContainSubstring("[default/test-dm-master-1] deleted from dmcluster")) + }, + }, + { + name: "has one not dm-master ready member, and exceed deadline, don't have PVC, has Pod, delete dm-master member failed", + update: oneNotReadyMasterMemberAndAFailureMasterMember, + maxFailoverCount: 3, + hasPVC: false, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: true, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "failed to delete member")).To(Equal(true)) + }, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + 
g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master1, ok := dc.Status.Master.FailureMembers[master1Name] + g.Expect(ok).To(Equal(true)) + g.Expect(master1.MemberDeleted).To(Equal(false)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(1)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + }, + }, + { + name: "has one not ready dm-master member, and exceed deadline, don't have PVC, has Pod, delete pod failed", + update: oneNotReadyMasterMemberAndAFailureMasterMember, + maxFailoverCount: 3, + hasPVC: false, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: true, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "delete pod: API server failed")).To(Equal(true)) + }, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master1, ok := dc.Status.Master.FailureMembers[master1Name] + g.Expect(ok).To(Equal(true)) + g.Expect(master1.MemberDeleted).To(Equal(false)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(2)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + g.Expect(events[1]).To(ContainSubstring("[default/test-dm-master-1] deleted from dmcluster")) + }, + }, + { + name: "has one not ready dm-master member, and exceed deadline, has Pod, delete pvc failed", + update: oneNotReadyMasterMemberAndAFailureMasterMember, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: true, + statusSyncFailed: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "delete pvc: API server failed")).To(Equal(true)) + }, + expectFn: func(dc *v1alpha1.DMCluster, _ *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master1, ok := dc.Status.Master.FailureMembers[master1Name] + g.Expect(ok).To(Equal(true)) + g.Expect(master1.MemberDeleted).To(Equal(false)) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(2)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + g.Expect(events[1]).To(ContainSubstring("[default/test-dm-master-1] deleted from dmcluster")) + }, + }, + { + name: "has one not ready dm-master member, and exceed deadline, has Pod with deletion timestamp", + update: oneNotReadyMasterMemberAndAFailureMasterMember, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: true, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: errExpectNil, + expectFn: func(dc *v1alpha1.DMCluster, mf *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + pvcName := ordinalPVCName(v1alpha1.DMMasterMemberType, controller.DMMasterMemberName(dc.GetName()), 1) + master1, ok := dc.Status.Master.FailureMembers[master1Name] + g.Expect(ok).To(Equal(true)) + 
g.Expect(master1.MemberDeleted).To(Equal(true)) + _, err := mf.podLister.Pods(metav1.NamespaceDefault).Get(master1Name) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mf.pvcLister.PersistentVolumeClaims(metav1.NamespaceDefault).Get(pvcName) + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.IsNotFound(err)).To(BeTrue()) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(2)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + g.Expect(events[1]).To(ContainSubstring("[default/test-dm-master-1] deleted from dmcluster")) + }, + }, + { + name: "has one not ready dm-master member, and exceed deadline, has PVC with deletion timestamp", + update: oneNotReadyMasterMemberAndAFailureMasterMember, + maxFailoverCount: 3, + hasPVC: true, + hasPod: true, + podWithDeletionTimestamp: false, + pvcWithDeletionTimestamp: true, + delMemberFailed: false, + delPodFailed: false, + delPVCFailed: false, + statusSyncFailed: false, + errExpectFn: errExpectNil, + expectFn: func(dc *v1alpha1.DMCluster, mf *masterFailover) { + g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3)) + master1Name := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + pvcName := ordinalPVCName(v1alpha1.DMMasterMemberType, controller.DMMasterMemberName(dc.GetName()), 1) + master1, ok := dc.Status.Master.FailureMembers[master1Name] + g.Expect(ok).To(Equal(true)) + g.Expect(master1.MemberDeleted).To(Equal(true)) + _, err := mf.podLister.Pods(metav1.NamespaceDefault).Get(master1Name) + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.IsNotFound(err)).To(BeTrue()) + _, err = mf.pvcLister.PersistentVolumeClaims(metav1.NamespaceDefault).Get(pvcName) + g.Expect(err).NotTo(HaveOccurred()) + events := collectEvents(recorder.Events) + g.Expect(events).To(HaveLen(2)) + g.Expect(events[0]).To(ContainSubstring("test-dm-master-1(12891273174085095651) is unhealthy")) + g.Expect(events[1]).To(ContainSubstring("[default/test-dm-master-1] deleted from dmcluster")) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + dc := newDMClusterForMaster() + dc.Spec.Master.MaxFailoverCount = pointer.Int32Ptr(test.maxFailoverCount) + test.update(dc) + + masterFailover, pvcIndexer, podIndexer, fakeMasterControl, fakePodControl, fakePVCControl := newFakeMasterFailover() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterFailover.recorder = recorder + + masterClient.AddReaction(dmapi.DeleteMasterActionType, func(action *dmapi.Action) (interface{}, error) { + if test.delMemberFailed { + return nil, fmt.Errorf("failed to delete member") + } + return nil, nil + }) + + if test.hasPVC { + pvc := newPVCForMasterFailover(dc, v1alpha1.DMMasterMemberType, 1) + if test.pvcWithDeletionTimestamp { + pvc.DeletionTimestamp = &metav1.Time{Time: time.Now()} + } + pvcIndexer.Add(pvc) + } + if test.hasPod { + pod := newPodForMasterFailover(dc, v1alpha1.DMMasterMemberType, 1) + if test.podWithDeletionTimestamp { + pod.DeletionTimestamp = &metav1.Time{Time: time.Now()} + } + podIndexer.Add(pod) + } + if test.delPodFailed { + fakePodControl.SetDeletePodError(errors.NewInternalError(fmt.Errorf("delete pod: API server failed")), 0) + } + if test.delPVCFailed { + fakePVCControl.SetDeletePVCError(errors.NewInternalError(fmt.Errorf("delete pvc: API server failed")), 0) + } + + dc.Status.Master.Synced = !test.statusSyncFailed + + err := masterFailover.Failover(dc) + test.errExpectFn(g, err) + test.expectFn(dc, masterFailover) + }) + } +} + +func 
TestMasterFailoverRecovery(t *testing.T) {
+	g := NewGomegaWithT(t)
+
+	type testcase struct {
+		name     string
+		update   func(*v1alpha1.DMCluster)
+		expectFn func(*v1alpha1.DMCluster)
+	}
+	testFn := func(test *testcase, t *testing.T) {
+		t.Log(test.name)
+		dc := newDMClusterForMaster()
+		test.update(dc)
+
+		masterFailover, _, _, _, _, _ := newFakeMasterFailover()
+		masterFailover.Recover(dc)
+		test.expectFn(dc)
+	}
+	tests := []testcase{
+		{
+			name: "two failure member, user don't modify the replicas",
+			update: func(dc *v1alpha1.DMCluster) {
+				twoFailureMasterMembers(dc)
+				dc.Spec.Master.Replicas = 3
+			},
+			expectFn: func(dc *v1alpha1.DMCluster) {
+				g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3))
+				g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0))
+			},
+		},
+		{
+			name: "two failure member, user modify the replicas to 4",
+			update: func(dc *v1alpha1.DMCluster) {
+				twoFailureMasterMembers(dc)
+				dc.Spec.Master.Replicas = 4
+			},
+			expectFn: func(dc *v1alpha1.DMCluster) {
+				g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(4))
+				g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0))
+			},
+		},
+		{
+			name: "two failure member, user increase the replicas",
+			update: func(dc *v1alpha1.DMCluster) {
+				twoFailureMasterMembers(dc)
+				dc.Spec.Master.Replicas = 7
+			},
+			expectFn: func(dc *v1alpha1.DMCluster) {
+				g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(7))
+				g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0))
+			},
+		},
+		{
+			name: "two failure member, user decrease the replicas",
+			update: func(dc *v1alpha1.DMCluster) {
+				twoFailureMasterMembers(dc)
+				dc.Spec.Master.Replicas = 1
+			},
+			expectFn: func(dc *v1alpha1.DMCluster) {
+				g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(1))
+				g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0))
+			},
+		},
+		{
+			name: "one failure member, user don't modify the replicas",
+			update: func(dc *v1alpha1.DMCluster) {
+				oneFailureMasterMember(dc)
+				dc.Spec.Master.Replicas = 3
+			},
+			expectFn: func(dc *v1alpha1.DMCluster) {
+				g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(3))
+				g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0))
+			},
+		},
+		{
+			name: "one failure member, user increase the replicas",
+			update: func(dc *v1alpha1.DMCluster) {
+				oneFailureMasterMember(dc)
+				dc.Spec.Master.Replicas = 5
+			},
+			expectFn: func(dc *v1alpha1.DMCluster) {
+				g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(5))
+				g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0))
+			},
+		},
+		{
+			name: "one failure member, user decrease the replicas",
+			update: func(dc *v1alpha1.DMCluster) {
+				oneFailureMasterMember(dc)
+				dc.Spec.Master.Replicas = 1
+			},
+			expectFn: func(dc *v1alpha1.DMCluster) {
+				g.Expect(int(dc.Spec.Master.Replicas)).To(Equal(1))
+				g.Expect(len(dc.Status.Master.FailureMembers)).To(Equal(0))
+			},
+		},
+	}
+
+	for i := range tests {
+		testFn(&tests[i], t)
+	}
+}
+
+func newFakeMasterFailover() (*masterFailover, cache.Indexer, cache.Indexer, *dmapi.FakeMasterControl, *controller.FakePodControl, *controller.FakePVCControl) {
+	cli := fake.NewSimpleClientset()
+	kubeCli := kubefake.NewSimpleClientset()
+	masterControl := dmapi.NewFakeMasterControl(kubeCli)
+	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeCli, 0)
+	podInformer := kubeInformerFactory.Core().V1().Pods()
+	pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims()
+	pvInformer := kubeInformerFactory.Core().V1().PersistentVolumes()
+	podControl := controller.NewFakePodControl(podInformer)
+	pvcControl := controller.NewFakePVCControl(pvcInformer)
+ + return &masterFailover{ + cli, + masterControl, + 5 * time.Minute, + podInformer.Lister(), + podControl, + pvcInformer.Lister(), + pvcControl, + pvInformer.Lister(), + nil}, + pvcInformer.Informer().GetIndexer(), + podInformer.Informer().GetIndexer(), + masterControl, podControl, pvcControl +} + +func oneFailureMasterMember(dc *v1alpha1.DMCluster) { + master0 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 0) + master1 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master2 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 2) + dc.Status.Master.Members = map[string]v1alpha1.MasterMember{ + master0: {Name: master0, ID: "0", Health: true}, + master2: {Name: master2, ID: "2", Health: true}, + } + dc.Status.Master.FailureMembers = map[string]v1alpha1.MasterFailureMember{ + master1: {PodName: master1, PVCUID: "pvc-1-uid", MemberID: "12891273174085095651"}, + } +} + +func twoFailureMasterMembers(dc *v1alpha1.DMCluster) { + master0 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 0) + master1 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master2 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 2) + dc.Status.Master.Members = map[string]v1alpha1.MasterMember{ + master2: {Name: master2, ID: "2", Health: true}, + } + dc.Status.Master.FailureMembers = map[string]v1alpha1.MasterFailureMember{ + master0: {PodName: master0}, + master1: {PodName: master1}, + } +} + +func oneNotReadyMasterMember(dc *v1alpha1.DMCluster) { + master0 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 0) + master1 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master2 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 2) + dc.Status.Master.Members = map[string]v1alpha1.MasterMember{ + master0: {Name: master0, ID: "0", Health: true}, + master1: {Name: master1, ID: "12891273174085095651", Health: false, LastTransitionTime: metav1.Time{Time: time.Now().Add(-10 * time.Minute)}}, + master2: {Name: master2, ID: "2", Health: true}, + } +} + +func oneNotReadyMasterMemberAndAFailureMasterMember(dc *v1alpha1.DMCluster) { + master0 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 0) + master1 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master2 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 2) + dc.Status.Master.Members = map[string]v1alpha1.MasterMember{ + master0: {Name: master0, ID: "0", Health: true}, + master1: {Name: master1, ID: "12891273174085095651", Health: false, LastTransitionTime: metav1.Time{Time: time.Now().Add(-10 * time.Minute)}}, + master2: {Name: master2, ID: "2", Health: true}, + } + dc.Status.Master.FailureMembers = map[string]v1alpha1.MasterFailureMember{ + master1: {PodName: master1, PVCUID: "pvc-1-uid", MemberID: "12891273174085095651"}, + } +} + +func allMasterMembersReady(dc *v1alpha1.DMCluster) { + master0 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 0) + master1 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 1) + master2 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 2) + dc.Status.Master.Members = map[string]v1alpha1.MasterMember{ + master0: {Name: master0, ID: "0", Health: true}, + master1: {Name: master1, ID: "12891273174085095651", Health: true}, + master2: {Name: master2, ID: "2", Health: true}, + } +} + +func twoMasterMembersNotReady(dc *v1alpha1.DMCluster) { + master0 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 0) + master1 := ordinalPodName(v1alpha1.DMMasterMemberType, 
dc.GetName(), 1) + master2 := ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 2) + dc.Status.Master.Members = map[string]v1alpha1.MasterMember{ + master0: {Name: master0, ID: "0", Health: false}, + master1: {Name: master1, ID: "12891273174085095651", Health: false}, + master2: {Name: master2, ID: "2", Health: true}, + } +} + +func newPVCForMasterFailover(dc *v1alpha1.DMCluster, memberType v1alpha1.MemberType, ordinal int32) *corev1.PersistentVolumeClaim { + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPVCName(memberType, controller.DMMasterMemberName(dc.GetName()), ordinal), + Namespace: metav1.NamespaceDefault, + UID: types.UID("pvc-1-uid"), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: fmt.Sprintf("pv-%d", ordinal), + }, + } +} + +func newPodForMasterFailover(dc *v1alpha1.DMCluster, memberType v1alpha1.MemberType, ordinal int32) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(memberType, dc.GetName(), ordinal), + Namespace: metav1.NamespaceDefault, + }, + } +} diff --git a/pkg/manager/member/dm_master_scaler_test.go b/pkg/manager/member/dm_master_scaler_test.go new file mode 100644 index 00000000000..abcd46108b0 --- /dev/null +++ b/pkg/manager/member/dm_master_scaler_test.go @@ -0,0 +1,418 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "fmt" + "testing" + "time" + + . 
"github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/dmapi" + "github.com/pingcap/tidb-operator/pkg/label" + apps "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "k8s.io/utils/pointer" +) + +func TestMasterScalerScaleOut(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + update func(cluster *v1alpha1.DMCluster) + masterUpgrading bool + hasPVC bool + hasDeferAnn bool + annoIsNil bool + pvcDeleteErr bool + statusSyncFailed bool + err bool + changed bool + } + + testFn := func(test testcase, t *testing.T) { + dc := newDMClusterForMaster() + test.update(dc) + + if test.masterUpgrading { + dc.Status.Master.Phase = v1alpha1.UpgradePhase + } + + oldSet := newStatefulSetForDMScale() + newSet := oldSet.DeepCopy() + newSet.Spec.Replicas = pointer.Int32Ptr(7) + + scaler, _, pvcIndexer, pvcControl := newFakeMasterScaler() + + pvc := newPVCForStatefulSet(oldSet, v1alpha1.DMMasterMemberType, dc.Name) + pvc.Name = ordinalPVCName(v1alpha1.DMMasterMemberType, oldSet.GetName(), *oldSet.Spec.Replicas) + if !test.annoIsNil { + pvc.Annotations = map[string]string{} + } + + if test.hasDeferAnn { + pvc.Annotations = map[string]string{} + pvc.Annotations[label.AnnPVCDeferDeleting] = time.Now().Format(time.RFC3339) + } + if test.hasPVC { + pvcIndexer.Add(pvc) + } + + if test.pvcDeleteErr { + pvcControl.SetDeletePVCError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + dc.Status.Master.Synced = !test.statusSyncFailed + + err := scaler.ScaleOut(dc, oldSet, newSet) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + if test.changed { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(6)) + } else { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(5)) + } + } + + tests := []testcase{ + { + name: "normal", + update: normalMasterMember, + masterUpgrading: false, + hasPVC: true, + hasDeferAnn: false, + annoIsNil: true, + pvcDeleteErr: false, + statusSyncFailed: false, + err: false, + changed: true, + }, + { + name: "dm-master is upgrading", + update: normalMasterMember, + masterUpgrading: true, + hasPVC: true, + hasDeferAnn: false, + annoIsNil: true, + pvcDeleteErr: false, + statusSyncFailed: false, + err: false, + changed: true, + }, + { + name: "cache don't have pvc", + update: normalMasterMember, + masterUpgrading: false, + hasPVC: false, + hasDeferAnn: false, + annoIsNil: true, + pvcDeleteErr: false, + statusSyncFailed: false, + err: false, + changed: true, + }, + { + name: "pvc annotation is not nil but doesn't contain defer deletion annotation", + update: normalMasterMember, + masterUpgrading: false, + hasPVC: true, + hasDeferAnn: false, + annoIsNil: false, + pvcDeleteErr: false, + statusSyncFailed: false, + err: false, + changed: true, + }, + { + name: "pvc annotations defer deletion is not nil, pvc delete failed", + update: normalMasterMember, + masterUpgrading: false, + hasPVC: true, + hasDeferAnn: true, + annoIsNil: false, + pvcDeleteErr: true, + statusSyncFailed: false, + err: true, + changed: false, + }, + { + name: "dm-master status sync failed", + update: normalMasterMember, + masterUpgrading: false, + hasPVC: true, + hasDeferAnn: false, + annoIsNil: true, + pvcDeleteErr: false, + statusSyncFailed: 
true, + err: true, + changed: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testFn(tt, t) + }) + } +} + +func TestMasterScalerScaleIn(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + masterUpgrading bool + hasPVC bool + pvcUpdateErr bool + deleteMemberErr bool + statusSyncFailed bool + err bool + changed bool + isMemberStillRemain bool + isLeader bool + } + + testFn := func(test testcase, t *testing.T) { + dc := newDMClusterForMaster() + leaderPodName := DMMasterPodName(dc.GetName(), 4) + + if test.masterUpgrading { + dc.Status.Master.Phase = v1alpha1.UpgradePhase + } + if test.isLeader { + dc.Status.Master.Leader = v1alpha1.MasterMember{Name: leaderPodName, Health: true} + } + + oldSet := newStatefulSetForDMScale() + newSet := oldSet.DeepCopy() + newSet.Spec.Replicas = pointer.Int32Ptr(3) + + scaler, masterControl, pvcIndexer, pvcControl := newFakeMasterScaler() + + if test.hasPVC { + pvc := newScaleInPVCForStatefulSet(oldSet, v1alpha1.DMMasterMemberType, dc.Name) + pvcIndexer.Add(pvc) + } + + masterClient := controller.NewFakeMasterClient(masterControl, dc) + + if test.deleteMemberErr { + masterClient.AddReaction(dmapi.DeleteMasterActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, fmt.Errorf("error") + }) + } else { + masterClient.AddReaction(dmapi.DeleteMasterActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, nil + }) + } + if test.pvcUpdateErr { + pvcControl.SetUpdatePVCError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + if test.isLeader { + masterPeerClient := controller.NewFakeMasterPeerClient(masterControl, dc, leaderPodName) + masterPeerClient.AddReaction(dmapi.EvictLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, nil + }) + } + + var membersInfo []*dmapi.MastersInfo + if test.isMemberStillRemain { + membersInfo = []*dmapi.MastersInfo{ + { + Name: fmt.Sprintf("%s-dm-master-%d", dc.GetName(), 4), + }, + } + } else { + membersInfo = []*dmapi.MastersInfo{ + { + Name: fmt.Sprintf("%s-dm-master-%d", dc.GetName(), 1), + }, + } + } + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (i interface{}, err error) { + return membersInfo, nil + }) + + dc.Status.Master.Synced = !test.statusSyncFailed + + err := scaler.ScaleIn(dc, oldSet, newSet) + if test.err { + g.Expect(err).To(HaveOccurred()) + if test.isLeader { + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + } + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + if test.changed { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(4)) + } else { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(5)) + } + } + + tests := []testcase{ + { + name: "normal", + masterUpgrading: false, + hasPVC: true, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: false, + err: false, + changed: true, + isMemberStillRemain: false, + }, + { + name: "able to scale in while dm-master is upgrading", + masterUpgrading: true, + hasPVC: true, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: false, + err: false, + changed: true, + isMemberStillRemain: false, + }, + { + name: "error when delete dm-master", + hasPVC: true, + pvcUpdateErr: false, + masterUpgrading: false, + deleteMemberErr: true, + statusSyncFailed: false, + err: true, + changed: false, + isMemberStillRemain: false, + }, + { + name: "cache don't have pvc", + masterUpgrading: false, + hasPVC: false, + pvcUpdateErr: false, + deleteMemberErr: false, + 
statusSyncFailed: false, + err: true, + changed: false, + isMemberStillRemain: false, + }, + { + name: "error when update pvc", + masterUpgrading: false, + hasPVC: true, + pvcUpdateErr: true, + deleteMemberErr: false, + statusSyncFailed: false, + err: true, + changed: false, + isMemberStillRemain: false, + }, + { + name: "dm-master status sync failed", + masterUpgrading: false, + hasPVC: true, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: true, + err: true, + changed: false, + isMemberStillRemain: false, + }, + { + name: "delete dm-master success, but get dm-master still remain", + masterUpgrading: false, + hasPVC: true, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: false, + err: true, + changed: false, + isMemberStillRemain: true, + }, + { + name: "delete dm-master success, but get dm-master still remain", + masterUpgrading: false, + hasPVC: true, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: false, + err: true, + changed: false, + isMemberStillRemain: true, + }, + { + name: "scaled dm-master is leader", + masterUpgrading: false, + hasPVC: true, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: false, + err: true, + changed: false, + isMemberStillRemain: false, + isLeader: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testFn(tt, t) + }) + } +} + +func newFakeMasterScaler() (*masterScaler, *dmapi.FakeMasterControl, cache.Indexer, *controller.FakePVCControl) { + kubeCli := kubefake.NewSimpleClientset() + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeCli, 0) + pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims() + masterControl := dmapi.NewFakeMasterControl(kubeCli) + pvcControl := controller.NewFakePVCControl(pvcInformer) + + return &masterScaler{generalScaler{pvcInformer.Lister(), pvcControl}, masterControl}, + masterControl, pvcInformer.Informer().GetIndexer(), pvcControl +} + +func newStatefulSetForDMScale() *apps.StatefulSet { + set := &apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "scaler", + Namespace: metav1.NamespaceDefault, + }, + Spec: apps.StatefulSetSpec{ + Replicas: pointer.Int32Ptr(5), + }, + } + return set +} + +func normalMasterMember(dc *v1alpha1.DMCluster) { + dcName := dc.GetName() + dc.Status.Master.Members = map[string]v1alpha1.MasterMember{ + ordinalPodName(v1alpha1.DMMasterMemberType, dcName, 0): {Health: true}, + ordinalPodName(v1alpha1.DMMasterMemberType, dcName, 1): {Health: true}, + ordinalPodName(v1alpha1.DMMasterMemberType, dcName, 2): {Health: true}, + ordinalPodName(v1alpha1.DMMasterMemberType, dcName, 3): {Health: true}, + ordinalPodName(v1alpha1.DMMasterMemberType, dcName, 4): {Health: true}, + } +} diff --git a/pkg/manager/member/dm_master_upgrader_test.go b/pkg/manager/member/dm_master_upgrader_test.go new file mode 100644 index 00000000000..18b0eec67a9 --- /dev/null +++ b/pkg/manager/member/dm_master_upgrader_test.go @@ -0,0 +1,396 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package member + +import ( + "fmt" + "testing" + + "github.com/pingcap/tidb-operator/pkg/label" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/pkg/dmapi" + kubeinformers "k8s.io/client-go/informers" + podinformers "k8s.io/client-go/informers/core/v1" + kubefake "k8s.io/client-go/kubernetes/fake" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/controller" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" +) + +func TestMasterUpgraderUpgrade(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + changeFn func(*v1alpha1.DMCluster) + changePods func(pods []*corev1.Pod) + changeOldSet func(set *apps.StatefulSet) + transferLeaderErr bool + errExpectFn func(*GomegaWithT, error) + expectFn func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) + } + + testFn := func(test *testcase) { + t.Log(test.name) + upgrader, masterControl, _, podInformer := newMasterUpgrader() + dc := newDMClusterForMasterUpgrader() + leaderPodName := DMMasterPodName(upgradeTcName, 1) + masterPeerClient := controller.NewFakeMasterPeerClient(masterControl, dc, leaderPodName) + + if test.changeFn != nil { + test.changeFn(dc) + } + + if test.transferLeaderErr { + masterPeerClient.AddReaction(dmapi.EvictLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, fmt.Errorf("failed to transfer leader") + }) + } else { + masterPeerClient.AddReaction(dmapi.EvictLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, nil + }) + } + + pods := getMasterPods() + if test.changePods != nil { + test.changePods(pods) + } + for i := range pods { + podInformer.Informer().GetIndexer().Add(pods[i]) + } + + newSet := newStatefulSetForMasterUpgrader() + oldSet := newSet.DeepCopy() + if test.changeOldSet != nil { + test.changeOldSet(oldSet) + } + SetStatefulSetLastAppliedConfigAnnotation(oldSet) + + newSet.Spec.UpdateStrategy.RollingUpdate.Partition = pointer.Int32Ptr(3) + + err := upgrader.Upgrade(dc, oldSet, newSet) + test.errExpectFn(g, err) + test.expectFn(g, dc, newSet) + } + + tests := []testcase{ + { + name: "normal upgrade", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + }, + changePods: nil, + changeOldSet: nil, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(1))) + }, + }, + { + name: "modify oldSet update strategy to OnDelete", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + }, + changePods: nil, + changeOldSet: func(set *apps.StatefulSet) { + set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{ + Type: apps.OnDeleteStatefulSetStrategyType, + } + }, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy).To(Equal(apps.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType})) + }, + }, + { + name: "set oldSet's 
RollingUpdate strategy to nil", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + }, + changePods: nil, + changeOldSet: func(set *apps.StatefulSet) { + set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{ + Type: apps.RollingUpdateStatefulSetStrategyType, + } + }, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy).To(Equal(apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType})) + }, + }, + { + name: "newSet template changed", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + }, + changePods: nil, + changeOldSet: func(set *apps.StatefulSet) { + set.Spec.Template.Spec.Containers[0].Image = "dm-test-image:old" + }, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(3))) + }, + }, + { + name: "dm-master scaling", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + dc.Status.Master.Phase = v1alpha1.ScalePhase + }, + changePods: nil, + changeOldSet: func(set *apps.StatefulSet) { + set.Spec.Template.Spec.Containers[0].Image = "dm-test-image:old" + }, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.ScalePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(3))) + }, + }, + { + name: "update revision equals current revision", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + dc.Status.Master.StatefulSet.UpdateRevision = dc.Status.Master.StatefulSet.CurrentRevision + }, + changePods: nil, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(3))) + }, + }, + { + name: "skip to wait all members health", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + dc.Status.Master.Members[DMMasterPodName(upgradeTcName, 2)] = v1alpha1.MasterMember{Name: DMMasterPodName(upgradeTcName, 2), Health: false} + }, + changePods: nil, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err.Error()).To(Equal(fmt.Sprintf("dmcluster: [default/upgrader]'s dm-master upgraded pod: [%s] is not ready", DMMasterPodName(upgradeTcName, 2)))) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(2))) + }, + }, + { + name: "transfer leader", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + 
dc.Status.Master.Leader = v1alpha1.MasterMember{Name: DMMasterPodName(upgradeTcName, 1), Health: true} + }, + changePods: nil, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(2))) + }, + }, + { + name: "dm-master sync failed", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = false + }, + changePods: nil, + transferLeaderErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.NormalPhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(3))) + }, + }, + { + name: "error when transfer leader", + changeFn: func(dc *v1alpha1.DMCluster) { + dc.Status.Master.Synced = true + dc.Status.Master.Leader = v1alpha1.MasterMember{Name: DMMasterPodName(upgradeTcName, 1), Health: true} + }, + changePods: nil, + transferLeaderErr: true, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + }, + expectFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster, newSet *apps.StatefulSet) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(pointer.Int32Ptr(2))) + }, + }, + } + + for _, test := range tests { + testFn(&test) + } + +} + +func newMasterUpgrader() (DMUpgrader, *dmapi.FakeMasterControl, *controller.FakePodControl, podinformers.PodInformer) { + kubeCli := kubefake.NewSimpleClientset() + podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() + masterControl := dmapi.NewFakeMasterControl(kubeCli) + podControl := controller.NewFakePodControl(podInformer) + return &masterUpgrader{ + masterControl: masterControl, + podLister: podInformer.Lister()}, + masterControl, podControl, podInformer +} + +func newStatefulSetForMasterUpgrader() *apps.StatefulSet { + return &apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: controller.DMMasterMemberName(upgradeTcName), + Namespace: metav1.NamespaceDefault, + }, + Spec: apps.StatefulSetSpec{ + Replicas: pointer.Int32Ptr(3), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "dm-master", + Image: "dm-test-image", + }, + }, + }, + }, + UpdateStrategy: apps.StatefulSetUpdateStrategy{ + Type: apps.RollingUpdateStatefulSetStrategyType, + RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{Partition: pointer.Int32Ptr(2)}, + }, + }, + Status: apps.StatefulSetStatus{ + CurrentRevision: "1", + UpdateRevision: "2", + ReadyReplicas: 3, + Replicas: 3, + CurrentReplicas: 2, + UpdatedReplicas: 1, + }, + } +} + +func newDMClusterForMasterUpgrader() *v1alpha1.DMCluster { + podName0 := DMMasterPodName(upgradeTcName, 0) + podName1 := DMMasterPodName(upgradeTcName, 1) + podName2 := DMMasterPodName(upgradeTcName, 2) + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: upgradeTcName, + Namespace: corev1.NamespaceDefault, + UID: types.UID(upgradeTcName), + Labels: label.NewDM().Instance(upgradeInstanceName), + }, + Spec: 
v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "dm-test-image", + Replicas: 3, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + Version: "v2.0.0-rc.2", + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Phase: v1alpha1.NormalPhase, + StatefulSet: &apps.StatefulSetStatus{ + CurrentRevision: "1", + UpdateRevision: "2", + ReadyReplicas: 3, + Replicas: 3, + CurrentReplicas: 2, + UpdatedReplicas: 1, + }, + Members: map[string]v1alpha1.MasterMember{ + podName0: {Name: podName0, Health: true}, + podName1: {Name: podName1, Health: true}, + podName2: {Name: podName2, Health: true}, + }, + Leader: v1alpha1.MasterMember{Name: podName2, Health: true}, + }, + }, + } +} + +func getMasterPods() []*corev1.Pod { + lc := label.NewDM().Instance(upgradeInstanceName).DMMaster().Labels() + lc[apps.ControllerRevisionHashLabelKey] = "1" + lu := label.NewDM().Instance(upgradeInstanceName).DMMaster().Labels() + lu[apps.ControllerRevisionHashLabelKey] = "2" + pods := []*corev1.Pod{ + { + TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: DMMasterPodName(upgradeTcName, 0), + Namespace: corev1.NamespaceDefault, + Labels: lc, + }, + }, + { + TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: DMMasterPodName(upgradeTcName, 1), + Namespace: corev1.NamespaceDefault, + Labels: lc, + }, + }, + { + TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: DMMasterPodName(upgradeTcName, 2), + Namespace: corev1.NamespaceDefault, + Labels: lu, + }, + }, + } + return pods +} diff --git a/pkg/manager/member/dm_worker_failover_test.go b/pkg/manager/member/dm_worker_failover_test.go new file mode 100644 index 00000000000..632c8fe1245 --- /dev/null +++ b/pkg/manager/member/dm_worker_failover_test.go @@ -0,0 +1,295 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "testing" + "time" + + . 
"github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" +) + +func TestWorkerFailoverFailover(t *testing.T) { + tests := []struct { + name string + update func(*v1alpha1.DMCluster) + err bool + expectFn func(t *testing.T, dc *v1alpha1.DMCluster) + }{ + { + name: "normal", + update: func(dc *v1alpha1.DMCluster) { + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "1": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-1", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + "2": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-2", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-61 * time.Minute)}, + }, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(2)) + }, + }, + { + name: "dm-worker stage is not Offline", + update: func(dc *v1alpha1.DMCluster) { + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "1": {Stage: v1alpha1.DMWorkerStateBound, Name: "dm-worker-1"}, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(0)) + }, + }, + { + name: "deadline not exceed", + update: func(dc *v1alpha1.DMCluster) { + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "1": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-1", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-30 * time.Minute)}, + }, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(0)) + }, + }, + { + name: "lastTransitionTime is zero", + update: func(dc *v1alpha1.DMCluster) { + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "1": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-1", + }, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(0)) + }, + }, + { + name: "exist in failureStores", + update: func(dc *v1alpha1.DMCluster) { + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "1": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-1", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + } + dc.Status.Worker.FailureMembers = map[string]v1alpha1.WorkerFailureMember{ + "1": { + PodName: "dm-worker-1", + }, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(1)) + }, + }, + { + name: "not exceed max failover count", + update: func(dc *v1alpha1.DMCluster) { + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "3": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-0", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + "4": { + Stage: v1alpha1.DMWorkerStateFree, + Name: "dm-worker-4", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + "5": { + Stage: v1alpha1.DMWorkerStateFree, + Name: "dm-worker-5", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-61 * time.Minute)}, + }, + } + dc.Status.Worker.FailureMembers = map[string]v1alpha1.WorkerFailureMember{ + "1": { + 
PodName: "dm-worker-1", + }, + "2": { + PodName: "dm-worker-2", + }, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(3)) + }, + }, + { + name: "exceed max failover count1", + update: func(dc *v1alpha1.DMCluster) { + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "3": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-3", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + "4": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-4", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + "5": { + Stage: v1alpha1.DMWorkerStateFree, + Name: "dm-worker-5", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-61 * time.Minute)}, + }, + } + dc.Status.Worker.FailureMembers = map[string]v1alpha1.WorkerFailureMember{ + "1": { + PodName: "dm-worker-1", + }, + "2": { + PodName: "dm-worker-2", + }, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(3)) + }, + }, + { + name: "exceed max failover count2", + update: func(dc *v1alpha1.DMCluster) { + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "0": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-0", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + "4": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-4", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-61 * time.Minute)}, + }, + "5": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-5", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + } + dc.Status.Worker.FailureMembers = map[string]v1alpha1.WorkerFailureMember{ + "1": { + PodName: "dm-worker-1", + }, + "2": { + PodName: "dm-worker-2", + }, + "3": { + PodName: "dm-worker-3", + }, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(3)) + }, + }, + { + name: "exceed max failover count2 but maxFailoverCount = 0", + update: func(dc *v1alpha1.DMCluster) { + dc.Spec.Worker.MaxFailoverCount = pointer.Int32Ptr(0) + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "12": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-12", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + "13": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-13", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-61 * time.Minute)}, + }, + "14": { + Stage: v1alpha1.DMWorkerStateOffline, + Name: "dm-worker-14", + LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, + }, + } + dc.Status.Worker.FailureMembers = map[string]v1alpha1.WorkerFailureMember{ + "1": { + PodName: "dm-worker-1", + }, + "2": { + PodName: "dm-worker-2", + }, + "3": { + PodName: "dm-worker-3", + }, + } + }, + err: false, + expectFn: func(t *testing.T, dc *v1alpha1.DMCluster) { + g := NewGomegaWithT(t) + g.Expect(len(dc.Status.Worker.FailureMembers)).To(Equal(3)) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewGomegaWithT(t) + dc := newDMClusterForMaster() + dc.Spec.Worker.Replicas = 6 + dc.Spec.Worker.MaxFailoverCount = pointer.Int32Ptr(3) + tt.update(dc) + workerFailover := newFakeWorkerFailover() + + err := workerFailover.Failover(dc) + 
if tt.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + tt.expectFn(t, dc) + }) + } +} + +func newFakeWorkerFailover() *workerFailover { + recorder := record.NewFakeRecorder(100) + return &workerFailover{1 * time.Hour, recorder} +} diff --git a/pkg/manager/member/dm_worker_member_manager_test.go b/pkg/manager/member/dm_worker_member_manager_test.go index e537796e4b8..0e7c73cc123 100644 --- a/pkg/manager/member/dm_worker_member_manager_test.go +++ b/pkg/manager/member/dm_worker_member_manager_test.go @@ -193,6 +193,8 @@ func TestWorkerMemberManagerSyncUpdate(t *testing.T) { oldCm *corev1.ConfigMap cm *corev1.ConfigMap getCm error + + triggerDeleteWorker bool } type testcase struct { name string @@ -210,6 +212,7 @@ func TestWorkerMemberManagerSyncUpdate(t *testing.T) { dc := newDMClusterForWorker() ns := dc.Namespace dcName := dc.Name + triggerDeleteWorker := false mmm, ctls, indexers, fakeMasterControl := newFakeWorkerMemberManager() @@ -217,6 +220,10 @@ func TestWorkerMemberManagerSyncUpdate(t *testing.T) { masterClient.AddReaction(dmapi.GetWorkersActionType, func(action *dmapi.Action) (interface{}, error) { return test.workerInfos, nil }) + masterClient.AddReaction(dmapi.DeleteWorkerActionType, func(action *dmapi.Action) (interface{}, error) { + triggerDeleteWorker = true + return nil, nil + }) if test.errOnUpdateSet { ctls.set.SetUpdateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) @@ -259,7 +266,7 @@ func TestWorkerMemberManagerSyncUpdate(t *testing.T) { key, err := client.ObjectKeyFromObject(cm) g.Expect(err).To(Succeed()) getCmErr := ctls.generic.FakeCli.Get(context.TODO(), key, cm) - result := result{syncErr, oldSvc, svc, getSvcErr, oldSet, set, getStsErr, oldCm, cm, getCmErr} + result := result{syncErr, oldSvc, svc, getSvcErr, oldSet, set, getStsErr, oldCm, cm, getCmErr, triggerDeleteWorker} test.expectFn(g, &result) } @@ -281,6 +288,7 @@ func TestWorkerMemberManagerSyncUpdate(t *testing.T) { g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) g.Expect(r.cm.Data["config-file"]).To(ContainSubstring("keepalive-ttl")) g.Expect(*r.set.Spec.Replicas).To(Equal(int32(4))) + g.Expect(r.triggerDeleteWorker).To(BeFalse()) }, workerInfos: nil, }, @@ -301,11 +309,12 @@ func TestWorkerMemberManagerSyncUpdate(t *testing.T) { g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) g.Expect(r.cm.Data["config-file"]).NotTo(ContainSubstring("keepalive-ttl")) g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + g.Expect(r.triggerDeleteWorker).To(BeFalse()) }, workerInfos: []*dmapi.WorkersInfo{ - {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, - {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, - {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 0), Addr: "http://worker0:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 1), Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 2), Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, }, }, { @@ -325,11 +334,12 @@ func TestWorkerMemberManagerSyncUpdate(t *testing.T) { g.Expect(r.svc.Spec.Ports[0].Port).To(Equal(int32(8888))) g.Expect(r.cm.Data["config-file"]).NotTo(ContainSubstring("keepalive-ttl")) g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + 
g.Expect(r.triggerDeleteWorker).To(BeFalse()) }, workerInfos: []*dmapi.WorkersInfo{ - {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, - {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, - {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 0), Addr: "http://worker0:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 1), Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 2), Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, }, }, { @@ -349,11 +359,38 @@ func TestWorkerMemberManagerSyncUpdate(t *testing.T) { g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) g.Expect(r.cm.Data["config-file"]).To(ContainSubstring("keepalive-ttl")) g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + g.Expect(r.triggerDeleteWorker).To(BeFalse()) }, workerInfos: []*dmapi.WorkersInfo{ - {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, - {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, - {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 0), Addr: "http://worker0:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 1), Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 2), Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + }, + }, + { + name: "offline scaled dm-worker", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + dc.Spec.Worker.Replicas = 3 + }, + errOnUpdateCm: false, + errOnUpdateSvc: false, + errOnUpdateSet: true, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).To(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + g.Expect(r.triggerDeleteWorker).To(BeTrue()) + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 0), Addr: "http://worker0:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 1), Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 2), Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: ordinalPodName(v1alpha1.DMWorkerMemberType, "test", 3), Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateOffline}, }, }, } diff --git a/pkg/manager/member/dm_worker_scaler_test.go b/pkg/manager/member/dm_worker_scaler_test.go new file mode 100644 index 00000000000..b9f855b3271 --- /dev/null +++ b/pkg/manager/member/dm_worker_scaler_test.go @@ -0,0 +1,276 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "fmt" + "testing" + "time" + + "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/dmapi" + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/label" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/utils/pointer" +) + +func TestWorkerScalerScaleOut(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + update func(cluster *v1alpha1.DMCluster) + hasPVC bool + hasDeferAnn bool + annoIsNil bool + pvcDeleteErr bool + statusSyncFailed bool + err bool + changed bool + } + + testFn := func(test testcase, t *testing.T) { + dc := newDMClusterForWorker() + test.update(dc) + + oldSet := newStatefulSetForDMScale() + newSet := oldSet.DeepCopy() + newSet.Spec.Replicas = pointer.Int32Ptr(7) + + scaler, _, pvcIndexer, pvcControl := newFakeWorkerScaler() + + pvc := newPVCForStatefulSet(oldSet, v1alpha1.DMWorkerMemberType, dc.Name) + pvc.Name = ordinalPVCName(v1alpha1.DMWorkerMemberType, oldSet.GetName(), *oldSet.Spec.Replicas) + if !test.annoIsNil { + pvc.Annotations = map[string]string{} + } + + if test.hasDeferAnn { + pvc.Annotations = map[string]string{} + pvc.Annotations[label.AnnPVCDeferDeleting] = time.Now().Format(time.RFC3339) + } + if test.hasPVC { + pvcIndexer.Add(pvc) + } + + if test.pvcDeleteErr { + pvcControl.SetDeletePVCError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + dc.Status.Worker.Synced = !test.statusSyncFailed + + err := scaler.ScaleOut(dc, oldSet, newSet) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + if test.changed { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(6)) + } else { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(5)) + } + } + + tests := []testcase{ + { + name: "normal", + update: normalWorkerMember, + hasPVC: true, + hasDeferAnn: false, + annoIsNil: true, + pvcDeleteErr: false, + statusSyncFailed: false, + err: false, + changed: true, + }, + { + name: "cache don't have pvc", + update: normalWorkerMember, + hasPVC: false, + hasDeferAnn: false, + annoIsNil: true, + pvcDeleteErr: false, + statusSyncFailed: false, + err: false, + changed: true, + }, + { + name: "pvc annotation is not nil but doesn't contain defer deletion annotation", + update: normalWorkerMember, + hasPVC: true, + hasDeferAnn: false, + annoIsNil: false, + pvcDeleteErr: false, + statusSyncFailed: false, + err: false, + changed: true, + }, + { + name: "pvc annotations defer deletion is not nil, pvc delete failed", + update: normalWorkerMember, + hasPVC: true, + hasDeferAnn: true, + annoIsNil: false, + pvcDeleteErr: true, + statusSyncFailed: false, + err: true, + changed: false, + }, + { + name: "dm-worker status sync failed", + update: normalWorkerMember, + hasPVC: true, + hasDeferAnn: false, + annoIsNil: true, + pvcDeleteErr: false, + statusSyncFailed: true, + err: true, + changed: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testFn(tt, t) + }) 
+ } +} + +func TestWorkerScalerScaleIn(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + hasPVC bool + pvcUpdateErr bool + deleteMemberErr bool + statusSyncFailed bool + err bool + changed bool + isMemberStillRemain bool + isLeader bool + } + + testFn := func(test testcase, t *testing.T) { + dc := newDMClusterForWorker() + oldSet := newStatefulSetForDMScale() + newSet := oldSet.DeepCopy() + newSet.Spec.Replicas = pointer.Int32Ptr(3) + + scaler, _, pvcIndexer, pvcControl := newFakeWorkerScaler() + + if test.hasPVC { + pvc := newScaleInPVCForStatefulSet(oldSet, v1alpha1.DMWorkerMemberType, dc.Name) + pvcIndexer.Add(pvc) + } + + if test.pvcUpdateErr { + pvcControl.SetUpdatePVCError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + dc.Status.Worker.Synced = !test.statusSyncFailed + + err := scaler.ScaleIn(dc, oldSet, newSet) + if test.err { + g.Expect(err).To(HaveOccurred()) + if test.isLeader { + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + } + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + if test.changed { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(4)) + } else { + g.Expect(int(*newSet.Spec.Replicas)).To(Equal(5)) + } + } + + tests := []testcase{ + { + name: "normal", + hasPVC: true, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: false, + err: false, + changed: true, + isMemberStillRemain: false, + }, + { + name: "cache don't have pvc", + hasPVC: false, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: false, + err: true, + changed: false, + isMemberStillRemain: false, + }, + { + name: "error when update pvc", + hasPVC: true, + pvcUpdateErr: true, + deleteMemberErr: false, + statusSyncFailed: false, + err: true, + changed: false, + isMemberStillRemain: false, + }, + { + name: "dm-worker status sync failed", + hasPVC: true, + pvcUpdateErr: false, + deleteMemberErr: false, + statusSyncFailed: true, + err: true, + changed: false, + isMemberStillRemain: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testFn(tt, t) + }) + } +} + +func newFakeWorkerScaler() (*workerScaler, *dmapi.FakeMasterControl, cache.Indexer, *controller.FakePVCControl) { + kubeCli := kubefake.NewSimpleClientset() + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeCli, 0) + pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims() + masterControl := dmapi.NewFakeMasterControl(kubeCli) + pvcControl := controller.NewFakePVCControl(pvcInformer) + + return &workerScaler{generalScaler{pvcInformer.Lister(), pvcControl}}, + masterControl, pvcInformer.Informer().GetIndexer(), pvcControl +} + +func normalWorkerMember(dc *v1alpha1.DMCluster) { + dcName := dc.GetName() + dc.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + ordinalPodName(v1alpha1.DMWorkerMemberType, dcName, 0): {Stage: v1alpha1.DMWorkerStateFree}, + ordinalPodName(v1alpha1.DMWorkerMemberType, dcName, 1): {Stage: v1alpha1.DMWorkerStateFree}, + ordinalPodName(v1alpha1.DMWorkerMemberType, dcName, 2): {Stage: v1alpha1.DMWorkerStateFree}, + ordinalPodName(v1alpha1.DMWorkerMemberType, dcName, 3): {Stage: v1alpha1.DMWorkerStateFree}, + ordinalPodName(v1alpha1.DMWorkerMemberType, dcName, 4): {Stage: v1alpha1.DMWorkerStateFree}, + } +} diff --git a/pkg/manager/member/orphan_pods_cleaner.go b/pkg/manager/member/orphan_pods_cleaner.go index b96188913a9..258dbf409f7 100644 --- a/pkg/manager/member/orphan_pods_cleaner.go +++ b/pkg/manager/member/orphan_pods_cleaner.go @@ -99,7 +99,7 @@ func 
(opc *orphanPodsCleaner) Clean(meta metav1.Object) (map[string]string, erro for _, pod := range pods { podName := pod.GetName() l := label.Label(pod.Labels) - if !(l.IsPD() || l.IsTiKV() || l.IsTiFlash() || l.IsDMMaster() || l.IsDMMaster()) { + if !(l.IsPD() || l.IsTiKV() || l.IsTiFlash() || l.IsDMMaster() || l.IsDMWorker()) { skipReason[podName] = skipReasonOrphanPodsCleanerIsNotTarget continue } diff --git a/pkg/manager/member/orphan_pods_cleaner_test.go b/pkg/manager/member/orphan_pods_cleaner_test.go index fecb4fd1e26..d94be499190 100644 --- a/pkg/manager/member/orphan_pods_cleaner_test.go +++ b/pkg/manager/member/orphan_pods_cleaner_test.go @@ -33,6 +33,7 @@ func TestOrphanPodsCleanerClean(t *testing.T) { g := NewGomegaWithT(t) tc := newTidbClusterForPD() + dc := newDMClusterForMaster() tests := []struct { name string @@ -40,6 +41,7 @@ func TestOrphanPodsCleanerClean(t *testing.T) { apiPods []*corev1.Pod pvcs []*corev1.PersistentVolumeClaim deletePodFailed bool + testOnDM bool expectFn func(*GomegaWithT, map[string]string, *orphanPodsCleaner, error) }{ { @@ -90,6 +92,50 @@ func TestOrphanPodsCleanerClean(t *testing.T) { g.Expect(skipReason["pod-1"]).To(Equal(skipReasonOrphanPodsCleanerPVCNameIsEmpty)) }, }, + { + name: "has no spec.volumes for dm-master", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1", + Namespace: metav1.NamespaceDefault, + Labels: label.NewDM().Instance(dc.GetInstanceName()).DMMaster().Labels(), + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + }, + }, + }, + pvcs: nil, + testOnDM: true, + expectFn: func(g *GomegaWithT, skipReason map[string]string, _ *orphanPodsCleaner, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(len(skipReason)).To(Equal(1)) + g.Expect(skipReason["pod-1"]).To(Equal(skipReasonOrphanPodsCleanerPVCNameIsEmpty)) + }, + }, + { + name: "has no spec.volumes for dm-worker", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1", + Namespace: metav1.NamespaceDefault, + Labels: label.NewDM().Instance(dc.GetInstanceName()).DMWorker().Labels(), + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + }, + }, + }, + pvcs: nil, + testOnDM: true, + expectFn: func(g *GomegaWithT, skipReason map[string]string, _ *orphanPodsCleaner, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(len(skipReason)).To(Equal(1)) + g.Expect(skipReason["pod-1"]).To(Equal(skipReasonOrphanPodsCleanerPVCNameIsEmpty)) + }, + }, { name: "claimName is empty", pods: []*corev1.Pod{ @@ -583,7 +629,14 @@ func TestOrphanPodsCleanerClean(t *testing.T) { podControl.SetDeletePodError(fmt.Errorf("delete pod failed"), 0) } - skipReason, err := opc.Clean(tc) + var skipReason map[string]string + var err error + + if tt.testOnDM { + skipReason, err = opc.Clean(dc) + } else { + skipReason, err = opc.Clean(tc) + } tt.expectFn(g, skipReason, opc, err) }) } diff --git a/pkg/manager/member/pd_scaler_test.go b/pkg/manager/member/pd_scaler_test.go index 3e1c8f0e2a2..b0c719112e3 100644 --- a/pkg/manager/member/pd_scaler_test.go +++ b/pkg/manager/member/pd_scaler_test.go @@ -453,7 +453,13 @@ func newStatefulSetForPDScale() *apps.StatefulSet { func newPVCForStatefulSet(set *apps.StatefulSet, memberType v1alpha1.MemberType, name string) *corev1.PersistentVolumeClaim { podName := ordinalPodName(memberType, name, *set.Spec.Replicas) - l := label.New().Instance(name) + var l label.Label + switch memberType { + case v1alpha1.DMMasterMemberType, v1alpha1.DMWorkerMemberType: + l = label.NewDM().Instance(name) + default: + 
l = label.New().Instance(name) + } l[label.AnnPodNameKey] = podName return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -466,7 +472,13 @@ func newPVCForStatefulSet(set *apps.StatefulSet, memberType v1alpha1.MemberType, func newScaleInPVCForStatefulSet(set *apps.StatefulSet, memberType v1alpha1.MemberType, name string) *corev1.PersistentVolumeClaim { podName := ordinalPodName(memberType, name, *set.Spec.Replicas-1) - l := label.New().Instance(name) + var l label.Label + switch memberType { + case v1alpha1.DMMasterMemberType, v1alpha1.DMWorkerMemberType: + l = label.NewDM().Instance(name) + default: + l = label.New().Instance(name) + } l[label.AnnPodNameKey] = podName return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/manager/member/pvc_resizer.go b/pkg/manager/member/pvc_resizer.go index fb7bb76a02c..ca09aa4d0b3 100644 --- a/pkg/manager/member/pvc_resizer.go +++ b/pkg/manager/member/pvc_resizer.go @@ -130,7 +130,7 @@ func (p *pvcResizer) Resize(tc *v1alpha1.TidbCluster) error { } func (p *pvcResizer) ResizeDM(dc *v1alpha1.DMCluster) error { - selector, err := label.New().Instance(dc.GetInstanceName()).Selector() + selector, err := label.NewDM().Instance(dc.GetInstanceName()).Selector() if err != nil { return err } diff --git a/pkg/manager/member/pvc_resizer_test.go b/pkg/manager/member/pvc_resizer_test.go index 58743b36980..e988ed967fc 100644 --- a/pkg/manager/member/pvc_resizer_test.go +++ b/pkg/manager/member/pvc_resizer_test.go @@ -31,15 +31,15 @@ import ( "k8s.io/utils/pointer" ) -func newPVCWithStorage(name string, component string, storaegClass, storageRequest string) *v1.PersistentVolumeClaim { +func newFullPVC(name, component, storageClass, storageRequest, nameLabel, instance string) *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: v1.NamespaceDefault, Name: name, Labels: map[string]string{ - label.NameLabelKey: "tidb-cluster", + label.NameLabelKey: nameLabel, label.ManagedByLabelKey: label.TiDBOperator, - label.InstanceLabelKey: "tc", + label.InstanceLabelKey: instance, label.ComponentLabelKey: component, }, }, @@ -49,11 +49,19 @@ func newPVCWithStorage(name string, component string, storaegClass, storageReque v1.ResourceStorage: resource.MustParse(storageRequest), }, }, - StorageClassName: pointer.StringPtr(storaegClass), + StorageClassName: pointer.StringPtr(storageClass), }, } } +func newPVCWithStorage(name string, component string, storageClass, storageRequest string) *v1.PersistentVolumeClaim { + return newFullPVC(name, component, storageClass, storageRequest, "tidb-cluster", "tc") +} + +func newDMPVCWithStorage(name string, component string, storageClass, storageRequest string) *v1.PersistentVolumeClaim { + return newFullPVC(name, component, storageClass, storageRequest, "dm-cluster", "dc") +} + func newStorageClass(name string, volumeExpansion bool) *storagev1.StorageClass { return &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ @@ -317,3 +325,108 @@ func TestPVCResizer(t *testing.T) { }) } } + +func TestDMPVCResizer(t *testing.T) { + tests := []struct { + name string + dc *v1alpha1.DMCluster + sc *storagev1.StorageClass + pvcs []*v1.PersistentVolumeClaim + wantPVCs []*v1.PersistentVolumeClaim + wantErr error + }{ + { + name: "no PVCs", + dc: &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{}, + }, + }, + { + name: "resize dm-master PVCs", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: v1.NamespaceDefault, + Name: "dc", + }, + 
Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + StorageSize: "2Gi", + }, + }, + }, + sc: newStorageClass("sc", true), + pvcs: []*v1.PersistentVolumeClaim{ + newDMPVCWithStorage("dm-master-0", label.DMMasterLabelVal, "sc", "1Gi"), + newDMPVCWithStorage("dm-master-1", label.DMMasterLabelVal, "sc", "1Gi"), + newDMPVCWithStorage("dm-master-2", label.DMMasterLabelVal, "sc", "1Gi"), + }, + wantPVCs: []*v1.PersistentVolumeClaim{ + newDMPVCWithStorage("dm-master-0", label.DMMasterLabelVal, "sc", "2Gi"), + newDMPVCWithStorage("dm-master-1", label.DMMasterLabelVal, "sc", "2Gi"), + newDMPVCWithStorage("dm-master-2", label.DMMasterLabelVal, "sc", "2Gi"), + }, + }, + { + name: "resize dm-worker PVCs", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: v1.NamespaceDefault, + Name: "dc", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + StorageSize: "2Gi", + }, + }, + }, + sc: newStorageClass("sc", true), + pvcs: []*v1.PersistentVolumeClaim{ + newDMPVCWithStorage("dm-worker-0", label.DMWorkerLabelVal, "sc", "1Gi"), + newDMPVCWithStorage("dm-worker-1", label.DMWorkerLabelVal, "sc", "1Gi"), + newDMPVCWithStorage("dm-worker-2", label.DMWorkerLabelVal, "sc", "1Gi"), + }, + wantPVCs: []*v1.PersistentVolumeClaim{ + newDMPVCWithStorage("dm-worker-0", label.DMWorkerLabelVal, "sc", "2Gi"), + newDMPVCWithStorage("dm-worker-1", label.DMWorkerLabelVal, "sc", "2Gi"), + newDMPVCWithStorage("dm-worker-2", label.DMWorkerLabelVal, "sc", "2Gi"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + kubeCli := fake.NewSimpleClientset() + for _, pvc := range tt.pvcs { + kubeCli.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + } + if tt.sc != nil { + kubeCli.StorageV1().StorageClasses().Create(tt.sc) + } + + informerFactory := informers.NewSharedInformerFactory(kubeCli, 0) + resizer := NewPVCResizer(kubeCli, informerFactory.Core().V1().PersistentVolumeClaims(), informerFactory.Storage().V1().StorageClasses()) + + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + + err := resizer.ResizeDM(tt.dc) + if !reflect.DeepEqual(tt.wantErr, err) { + t.Errorf("want %v, got %v", tt.wantErr, err) + } + + for i, pvc := range tt.pvcs { + wantPVC := tt.wantPVCs[i] + got, err := kubeCli.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(wantPVC, got); diff != "" { + t.Errorf("unexpected (-want, +got): %s", diff) + } + } + }) + } +} diff --git a/pkg/manager/meta/reclaim_policy_manager_test.go b/pkg/manager/meta/reclaim_policy_manager_test.go index 61911d2c88b..7a1e95e8c1c 100644 --- a/pkg/manager/meta/reclaim_policy_manager_test.go +++ b/pkg/manager/meta/reclaim_policy_manager_test.go @@ -44,11 +44,17 @@ func TestReclaimPolicyManagerSync(t *testing.T) { hasDeferDeleteAnn bool } - testFn := func(test *testcase, t *testing.T) { - t.Log(test.name) - tc := newTidbClusterForMeta() + testFn := func(test *testcase, t *testing.T, kind string) { + t.Log(test.name + ": " + kind) + var obj metav1.Object + switch kind { + case v1alpha1.TiDBClusterKind: + obj = newTidbClusterForMeta() + case v1alpha1.DMClusterKind: + obj = newDMClusterForMeta() + } pv1 := newPV("1") - pvc1 := newPVC(tc, "1") + pvc1 := newPVC(obj, "1") if !test.pvcHasLabels { pvc1.Labels = nil @@ -56,7 +62,12 @@ func TestReclaimPolicyManagerSync(t *testing.T) { if !test.pvcHasVolumeName { 
pvc1.Spec.VolumeName = "" } - tc.Spec.EnablePVReclaim = &test.enablePVRecalim + switch kind { + case v1alpha1.TiDBClusterKind: + obj.(*v1alpha1.TidbCluster).Spec.EnablePVReclaim = &test.enablePVRecalim + case v1alpha1.DMClusterKind: + obj.(*v1alpha1.DMCluster).Spec.EnablePVReclaim = &test.enablePVRecalim + } if test.hasDeferDeleteAnn { pvc1.Annotations = map[string]string{label.AnnPVCDeferDeleting: time.Now().String()} } @@ -71,7 +82,12 @@ func TestReclaimPolicyManagerSync(t *testing.T) { fakePVControl.SetUpdatePVError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) } - err = rpm.Sync(tc) + switch kind { + case v1alpha1.TiDBClusterKind: + err = rpm.Sync(obj.(*v1alpha1.TidbCluster)) + case v1alpha1.DMClusterKind: + err = rpm.SyncDM(obj.(*v1alpha1.DMCluster)) + } if test.err { g.Expect(err).To(HaveOccurred()) pv, err := rpm.pvLister.Get(pv1.Name) @@ -144,7 +160,8 @@ func TestReclaimPolicyManagerSync(t *testing.T) { } for i := range tests { - testFn(&tests[i], t) + testFn(&tests[i], t, v1alpha1.TiDBClusterKind) + testFn(&tests[i], t, v1alpha1.DMClusterKind) } } @@ -183,6 +200,25 @@ func newTidbClusterForMeta() *v1alpha1.TidbCluster { } } +func newDMClusterForMeta() *v1alpha1.DMCluster { + pvp := corev1.PersistentVolumeReclaimRetain + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: controller.TestClusterName, + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + Labels: label.NewDM().Instance(controller.TestClusterName), + }, + Spec: v1alpha1.DMClusterSpec{ + PVReclaimPolicy: &pvp, + }, + } +} + func newPV(index string) *corev1.PersistentVolume { return &corev1.PersistentVolume{ TypeMeta: metav1.TypeMeta{ @@ -207,7 +243,14 @@ func newPV(index string) *corev1.PersistentVolume { } } -func newPVC(tc *v1alpha1.TidbCluster, index string) *corev1.PersistentVolumeClaim { +func newPVC(obj metav1.Object, index string) *corev1.PersistentVolumeClaim { + nameLabel := controller.TestName + componentLabel := controller.TestComponentName + if _, ok := obj.(*v1alpha1.DMCluster); ok { + nameLabel = "dm-cluster" + componentLabel = "dm-master" + } + return &corev1.PersistentVolumeClaim{ TypeMeta: metav1.TypeMeta{ Kind: "PersistentVolumeClaim", @@ -218,10 +261,10 @@ func newPVC(tc *v1alpha1.TidbCluster, index string) *corev1.PersistentVolumeClai Namespace: corev1.NamespaceDefault, UID: types.UID("pvc" + index), Labels: map[string]string{ - label.NameLabelKey: controller.TestName, - label.ComponentLabelKey: controller.TestComponentName, + label.NameLabelKey: nameLabel, + label.ComponentLabelKey: componentLabel, label.ManagedByLabelKey: controller.TestManagedByName, - label.InstanceLabelKey: tc.GetName(), + label.InstanceLabelKey: obj.GetName(), }, }, Spec: corev1.PersistentVolumeClaimSpec{ From 78f0e01b7b41321868328e204f3e5dc4fcc6763f Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Sun, 27 Sep 2020 20:29:48 +0800 Subject: [PATCH 7/7] fix lint error --- pkg/manager/member/dm_worker_scaler_test.go | 72 +++++++++------------ 1 file changed, 31 insertions(+), 41 deletions(-) diff --git a/pkg/manager/member/dm_worker_scaler_test.go b/pkg/manager/member/dm_worker_scaler_test.go index b9f855b3271..23a38eb51cc 100644 --- a/pkg/manager/member/dm_worker_scaler_test.go +++ b/pkg/manager/member/dm_worker_scaler_test.go @@ -156,15 +156,13 @@ func TestWorkerScalerScaleOut(t *testing.T) { func TestWorkerScalerScaleIn(t *testing.T) { g := NewGomegaWithT(t) type testcase struct { - name string 
- hasPVC bool - pvcUpdateErr bool - deleteMemberErr bool - statusSyncFailed bool - err bool - changed bool - isMemberStillRemain bool - isLeader bool + name string + hasPVC bool + pvcUpdateErr bool + statusSyncFailed bool + err bool + changed bool + isLeader bool } testFn := func(test testcase, t *testing.T) { @@ -204,44 +202,36 @@ func TestWorkerScalerScaleIn(t *testing.T) { tests := []testcase{ { - name: "normal", - hasPVC: true, - pvcUpdateErr: false, - deleteMemberErr: false, - statusSyncFailed: false, - err: false, - changed: true, - isMemberStillRemain: false, + name: "normal", + hasPVC: true, + pvcUpdateErr: false, + statusSyncFailed: false, + err: false, + changed: true, }, { - name: "cache don't have pvc", - hasPVC: false, - pvcUpdateErr: false, - deleteMemberErr: false, - statusSyncFailed: false, - err: true, - changed: false, - isMemberStillRemain: false, + name: "cache don't have pvc", + hasPVC: false, + pvcUpdateErr: false, + statusSyncFailed: false, + err: true, + changed: false, }, { - name: "error when update pvc", - hasPVC: true, - pvcUpdateErr: true, - deleteMemberErr: false, - statusSyncFailed: false, - err: true, - changed: false, - isMemberStillRemain: false, + name: "error when update pvc", + hasPVC: true, + pvcUpdateErr: true, + statusSyncFailed: false, + err: true, + changed: false, }, { - name: "dm-worker status sync failed", - hasPVC: true, - pvcUpdateErr: false, - deleteMemberErr: false, - statusSyncFailed: true, - err: true, - changed: false, - isMemberStillRemain: false, + name: "dm-worker status sync failed", + hasPVC: true, + pvcUpdateErr: false, + statusSyncFailed: true, + err: true, + changed: false, }, }