Commit
Merge pull request kubernetes#103405 from ravisantoshgudimetla/automated-cherry-pick-of-#103160-upstream-release-1.21

Automated cherry pick of kubernetes#103160: Remove extra zone test
k8s-ci-robot authored Aug 6, 2021
2 parents fbf1915 + aac1ab5 commit 25c25b6
Showing 1 changed file with 0 additions and 150 deletions.
150 changes: 0 additions & 150 deletions test/e2e/storage/ubernetes_lite_volumes.go
@@ -19,20 +19,13 @@ package storage
import (
	"context"
	"fmt"
	"strconv"

	"github.com/onsi/ginkgo"
	compute "google.golang.org/api/compute/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -57,130 +50,8 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() {
ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func() {
PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image)
})

ginkgo.It("should only be allowed to provision PDs in zones where nodes exist", func() {
OnlyAllowNodeZones(f, zoneCount, image)
})
})

// OnlyAllowNodeZones tests that PDs are only provisioned in zones with nodes.
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
	gceCloud, err := gce.GetGCECloud()
	framework.ExpectNoError(err)

	// Get all the zones that the nodes are in
	expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
	framework.ExpectNoError(err)
	framework.Logf("Expected zones: %v", expectedZones)

	// Get all the zones in this current region
	region := gceCloud.Region()
	allZonesInRegion, err := gceCloud.ListZonesInRegion(region)
	framework.ExpectNoError(err)

	var extraZone string
	for _, zone := range allZonesInRegion {
		if !expectedZones.Has(zone.Name) {
			extraZone = zone.Name
			break
		}
	}

	if extraZone == "" {
		e2eskipper.Skipf("All zones in region %s have compute instances, no extra zones available", region)
	}

	ginkgo.By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone))
	project := framework.TestContext.CloudConfig.ProjectID
	zone := extraZone
	myuuid := string(uuid.NewUUID())
	name := "compute-" + myuuid
	imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606"

	rb := &compute.Instance{
		MachineType: "zones/" + zone + "/machineTypes/f1-micro",
		Disks: []*compute.AttachedDisk{
			{
				AutoDelete: true,
				Boot:       true,
				Type:       "PERSISTENT",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					DiskName:    "my-root-pd-" + myuuid,
					SourceImage: imageURL,
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				AccessConfigs: []*compute.AccessConfig{
					{
						Type: "ONE_TO_ONE_NAT",
						Name: "External NAT",
					},
				},
				Network: "/global/networks/default",
			},
		},
		Name: name,
	}

	err = gceCloud.InsertInstance(project, zone, rb)
	framework.ExpectNoError(err)

	defer func() {
		// Teardown of the compute instance
		framework.Logf("Deleting compute resource: %v", name)
		err := gceCloud.DeleteInstance(project, zone, name)
		framework.ExpectNoError(err)
	}()

	ginkgo.By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
	// Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1
	// This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees"
	var pvcList []*v1.PersistentVolumeClaim
	c := f.ClientSet
	ns := f.Namespace.Name

	for index := 1; index <= zoneCount+1; index++ {
		pvc := newNamedDefaultClaim(ns, index)
		pvc, err = e2epv.CreatePVC(c, ns, pvc)
		framework.ExpectNoError(err)
		pvcList = append(pvcList, pvc)

		// Defer the cleanup
		defer func() {
			framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
			err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
			if err != nil {
				framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
			}
		}()
	}

	// Wait for all claims bound
	for _, claim := range pvcList {
		err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
		framework.ExpectNoError(err)
	}

	pvZones := sets.NewString()
	ginkgo.By("Checking that PDs have been provisioned in only the expected zones")
	for _, claim := range pvcList {
		// Get a new copy of the claim to have all fields populated
		claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		// Get the related PV
		pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
		framework.ExpectNoError(err)

		pvZone, ok := pv.ObjectMeta.Labels[v1.LabelFailureDomainBetaZone]
		framework.ExpectEqual(ok, true, "PV has no LabelZone to be found")
		pvZones.Insert(pvZone)
	}
	framework.ExpectEqual(pvZones.Equal(expectedZones), true, fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
}

// Return the number of zones in which we have nodes in this cluster.
func getZoneCount(c clientset.Interface) (int, error) {
	zoneNames, err := e2enode.GetClusterZones(c)
@@ -261,24 +132,3 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
		framework.ExpectNoError(err)
	}
}

func newNamedDefaultClaim(ns string, index int) *v1.PersistentVolumeClaim {
	claim := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pvc-" + strconv.Itoa(index),
			Namespace: ns,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
				},
			},
		},
	}

	return &claim
}
