fix e2e: add context
cvvz committed Apr 24, 2023
1 parent 9c903ce commit 3ad423d
Showing 25 changed files with 390 additions and 364 deletions.
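
The same pattern repeats across every file below: recent Ginkgo v2 releases (v2.3 and later) inject a per-spec context when a node function declares a ginkgo.SpecContext parameter, and this commit threads that context, rather than context.Background() or context.TODO(), through to the CSI and Azure API calls so they are cancelled when a spec times out or is interrupted. A minimal sketch of the pattern, where doSomething is a hypothetical stand-in for calls like azurefileDriver.CreateVolume:

package e2e

import (
    "context"

    "github.com/onsi/ginkgo/v2"
)

// doSomething stands in for any context-aware helper call
// (the helper itself is hypothetical, not part of this repo).
func doSomething(ctx context.Context) error {
    select {
    case <-ctx.Done():
        return ctx.Err() // the spec timed out or was interrupted
    default:
        return nil
    }
}

// Declaring a ginkgo.SpecContext parameter makes the spec interruptible:
// Ginkgo cancels ctx on timeout, interrupt, or abort.
var _ = ginkgo.It("threads the spec context through helpers", func(ctx ginkgo.SpecContext) {
    if err := doSomething(ctx); err != nil {
        ginkgo.Fail(err.Error())
    }
})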
147 changes: 73 additions & 74 deletions test/e2e/dynamic_provisioning_test.go

Large diffs are not rendered by default.

37 changes: 18 additions & 19 deletions test/e2e/pre_provisioning_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package e2e
 
 import (
-    "context"
     "fmt"
 
     "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver"
@@ -50,7 +49,7 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
         skipManuallyDeletingVolume bool
     )
 
-    ginkgo.BeforeEach(func() {
+    ginkgo.BeforeEach(func(ctx ginkgo.SpecContext) {
         checkPodsRestart := testCmd{
             command: "bash",
             args:    []string{"test/utils/check_driver_pods_restart.sh"},
@@ -64,24 +63,24 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
         testDriver = driver.InitAzureFileDriver()
     })
 
-    ginkgo.AfterEach(func() {
+    ginkgo.AfterEach(func(ctx ginkgo.SpecContext) {
         if !skipManuallyDeletingVolume {
             req := &csi.DeleteVolumeRequest{
                 VolumeId: volumeID,
             }
-            _, err := azurefileDriver.DeleteVolume(context.Background(), req)
+            _, err := azurefileDriver.DeleteVolume(ctx, req)
             if err != nil {
                 ginkgo.Fail(fmt.Sprintf("delete volume %q error: %v", volumeID, err))
             }
         }
     })
 
-    ginkgo.It("should use a pre-provisioned volume and mount it as readOnly in a pod [file.csi.azure.com] [Windows]", func() {
+    ginkgo.It("should use a pre-provisioned volume and mount it as readOnly in a pod [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) {
         // Az tests are not yet working for in-tree
         skipIfUsingInTreeVolumePlugin()
 
         req := makeCreateVolumeReq("pre-provisioned-readonly", ns.Name)
-        resp, err := azurefileDriver.CreateVolume(context.Background(), req)
+        resp, err := azurefileDriver.CreateVolume(ctx, req)
         if err != nil {
             ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
         }
@@ -111,15 +110,15 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
             CSIDriver: testDriver,
             Pods:      pods,
         }
-        test.Run(cs, ns)
+        test.Run(ctx, cs, ns)
     })
 
-    ginkgo.It("should use a pre-provisioned volume and mount it by multiple pods [file.csi.azure.com] [Windows]", func() {
+    ginkgo.It("should use a pre-provisioned volume and mount it by multiple pods [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) {
         // Az tests are not yet working for in-tree
         skipIfUsingInTreeVolumePlugin()
 
         req := makeCreateVolumeReq("pre-provisioned-multiple-pods", ns.Name)
-        resp, err := azurefileDriver.CreateVolume(context.Background(), req)
+        resp, err := azurefileDriver.CreateVolume(ctx, req)
         if err != nil {
             ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
         }
@@ -152,15 +151,15 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
             CSIDriver: testDriver,
             Pods:      pods,
         }
-        test.Run(cs, ns)
+        test.Run(ctx, cs, ns)
     })
 
-    ginkgo.It(fmt.Sprintf("should use a pre-provisioned volume and retain PV with reclaimPolicy %q [file.csi.azure.com] [Windows]", v1.PersistentVolumeReclaimRetain), func() {
+    ginkgo.It(fmt.Sprintf("should use a pre-provisioned volume and retain PV with reclaimPolicy %q [file.csi.azure.com] [Windows]", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) {
         // Az tests are not yet working for in tree driver
         skipIfUsingInTreeVolumePlugin()
 
         req := makeCreateVolumeReq("pre-provisioned-retain-reclaimpolicy", ns.Name)
-        resp, err := azurefileDriver.CreateVolume(context.Background(), req)
+        resp, err := azurefileDriver.CreateVolume(ctx, req)
         if err != nil {
             ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
         }
@@ -180,15 +179,15 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
             CSIDriver: testDriver,
             Volumes:   volumes,
         }
-        test.Run(cs, ns)
+        test.Run(ctx, cs, ns)
     })
 
-    ginkgo.It("should use existing credentials in k8s cluster [file.csi.azure.com] [Windows]", func() {
+    ginkgo.It("should use existing credentials in k8s cluster [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) {
         // Az tests are not yet working for in tree driver
         skipIfUsingInTreeVolumePlugin()
 
         req := makeCreateVolumeReq("pre-provisioned-existing-credentials", ns.Name)
-        resp, err := azurefileDriver.CreateVolume(context.Background(), req)
+        resp, err := azurefileDriver.CreateVolume(ctx, req)
         if err != nil {
             ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
         }
@@ -224,15 +223,15 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
             Pods:      pods,
             Azurefile: azurefileDriver,
         }
-        test.Run(cs, ns)
+        test.Run(ctx, cs, ns)
     })
 
-    ginkgo.It("should use provided credentials [file.csi.azure.com] [Windows]", func() {
+    ginkgo.It("should use provided credentials [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) {
         // Az tests are not yet working for in tree driver
         skipIfUsingInTreeVolumePlugin()
 
         req := makeCreateVolumeReq("pre-provisioned-provided-credentials", ns.Name)
-        resp, err := azurefileDriver.CreateVolume(context.Background(), req)
+        resp, err := azurefileDriver.CreateVolume(ctx, req)
         if err != nil {
             ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
         }
@@ -269,7 +268,7 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() {
             Pods:      pods,
             Azurefile: azurefileDriver,
         }
-        test.Run(cs, ns)
+        test.Run(ctx, cs, ns)
     })
 })
 
12 changes: 6 additions & 6 deletions test/e2e/suite_test.go
@@ -72,7 +72,7 @@ type testCmd struct {
     endLog string
 }
 
-var _ = ginkgo.BeforeSuite(func() {
+var _ = ginkgo.BeforeSuite(func(ctx ginkgo.SpecContext) {
     log.Println(driver.AzureDriverNameVar, os.Getenv(driver.AzureDriverNameVar), fmt.Sprintf("%v", isUsingInTreeVolumePlugin))
     log.Println(testMigrationEnvVar, os.Getenv(testMigrationEnvVar), fmt.Sprintf("%v", isTestingMigration))
     log.Println(testWindowsEnvVar, os.Getenv(testWindowsEnvVar), fmt.Sprintf("%v", isWindowsCluster))
@@ -94,7 +94,7 @@ var _ = ginkgo.BeforeSuite(func() {
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
     azureClient, err := azure.GetAzureClient(creds.Cloud, creds.SubscriptionID, creds.AADClientID, creds.TenantID, creds.AADClientSecret)
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
-    _, err = azureClient.EnsureResourceGroup(context.Background(), creds.ResourceGroup, creds.Location, nil)
+    _, err = azureClient.EnsureResourceGroup(ctx, creds.ResourceGroup, creds.Location, nil)
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
     // check whether current region supports Premium_ZRS with NFS protocol
@@ -145,7 +145,7 @@ var _ = ginkgo.BeforeSuite(func() {
     }
 })
 
-var _ = ginkgo.AfterSuite(func() {
+var _ = ginkgo.AfterSuite(func(ctx ginkgo.SpecContext) {
     if testutil.IsRunningInProw() {
         if isTestingMigration || isUsingInTreeVolumePlugin {
             cmLog := testCmd{
@@ -221,7 +221,7 @@ var _ = ginkgo.AfterSuite(func() {
         execTestCmd([]testCmd{installDriver, uninstallDriver})
     }
 
-    checkAccountCreationLeak()
+    checkAccountCreationLeak(ctx)
 
     err := credentials.DeleteAzureCredentialFile()
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -263,13 +263,13 @@ func execTestCmd(cmds []testCmd) {
     }
 }
 
-func checkAccountCreationLeak() {
+func checkAccountCreationLeak(ctx context.Context) {
     creds, err := credentials.CreateAzureCredentialFile(false)
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
     azureClient, err := azure.GetAzureClient(creds.Cloud, creds.SubscriptionID, creds.AADClientID, creds.TenantID, creds.AADClientSecret)
     gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-    accountNum, err := azureClient.GetAccountNumByResourceGroup(context.TODO(), creds.ResourceGroup)
+    accountNum, err := azureClient.GetAccountNumByResourceGroup(ctx, creds.ResourceGroup)
     framework.ExpectNoError(err, fmt.Sprintf("failed to GetAccountNumByResourceGroup(%s): %v", creds.ResourceGroup, err))
     ginkgo.By(fmt.Sprintf("GetAccountNumByResourceGroup(%s) returns %d accounts", creds.ResourceGroup, accountNum))
 
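The suite-level change above follows the same rule as the specs: checkAccountCreationLeak now receives the caller's context instead of minting a detached context.TODO(), so the Azure list call participates in the suite's cancellation. A sketch of the before and after shape, assuming the client call simply forwards ctx (the function names here are illustrative):

package e2e

import "context"

// Before: a detached context that nothing can cancel.
func checkLeakDetached(list func(context.Context) (int, error)) (int, error) {
    return list(context.TODO())
}

// After: the caller's ctx flows into the API call, so an interrupted
// AfterSuite also cancels the in-flight request.
func checkLeak(ctx context.Context, list func(context.Context) (int, error)) (int, error) {
    return list(ctx)
}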
17 changes: 9 additions & 8 deletions test/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go
@@ -17,6 +17,7 @@ limitations under the License.
 package testsuites
 
 import (
+    "context"
     "fmt"
 
     "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver"
@@ -36,25 +37,25 @@ type DynamicallyProvisionedCmdVolumeTest struct {
     StorageClassParameters map[string]string
 }
 
-func (t *DynamicallyProvisionedCmdVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedCmdVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
     for _, pod := range t.Pods {
-        tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+        tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
         // defer must be called here so that resources are not removed before they are used
         for i := range cleanup {
-            defer cleanup[i]()
+            defer cleanup[i](ctx)
         }
 
         ginkgo.By("deploying the pod")
-        tpod.Create()
-        defer tpod.Cleanup()
+        tpod.Create(ctx)
+        defer tpod.Cleanup(ctx)
         ginkgo.By("checking that the pod's command exits with no error")
         if pod.WinServerVer == "windows-2022" {
-            if err := e2epod.WaitForPodSuccessInNamespaceSlow(tpod.client, tpod.pod.Name, tpod.namespace.Name); err != nil {
+            if err := e2epod.WaitForPodSuccessInNamespaceSlow(ctx, tpod.client, tpod.pod.Name, tpod.namespace.Name); err != nil {
                 ginkgo.By(fmt.Sprintf("hit error(%v) in first run, give another try", err))
             }
-            tpod.WaitForSuccess()
+            tpod.WaitForSuccess(ctx)
         } else {
-            tpod.WaitForSuccess()
+            tpod.WaitForSuccess(ctx)
         }
     }
 }
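
One detail of the testsuites changes worth noting: the cleanup callbacks returned by the setup helpers change type from func() to func(context.Context), and they are deferred inside the loop so that every resource outlives the test body. A self-contained sketch of that shape (setup and the resources it prints are illustrative, not the driver's actual helpers):

package testsuites

import (
    "context"
    "fmt"
)

// setup returns cleanup callbacks that, as in this commit, accept the
// test's context so deletes can still be cancelled or time out.
func setup(ctx context.Context) []func(context.Context) {
    return []func(context.Context){
        func(ctx context.Context) { fmt.Println("deleting PVC") },
        func(ctx context.Context) { fmt.Println("deleting StorageClass") },
    }
}

func run(ctx context.Context) {
    cleanup := setup(ctx)
    // Deferring inside the loop registers every callback before the test
    // body runs, so resources are not removed while still in use.
    for i := range cleanup {
        defer cleanup[i](ctx)
    }
    // ... test body ...
}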
test/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go
@@ -17,6 +17,8 @@ limitations under the License.
 package testsuites
 
 import (
+    "context"
+
     "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver"
 
     "github.com/onsi/ginkgo/v2"
@@ -34,24 +36,24 @@ type DynamicallyProvisionedCollocatedPodTest struct {
     StorageClassParameters map[string]string
 }
 
-func (t *DynamicallyProvisionedCollocatedPodTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedCollocatedPodTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
     nodeName := ""
     for _, pod := range t.Pods {
-        tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+        tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
         if t.ColocatePods && nodeName != "" {
             tpod.SetNodeSelector(map[string]string{"name": nodeName})
         }
         // defer must be called here so that resources are not removed before they are used
         for i := range cleanup {
-            defer cleanup[i]()
+            defer cleanup[i](ctx)
         }
 
         ginkgo.By("deploying the pod")
-        tpod.Create()
-        defer tpod.Cleanup()
+        tpod.Create(ctx)
+        defer tpod.Cleanup(ctx)
 
         ginkgo.By("checking that the pod is running")
-        tpod.WaitForRunning()
+        tpod.WaitForRunning(ctx)
         nodeName = tpod.pod.Spec.NodeName
     }
 
15 changes: 8 additions & 7 deletions test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go
@@ -17,6 +17,7 @@ limitations under the License.
 package testsuites
 
 import (
+    "context"
     "fmt"
     "time"
 
@@ -42,18 +43,18 @@ type PodExecCheck struct {
     ExpectedString string
 }
 
-func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, namespace *v1.Namespace) {
-    tDeployment, cleanup, _ := t.Pod.SetupDeployment(client, namespace, 1 /*replicas*/, t.CSIDriver, t.StorageClassParameters)
+func (t *DynamicallyProvisionedDeletePodTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
+    tDeployment, cleanup, _ := t.Pod.SetupDeployment(ctx, client, namespace, 1 /*replicas*/, t.CSIDriver, t.StorageClassParameters)
     // defer must be called here so that resources are not removed before they are used
     for i := range cleanup {
-        defer cleanup[i]()
+        defer cleanup[i](ctx)
     }
 
     ginkgo.By("deploying the deployment")
-    tDeployment.Create()
+    tDeployment.Create(ctx)
 
     ginkgo.By("checking that the pod is running")
-    tDeployment.WaitForPodReady()
+    tDeployment.WaitForPodReady(ctx)
 
     if t.PodCheck != nil {
         time.Sleep(time.Second)
@@ -64,10 +65,10 @@ func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, na
     // repeat to make sure mount/unmount is stable
     for i := 0; i < 10; i++ {
         ginkgo.By(fmt.Sprintf("deleting the pod for deployment, %d times", i))
-        tDeployment.DeletePodAndWait()
+        tDeployment.DeletePodAndWait(ctx)
 
         ginkgo.By(fmt.Sprintf("checking again that the pod is running, %d times", i))
-        tDeployment.WaitForPodReady()
+        tDeployment.WaitForPodReady(ctx)
     }
 
     if t.PodCheck != nil {
10 changes: 6 additions & 4 deletions test/e2e/testsuites/dynamically_provisioned_inline_volume.go
@@ -17,6 +17,8 @@ limitations under the License.
 package testsuites
 
 import (
+    "context"
+
     "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver"
 
     "github.com/onsi/ginkgo/v2"
@@ -37,7 +39,7 @@ type DynamicallyProvisionedInlineVolumeTest struct {
     CSIInlineVolume bool
 }
 
-func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedInlineVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
     for _, pod := range t.Pods {
         var tpod *TestPod
         var cleanup []func()
@@ -53,9 +55,9 @@ func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface,
         }
 
         ginkgo.By("deploying the pod")
-        tpod.Create()
-        defer tpod.Cleanup()
+        tpod.Create(ctx)
+        defer tpod.Cleanup(ctx)
         ginkgo.By("checking that the pod's command exits with no error")
-        tpod.WaitForSuccess()
+        tpod.WaitForSuccess(ctx)
     }
 }
test/e2e/testsuites/dynamically_provisioned_invalid_mount_options.go
@@ -17,6 +17,8 @@ limitations under the License.
 package testsuites
 
 import (
+    "context"
+
     "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver"
 
     "github.com/onsi/ginkgo/v2"
@@ -32,18 +34,18 @@ type DynamicallyProvisionedInvalidMountOptions struct {
     StorageClassParameters map[string]string
 }
 
-func (t *DynamicallyProvisionedInvalidMountOptions) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedInvalidMountOptions) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
     for _, pod := range t.Pods {
-        tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+        tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
         // defer must be called here so that resources are not removed before they are used
        for i := range cleanup {
-            defer cleanup[i]()
+            defer cleanup[i](ctx)
         }
 
         ginkgo.By("deploying the pod")
-        tpod.Create()
-        defer tpod.Cleanup()
+        tpod.Create(ctx)
+        defer tpod.Cleanup(ctx)
         ginkgo.By("checking that the pod has a 'FailedMount' event")
-        tpod.WaitForFailedMountError()
+        tpod.WaitForFailedMountError(ctx)
     }
 }