diff --git a/test/e2e/dynamic_provisioning_test.go b/test/e2e/dynamic_provisioning_test.go index 75270e1ed0..063b3684a8 100644 --- a/test/e2e/dynamic_provisioning_test.go +++ b/test/e2e/dynamic_provisioning_test.go @@ -17,7 +17,6 @@ limitations under the License. package e2e import ( - "context" "fmt" "log" "os" @@ -51,7 +50,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { testDriver driver.PVTestDriver ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx ginkgo.SpecContext) { checkPodsRestart := testCmd{ command: "bash", args: []string{"test/utils/check_driver_pods_restart.sh"}, @@ -72,7 +71,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { testDriver = driver.InitAzureFileDriver() - ginkgo.It("should create a storage account with tags [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a storage account with tags [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { // Because the pv object created by kubernetes.io/azure-file does not contain storage account name, skip the test with in-tree volume plugin. skipIfUsingInTreeVolumePlugin() @@ -100,10 +99,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Tags: tags, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a volume on demand with mount options [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a volume on demand with mount options [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"), @@ -146,10 +145,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a smb multi-channel volume with max_channels options [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a smb multi-channel volume with max_channels options [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() if !isCapzTest { ginkgo.Skip("test case is only available for capz test") @@ -183,10 +182,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a pod with volume mount subpath [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a pod with volume mount subpath [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() pods := []testsuites.PodDetails{ @@ -218,10 +217,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellCommandIfNecessary("while true; do echo $(date -u) >> /mnt/test-1/data; sleep 100; done"), @@ -270,11 +269,11 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ColocatePods: true, StorageClassParameters: 
scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) // Track issue https://github.com/kubernetes/kubernetes/issues/70505 - ginkgo.It("should create a volume on demand and mount it as readOnly in a pod [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a volume on demand and mount it as readOnly in a pod [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellCommandIfNecessary("touch /mnt/test-1/data"), @@ -305,10 +304,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { pod := testsuites.PodDetails{ Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' >> /mnt/test-1/data && while true; do sleep 100; done"), Volumes: []testsuites.VolumeDetails{ @@ -342,10 +341,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ExpectedString: expectedString, // pod will be restarted so expect to see 2 instances of string }, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", v1.PersistentVolumeReclaimDelete), func() { + ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) { reclaimPolicy := v1.PersistentVolumeReclaimDelete volumes := []testsuites.VolumeDetails{ { @@ -366,10 +365,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Volumes: volumes, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [file.csi.azure.com] [Windows]", v1.PersistentVolumeReclaimRetain), func() { + ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [file.csi.azure.com] [Windows]", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) { // This tests uses the CSI driver to delete the PV. // TODO: Go via the k8s interfaces and also make it more reliable for in-tree and then // test can be enabled. 
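The pattern repeated across these hunks is Ginkgo v2's spec-level context: each spec body now accepts a ginkgo.SpecContext (which satisfies context.Context) and hands it straight to test.Run in place of the old context.Background(). A minimal sketch of that shape, assuming Ginkgo v2.3 or newer; doSomething is a hypothetical stand-in for the real testsuite call:

package e2e

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// doSomething stands in for calls like test.Run(ctx, cs, ns).
func doSomething(ctx context.Context) error {
	return ctx.Err()
}

var _ = ginkgo.It("threads the spec context into helpers", func(ctx ginkgo.SpecContext) {
	// ctx is tied to this spec: it is cancelled on spec timeout or suite interrupt,
	// so helpers inherit a deadline instead of an unbounded context.Background().
	if err := doSomething(ctx); err != nil {
		ginkgo.Fail(err.Error())
	}
})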
@@ -391,10 +390,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { "skuName": "Premium_LRS", }, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a volume on demand and resize it [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a volume on demand and resize it [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"), @@ -423,10 +422,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a vhd disk volume on demand [kubernetes.io/azure-file] [file.csi.azure.com][disk]", func() { + ginkgo.It("should create a vhd disk volume on demand [kubernetes.io/azure-file] [file.csi.azure.com][disk]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -449,10 +448,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: map[string]string{"skuName": "Standard_LRS", "fsType": "ext4"}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should receive FailedMount event with invalid mount options [file.csi.azure.com] [disk]", func() { + ginkgo.It("should receive FailedMount event with invalid mount options [file.csi.azure.com] [disk]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -480,10 +479,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: map[string]string{"skuName": "Premium_LRS", "fsType": "ext4"}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should receive FailedMount event with invalid mount options [file.csi.azure.com] [disk]", func() { + ginkgo.It("should receive FailedMount event with invalid mount options [file.csi.azure.com] [disk]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -511,10 +510,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: map[string]string{"skuName": "Premium_LRS", "fsType": "ext4"}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [file.csi.azure.com][disk]", func() { + ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [file.csi.azure.com][disk]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -554,11 +553,11 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ColocatePods: true, StorageClassParameters: map[string]string{"skuName": "Premium_LRS", "fsType": "xfs"}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) // Track issue https://github.com/kubernetes/kubernetes/issues/70505 - ginkgo.It("should create a vhd disk volume on demand and mount it as readOnly in a pod [file.csi.azure.com][disk]", func() { + ginkgo.It("should create a vhd disk volume on demand and mount it as readOnly in a pod [file.csi.azure.com][disk]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -583,10 +582,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: 
map[string]string{"skuName": "Premium_LRS", "fsType": "ext3"}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [file.csi.azure.com] [disk]", func() { + ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [file.csi.azure.com] [disk]", func(ctx ginkgo.SpecContext) { ginkgo.Skip("test case is disabled due to controller.attachRequired is disabled by default now") skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -613,10 +612,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { }, StorageClassParameters: map[string]string{"skuName": "Standard_LRS", "fsType": "xfs"}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [file.csi.azure.com] [disk]", v1.PersistentVolumeReclaimDelete), func() { + ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [file.csi.azure.com] [disk]", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -633,10 +632,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Volumes: volumes, StorageClassParameters: map[string]string{"skuName": "Standard_RAGRS", "fsType": "ext2"}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It(fmt.Sprintf("[env] should retain PV with reclaimPolicy %q [file.csi.azure.com] [disk]", v1.PersistentVolumeReclaimRetain), func() { + ginkgo.It(fmt.Sprintf("[env] should retain PV with reclaimPolicy %q [file.csi.azure.com] [disk]", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) { // This tests uses the CSI driver to delete the PV. // TODO: Go via the k8s interfaces and also make it more reliable for in-tree and then // test can be enabled. 
@@ -657,10 +656,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Azurefile: azurefileDriver, StorageClassParameters: map[string]string{"skuName": "Premium_LRS", "fsType": "xfs"}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a pod with multiple volumes [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a pod with multiple volumes [kubernetes.io/azure-file] [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() volumes := []testsuites.VolumeDetails{} @@ -687,10 +686,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { CSIDriver: testDriver, Pods: pods, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a pod, write and read to it, take a volume snapshot, and validate whether it is ready to use [file.csi.azure.com]", func() { + ginkgo.It("should create a pod, write and read to it, take a volume snapshot, and validate whether it is ready to use [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfTestingInWindowsCluster() skipIfUsingInTreeVolumePlugin() @@ -716,10 +715,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { PodWithSnapshot: podWithSnapshot, StorageClassParameters: map[string]string{"skuName": "Standard_LRS"}, } - test.Run(cs, snapshotrcs, ns) + test.Run(ctx, cs, snapshotrcs, ns) }) - ginkgo.It("should create a volume on demand with mount options (Bring Your Own Key) [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a volume on demand with mount options (Bring Your Own Key) [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() // get storage account secret name err := os.Chdir("../..") @@ -772,10 +771,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: bringKeyStorageClassParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a Premium_LRS volume on demand with useDataPlaneAPI [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a Premium_LRS volume on demand with useDataPlaneAPI [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() pods := []testsuites.PodDetails{ @@ -818,10 +817,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a Standard_LRS volume on demand with disableDeleteRetentionPolicy [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should create a Standard_LRS volume on demand with disableDeleteRetentionPolicy [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() pods := []testsuites.PodDetails{ @@ -863,10 +862,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a statefulset object, write and read to it, delete the pod and write and read to it again [file.csi.azure.com]", func() { + ginkgo.It("should create a statefulset object, write and read to it, delete the pod and write and read to it again [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() pod := testsuites.PodDetails{ @@ -899,10 +898,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ExpectedString: expectedString, // pod will be restarted so expect to see 2 instances of string 
}, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should be able to unmount smb volume if volume is already deleted [file.csi.azure.com]", func() { + ginkgo.It("should be able to unmount smb volume if volume is already deleted [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() pod := testsuites.PodDetails{ @@ -935,10 +934,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ExpectedString: expectedString, // pod will be restarted so expect to see 2 instances of string }, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should be able to unmount nfs volume if volume is already deleted [file.csi.azure.com]", func() { + ginkgo.It("should be able to unmount nfs volume if volume is already deleted [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -975,10 +974,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { "protocol": "nfs", }, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create an CSI inline volume [file.csi.azure.com]", func() { + ginkgo.It("should create an CSI inline volume [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() // get storage account secret name @@ -1008,7 +1007,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { shareName := "csi-inline-smb-volume" req := makeCreateVolumeReq(shareName, ns.Name) req.Parameters["storageAccount"] = accountName - resp, err := azurefileDriver.CreateVolume(context.Background(), req) + resp, err := azurefileDriver.CreateVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("create volume error: %v", err)) } @@ -1047,10 +1046,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ReadOnly: false, CSIInlineVolume: true, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create an inline volume by in-tree driver [kubernetes.io/azure-file]", func() { + ginkgo.It("should create an inline volume by in-tree driver [kubernetes.io/azure-file]", func(ctx ginkgo.SpecContext) { if !isTestingMigration { ginkgo.Skip("test case is only available for migration test") } @@ -1081,7 +1080,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { shareName := "intree-inline-smb-volume" req := makeCreateVolumeReq("intree-inline-smb-volume", ns.Name) req.Parameters["storageAccount"] = accountName - resp, err := azurefileDriver.CreateVolume(context.Background(), req) + resp, err := azurefileDriver.CreateVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("create volume error: %v", err)) } @@ -1121,10 +1120,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ShareName: shareName, ReadOnly: false, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should mount on-prem smb server [file.csi.azure.com]", func() { + ginkgo.It("should mount on-prem smb server [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() if isWindowsCluster && isCapzTest { ginkgo.Skip("test case is not available for capz Windows test") @@ -1135,8 +1134,8 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { secreteData := map[string]string{"azurestorageaccountname": "USERNAME"} secreteData["azurestorageaccountkey"] = "PASSWORD" tsecret := testsuites.NewTestSecret(f.ClientSet, ns, secretName, secreteData) - tsecret.Create() - defer tsecret.Cleanup() + tsecret.Create(ctx) + defer tsecret.Cleanup(ctx) server := "smb-server.default.svc.cluster.local" if isWindowsCluster { @@ -1194,10 +1193,10 
@@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ReadOnly: false, CSIInlineVolume: true, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a NFS volume on demand with mount options [file.csi.azure.com] [nfs]", func() { + ginkgo.It("should create a NFS volume on demand with mount options [file.csi.azure.com] [nfs]", func(ctx ginkgo.SpecContext) { skipIfTestingInWindowsCluster() skipIfUsingInTreeVolumePlugin() @@ -1232,10 +1231,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { "mountPermissions": "0755", }, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a NFS volume on demand on a storage account with private endpoint [file.csi.azure.com] [nfs]", func() { + ginkgo.It("should create a NFS volume on demand on a storage account with private endpoint [file.csi.azure.com] [nfs]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -1273,10 +1272,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { Pods: pods, StorageClassParameters: scParameters, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should create a pod with multiple NFS volumes [file.csi.azure.com]", func() { + ginkgo.It("should create a pod with multiple NFS volumes [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfTestingInWindowsCluster() skipIfUsingInTreeVolumePlugin() @@ -1312,10 +1311,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { if supportZRSwithNFS { test.StorageClassParameters["skuName"] = "Premium_ZRS" } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("smb volume mount is still valid after driver restart [file.csi.azure.com]", func() { + ginkgo.It("smb volume mount is still valid after driver restart [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() // print azure file driver logs before driver restart @@ -1367,10 +1366,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { execTestCmd([]testCmd{restartDriver}) }, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("nfs volume mount is still valid after driver restart [file.csi.azure.com]", func() { + ginkgo.It("nfs volume mount is still valid after driver restart [file.csi.azure.com]", func(ctx ginkgo.SpecContext) { skipIfUsingInTreeVolumePlugin() skipIfTestingInWindowsCluster() @@ -1410,7 +1409,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { execTestCmd([]testCmd{restartDriver}) }, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) }) diff --git a/test/e2e/pre_provisioning_test.go b/test/e2e/pre_provisioning_test.go index 8c7c87b984..7a4d641d01 100644 --- a/test/e2e/pre_provisioning_test.go +++ b/test/e2e/pre_provisioning_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package e2e import ( - "context" "fmt" "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" @@ -50,7 +49,7 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { skipManuallyDeletingVolume bool ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx ginkgo.SpecContext) { checkPodsRestart := testCmd{ command: "bash", args: []string{"test/utils/check_driver_pods_restart.sh"}, @@ -64,24 +63,24 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { testDriver = driver.InitAzureFileDriver() }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx ginkgo.SpecContext) { if !skipManuallyDeletingVolume { req := &csi.DeleteVolumeRequest{ VolumeId: volumeID, } - _, err := azurefileDriver.DeleteVolume(context.Background(), req) + _, err := azurefileDriver.DeleteVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("create volume %q error: %v", volumeID, err)) } } }) - ginkgo.It("should use a pre-provisioned volume and mount it as readOnly in a pod [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should use a pre-provisioned volume and mount it as readOnly in a pod [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { // Az tests are not yet working for in-tree skipIfUsingInTreeVolumePlugin() req := makeCreateVolumeReq("pre-provisioned-readonly", ns.Name) - resp, err := azurefileDriver.CreateVolume(context.Background(), req) + resp, err := azurefileDriver.CreateVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("create volume error: %v", err)) } @@ -111,15 +110,15 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { CSIDriver: testDriver, Pods: pods, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should use a pre-provisioned volume and mount it by multiple pods [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should use a pre-provisioned volume and mount it by multiple pods [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { // Az tests are not yet working for in-tree skipIfUsingInTreeVolumePlugin() req := makeCreateVolumeReq("pre-provisioned-multiple-pods", ns.Name) - resp, err := azurefileDriver.CreateVolume(context.Background(), req) + resp, err := azurefileDriver.CreateVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("create volume error: %v", err)) } @@ -152,15 +151,15 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { CSIDriver: testDriver, Pods: pods, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It(fmt.Sprintf("should use a pre-provisioned volume and retain PV with reclaimPolicy %q [file.csi.azure.com] [Windows]", v1.PersistentVolumeReclaimRetain), func() { + ginkgo.It(fmt.Sprintf("should use a pre-provisioned volume and retain PV with reclaimPolicy %q [file.csi.azure.com] [Windows]", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) { // Az tests are not yet working for in tree driver skipIfUsingInTreeVolumePlugin() req := makeCreateVolumeReq("pre-provisioned-retain-reclaimpolicy", ns.Name) - resp, err := azurefileDriver.CreateVolume(context.Background(), req) + resp, err := azurefileDriver.CreateVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("create volume error: %v", err)) } @@ -180,15 +179,15 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { CSIDriver: testDriver, Volumes: volumes, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should use existing credentials in k8s cluster [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should use existing credentials in k8s cluster [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { // Az tests are not yet 
working for in tree driver skipIfUsingInTreeVolumePlugin() req := makeCreateVolumeReq("pre-provisioned-existing-credentials", ns.Name) - resp, err := azurefileDriver.CreateVolume(context.Background(), req) + resp, err := azurefileDriver.CreateVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("create volume error: %v", err)) } @@ -224,15 +223,15 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { Pods: pods, Azurefile: azurefileDriver, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) - ginkgo.It("should use provided credentials [file.csi.azure.com] [Windows]", func() { + ginkgo.It("should use provided credentials [file.csi.azure.com] [Windows]", func(ctx ginkgo.SpecContext) { // Az tests are not yet working for in tree driver skipIfUsingInTreeVolumePlugin() req := makeCreateVolumeReq("pre-provisioned-provided-credentials", ns.Name) - resp, err := azurefileDriver.CreateVolume(context.Background(), req) + resp, err := azurefileDriver.CreateVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("create volume error: %v", err)) } @@ -269,7 +268,7 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { Pods: pods, Azurefile: azurefileDriver, } - test.Run(cs, ns) + test.Run(ctx, cs, ns) }) }) diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index be758798c6..e5c6496912 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -72,7 +72,7 @@ type testCmd struct { endLog string } -var _ = ginkgo.BeforeSuite(func() { +var _ = ginkgo.BeforeSuite(func(ctx ginkgo.SpecContext) { log.Println(driver.AzureDriverNameVar, os.Getenv(driver.AzureDriverNameVar), fmt.Sprintf("%v", isUsingInTreeVolumePlugin)) log.Println(testMigrationEnvVar, os.Getenv(testMigrationEnvVar), fmt.Sprintf("%v", isTestingMigration)) log.Println(testWindowsEnvVar, os.Getenv(testWindowsEnvVar), fmt.Sprintf("%v", isWindowsCluster)) @@ -94,7 +94,7 @@ var _ = ginkgo.BeforeSuite(func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) azureClient, err := azure.GetAzureClient(creds.Cloud, creds.SubscriptionID, creds.AADClientID, creds.TenantID, creds.AADClientSecret) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = azureClient.EnsureResourceGroup(context.Background(), creds.ResourceGroup, creds.Location, nil) + _, err = azureClient.EnsureResourceGroup(ctx, creds.ResourceGroup, creds.Location, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // check whether current region supports Premium_ZRS with NFS protocol @@ -145,7 +145,7 @@ var _ = ginkgo.BeforeSuite(func() { } }) -var _ = ginkgo.AfterSuite(func() { +var _ = ginkgo.AfterSuite(func(ctx ginkgo.SpecContext) { if testutil.IsRunningInProw() { if isTestingMigration || isUsingInTreeVolumePlugin { cmLog := testCmd{ @@ -221,7 +221,7 @@ var _ = ginkgo.AfterSuite(func() { execTestCmd([]testCmd{installDriver, uninstallDriver}) } - checkAccountCreationLeak() + checkAccountCreationLeak(ctx) err := credentials.DeleteAzureCredentialFile() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -263,13 +263,13 @@ func execTestCmd(cmds []testCmd) { } } -func checkAccountCreationLeak() { +func checkAccountCreationLeak(ctx context.Context) { creds, err := credentials.CreateAzureCredentialFile(false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) azureClient, err := azure.GetAzureClient(creds.Cloud, creds.SubscriptionID, creds.AADClientID, creds.TenantID, creds.AADClientSecret) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - accountNum, err := azureClient.GetAccountNumByResourceGroup(context.TODO(), creds.ResourceGroup) + accountNum, err := 
azureClient.GetAccountNumByResourceGroup(ctx, creds.ResourceGroup) framework.ExpectNoError(err, fmt.Sprintf("failed to GetAccountNumByResourceGroup(%s): %v", creds.ResourceGroup, err)) ginkgo.By(fmt.Sprintf("GetAccountNumByResourceGroup(%s) returns %d accounts", creds.ResourceGroup, accountNum)) diff --git a/test/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go index f8d1afefa4..8c0b9a0462 100644 --- a/test/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go @@ -17,6 +17,7 @@ limitations under the License. package testsuites import ( + "context" "fmt" "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" @@ -36,25 +37,25 @@ type DynamicallyProvisionedCmdVolumeTest struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedCmdVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedCmdVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { - tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with no error") if pod.WinServerVer == "windows-2022" { - if err := e2epod.WaitForPodSuccessInNamespaceSlow(tpod.client, tpod.pod.Name, tpod.namespace.Name); err != nil { + if err := e2epod.WaitForPodSuccessInNamespaceSlow(ctx, tpod.client, tpod.pod.Name, tpod.namespace.Name); err != nil { ginkgo.By(fmt.Sprintf("hit error(%v) in first run, give another try", err)) } - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) } else { - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) } } } diff --git a/test/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go b/test/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go index c1e309e7a7..b21206ad09 100644 --- a/test/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go @@ -17,6 +17,8 @@ limitations under the License. 
package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" "github.com/onsi/ginkgo/v2" @@ -34,24 +36,24 @@ type DynamicallyProvisionedCollocatedPodTest struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedCollocatedPodTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedCollocatedPodTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { nodeName := "" for _, pod := range t.Pods { - tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) if t.ColocatePods && nodeName != "" { tpod.SetNodeSelector(map[string]string{"name": nodeName}) } // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pod is running") - tpod.WaitForRunning() + tpod.WaitForRunning(ctx) nodeName = tpod.pod.Spec.NodeName } diff --git a/test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go b/test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go index 449a96eb0c..504aea2b17 100644 --- a/test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go @@ -17,6 +17,7 @@ limitations under the License. package testsuites import ( + "context" "fmt" "time" @@ -42,18 +43,18 @@ type PodExecCheck struct { ExpectedString string } -func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, namespace *v1.Namespace) { - tDeployment, cleanup, _ := t.Pod.SetupDeployment(client, namespace, 1 /*replicas*/, t.CSIDriver, t.StorageClassParameters) +func (t *DynamicallyProvisionedDeletePodTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { + tDeployment, cleanup, _ := t.Pod.SetupDeployment(ctx, client, namespace, 1 /*replicas*/, t.CSIDriver, t.StorageClassParameters) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the deployment") - tDeployment.Create() + tDeployment.Create(ctx) ginkgo.By("checking that the pod is running") - tDeployment.WaitForPodReady() + tDeployment.WaitForPodReady(ctx) if t.PodCheck != nil { time.Sleep(time.Second) @@ -64,10 +65,10 @@ func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, na // repeat to make sure mount/unmount is stable for i := 0; i < 10; i++ { ginkgo.By(fmt.Sprintf("deleting the pod for deployment, %d times", i)) - tDeployment.DeletePodAndWait() + tDeployment.DeletePodAndWait(ctx) ginkgo.By(fmt.Sprintf("checking again that the pod is running, %d times", i)) - tDeployment.WaitForPodReady() + tDeployment.WaitForPodReady(ctx) } if t.PodCheck != nil { diff --git a/test/e2e/testsuites/dynamically_provisioned_inline_volume.go b/test/e2e/testsuites/dynamically_provisioned_inline_volume.go index effd0b6ea1..30d8892697 100644 --- a/test/e2e/testsuites/dynamically_provisioned_inline_volume.go +++ b/test/e2e/testsuites/dynamically_provisioned_inline_volume.go @@ -17,6 +17,8 @@ limitations under the License. 
package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" "github.com/onsi/ginkgo/v2" @@ -37,7 +39,7 @@ type DynamicallyProvisionedInlineVolumeTest struct { CSIInlineVolume bool } -func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedInlineVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { var tpod *TestPod var cleanup []func() @@ -53,9 +55,9 @@ func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface, } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with no error") - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) } } diff --git a/test/e2e/testsuites/dynamically_provisioned_invalid_mount_options.go b/test/e2e/testsuites/dynamically_provisioned_invalid_mount_options.go index b6955bd287..38affca1ca 100644 --- a/test/e2e/testsuites/dynamically_provisioned_invalid_mount_options.go +++ b/test/e2e/testsuites/dynamically_provisioned_invalid_mount_options.go @@ -17,6 +17,8 @@ limitations under the License. package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" "github.com/onsi/ginkgo/v2" @@ -32,18 +34,18 @@ type DynamicallyProvisionedInvalidMountOptions struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedInvalidMountOptions) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedInvalidMountOptions) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { - tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pod has 'FailedMount' event") - tpod.WaitForFailedMountError() + tpod.WaitForFailedMountError(ctx) } } diff --git a/test/e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go b/test/e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go index 01cf877dbb..b470321830 100644 --- a/test/e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go +++ b/test/e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go @@ -17,6 +17,8 @@ limitations under the License. 
package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" "github.com/onsi/ginkgo/v2" @@ -34,18 +36,18 @@ type DynamicallyProvisionedPodWithMultiplePVsTest struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedPodWithMultiplePVsTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedPodWithMultiplePVsTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { - tpod, cleanup := pod.SetupWithDynamicMultipleVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpod, cleanup := pod.SetupWithDynamicMultipleVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with no error") - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) } } diff --git a/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go index b91e3ac761..620b6ca392 100644 --- a/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go @@ -17,6 +17,7 @@ limitations under the License. package testsuites import ( + "context" "fmt" "strings" @@ -37,7 +38,7 @@ type DynamicallyProvisionedReadOnlyVolumeTest struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { expectedReadOnlyLog := "Read-only file system" if pod.IsWindows { @@ -45,19 +46,19 @@ func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(client clientset.Interfac } permissionDeniedLog := "Permission denied" - tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with an error") - tpod.WaitForFailure() + tpod.WaitForFailure(ctx) ginkgo.By("checking that pod logs contain expected message") - body, err := tpod.Logs() + body, err := tpod.Logs(ctx) framework.ExpectNoError(err, fmt.Sprintf("Error getting logs for pod %s: %v", tpod.pod.Name, err)) hasReadOnlyLog := strings.Contains(string(body), expectedReadOnlyLog) || strings.Contains(string(body), permissionDeniedLog) framework.ExpectEqual(hasReadOnlyLog, true, fmt.Sprintf("expected substring: %s or %s, current returned logs: %s", expectedReadOnlyLog, permissionDeniedLog, string(body))) diff --git a/test/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go b/test/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go index b3042706c2..fea6e57d4e 100644 --- 
a/test/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go @@ -17,6 +17,8 @@ limitations under the License. package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/pkg/azurefile" "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" @@ -33,18 +35,18 @@ type DynamicallyProvisionedReclaimPolicyTest struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedReclaimPolicyTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedReclaimPolicyTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, volume := range t.Volumes { - tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) // will delete the PVC // will also wait for PV to be deleted when reclaimPolicy=Delete - tpvc.Cleanup() + tpvc.Cleanup(ctx) // first check PV stills exists, then manually delete it if tpvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain { - tpvc.WaitForPersistentVolumePhase(v1.VolumeReleased) - tpvc.DeleteBoundPersistentVolume() - tpvc.DeleteBackingVolume(t.Azurefile) + tpvc.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased) + tpvc.DeleteBoundPersistentVolume(ctx) + tpvc.DeleteBackingVolume(ctx, t.Azurefile) } } } diff --git a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go index 075552b4f9..96fbba5ec3 100644 --- a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go @@ -59,21 +59,21 @@ func getStorageAccountName(secretName string) (string, error) { return matches[1], nil } -func (t *DynamicallyProvisionedResizeVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedResizeVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { - tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with no error") - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) pvcName := tpod.pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName - pvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(context.TODO(), pvcName, metav1.GetOptions{}) + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, fmt.Sprintf("fail to get original pvc(%s): %v", pvcName, err)) } @@ -85,7 +85,7 @@ func (t *DynamicallyProvisionedResizeVolumeTest) Run(client clientset.Interface, pvc.Spec.Resources.Requests["storage"] = originalSize ginkgo.By("resizing the pvc") - updatedPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Update(context.TODO(), pvc, metav1.UpdateOptions{}) + updatedPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Update(ctx, pvc, 
metav1.UpdateOptions{}) if err != nil { framework.ExpectNoError(err, fmt.Sprintf("fail to resize pvc(%s): %v", pvcName, err)) } @@ -95,7 +95,7 @@ func (t *DynamicallyProvisionedResizeVolumeTest) Run(client clientset.Interface, time.Sleep(30 * time.Second) ginkgo.By("checking the resizing result") - newPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(context.TODO(), tpod.pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) + newPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(ctx, tpod.pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, fmt.Sprintf("fail to get new pvc(%s): %v", pvcName, err)) } @@ -105,7 +105,7 @@ func (t *DynamicallyProvisionedResizeVolumeTest) Run(client clientset.Interface, } ginkgo.By("checking the resizing PV result") - newPv, _ := client.CoreV1().PersistentVolumes().Get(context.Background(), updatedPvc.Spec.VolumeName, metav1.GetOptions{}) + newPv, _ := client.CoreV1().PersistentVolumes().Get(ctx, updatedPvc.Spec.VolumeName, metav1.GetOptions{}) newPvSize := newPv.Spec.Capacity["storage"] newPvSizeStr := newPvSize.String() + "Gi" @@ -139,7 +139,7 @@ func (t *DynamicallyProvisionedResizeVolumeTest) Run(client clientset.Interface, //get file information fileshareClient, err := azureClient.GetAzureFilesClient() framework.ExpectNoError(err, fmt.Sprintf("Error getting client for azurefile %v", err)) - share, err := fileshareClient.Get(context.Background(), resourceGroup, accountName, shareName, "") + share, err := fileshareClient.Get(ctx, resourceGroup, accountName, shareName, "") framework.ExpectNoError(err, fmt.Sprintf("Error getting file for azurefile %v", err)) newfileSize := strconv.Itoa(int(*share.ShareQuota)) + "Gi" if !(newSize.String() == newfileSize) { diff --git a/test/e2e/testsuites/dynamically_provisioned_restart_driver_tester.go b/test/e2e/testsuites/dynamically_provisioned_restart_driver_tester.go index 85587c42ff..a000554e8f 100644 --- a/test/e2e/testsuites/dynamically_provisioned_restart_driver_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_restart_driver_tester.go @@ -17,6 +17,8 @@ limitations under the License. 
package testsuites import ( + "context" + "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -34,18 +36,18 @@ type DynamicallyProvisionedRestartDriverTest struct { RestartDriverFunc func() } -func (t *DynamicallyProvisionedRestartDriverTest) Run(client clientset.Interface, namespace *v1.Namespace) { - tDeployment, cleanup, _ := t.Pod.SetupDeployment(client, namespace, 1 /*replicas*/, t.CSIDriver, t.StorageClassParameters) +func (t *DynamicallyProvisionedRestartDriverTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { + tDeployment, cleanup, _ := t.Pod.SetupDeployment(ctx, client, namespace, 1 /*replicas*/, t.CSIDriver, t.StorageClassParameters) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("creating the deployment for the pod") - tDeployment.Create() + tDeployment.Create(ctx) ginkgo.By("checking that the pod is running") - tDeployment.WaitForPodReady() + tDeployment.WaitForPodReady(ctx) if t.PodCheck != nil { ginkgo.By("checking if pod is able to access volume") diff --git a/test/e2e/testsuites/dynamically_provisioned_statefulset_tester.go b/test/e2e/testsuites/dynamically_provisioned_statefulset_tester.go index 44daf43f80..d79d37a77d 100644 --- a/test/e2e/testsuites/dynamically_provisioned_statefulset_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_statefulset_tester.go @@ -17,6 +17,8 @@ limitations under the License. package testsuites import ( + "context" + "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -32,18 +34,18 @@ type DynamicallyProvisionedStatefulSetTest struct { PodCheck *PodExecCheck } -func (t *DynamicallyProvisionedStatefulSetTest) Run(client clientset.Interface, namespace *v1.Namespace) { - tStatefulSet, cleanup := t.Pod.SetupStatefulset(client, namespace, t.CSIDriver) +func (t *DynamicallyProvisionedStatefulSetTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { + tStatefulSet, cleanup := t.Pod.SetupStatefulset(ctx, client, namespace, t.CSIDriver) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the statefulset") - tStatefulSet.Create() + tStatefulSet.Create(ctx) ginkgo.By("checking that the pod is running") - tStatefulSet.WaitForPodReady() + tStatefulSet.WaitForPodReady(ctx) if t.PodCheck != nil { ginkgo.By("check pod exec") @@ -51,10 +53,10 @@ func (t *DynamicallyProvisionedStatefulSetTest) Run(client clientset.Interface, } ginkgo.By("deleting the pod for statefulset") - tStatefulSet.DeletePodAndWait() + tStatefulSet.DeletePodAndWait(ctx) ginkgo.By("checking again that the pod is running") - tStatefulSet.WaitForPodReady() + tStatefulSet.WaitForPodReady(ctx) if t.PodCheck != nil { ginkgo.By("check pod exec") diff --git a/test/e2e/testsuites/dynamically_provisioned_tags_tester.go b/test/e2e/testsuites/dynamically_provisioned_tags_tester.go index d70e277bc6..4d58a43bdc 100644 --- a/test/e2e/testsuites/dynamically_provisioned_tags_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_tags_tester.go @@ -43,15 +43,15 @@ type DynamicallyProvisionedAccountWithTags struct { Tags string } -func (t *DynamicallyProvisionedAccountWithTags) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedAccountWithTags) Run(ctx context.Context, client 
clientset.Interface, namespace *v1.Namespace) { for _, volume := range t.Volumes { - tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver, t.StorageClassParameters) - defer tpvc.Cleanup() + tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) + defer tpvc.Cleanup(ctx) ginkgo.By("checking whether the storage account contains tags") pvName := tpvc.persistentVolume.ObjectMeta.Name - pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("failed to get pv(%s): %v", pvName, err)) volumeID := pv.Spec.PersistentVolumeSource.CSI.VolumeHandle @@ -63,7 +63,7 @@ func (t *DynamicallyProvisionedAccountWithTags) Run(client clientset.Interface, azureClient, err := azureUtils.GetAzureClient(creds.Cloud, creds.SubscriptionID, creds.AADClientID, creds.TenantID, creds.AADClientSecret) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - account, err := azureClient.GetStorageAccount(context.TODO(), resourceGroupName, accountName) + account, err := azureClient.GetStorageAccount(ctx, resourceGroupName, accountName) framework.ExpectNoError(err, fmt.Sprintf("failed to get storage account(%s): %v", accountName, err)) resultTags := account.Tags diff --git a/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go b/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go index 84190f2217..ef4d5b6361 100644 --- a/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go @@ -17,6 +17,8 @@ limitations under the License. 
package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" "github.com/onsi/ginkgo/v2" @@ -37,29 +39,29 @@ type DynamicallyProvisionedVolumeSnapshotTest struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedVolumeSnapshotTest) Run(client clientset.Interface, restclient restclientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedVolumeSnapshotTest) Run(ctx context.Context, client clientset.Interface, restclient restclientset.Interface, namespace *v1.Namespace) { tpod := NewTestPod(client, namespace, t.Pod.Cmd, t.Pod.IsWindows, t.Pod.WinServerVer) volume := t.Pod.Volumes[0] - tpvc, pvcCleanup := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpvc, pvcCleanup := volume.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) for i := range pvcCleanup { - defer pvcCleanup[i]() + defer pvcCleanup[i](ctx) } tpod.SetupVolume(tpvc.persistentVolumeClaim, volume.VolumeMount.NameGenerate+"1", volume.VolumeMount.MountPathGenerate+"1", volume.VolumeMount.ReadOnly) ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pod's command exits with no error") - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) ginkgo.By("creating volume snapshot class") - tvsc, cleanup := CreateVolumeSnapshotClass(restclient, namespace, t.CSIDriver) + tvsc, cleanup := CreateVolumeSnapshotClass(ctx, restclient, namespace, t.CSIDriver) defer cleanup() ginkgo.By("taking snapshots") - snapshot := tvsc.CreateSnapshot(tpvc.persistentVolumeClaim) - defer tvsc.DeleteSnapshot(snapshot) + snapshot := tvsc.CreateSnapshot(ctx, tpvc.persistentVolumeClaim) + defer tvsc.DeleteSnapshot(ctx, snapshot) // If the field ReadyToUse is still false, there will be a timeout error. - tvsc.ReadyToUse(snapshot) + tvsc.ReadyToUse(ctx, snapshot) } diff --git a/test/e2e/testsuites/dynamically_provisioned_volume_subpath_tester.go b/test/e2e/testsuites/dynamically_provisioned_volume_subpath_tester.go index 389f7ad84d..dc1384331e 100644 --- a/test/e2e/testsuites/dynamically_provisioned_volume_subpath_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_volume_subpath_tester.go @@ -17,6 +17,8 @@ limitations under the License. 
package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" "github.com/onsi/ginkgo/v2" @@ -33,18 +35,18 @@ type DynamicallyProvisionedVolumeSubpathTester struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedVolumeSubpathTester) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *DynamicallyProvisionedVolumeSubpathTester) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { - tpod, cleanup := pod.SetupWithDynamicVolumesWithSubpath(client, namespace, t.CSIDriver, t.StorageClassParameters) + tpod, cleanup := pod.SetupWithDynamicVolumesWithSubpath(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with no error") - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) } } diff --git a/test/e2e/testsuites/dynamically_provisioned_volume_unmount_tester.go b/test/e2e/testsuites/dynamically_provisioned_volume_unmount_tester.go index ad7fb01aa1..6c797ae679 100644 --- a/test/e2e/testsuites/dynamically_provisioned_volume_unmount_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_volume_unmount_tester.go @@ -41,18 +41,18 @@ type DynamicallyProvisionedVolumeUnmountTest struct { StorageClassParameters map[string]string } -func (t *DynamicallyProvisionedVolumeUnmountTest) Run(client clientset.Interface, namespace *v1.Namespace) { - tDeployment, cleanup, volumeID := t.Pod.SetupDeployment(client, namespace, 1 /*replicas*/, t.CSIDriver, t.StorageClassParameters) +func (t *DynamicallyProvisionedVolumeUnmountTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { + tDeployment, cleanup, volumeID := t.Pod.SetupDeployment(ctx, client, namespace, 1 /*replicas*/, t.CSIDriver, t.StorageClassParameters) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the deployment") - tDeployment.Create() + tDeployment.Create(ctx) ginkgo.By("checking that the pod is running") - tDeployment.WaitForPodReady() + tDeployment.WaitForPodReady(ctx) if t.PodCheck != nil { time.Sleep(time.Second) @@ -61,7 +61,7 @@ func (t *DynamicallyProvisionedVolumeUnmountTest) Run(client clientset.Interface } ginkgo.By("delete volume " + volumeID + " first, make sure pod could still be terminated") - _, err := t.Azurefile.DeleteVolume(context.TODO(), &csi.DeleteVolumeRequest{VolumeId: volumeID}) + _, err := t.Azurefile.DeleteVolume(ctx, &csi.DeleteVolumeRequest{VolumeId: volumeID}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("check whether " + volumeID + " exists") @@ -77,11 +77,11 @@ func (t *DynamicallyProvisionedVolumeUnmountTest) Run(client clientset.Interface VolumeCapabilities: multiNodeVolCap, } - if _, err = t.Azurefile.ValidateVolumeCapabilities(context.TODO(), req); err != nil { + if _, err = t.Azurefile.ValidateVolumeCapabilities(ctx, req); err != nil { ginkgo.By("ValidateVolumeCapabilities " + volumeID + " returned with error: " + err.Error()) } gomega.Expect(err).To(gomega.HaveOccurred()) ginkgo.By("deleting the pod for deployment") - tDeployment.DeletePodAndWait() + tDeployment.DeletePodAndWait(ctx) } diff --git 
a/test/e2e/testsuites/pre_provisioned_existing_credentials_tester.go b/test/e2e/testsuites/pre_provisioned_existing_credentials_tester.go index 11becb330e..103c6becb1 100644 --- a/test/e2e/testsuites/pre_provisioned_existing_credentials_tester.go +++ b/test/e2e/testsuites/pre_provisioned_existing_credentials_tester.go @@ -38,10 +38,10 @@ type PreProvisionedExistingCredentialsTest struct { Azurefile *azurefile.Driver } -func (t *PreProvisionedExistingCredentialsTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *PreProvisionedExistingCredentialsTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { for n, volume := range pod.Volumes { - resourceGroupName, accountName, _, fileShareName, _, _, err := t.Azurefile.GetAccountInfo(context.Background(), volume.VolumeID, nil, nil) + resourceGroupName, accountName, _, fileShareName, _, _, err := t.Azurefile.GetAccountInfo(ctx, volume.VolumeID, nil, nil) if err != nil { framework.ExpectNoError(err, fmt.Sprintf("Error GetContainerInfo from volumeID(%s): %v", volume.VolumeID, err)) return @@ -55,25 +55,25 @@ func (t *PreProvisionedExistingCredentialsTest) Run(client clientset.Interface, ginkgo.By("creating the storageclass with existing credentials") sc := t.CSIDriver.GetPreProvisionStorageClass(parameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) tsc := NewTestStorageClass(client, namespace, sc) - createdStorageClass := tsc.Create() - defer tsc.Cleanup() + createdStorageClass := tsc.Create(ctx) + defer tsc.Cleanup(ctx) ginkgo.By("creating pvc with storageclass") tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass) - tpvc.Create() - defer tpvc.Cleanup() + tpvc.Create(ctx) + defer tpvc.Cleanup(ctx) ginkgo.By("validating the pvc") - tpvc.WaitForBound() - tpvc.ValidateProvisionedPersistentVolume() + tpvc.WaitForBound(ctx) + tpvc.ValidateProvisionedPersistentVolume(ctx) tpod := NewTestPod(client, namespace, pod.Cmd, pod.IsWindows, pod.WinServerVer) tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", volume.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", volume.VolumeMount.MountPathGenerate, n+1), volume.VolumeMount.ReadOnly) ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with no error") - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) } } } diff --git a/test/e2e/testsuites/pre_provisioned_multiple_pods.go b/test/e2e/testsuites/pre_provisioned_multiple_pods.go index 26a2399413..33bd52af82 100644 --- a/test/e2e/testsuites/pre_provisioned_multiple_pods.go +++ b/test/e2e/testsuites/pre_provisioned_multiple_pods.go @@ -17,6 +17,8 @@ limitations under the License. 
package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" "github.com/onsi/ginkgo/v2" @@ -31,18 +33,18 @@ type PreProvisionedMultiplePods struct { Pods []PodDetails } -func (t *PreProvisionedMultiplePods) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *PreProvisionedMultiplePods) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { - tpod, cleanup := pod.SetupWithPreProvisionedVolumes(client, namespace, t.CSIDriver) + tpod, cleanup := pod.SetupWithPreProvisionedVolumes(ctx, client, namespace, t.CSIDriver) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with no error") - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) } } diff --git a/test/e2e/testsuites/pre_provisioned_provided_credentials_tester.go b/test/e2e/testsuites/pre_provisioned_provided_credentials_tester.go index a22c807bef..4f679bc014 100644 --- a/test/e2e/testsuites/pre_provisioned_provided_credentials_tester.go +++ b/test/e2e/testsuites/pre_provisioned_provided_credentials_tester.go @@ -38,31 +38,31 @@ type PreProvisionedProvidedCredentiasTest struct { Azurefile *azurefile.Driver } -func (t *PreProvisionedProvidedCredentiasTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *PreProvisionedProvidedCredentiasTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { for n, volume := range pod.Volumes { - _, accountName, accountKey, fileShareName, _, _, err := t.Azurefile.GetAccountInfo(context.Background(), volume.VolumeID, nil, nil) + _, accountName, accountKey, fileShareName, _, _, err := t.Azurefile.GetAccountInfo(ctx, volume.VolumeID, nil, nil) framework.ExpectNoError(err, fmt.Sprintf("Error GetAccountInfo from volumeID(%s): %v", volume.VolumeID, err)) ginkgo.By("creating the secret") secreteData := map[string]string{"azurestorageaccountname": accountName} secreteData["azurestorageaccountkey"] = accountKey tsecret := NewTestSecret(client, namespace, volume.NodeStageSecretRef, secreteData) - tsecret.Create() - defer tsecret.Cleanup() + tsecret.Create(ctx) + defer tsecret.Cleanup(ctx) pod.Volumes[n].ShareName = fileShareName - tpod, cleanup := pod.SetupWithPreProvisionedVolumes(client, namespace, t.CSIDriver) + tpod, cleanup := pod.SetupWithPreProvisionedVolumes(ctx, client, namespace, t.CSIDriver) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with no error") - tpod.WaitForSuccess() + tpod.WaitForSuccess(ctx) } } } diff --git a/test/e2e/testsuites/pre_provisioned_read_only_volume_tester.go b/test/e2e/testsuites/pre_provisioned_read_only_volume_tester.go index f60d0d43d0..05c767a3c7 100644 --- a/test/e2e/testsuites/pre_provisioned_read_only_volume_tester.go +++ b/test/e2e/testsuites/pre_provisioned_read_only_volume_tester.go @@ -17,6 +17,7 @@ limitations under the License. 
package testsuites import ( + "context" "fmt" "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" @@ -35,26 +36,26 @@ type PreProvisionedReadOnlyVolumeTest struct { Pods []PodDetails } -func (t *PreProvisionedReadOnlyVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *PreProvisionedReadOnlyVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, pod := range t.Pods { expectedReadOnlyLog := "Read-only file system" if pod.IsWindows { expectedReadOnlyLog = "FileOpenFailure" } - tpod, cleanup := pod.SetupWithPreProvisionedVolumes(client, namespace, t.CSIDriver) + tpod, cleanup := pod.SetupWithPreProvisionedVolumes(ctx, client, namespace, t.CSIDriver) // defer must be called here for resources not get removed before using them for i := range cleanup { - defer cleanup[i]() + defer cleanup[i](ctx) } ginkgo.By("deploying the pod") - tpod.Create() - defer tpod.Cleanup() + tpod.Create(ctx) + defer tpod.Cleanup(ctx) ginkgo.By("checking that the pods command exits with an error") - tpod.WaitForFailure() + tpod.WaitForFailure(ctx) ginkgo.By("checking that pod logs contain expected message") - body, err := tpod.Logs() + body, err := tpod.Logs(ctx) framework.ExpectNoError(err, fmt.Sprintf("Error getting logs for pod %s: %v", tpod.pod.Name, err)) gomega.Expect(string(body)).To(gomega.ContainSubstring(expectedReadOnlyLog)) } diff --git a/test/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go b/test/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go index 1246e08f0e..a0fe8a1f09 100644 --- a/test/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go +++ b/test/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go @@ -17,6 +17,8 @@ limitations under the License. package testsuites import ( + "context" + "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" v1 "k8s.io/api/core/v1" @@ -30,17 +32,17 @@ type PreProvisionedReclaimPolicyTest struct { Volumes []VolumeDetails } -func (t *PreProvisionedReclaimPolicyTest) Run(client clientset.Interface, namespace *v1.Namespace) { +func (t *PreProvisionedReclaimPolicyTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) { for _, volume := range t.Volumes { - tpvc, _ := volume.SetupPreProvisionedPersistentVolumeClaim(client, namespace, t.CSIDriver) + tpvc, _ := volume.SetupPreProvisionedPersistentVolumeClaim(ctx, client, namespace, t.CSIDriver) // will delete the PVC // will also wait for PV to be deleted when reclaimPolicy=Delete - tpvc.Cleanup() + tpvc.Cleanup(ctx) // first check PV stills exists, then manually delete it if tpvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain { - tpvc.WaitForPersistentVolumePhase(v1.VolumeReleased) - tpvc.DeleteBoundPersistentVolume() + tpvc.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased) + tpvc.DeleteBoundPersistentVolume(ctx) } } } diff --git a/test/e2e/testsuites/specs.go b/test/e2e/testsuites/specs.go index 8a8da07ccb..cb6151d850 100644 --- a/test/e2e/testsuites/specs.go +++ b/test/e2e/testsuites/specs.go @@ -17,6 +17,7 @@ limitations under the License. 
package testsuites import ( + "context" "fmt" "sigs.k8s.io/azurefile-csi-driver/test/e2e/driver" @@ -91,11 +92,11 @@ type DataSource struct { var supportedStorageAccountTypes = []string{"Standard_LRS", "Premium_LRS", "Standard_ZRS", "Standard_GRS", "Standard_RAGRS"} -func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) { +func (pod *PodDetails) SetupWithDynamicVolumes(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func(ctx context.Context)) { tpod := NewTestPod(client, namespace, pod.Cmd, pod.IsWindows, pod.WinServerVer) - cleanupFuncs := make([]func(), 0) + cleanupFuncs := make([]func(ctx context.Context), 0) for n, v := range pod.Volumes { - tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters) + tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, csiDriver, storageClassParameters) cleanupFuncs = append(cleanupFuncs, funcs...) if v.VolumeMode == Block { tpod.SetupRawBlockVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeDevice.NameGenerate, n+1), v.VolumeDevice.DevicePath) @@ -107,15 +108,15 @@ func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, names } // SetupWithDynamicMultipleVolumes each pod will be mounted with multiple volumes with different storage account types -func (pod *PodDetails) SetupWithDynamicMultipleVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) { +func (pod *PodDetails) SetupWithDynamicMultipleVolumes(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func(ctx context.Context)) { tpod := NewTestPod(client, namespace, pod.Cmd, pod.IsWindows, pod.WinServerVer) - cleanupFuncs := make([]func(), 0) + cleanupFuncs := make([]func(ctx context.Context), 0) accountTypeCount := len(supportedStorageAccountTypes) for n, v := range pod.Volumes { if len(storageClassParameters) == 0 { storageClassParameters = map[string]string{"skuName": supportedStorageAccountTypes[n%accountTypeCount]} } - tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters) + tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, csiDriver, storageClassParameters) cleanupFuncs = append(cleanupFuncs, funcs...) 
if v.VolumeMode == Block { tpod.SetupRawBlockVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeDevice.NameGenerate, n+1), v.VolumeDevice.DevicePath) @@ -126,11 +127,11 @@ func (pod *PodDetails) SetupWithDynamicMultipleVolumes(client clientset.Interfac return tpod, cleanupFuncs } -func (pod *PodDetails) SetupWithDynamicVolumesWithSubpath(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) { +func (pod *PodDetails) SetupWithDynamicVolumesWithSubpath(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func(ctx context.Context)) { tpod := NewTestPod(client, namespace, pod.Cmd, pod.IsWindows, pod.WinServerVer) - cleanupFuncs := make([]func(), 0) + cleanupFuncs := make([]func(ctx context.Context), 0) for n, v := range pod.Volumes { - tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters) + tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, csiDriver, storageClassParameters) cleanupFuncs = append(cleanupFuncs, funcs...) tpod.SetupVolumeMountWithSubpath(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), "testSubpath", v.VolumeMount.ReadOnly) } @@ -155,11 +156,11 @@ func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, nam return tpod, cleanupFuncs } -func (pod *PodDetails) SetupWithPreProvisionedVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.PreProvisionedVolumeTestDriver) (*TestPod, []func()) { +func (pod *PodDetails) SetupWithPreProvisionedVolumes(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.PreProvisionedVolumeTestDriver) (*TestPod, []func(ctx context.Context)) { tpod := NewTestPod(client, namespace, pod.Cmd, pod.IsWindows, pod.WinServerVer) - cleanupFuncs := make([]func(), 0) + cleanupFuncs := make([]func(ctx context.Context), 0) for n, v := range pod.Volumes { - tpvc, funcs := v.SetupPreProvisionedPersistentVolumeClaim(client, namespace, csiDriver) + tpvc, funcs := v.SetupPreProvisionedPersistentVolumeClaim(ctx, client, namespace, csiDriver) cleanupFuncs = append(cleanupFuncs, funcs...) 
if v.VolumeMode == Block { @@ -171,19 +172,19 @@ func (pod *PodDetails) SetupWithPreProvisionedVolumes(client clientset.Interface return tpod, cleanupFuncs } -func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1.Namespace, replicas int32, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestDeployment, []func(), string) { - cleanupFuncs := make([]func(), 0) +func (pod *PodDetails) SetupDeployment(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, replicas int32, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestDeployment, []func(ctx context.Context), string) { + cleanupFuncs := make([]func(ctx context.Context), 0) volume := pod.Volumes[0] ginkgo.By("setting up the StorageClass") storageClass := csiDriver.GetDynamicProvisionStorageClass(storageClassParameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) tsc := NewTestStorageClass(client, namespace, storageClass) - createdStorageClass := tsc.Create() + createdStorageClass := tsc.Create(ctx) cleanupFuncs = append(cleanupFuncs, tsc.Cleanup) ginkgo.By("setting up the PVC") tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass) - tpvc.Create() - tpvc.WaitForBound() - tpvc.ValidateProvisionedPersistentVolume() + tpvc.Create(ctx) + tpvc.WaitForBound(ctx) + tpvc.ValidateProvisionedPersistentVolume(ctx) cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup) ginkgo.By("setting up the Deployment") tDeployment := NewTestDeployment(client, namespace, replicas, pod.Cmd, tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", volume.VolumeMount.NameGenerate, 1), fmt.Sprintf("%s%d", volume.VolumeMount.MountPathGenerate, 1), volume.VolumeMount.ReadOnly, pod.IsWindows, pod.WinServerVer) @@ -196,13 +197,13 @@ func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1 return tDeployment, cleanupFuncs, volumeID } -func (pod *PodDetails) SetupStatefulset(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestStatefulset, []func()) { - cleanupFuncs := make([]func(), 0) +func (pod *PodDetails) SetupStatefulset(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestStatefulset, []func(ctx context.Context)) { + cleanupFuncs := make([]func(ctx context.Context), 0) volume := pod.Volumes[0] ginkgo.By("setting up the StorageClass") storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(), volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) tsc := NewTestStorageClass(client, namespace, storageClass) - createdStorageClass := tsc.Create() + createdStorageClass := tsc.Create(ctx) cleanupFuncs = append(cleanupFuncs, tsc.Cleanup) ginkgo.By("setting up the PVC") tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass) @@ -218,12 +219,12 @@ func (pod *PodDetails) SetupStatefulset(client clientset.Interface, namespace *v return tStatefulset, cleanupFuncs } -func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPersistentVolumeClaim, []func()) { - cleanupFuncs := make([]func(), 0) +func (volume *VolumeDetails) 
SetupDynamicPersistentVolumeClaim(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPersistentVolumeClaim, []func(ctx context.Context)) { + cleanupFuncs := make([]func(ctx context.Context), 0) ginkgo.By("setting up the StorageClass") storageClass := csiDriver.GetDynamicProvisionStorageClass(storageClassParameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) tsc := NewTestStorageClass(client, namespace, storageClass) - createdStorageClass := tsc.Create() + createdStorageClass := tsc.Create(ctx) cleanupFuncs = append(cleanupFuncs, tsc.Cleanup) ginkgo.By("setting up the PVC and PV") var tpvc *TestPersistentVolumeClaim @@ -235,19 +236,19 @@ func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset. } else { tpvc = NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass) } - tpvc.Create() + tpvc.Create(ctx) cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup) // PV will not be ready until PVC is used in a pod when volumeBindingMode: WaitForFirstConsumer if volume.VolumeBindingMode == nil || *volume.VolumeBindingMode == storagev1.VolumeBindingImmediate { - tpvc.WaitForBound() - tpvc.ValidateProvisionedPersistentVolume() + tpvc.WaitForBound(ctx) + tpvc.ValidateProvisionedPersistentVolume(ctx) } return tpvc, cleanupFuncs } -func (volume *VolumeDetails) SetupPreProvisionedPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.PreProvisionedVolumeTestDriver) (*TestPersistentVolumeClaim, []func()) { - cleanupFuncs := make([]func(), 0) +func (volume *VolumeDetails) SetupPreProvisionedPersistentVolumeClaim(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.PreProvisionedVolumeTestDriver) (*TestPersistentVolumeClaim, []func(ctx context.Context)) { + cleanupFuncs := make([]func(ctx context.Context), 0) ginkgo.By("setting up the PV") attrib := make(map[string]string) if volume.ShareName != "" { @@ -256,23 +257,23 @@ func (volume *VolumeDetails) SetupPreProvisionedPersistentVolumeClaim(client cli nodeStageSecretRef := volume.NodeStageSecretRef pv := csiDriver.GetPersistentVolume(volume.VolumeID, volume.FSType, volume.ClaimSize, volume.ReclaimPolicy, namespace.Name, attrib, nodeStageSecretRef) tpv := NewTestPreProvisionedPersistentVolume(client, pv) - tpv.Create() + tpv.Create(ctx) ginkgo.By("setting up the PVC") tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, nil) - tpvc.Create() + tpvc.Create(ctx) cleanupFuncs = append(cleanupFuncs, tpvc.DeleteBoundPersistentVolume) cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup) - tpvc.WaitForBound() - tpvc.ValidateProvisionedPersistentVolume() + tpvc.WaitForBound(ctx) + tpvc.ValidateProvisionedPersistentVolume(ctx) return tpvc, cleanupFuncs } -func CreateVolumeSnapshotClass(client restclientset.Interface, namespace *v1.Namespace, csiDriver driver.VolumeSnapshotTestDriver) (*TestVolumeSnapshotClass, func()) { +func CreateVolumeSnapshotClass(ctx context.Context, client restclientset.Interface, namespace *v1.Namespace, csiDriver driver.VolumeSnapshotTestDriver) (*TestVolumeSnapshotClass, func()) { ginkgo.By("setting up the VolumeSnapshotClass") volumeSnapshotClass := csiDriver.GetVolumeSnapshotClass(namespace.Name) tvsc := NewTestVolumeSnapshotClass(client, namespace, volumeSnapshotClass) - tvsc.Create() + 
tvsc.Create(ctx) return tvsc, tvsc.Cleanup } diff --git a/test/e2e/testsuites/testsuites.go b/test/e2e/testsuites/testsuites.go index 09a4c49194..e53efdbc46 100644 --- a/test/e2e/testsuites/testsuites.go +++ b/test/e2e/testsuites/testsuites.go @@ -82,18 +82,18 @@ func NewTestStorageClass(c clientset.Interface, ns *v1.Namespace, sc *storagev1. } } -func (t *TestStorageClass) Create() storagev1.StorageClass { +func (t *TestStorageClass) Create(ctx context.Context) storagev1.StorageClass { var err error ginkgo.By("creating a StorageClass " + t.storageClass.Name) - t.storageClass, err = t.client.StorageV1().StorageClasses().Create(context.TODO(), t.storageClass, metav1.CreateOptions{}) + t.storageClass, err = t.client.StorageV1().StorageClasses().Create(ctx, t.storageClass, metav1.CreateOptions{}) framework.ExpectNoError(err) return *t.storageClass } -func (t *TestStorageClass) Cleanup() { +func (t *TestStorageClass) Cleanup(ctx context.Context) { framework.Logf("deleting StorageClass %s", t.storageClass.Name) - err := t.client.StorageV1().StorageClasses().Delete(context.TODO(), t.storageClass.Name, metav1.DeleteOptions{}) + err := t.client.StorageV1().StorageClasses().Delete(ctx, t.storageClass.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) } @@ -110,10 +110,10 @@ func NewTestPreProvisionedPersistentVolume(c clientset.Interface, pv *v1.Persist } } -func (pv *TestPreProvisionedPersistentVolume) Create() v1.PersistentVolume { +func (pv *TestPreProvisionedPersistentVolume) Create(ctx context.Context) v1.PersistentVolume { var err error ginkgo.By("creating a PV") - pv.persistentVolume, err = pv.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv.requestedPersistentVolume, metav1.CreateOptions{}) + pv.persistentVolume, err = pv.client.CoreV1().PersistentVolumes().Create(ctx, pv.requestedPersistentVolume, metav1.CreateOptions{}) framework.ExpectNoError(err) return *pv.persistentVolume } @@ -159,7 +159,7 @@ func NewTestPersistentVolumeClaimWithDataSource(c clientset.Interface, ns *v1.Na } } -func (t *TestPersistentVolumeClaim) Create() { +func (t *TestPersistentVolumeClaim) Create(ctx context.Context) { var err error ginkgo.By("creating a PVC") @@ -168,16 +168,16 @@ func (t *TestPersistentVolumeClaim) Create() { storageClassName = t.storageClass.Name } t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode, t.dataSource) - t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(context.TODO(), t.requestedPersistentVolumeClaim, metav1.CreateOptions{}) + t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(ctx, t.requestedPersistentVolumeClaim, metav1.CreateOptions{}) framework.ExpectNoError(err) } -func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() { +func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume(ctx context.Context) { var err error // Get the bound PersistentVolume ginkgo.By("validating provisioned PV") - t.persistentVolume, err = t.client.CoreV1().PersistentVolumes().Get(context.TODO(), t.persistentVolumeClaim.Spec.VolumeName, metav1.GetOptions{}) + t.persistentVolume, err = t.client.CoreV1().PersistentVolumes().Get(ctx, t.persistentVolumeClaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) // Check sizes @@ -213,16 +213,16 @@ func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() { } } -func (t *TestPersistentVolumeClaim) WaitForBound() 
v1.PersistentVolumeClaim { +func (t *TestPersistentVolumeClaim) WaitForBound(ctx context.Context) v1.PersistentVolumeClaim { var err error ginkgo.By(fmt.Sprintf("waiting for PVC to be in phase %q", v1.ClaimBound)) - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) ginkgo.By("checking the PVC") // Get new copy of the claim - t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(context.TODO(), t.persistentVolumeClaim.Name, metav1.GetOptions{}) + t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(ctx, t.persistentVolumeClaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return *t.persistentVolumeClaim @@ -272,9 +272,9 @@ func generateStatefulSetPVC(namespace, storageClassName, claimSize string, volum } } -func (t *TestPersistentVolumeClaim) Cleanup() { +func (t *TestPersistentVolumeClaim) Cleanup(ctx context.Context) { framework.Logf("deleting PVC %q/%q", t.namespace.Name, t.persistentVolumeClaim.Name) - err := e2epv.DeletePersistentVolumeClaim(t.client, t.persistentVolumeClaim.Name, t.namespace.Name) + err := e2epv.DeletePersistentVolumeClaim(ctx, t.client, t.persistentVolumeClaim.Name, t.namespace.Name) framework.ExpectNoError(err) // Wait for the PV to get deleted if reclaim policy is Delete. (If it's // Retain, there's no use waiting because the PV won't be auto-deleted and @@ -285,14 +285,14 @@ func (t *TestPersistentVolumeClaim) Cleanup() { if t.persistentVolume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete { if t.persistentVolume.Spec.CSI != nil { // only workaround in CSI driver tests - t.removeFinalizers() + t.removeFinalizers(ctx) } ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name)) - err := e2epv.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) + err := e2epv.WaitForPersistentVolumeDeleted(ctx, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) framework.ExpectNoError(err) } // Wait for the PVC to be deleted - err = waitForPersistentVolumeClaimDeleted(t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute) + err = waitForPersistentVolumeClaimDeleted(ctx, t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute) framework.ExpectNoError(err) } @@ -300,27 +300,27 @@ func (t *TestPersistentVolumeClaim) ReclaimPolicy() v1.PersistentVolumeReclaimPo return t.persistentVolume.Spec.PersistentVolumeReclaimPolicy } -func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase) { - err := e2epv.WaitForPersistentVolumePhase(phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) +func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(ctx context.Context, phase v1.PersistentVolumePhase) { + err := e2epv.WaitForPersistentVolumePhase(ctx, phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) framework.ExpectNoError(err) } -func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume() { +func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume(ctx context.Context) { ginkgo.By(fmt.Sprintf("deleting PV %q", 
t.persistentVolume.Name)) - err := e2epv.DeletePersistentVolume(t.client, t.persistentVolume.Name) + err := e2epv.DeletePersistentVolume(ctx, t.client, t.persistentVolume.Name) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name)) - err = e2epv.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) framework.ExpectNoError(err) } -func (t *TestPersistentVolumeClaim) DeleteBackingVolume(azfile *azurefile.Driver) { +func (t *TestPersistentVolumeClaim) DeleteBackingVolume(ctx context.Context, azfile *azurefile.Driver) { volumeID := t.persistentVolume.Spec.CSI.VolumeHandle ginkgo.By(fmt.Sprintf("deleting azurefile volume %q", volumeID)) req := &csi.DeleteVolumeRequest{ VolumeId: volumeID, } - _, err := azfile.DeleteVolume(context.Background(), req) + _, err := azfile.DeleteVolume(ctx, req) if err != nil { ginkgo.Fail(fmt.Sprintf("could not delete volume %q: %v", volumeID, err)) } @@ -328,8 +328,8 @@ func (t *TestPersistentVolumeClaim) DeleteBackingVolume(azfile *azurefile.Driver // removeFinalizers is a workaround to solve the problem that PV is stuck at terminating after PVC is deleted. // Related issue: https://github.com/kubernetes/kubernetes/issues/69697 -func (t *TestPersistentVolumeClaim) removeFinalizers() { - pv, err := t.client.CoreV1().PersistentVolumes().Get(context.TODO(), t.persistentVolume.Name, metav1.GetOptions{}) +func (t *TestPersistentVolumeClaim) removeFinalizers(ctx context.Context) { + pv, err := t.client.CoreV1().PersistentVolumes().Get(ctx, t.persistentVolume.Name, metav1.GetOptions{}) if err != nil && strings.Contains(err.Error(), "not found") { return } @@ -348,7 +348,7 @@ func (t *TestPersistentVolumeClaim) removeFinalizers() { patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, pvClone) framework.ExpectNoError(err) - _, err = t.client.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = t.client.CoreV1().PersistentVolumes().Patch(ctx, pvClone.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) // Because the pv might be deleted successfully before patched, if so, ignore the error. 
if err != nil && strings.Contains(err.Error(), "not found") { return @@ -427,25 +427,25 @@ func NewTestDeployment(c clientset.Interface, ns *v1.Namespace, replicas int32, return testDeployment } -func (t *TestDeployment) Create() { +func (t *TestDeployment) Create(ctx context.Context) { var err error - t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(context.TODO(), t.deployment, metav1.CreateOptions{}) + t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(ctx, t.deployment, metav1.CreateOptions{}) framework.ExpectNoError(err) err = deployment.WaitForDeploymentComplete(t.client, t.deployment) framework.ExpectNoError(err) - pods, err := deployment.GetPodsForDeployment(t.client, t.deployment) + pods, err := deployment.GetPodsForDeployment(ctx, t.client, t.deployment) framework.ExpectNoError(err) // always get first pod as there should only be one t.podName = pods.Items[0].Name } -func (t *TestDeployment) WaitForPodReady() { - pods, err := deployment.GetPodsForDeployment(t.client, t.deployment) +func (t *TestDeployment) WaitForPodReady(ctx context.Context) { + pods, err := deployment.GetPodsForDeployment(ctx, t.client, t.deployment) framework.ExpectNoError(err) // always get first pod as there should only be one pod := pods.Items[0] t.podName = pod.Name - err = e2epod.WaitForPodRunningInNamespace(t.client, &pod) + err = e2epod.WaitForPodRunningInNamespace(ctx, t.client, &pod) framework.ExpectNoError(err) } @@ -453,9 +453,9 @@ func (t *TestDeployment) PollForStringInPodsExec(command []string, expectedStrin pollForStringInPodsExec(t.namespace.Name, []string{t.podName}, command, expectedString) } -func (t *TestDeployment) DeletePodAndWait() { +func (t *TestDeployment) DeletePodAndWait(ctx context.Context) { framework.Logf("Deleting pod %q in namespace %q", t.podName, t.namespace.Name) - err := t.client.CoreV1().Pods(t.namespace.Name).Delete(context.TODO(), t.podName, metav1.DeleteOptions{}) + err := t.client.CoreV1().Pods(t.namespace.Name).Delete(ctx, t.podName, metav1.DeleteOptions{}) if err != nil { if !apierrs.IsNotFound(err) { framework.ExpectNoError(fmt.Errorf("pod %q Delete API error: %v", t.podName, err)) @@ -463,26 +463,26 @@ func (t *TestDeployment) DeletePodAndWait() { return } framework.Logf("Waiting for pod %q in namespace %q to be fully deleted", t.podName, t.namespace.Name) - err = e2epod.WaitForPodNotFoundInNamespace(t.client, t.podName, t.namespace.Name, e2epod.DefaultPodDeletionTimeout) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, t.client, t.podName, t.namespace.Name, e2epod.DefaultPodDeletionTimeout) if err != nil { framework.ExpectNoError(fmt.Errorf("pod %q error waiting for delete: %v", t.podName, err)) } } -func (t *TestDeployment) Cleanup() { +func (t *TestDeployment) Cleanup(ctx context.Context) { framework.Logf("deleting Deployment %q/%q", t.namespace.Name, t.deployment.Name) - body, err := t.Logs() + body, err := t.Logs(ctx) if err != nil { framework.Logf("Error getting logs for pod %s: %v", t.podName, err) } else { framework.Logf("Pod %s has the following logs: %s", t.podName, body) } - err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(context.TODO(), t.deployment.Name, metav1.DeleteOptions{}) + err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(ctx, t.deployment.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) } -func (t *TestDeployment) Logs() ([]byte, error) { - return podLogs(t.client, t.podName, t.namespace.Name) +func (t *TestDeployment) Logs(ctx context.Context) ([]byte, error) 
{ + return podLogs(ctx, t.client, t.podName, t.namespace.Name) } type TestStatefulset struct { @@ -556,31 +556,31 @@ func NewTestStatefulset(c clientset.Interface, ns *v1.Namespace, command string, return testStatefulset } -func (t *TestStatefulset) Create() { +func (t *TestStatefulset) Create(ctx context.Context) { var err error - t.statefulset, err = t.client.AppsV1().StatefulSets(t.namespace.Name).Create(context.TODO(), t.statefulset, metav1.CreateOptions{}) + t.statefulset, err = t.client.AppsV1().StatefulSets(t.namespace.Name).Create(ctx, t.statefulset, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = waitForStatefulSetComplete(t.client, t.namespace, t.statefulset) + err = waitForStatefulSetComplete(ctx, t.client, t.namespace, t.statefulset) framework.ExpectNoError(err) selector, err := metav1.LabelSelectorAsSelector(t.statefulset.Spec.Selector) framework.ExpectNoError(err) options := metav1.ListOptions{LabelSelector: selector.String()} - statefulSetPods, err := t.client.CoreV1().Pods(t.namespace.Name).List(context.TODO(), options) + statefulSetPods, err := t.client.CoreV1().Pods(t.namespace.Name).List(ctx, options) framework.ExpectNoError(err) // always get first pod as there should only be one t.podName = statefulSetPods.Items[0].Name } -func (t *TestStatefulset) WaitForPodReady() { +func (t *TestStatefulset) WaitForPodReady(ctx context.Context) { selector, err := metav1.LabelSelectorAsSelector(t.statefulset.Spec.Selector) framework.ExpectNoError(err) options := metav1.ListOptions{LabelSelector: selector.String()} - statefulSetPods, err := t.client.CoreV1().Pods(t.namespace.Name).List(context.TODO(), options) + statefulSetPods, err := t.client.CoreV1().Pods(t.namespace.Name).List(ctx, options) framework.ExpectNoError(err) // always get first pod as there should only be one pod := statefulSetPods.Items[0] t.podName = pod.Name - err = e2epod.WaitForPodRunningInNamespace(t.client, &pod) + err = e2epod.WaitForPodRunningInNamespace(ctx, t.client, &pod) framework.ExpectNoError(err) } @@ -588,9 +588,9 @@ func (t *TestStatefulset) PollForStringInPodsExec(command []string, expectedStri pollForStringInPodsExec(t.namespace.Name, []string{t.podName}, command, expectedString) } -func (t *TestStatefulset) DeletePodAndWait() { +func (t *TestStatefulset) DeletePodAndWait(ctx context.Context) { framework.Logf("Deleting pod %q in namespace %q", t.podName, t.namespace.Name) - err := t.client.CoreV1().Pods(t.namespace.Name).Delete(context.TODO(), t.podName, metav1.DeleteOptions{}) + err := t.client.CoreV1().Pods(t.namespace.Name).Delete(ctx, t.podName, metav1.DeleteOptions{}) if err != nil { if !apierrs.IsNotFound(err) { framework.ExpectNoError(fmt.Errorf("pod %q Delete API error: %v", t.podName, err)) @@ -601,25 +601,25 @@ func (t *TestStatefulset) DeletePodAndWait() { time.Sleep(60 * time.Second) } -func (t *TestStatefulset) Cleanup() { +func (t *TestStatefulset) Cleanup(ctx context.Context) { framework.Logf("deleting StatefulSet %q/%q", t.namespace.Name, t.statefulset.Name) - body, err := t.Logs() + body, err := t.Logs(ctx) if err != nil { framework.Logf("Error getting logs for pod %s: %v", t.podName, err) } else { framework.Logf("Pod %s has the following logs: %s", t.podName, body) } - err = t.client.AppsV1().StatefulSets(t.namespace.Name).Delete(context.TODO(), t.statefulset.Name, metav1.DeleteOptions{}) + err = t.client.AppsV1().StatefulSets(t.namespace.Name).Delete(ctx, t.statefulset.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) } -func (t *TestStatefulset) Logs() 
([]byte, error) { - return podLogs(t.client, t.podName, t.namespace.Name) +func (t *TestStatefulset) Logs(ctx context.Context) ([]byte, error) { + return podLogs(ctx, t.client, t.podName, t.namespace.Name) } -func waitForStatefulSetComplete(cs clientset.Interface, ns *v1.Namespace, ss *apps.StatefulSet) error { +func waitForStatefulSetComplete(ctx context.Context, cs clientset.Interface, ns *v1.Namespace, ss *apps.StatefulSet) error { err := wait.PollImmediate(poll, pollTimeout, func() (bool, error) { var err error - statefulSet, err := cs.AppsV1().StatefulSets(ns.Name).Get(context.TODO(), ss.Name, metav1.GetOptions{}) + statefulSet, err := cs.AppsV1().StatefulSets(ns.Name).Get(ctx, ss.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -675,24 +675,25 @@ func NewTestPod(c clientset.Interface, ns *v1.Namespace, command string, isWindo return testPod } -func (t *TestPod) Create() { +func (t *TestPod) Create(ctx context.Context) { var err error - t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(context.TODO(), t.pod, metav1.CreateOptions{}) + t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(ctx, t.pod, metav1.CreateOptions{}) framework.ExpectNoError(err) } -func (t *TestPod) WaitForSuccess() { - err := e2epod.WaitForPodSuccessInNamespaceSlow(t.client, t.pod.Name, t.namespace.Name) +func (t *TestPod) WaitForSuccess(ctx context.Context) { + err := e2epod.WaitForPodSuccessInNamespaceSlow(ctx, t.client, t.pod.Name, t.namespace.Name) framework.ExpectNoError(err) } -func (t *TestPod) WaitForRunning() { - err := e2epod.WaitForPodRunningInNamespace(t.client, t.pod) +func (t *TestPod) WaitForRunning(ctx context.Context) { + err := e2epod.WaitForPodRunningInNamespace(ctx, t.client, t.pod) framework.ExpectNoError(err) } -func (t *TestPod) WaitForFailedMountError() { +func (t *TestPod) WaitForFailedMountError(ctx context.Context) { err := e2eevents.WaitTimeoutForEvent( + ctx, t.client, t.namespace.Name, fields.Set{"reason": events.FailedMountVolume}.AsSelector().String(), @@ -715,8 +716,8 @@ var podFailedCondition = func(pod *v1.Pod) (bool, error) { } } -func (t *TestPod) WaitForFailure() { - err := e2epod.WaitForPodCondition(t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition) +func (t *TestPod) WaitForFailure(ctx context.Context) { + err := e2epod.WaitForPodCondition(ctx, t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition) framework.ExpectNoError(err) } @@ -830,12 +831,12 @@ func (t *TestPod) SetNodeSelector(nodeSelector map[string]string) { t.pod.Spec.NodeSelector = nodeSelector } -func (t *TestPod) Cleanup() { - cleanupPodOrFail(t.client, t.pod.Name, t.namespace.Name) +func (t *TestPod) Cleanup(ctx context.Context) { + cleanupPodOrFail(ctx, t.client, t.pod.Name, t.namespace.Name) } -func (t *TestPod) Logs() ([]byte, error) { - return podLogs(t.client, t.pod.Name, t.namespace.Name) +func (t *TestPod) Logs(ctx context.Context) ([]byte, error) { + return podLogs(ctx, t.client, t.pod.Name, t.namespace.Name) } type TestSecret struct { @@ -858,15 +859,15 @@ func NewTestSecret(c clientset.Interface, ns *v1.Namespace, name string, data ma } } -func (t *TestSecret) Create() { +func (t *TestSecret) Create(ctx context.Context) { var err error - t.secret, err = t.client.CoreV1().Secrets(t.namespace.Name).Create(context.TODO(), t.secret, metav1.CreateOptions{}) + t.secret, err = t.client.CoreV1().Secrets(t.namespace.Name).Create(ctx, t.secret, 
metav1.CreateOptions{}) framework.ExpectNoError(err) } -func (t *TestSecret) Cleanup() { +func (t *TestSecret) Cleanup(ctx context.Context) { framework.Logf("deleting Secret %s", t.secret.Name) - err := t.client.CoreV1().Secrets(t.namespace.Name).Delete(context.TODO(), t.secret.Name, metav1.DeleteOptions{}) + err := t.client.CoreV1().Secrets(t.namespace.Name).Delete(ctx, t.secret.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) } @@ -884,14 +885,14 @@ func NewTestVolumeSnapshotClass(c restclientset.Interface, ns *v1.Namespace, vsc } } -func (t *TestVolumeSnapshotClass) Create() { +func (t *TestVolumeSnapshotClass) Create(ctx context.Context) { ginkgo.By("creating a VolumeSnapshotClass") var err error - t.volumeSnapshotClass, err = snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshotClasses().Create(context.TODO(), t.volumeSnapshotClass, metav1.CreateOptions{}) + t.volumeSnapshotClass, err = snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshotClasses().Create(ctx, t.volumeSnapshotClass, metav1.CreateOptions{}) framework.ExpectNoError(err) } -func (t *TestVolumeSnapshotClass) CreateSnapshot(pvc *v1.PersistentVolumeClaim) *snapshotv1.VolumeSnapshot { +func (t *TestVolumeSnapshotClass) CreateSnapshot(ctx context.Context, pvc *v1.PersistentVolumeClaim) *snapshotv1.VolumeSnapshot { ginkgo.By("creating a VolumeSnapshot for " + pvc.Name) snapshot := &snapshotv1.VolumeSnapshot{ TypeMeta: metav1.TypeMeta{ @@ -909,15 +910,15 @@ func (t *TestVolumeSnapshotClass) CreateSnapshot(pvc *v1.PersistentVolumeClaim) }, }, } - snapshot, err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshots(t.namespace.Name).Create(context.TODO(), snapshot, metav1.CreateOptions{}) + snapshot, err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshots(t.namespace.Name).Create(ctx, snapshot, metav1.CreateOptions{}) framework.ExpectNoError(err) return snapshot } -func (t *TestVolumeSnapshotClass) ReadyToUse(snapshot *snapshotv1.VolumeSnapshot) { +func (t *TestVolumeSnapshotClass) ReadyToUse(ctx context.Context, snapshot *snapshotv1.VolumeSnapshot) { ginkgo.By("waiting for VolumeSnapshot to be ready to use - " + snapshot.Name) err := wait.Poll(15*time.Second, 5*time.Minute, func() (bool, error) { - vs, err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshots(t.namespace.Name).Get(context.TODO(), snapshot.Name, metav1.GetOptions{}) + vs, err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshots(t.namespace.Name).Get(ctx, snapshot.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("did not see ReadyToUse: %v", err) } @@ -927,9 +928,9 @@ func (t *TestVolumeSnapshotClass) ReadyToUse(snapshot *snapshotv1.VolumeSnapshot framework.ExpectNoError(err) } -func (t *TestVolumeSnapshotClass) DeleteSnapshot(vs *snapshotv1.VolumeSnapshot) { +func (t *TestVolumeSnapshotClass) DeleteSnapshot(ctx context.Context, vs *snapshotv1.VolumeSnapshot) { ginkgo.By("deleting a VolumeSnapshot " + vs.Name) - err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshots(t.namespace.Name).Delete(context.TODO(), vs.Name, metav1.DeleteOptions{}) + err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshots(t.namespace.Name).Delete(ctx, vs.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) } @@ -941,26 +942,26 @@ func (t *TestVolumeSnapshotClass) Cleanup() { //framework.ExpectNoError(err) } -func cleanupPodOrFail(client clientset.Interface, name, namespace string) { +func cleanupPodOrFail(ctx context.Context, client clientset.Interface, name, namespace string) { 
framework.Logf("deleting Pod %q/%q", namespace, name) - body, err := podLogs(client, name, namespace) + body, err := podLogs(ctx, client, name, namespace) if err != nil { framework.Logf("Error getting logs for pod %s: %v", name, err) } else { framework.Logf("Pod %s has the following logs: %s", name, body) } - e2epod.DeletePodOrFail(client, namespace, name) + e2epod.DeletePodOrFail(ctx, client, namespace, name) } -func podLogs(client clientset.Interface, name, namespace string) ([]byte, error) { - return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do(context.TODO()).Raw() +func podLogs(ctx context.Context, client clientset.Interface, name, namespace string) ([]byte, error) { + return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do(ctx).Raw() } // waitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first. -func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { +func waitForPersistentVolumeClaimDeleted(ctx context.Context, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{}) + _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { if apierrs.IsNotFound(err) { framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)