fix: call CreateVolume and initialize volumeID in BeforeEach
cvvz committed Mar 29, 2023
1 parent e6aa3a8 commit a9b913d
Showing 3 changed files with 102 additions and 160 deletions.
90 changes: 89 additions & 1 deletion test/e2e/dynamic_provisioning_test.go
@@ -57,9 +57,9 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() {

cs = f.ClientSet
ns = f.Namespace
testDriver = driver.InitBlobCSIDriver()
})

testDriver = driver.InitBlobCSIDriver()
ginkgo.It("should create a volume on demand without saving storage account key", func() {
pods := []testsuites.PodDetails{
{
@@ -811,4 +811,92 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() {
}
test.Run(cs, ns)
})

ginkgo.It("nfs volume mount is still valid after driver restart [blob.csi.azure.com]", func() {
// print driver logs before driver restart
blobLog := testCmd{
command: "bash",
args: []string{"test/utils/blob_log.sh"},
startLog: "===================blob log (before restart)===================",
endLog: "====================================================================",
}
execTestCmd([]testCmd{blobLog})

pod := testsuites.PodDetails{
Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 3600; done",
Volumes: []testsuites.VolumeDetails{
{
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
},
},
}

podCheckCmd := []string{"cat", "/mnt/test-1/data"}
expectedString := "hello world\n"
test := testsuites.DynamicallyProvisionedRestartDriverTest{
CSIDriver: testDriver,
Pod: pod,
PodCheck: &testsuites.PodExecCheck{
Cmd: podCheckCmd,
ExpectedString: expectedString,
},
StorageClassParameters: map[string]string{"protocol": "nfs"},
RestartDriverFunc: func() {
restartDriver := testCmd{
command: "bash",
args: []string{"test/utils/restart_driver_daemonset.sh"},
startLog: "Restart driver node daemonset ...",
endLog: "Restart driver node daemonset done successfully",
}
execTestCmd([]testCmd{restartDriver})
},
}
test.Run(cs, ns)
})

ginkgo.It("blobfuse volume mount is still valid after driver restart [blob.csi.azure.com]", func() {
_, useBlobfuseProxy := os.LookupEnv("ENABLE_BLOBFUSE_PROXY")
if !useBlobfuseProxy {
ginkgo.Skip("skip this test since blobfuse-proxy is not enabled")
}

pod := testsuites.PodDetails{
Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 3600; done",
Volumes: []testsuites.VolumeDetails{
{
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
},
},
}

podCheckCmd := []string{"cat", "/mnt/test-1/data"}
expectedString := "hello world\n"
test := testsuites.DynamicallyProvisionedRestartDriverTest{
CSIDriver: testDriver,
Pod: pod,
PodCheck: &testsuites.PodExecCheck{
Cmd: podCheckCmd,
ExpectedString: expectedString,
},
StorageClassParameters: map[string]string{"skuName": "Standard_LRS"},
RestartDriverFunc: func() {
restartDriver := testCmd{
command: "bash",
args: []string{"test/utils/restart_driver_daemonset.sh"},
startLog: "Restart driver node daemonset ...",
endLog: "Restart driver node daemonset done successfully",
}
execTestCmd([]testCmd{restartDriver})
},
}
test.Run(cs, ns)
})
})
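
The two restart tests added above shell out through the suite's testCmd/execTestCmd helpers, whose definitions are not part of this diff. Below is a rough, self-contained sketch of what such a helper plausibly looks like, assuming it simply runs the command and prints the start/end banners; the real helper may differ (for example, by changing into the repository root first):

package e2e

import (
	"fmt"
	"log"
	"os"
	"os/exec"

	"github.com/onsi/ginkgo/v2"
)

// testCmd mirrors the fields used in the diff above: a command, its
// arguments, and log banners printed around the run. The real definition
// lives elsewhere in the e2e package and is assumed here.
type testCmd struct {
	command  string   // executable, e.g. "bash"
	args     []string // arguments, e.g. a script path under test/utils
	startLog string   // banner printed before the command runs
	endLog   string   // banner printed after it succeeds
}

// execTestCmd runs each command in order, failing the spec on any error.
func execTestCmd(cmds []testCmd) {
	for _, cmd := range cmds {
		log.Println(cmd.startLog)
		c := exec.Command(cmd.command, cmd.args...)
		c.Stdout, c.Stderr = os.Stdout, os.Stderr
		if err := c.Run(); err != nil {
			ginkgo.Fail(fmt.Sprintf("command %q %v failed: %v", cmd.command, cmd.args, err))
		}
		log.Println(cmd.endLog)
	}
}
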
170 changes: 12 additions & 158 deletions test/e2e/pre_provisioning_test.go
@@ -19,7 +19,6 @@ package e2e
import (
"context"
"fmt"
"os"
"time"

"sigs.k8s.io/blob-csi-driver/test/e2e/driver"
@@ -52,8 +51,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
ns *v1.Namespace
testDriver driver.PreProvisionedVolumeTestDriver
volumeID string
// Set to true if the volume should be deleted automatically after test
skipManuallyDeletingVolume bool
)

ginkgo.BeforeEach(func() {
@@ -68,29 +65,22 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
cs = f.ClientSet
ns = f.Namespace
testDriver = driver.InitBlobCSIDriver()
})

ginkgo.AfterEach(func() {
if !skipManuallyDeletingVolume {
req := &csi.DeleteVolumeRequest{
VolumeId: volumeID,
}
_, err := blobDriver.DeleteVolume(context.Background(), req)
if err != nil {
ginkgo.Fail(fmt.Sprintf("delete volume %q error: %v", volumeID, err))
}
}
resp, err := blobDriver.CreateVolume(context.Background(), makeCreateVolumeReq(fmt.Sprintf("pre-provisioned-%d", ginkgo.GinkgoParallelProcess()), ns.Name))
framework.ExpectNoError(err, "create volume error")
volumeID = resp.Volume.VolumeId

ginkgo.DeferCleanup(func() {
_, err := blobDriver.DeleteVolume(
context.Background(),
&csi.DeleteVolumeRequest{
VolumeId: volumeID,
})
framework.ExpectNoError(err, "delete volume %s error", volumeID)
})
})

ginkgo.It("[env] should use a pre-provisioned volume and mount it as readOnly in a pod", func() {
req := makeCreateVolumeReq("pre-provisioned-readonly", ns.Name)
resp, err := blobDriver.CreateVolume(context.Background(), req)
if err != nil {
ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
}
volumeID = resp.Volume.VolumeId
ginkgo.By(fmt.Sprintf("Successfully provisioned blob volume: %q\n", volumeID))

volumeSize := fmt.Sprintf("%dGi", defaultVolumeSize)
pods := []testsuites.PodDetails{
{
@@ -117,14 +107,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
})

ginkgo.It(fmt.Sprintf("[env] should use a pre-provisioned volume and retain PV with reclaimPolicy %q", v1.PersistentVolumeReclaimRetain), func() {
req := makeCreateVolumeReq("pre-provisioned-retain-reclaimpolicy", ns.Name)
resp, err := blobDriver.CreateVolume(context.Background(), req)
if err != nil {
ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
}
volumeID = resp.Volume.VolumeId
ginkgo.By(fmt.Sprintf("Successfully provisioned blob volume: %q\n", volumeID))

volumeSize := fmt.Sprintf("%dGi", defaultVolumeSize)
reclaimPolicy := v1.PersistentVolumeReclaimRetain
volumes := []testsuites.VolumeDetails{
@@ -146,14 +128,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
volumeSize := fmt.Sprintf("%dGi", defaultVolumeSize)
pods := []testsuites.PodDetails{}
for i := 1; i <= 6; i++ {
req := makeCreateVolumeReq(fmt.Sprintf("pre-provisioned-multiple-pods%d", time.Now().UnixNano()), ns.Name)
resp, err := blobDriver.CreateVolume(context.Background(), req)
if err != nil {
ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
}
volumeID = resp.Volume.VolumeId
ginkgo.By(fmt.Sprintf("Successfully provisioned blob volume: %q\n", volumeID))

pod := testsuites.PodDetails{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
Volumes: []testsuites.VolumeDetails{
@@ -178,14 +152,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
})

ginkgo.It("should use existing credentials in k8s cluster", func() {
req := makeCreateVolumeReq("pre-provisioned-existing-credentials", ns.Name)
resp, err := blobDriver.CreateVolume(context.Background(), req)
if err != nil {
ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
}
volumeID = resp.Volume.VolumeId
ginkgo.By(fmt.Sprintf("Successfully provisioned blob volume: %q\n", volumeID))

volumeSize := fmt.Sprintf("%dGi", defaultVolumeSize)
reclaimPolicy := v1.PersistentVolumeReclaimRetain
volumeBindingMode := storagev1.VolumeBindingImmediate
@@ -216,14 +182,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
})

ginkgo.It("should use provided credentials", func() {
req := makeCreateVolumeReq("pre-provisioned-provided-credentials", ns.Name)
resp, err := blobDriver.CreateVolume(context.Background(), req)
if err != nil {
ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
}
volumeID = resp.Volume.VolumeId
ginkgo.By(fmt.Sprintf("Successfully provisioned blob volume: %q\n", volumeID))

volumeSize := fmt.Sprintf("%dGi", defaultVolumeSize)
reclaimPolicy := v1.PersistentVolumeReclaimRetain
volumeBindingMode := storagev1.VolumeBindingImmediate
@@ -257,14 +215,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
})

ginkgo.It("should use Key Vault", func() {
req := makeCreateVolumeReq("pre-provisioned-key-vault", ns.Name)
resp, err := blobDriver.CreateVolume(context.Background(), req)
if err != nil {
ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
}
volumeID = resp.Volume.VolumeId
ginkgo.By(fmt.Sprintf("Successfully provisioned blob volume: %q\n", volumeID))

volumeSize := fmt.Sprintf("%dGi", defaultVolumeSize)
reclaimPolicy := v1.PersistentVolumeReclaimRetain
volumeBindingMode := storagev1.VolumeBindingImmediate
@@ -298,14 +248,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
})

ginkgo.It("should use SAS token", func() {
req := makeCreateVolumeReq("pre-provisioned-sas-token", ns.Name)
resp, err := blobDriver.CreateVolume(context.Background(), req)
if err != nil {
ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
}
volumeID = resp.Volume.VolumeId
ginkgo.By(fmt.Sprintf("Successfully provisioned blob volume: %q\n", volumeID))

pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -333,94 +275,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Pre-Provisioned", func() {
}
test.Run(cs, ns)
})

ginkgo.It("nfs volume mount is still valid after driver restart [blob.csi.azure.com]", func() {
// print driver logs before driver restart
blobLog := testCmd{
command: "bash",
args: []string{"test/utils/blob_log.sh"},
startLog: "===================blob log (before restart)===================",
endLog: "====================================================================",
}
execTestCmd([]testCmd{blobLog})

pod := testsuites.PodDetails{
Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 3600; done",
Volumes: []testsuites.VolumeDetails{
{
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
},
},
}

podCheckCmd := []string{"cat", "/mnt/test-1/data"}
expectedString := "hello world\n"
test := testsuites.DynamicallyProvisionedRestartDriverTest{
CSIDriver: testDriver,
Pod: pod,
PodCheck: &testsuites.PodExecCheck{
Cmd: podCheckCmd,
ExpectedString: expectedString,
},
StorageClassParameters: map[string]string{"protocol": "nfs"},
RestartDriverFunc: func() {
restartDriver := testCmd{
command: "bash",
args: []string{"test/utils/restart_driver_daemonset.sh"},
startLog: "Restart driver node daemonset ...",
endLog: "Restart driver node daemonset done successfully",
}
execTestCmd([]testCmd{restartDriver})
},
}
test.Run(cs, ns)
})

ginkgo.It("blobfuse volume mount is still valid after driver restart [blob.csi.azure.com]", func() {
_, useBlobfuseProxy := os.LookupEnv("ENABLE_BLOBFUSE_PROXY")
if !useBlobfuseProxy {
ginkgo.Skip("skip this test since blobfuse-proxy is not enabled")
}

pod := testsuites.PodDetails{
Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 3600; done",
Volumes: []testsuites.VolumeDetails{
{
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
},
},
}

podCheckCmd := []string{"cat", "/mnt/test-1/data"}
expectedString := "hello world\n"
test := testsuites.DynamicallyProvisionedRestartDriverTest{
CSIDriver: testDriver,
Pod: pod,
PodCheck: &testsuites.PodExecCheck{
Cmd: podCheckCmd,
ExpectedString: expectedString,
},
StorageClassParameters: map[string]string{"skuName": "Standard_LRS"},
RestartDriverFunc: func() {
restartDriver := testCmd{
command: "bash",
args: []string{"test/utils/restart_driver_daemonset.sh"},
startLog: "Restart driver node daemonset ...",
endLog: "Restart driver node daemonset done successfully",
}
execTestCmd([]testCmd{restartDriver})
},
}
test.Run(cs, ns)
})
})

func makeCreateVolumeReq(volumeName, secretNamespace string) *csi.CreateVolumeRequest {
…
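
Taken together, the pre_provisioning_test.go changes replace the per-It CreateVolume calls and the AfterEach/skipManuallyDeletingVolume bookkeeping with a single BeforeEach that provisions the volume and registers its deletion via ginkgo.DeferCleanup. A minimal, self-contained sketch of that lifecycle pattern, with toy stand-ins for the CSI calls (blobDriver and makeCreateVolumeReq are defined elsewhere in the suite):

package e2e

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// Toy stand-ins for the driver calls; the real test uses
// blobDriver.CreateVolume / DeleteVolume with a *csi.CreateVolumeRequest
// built by makeCreateVolumeReq (body elided above).
func createVolume(name string) (string, error) { return "vol-" + name, nil }
func deleteVolume(volumeID string) error       { return nil }

var _ = ginkgo.Describe("per-spec volume lifecycle", func() {
	var volumeID string

	ginkgo.BeforeEach(func() {
		// Provision the volume once per spec...
		id, err := createVolume(fmt.Sprintf("pre-provisioned-%d", ginkgo.GinkgoParallelProcess()))
		if err != nil {
			ginkgo.Fail(fmt.Sprintf("create volume error: %v", err))
		}
		volumeID = id

		// ...and register teardown immediately, so cleanup runs even if
		// the spec fails partway through.
		ginkgo.DeferCleanup(func() {
			if err := deleteVolume(volumeID); err != nil {
				ginkgo.Fail(fmt.Sprintf("delete volume %q error: %v", volumeID, err))
			}
		})
	})

	ginkgo.It("uses the volume", func() { /* volumeID is ready here */ })
})

DeferCleanup ties the teardown to the spec that registered it and runs whether or not the spec succeeds; since every spec now shares the same provisioning path, the per-test skipManuallyDeletingVolume flag goes away.
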
2 changes: 1 addition & 1 deletion test/e2e/suite_test.go
@@ -140,7 +140,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
os.Setenv(kubeconfigEnvVar, kubeconfig)
}

// spin up a blob driver locally to make use of the azure client
// spin up a blob driver locally to make use of the azure client and controller service
kubeconfig := os.Getenv(kubeconfigEnvVar)
_, useBlobfuseProxy := os.LookupEnv("ENABLE_BLOBFUSE_PROXY")
driverOptions := blob.DriverOptions{
…
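
The suite_test.go hunk only rewords the comment above the local driver setup, but it points at why the suite builds blob.DriverOptions at all: the in-process driver exposes the controller service that the new BeforeEach calls. A sketch of how that setup might look; the field names and the NewDriver/Run signatures here are assumptions, only the blob.DriverOptions literal is confirmed by the diff:

package e2e

import (
	"os"

	"sigs.k8s.io/blob-csi-driver/pkg/blob"
)

// Hypothetical sketch: start an in-process blob CSI driver so the e2e
// suite can issue controller calls (CreateVolume/DeleteVolume) directly.
// NodeID, DriverName, EnableBlobfuseProxy, and the Run signature are
// assumptions, not shown in the diff above.
func startLocalDriver() *blob.Driver {
	kubeconfig := os.Getenv("KUBECONFIG")
	_, useBlobfuseProxy := os.LookupEnv("ENABLE_BLOBFUSE_PROXY")
	driverOptions := blob.DriverOptions{
		NodeID:              os.Getenv("nodeid"),
		DriverName:          blob.DefaultDriverName,
		EnableBlobfuseProxy: useBlobfuseProxy,
	}
	d := blob.NewDriver(&driverOptions)
	go func() {
		// Serve the CSI gRPC endpoint on a local unix socket.
		d.Run("unix:///tmp/csi.sock", kubeconfig, false)
	}()
	return d
}
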
