Merge pull request #193 from kubernetes-sigs/lock
feat: use file metadata for vhd disk lock
andyzhangx authored Mar 11, 2020
2 parents dfb1f2a + 68c0c6b commit a78a68e
Showing 115 changed files with 88,238 additions and 117 deletions.
3 changes: 2 additions & 1 deletion README.md
@@ -33,7 +33,8 @@ Please refer to [install azurefile csi driver](https://github.com/kubernetes-sig

### Examples
- [Basic usage](./deploy/example/e2e_usage.md)
- [Snapshot](./deploy/example/snapshot)
- [Snapshot(alpha)](./deploy/example/snapshot)
- [Fast attach disk(alpha)](./deploy/example/disk)

## Kubernetes Development
Please refer to [development guide](./docs/csi-dev.md)
5 changes: 4 additions & 1 deletion deploy/example/disk/README.md
@@ -1,4 +1,4 @@
## Azure File CSI driver vhd disk feature example
## Azure File CSI driver fast attach disk feature example
Attach Azure disks in < 1 second. Attach as many as you want, wherever you want. The VHD disk feature can mount Azure disks as Linux block devices directly on VMs, without any dependency on the host.

- Motivation: [Metadata/namespace heavy workload on Azure File](https://docs.microsoft.com/en-us/azure/storage/files/storage-troubleshooting-files-performance#cause-2-metadatanamespace-heavy-workload)
@@ -9,6 +9,9 @@ Add a VHD on the file share and mount VHD over SMB from the client to perform fi

Scheduling 20 pods, each with one VHD disk, on **one** node **in parallel** can be completed in 2 minutes, whereas the Azure managed disk driver takes 30 minutes for the same workload.

#### Feature Status
Status: Alpha

#### 1. create a pod with csi azurefile vhd disk mount on linux
##### Option#1: Dynamic Provisioning
- Create an azurefile CSI storage class and PVC
3 changes: 3 additions & 0 deletions deploy/example/snapshot/README.md
@@ -1,5 +1,8 @@
# Snapshot Example

#### Feature Status
Status: Alpha

> Attention: Since volume snapshot is currently an alpha feature in Kubernetes, you need to enable the alpha feature gate `VolumeSnapshotDataSource` on the Kubernetes master.
>
> ```
2 changes: 2 additions & 0 deletions go.mod
@@ -7,6 +7,7 @@ require (
github.com/Azure/azure-storage-file-go v0.5.0
github.com/Azure/go-autorest/autorest v0.9.0
github.com/Azure/go-autorest/autorest/adal v0.5.0
github.com/Azure/go-autorest/autorest/to v0.2.0
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/container-storage-interface/spec v1.1.0
github.com/coreos/bbolt v1.3.2 // indirect
@@ -42,6 +43,7 @@ require (
k8s.io/api v0.0.0
k8s.io/apimachinery v0.0.0
k8s.io/client-go v0.0.0
k8s.io/cloud-provider v0.0.0
k8s.io/klog v1.0.0
k8s.io/kubernetes v1.15.0
k8s.io/legacy-cloud-providers v0.0.0
66 changes: 38 additions & 28 deletions pkg/azurefile/azurefile.go
@@ -25,7 +25,6 @@ import (
"time"

csicommon "sigs.k8s.io/azurefile-csi-driver/pkg/csi-common"
volumehelper "sigs.k8s.io/azurefile-csi-driver/pkg/util"

azs "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/azure-storage-file-go/azfile"
@@ -66,17 +65,21 @@ const (
	// key of snapshot name in metadata
	snapshotNameKey = "initiator"

	diskNameField = "diskname"
	fsTypeField = "fstype"
	proxyMount = "proxy-mount"
	cifs = "cifs"
	shareNameField = "sharename"
	diskNameField = "diskname"
	fsTypeField = "fstype"
	proxyMount = "proxy-mount"
	cifs = "cifs"
	metaDataNode = "node"
)

// Driver implements all interfaces of CSI drivers
type Driver struct {
	csicommon.CSIDriver
	cloud   *azure.Cloud
	mounter *mount.SafeFormatAndMount
	// lock per volume attach (only for vhd disk feature)
	volLockMap *lockMap
}

// NewDriver Creates a NewCSIDriver object. Assumes vendor version is equal to driver version &
@@ -86,6 +89,7 @@ func NewDriver(nodeID string) *Driver {
	driver.Name = DriverName
	driver.Version = driverVersion
	driver.NodeID = nodeID
	driver.volLockMap = newLockMap()
	return &driver
}
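
The new `volLockMap` field and the `newLockMap()` call above serialize attach operations per volume for the VHD disk feature. The `lockMap` type itself is not part of this excerpt, so the following is only a minimal sketch, assuming a conventional per-key mutex map; the method names `LockEntry`/`UnlockEntry` are illustrative rather than taken from the PR.

```go
// Sketch only: a per-key lock map that allows one volume attach at a time per key.
// The actual lockMap used by this PR lives in a file not shown in this diff.
package azurefile

import "sync"

type lockMap struct {
	mutex sync.Mutex
	locks map[string]*sync.Mutex
}

func newLockMap() *lockMap {
	return &lockMap{locks: make(map[string]*sync.Mutex)}
}

// LockEntry blocks until the per-key lock (e.g. keyed by volume ID) is acquired.
func (lm *lockMap) LockEntry(key string) {
	lm.mutex.Lock()
	entry, ok := lm.locks[key]
	if !ok {
		entry = &sync.Mutex{}
		lm.locks[key] = entry
	}
	lm.mutex.Unlock() // release the map mutex before blocking on the entry lock
	entry.Lock()
}

// UnlockEntry releases the per-key lock previously acquired by LockEntry.
func (lm *lockMap) UnlockEntry(key string) {
	lm.mutex.Lock()
	entry, ok := lm.locks[key]
	lm.mutex.Unlock()
	if ok {
		entry.Unlock()
	}
}
```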

@@ -112,6 +116,7 @@ func (d *Driver) Run(endpoint string) {
	d.AddControllerServiceCapabilities(
		[]csi.ControllerServiceCapability_RPC_Type{
			csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
			csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
			csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
			//csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
			csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
@@ -298,29 +303,6 @@ func getSnapshot(id string) (string, error) {
	return segments[4], nil
}

func (d *Driver) expandVolume(ctx context.Context, volumeID string, capacityBytes int64) (int64, error) {
	if capacityBytes == 0 {
		return -1, status.Error(codes.InvalidArgument, "volume capacity range missing in request")
	}
	requestGiB := int32(volumehelper.RoundUpGiB(capacityBytes))

	shareURL, err := d.getShareURL(volumeID)
	if err != nil {
		return -1, status.Errorf(codes.Internal, "failed to get share url with (%s): %v, returning with success", volumeID, err)
	}

	if _, err = shareURL.SetQuota(ctx, requestGiB); err != nil {
		return -1, status.Errorf(codes.Internal, "expand volume error: %v", err)
	}

	resp, err := shareURL.GetProperties(ctx)
	if err != nil {
		return -1, status.Errorf(codes.Internal, "failed to get properties of share(%v): %v", shareURL, err)
	}

	return volumehelper.GiBToBytes(int64(resp.Quota())), nil
}

func getFileURL(accountName, accountKey, storageEndpointSuffix, fileShareName, diskName string) (*azfile.FileURL, error) {
	credential, err := azfile.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
@@ -378,3 +360,31 @@ func IsCorruptedDir(dir string) bool {
	fmt.Printf("IsCorruptedDir(%s) returned with error: %v", dir, pathErr)
	return pathErr != nil && mount.IsCorruptedMnt(pathErr)
}

func (d *Driver) getAccountInfo(volumeID string, secrets, context map[string]string) (rgName, accountName, accountKey, fileShareName, diskName string, err error) {
	if len(secrets) == 0 {
		rgName, accountName, fileShareName, diskName, err = getFileShareInfo(volumeID)
		if err == nil {
			if rgName == "" {
				rgName = d.cloud.ResourceGroup
			}
			accountKey, err = d.cloud.GetStorageAccesskey(accountName, rgName)
		}
	} else {
		for k, v := range context {
			switch strings.ToLower(k) {
			case shareNameField:
				fileShareName = v
			case diskNameField:
				diskName = v
			}
		}
		if fileShareName != "" {
			accountName, accountKey, err = getStorageAccount(secrets)
		} else {
			err = fmt.Errorf("could not find sharename from context(%v)", context)
		}
	}

	return rgName, accountName, accountKey, fileShareName, diskName, err
}
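
The commit title, the new `metaDataNode` constant, and `getFileURL` together hint at the mechanism: the node that attaches a VHD is recorded in the VHD file's metadata on the share. The PR's actual controller logic is not included in this excerpt, so the snippet below is only a hedged sketch of that idea; `markDiskAttached` is a made-up name, and it assumes the `azfile` GetProperties response exposes its metadata via `NewMetadata()`. In the real driver, the `*azfile.FileURL` would presumably come from `getFileURL` using the account details returned by `getAccountInfo` above.

```go
// Hedged sketch (not the PR's code): record which node holds a vhd disk in the
// file's metadata under the metaDataNode key, and refuse a conflicting attach.
package azurefile

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-file-go/azfile"
)

func markDiskAttached(ctx context.Context, fileURL *azfile.FileURL, nodeName string) error {
	resp, err := fileURL.GetProperties(ctx)
	if err != nil {
		return err
	}
	meta := resp.NewMetadata() // metadata currently set on the vhd file
	if meta == nil {
		meta = azfile.Metadata{}
	}
	if owner, ok := meta[metaDataNode]; ok && owner != "" && owner != nodeName {
		return fmt.Errorf("vhd disk is already attached to node %s", owner)
	}
	meta[metaDataNode] = nodeName // claim the disk for this node
	_, err = fileURL.SetMetadata(ctx, meta)
	return err
}
```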
9 changes: 4 additions & 5 deletions pkg/azurefile/azurefile_test.go
@@ -22,7 +22,6 @@ import (
"os"
"path/filepath"
"reflect"
"strings"
"testing"

"github.com/stretchr/testify/assert"
@@ -117,16 +116,16 @@ func TestGetFileShareInfo(t *testing.T) {

	for _, test := range tests {
		resourceGroupName, accountName, fileShareName, diskName, expectedError := getFileShareInfo(test.id)
		if !strings.EqualFold(resourceGroupName, test.resourceGroupName) {
		if resourceGroupName != test.resourceGroupName {
			t.Errorf("getFileShareInfo(%q) returned with: %q, expected: %q", test.id, resourceGroupName, test.resourceGroupName)
		}
		if !strings.EqualFold(accountName, test.accountName) {
		if accountName != test.accountName {
			t.Errorf("getFileShareInfo(%q) returned with: %q, expected: %q", test.id, accountName, test.accountName)
		}
		if !strings.EqualFold(fileShareName, test.fileShareName) {
		if fileShareName != test.fileShareName {
			t.Errorf("getFileShareInfo(%q) returned with: %q, expected: %q", test.id, fileShareName, test.fileShareName)
		}
		if !strings.EqualFold(diskName, test.diskName) {
		if diskName != test.diskName {
			t.Errorf("getFileShareInfo(%q) returned with: %q, expected: %q", test.id, diskName, test.diskName)
		}
		if !reflect.DeepEqual(expectedError, test.expectedError) {
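
The diff above adds `getAccountInfo` but the excerpt cuts off before any new tests for it. Below is a small, hypothetical test sketch in the `azurefile` package, exercising only the error path fully visible in `getAccountInfo` (non-empty secrets with no `sharename` key in the volume context); the test name and inputs are illustrative and not part of the PR.

```go
package azurefile

import (
	"strings"
	"testing"
)

// Sketch only: with non-empty secrets and no "sharename" entry in the volume
// context, getAccountInfo should return a "could not find sharename" error.
func TestGetAccountInfoMissingShareName(t *testing.T) {
	d := NewDriver("fakeNodeID")
	secrets := map[string]string{"accountname": "dummy"} // any non-empty map takes the secrets branch
	volContext := map[string]string{"fstype": "ext4"}    // deliberately missing "sharename"

	_, _, _, _, _, err := d.getAccountInfo("vol-id", secrets, volContext)
	if err == nil || !strings.Contains(err.Error(), "could not find sharename") {
		t.Errorf("expected missing sharename error, got: %v", err)
	}
}
```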