Mount auto-provisioned bricks on glusterd2 start
On `glusterd2` restart, check the list of auto-provisioned bricks
and mount any that are not already mounted.

Updates: gluster#851
Signed-off-by: Aravinda VK <avishwan@redhat.com>
aravindavk authored and Madhu-1 committed Jul 19, 2018
1 parent 5bf8c09 commit 4f9dde7
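
The restart re-mount this commit adds (MountLocalBricks, in glusterd2/utils/mount.go below) boils down to one mount(8) invocation per missing brick, built from the MountInfo recorded when the volume was created. Here is a minimal, self-contained sketch using os/exec in place of the internal utils.ExecuteCommandRun helper; the device, options, and mount directory are illustrative stand-ins for the stored values:

	package main

	import (
		"log"
		"os/exec"
	)

	func main() {
		// Illustrative values only; glusterd2 reads these from the brick's
		// MountInfo, recorded by the bricks planner at volume-create time.
		dev := "/dev/vg-sda1/brick-gv0-s1-b1" // DevicePath: /dev/<vg>/<lv>
		opts := "rw,inode64,noatime,nouuid"   // MntOpts
		dir := "/bricks/gv0-s1-b1"            // Mountdir (root is illustrative)

		// Equivalent of: mount -o <opts> <dev> <dir>
		out, err := exec.Command("mount", "-o", opts, dev, dir).CombinedOutput()
		if err != nil {
			log.Printf("brick mount failed: %v: %s", err, out)
		}
	}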
Showing 9 changed files with 160 additions and 99 deletions.
4 changes: 3 additions & 1 deletion e2e/volume_ops_test.go
@@ -433,7 +433,9 @@ func TestVolumeOptions(t *testing.T) {
 		Force: true,
 		// XXX: Setting advanced, as all options are advanced by default
 		// TODO: Remove this later if the default changes
-		Advanced: true,
+		VolOptionReq: api.VolOptionReq{
+			Advanced: true,
+		},
 	}

 	validOpKeys := []string{"gfproxy.afr.eager-lock", "afr.eager-lock"}
18 changes: 10 additions & 8 deletions glustercli/cmd/volume-create.go
@@ -255,14 +255,16 @@ func volumeCreateCmdRun(cmd *cobra.Command, args []string) {
 	}

 	req := api.VolCreateReq{
-		Name:         volname,
-		Subvols:      subvols,
-		Force:        flagCreateForce,
-		Options:      options,
-		Advanced:     flagCreateAdvOpts,
-		Experimental: flagCreateExpOpts,
-		Deprecated:   flagCreateDepOpts,
-		Flags:        flags,
+		Name:    volname,
+		Subvols: subvols,
+		Force:   flagCreateForce,
+		VolOptionReq: api.VolOptionReq{
+			Options:      options,
+			Advanced:     flagCreateAdvOpts,
+			Experimental: flagCreateExpOpts,
+			Deprecated:   flagCreateDepOpts,
+		},
+		Flags: flags,
 	}

 	// handle thin-arbiter
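Both call sites above now set the option-related fields through an embedded api.VolOptionReq. The sketch below shows the request shape these changes imply; it is inferred from usage only (the real definitions live in pkg/api, carry more fields such as Size and SnapshotReserveFactor, and are not part of this diff):

	package api

	type SubvolReq struct{} // elided; see pkg/api for the real type

	// VolOptionReq is also what volume option-set requests use, so
	// embedding it lets volume-create share the same option knobs.
	type VolOptionReq struct {
		Options      map[string]string
		Advanced     bool
		Experimental bool
		Deprecated   bool
	}

	type VolCreateReq struct {
		Name    string
		Subvols []SubvolReq
		Force   bool
		VolOptionReq // embedded
		Flags   map[string]bool
		// remaining fields omitted
	}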
5 changes: 5 additions & 0 deletions glusterd2/bricksplanner/planner.go
@@ -129,11 +129,14 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {
 			bricks = append(bricks, api.BrickReq{
 				Type:           brickType,
 				Path:           fmt.Sprintf("%s/%s-s%d-b%d/brick", bricksMountRoot, req.Name, i, j),
+				Mountdir:       fmt.Sprintf("%s/%s-s%d-b%d", bricksMountRoot, req.Name, i, j),
 				TpName:         fmt.Sprintf("tp-%s-s%d-b%d", req.Name, i, j),
 				LvName:         fmt.Sprintf("brick-%s-s%d-b%d", req.Name, i, j),
 				Size:           eachBrickSize,
 				TpSize:         eachBrickTpSize,
 				TpMetadataSize: deviceutils.GetPoolMetadataSize(eachBrickTpSize),
+				FsType:         "xfs",
+				MntOpts:        "rw,inode64,noatime,nouuid",
 			})
 		}

@@ -187,6 +190,7 @@ func PlanBricks(req *api.VolCreateReq) error {
 			if vg.AvailableSize >= totalsize && !zoneUsed && !vg.Used {
 				subvols[idx].Bricks[bidx].PeerID = vg.PeerID
 				subvols[idx].Bricks[bidx].VgName = vg.Name
+				subvols[idx].Bricks[bidx].DevicePath = "/dev/" + vg.Name + "/" + b.LvName

 				zones[vg.Zone] = struct{}{}
 				numBricksAllocated++
@@ -214,6 +218,7 @@ func PlanBricks(req *api.VolCreateReq) error {
 			if vg.AvailableSize >= totalsize && !zoneUsed {
 				subvols[idx].Bricks[bidx].PeerID = vg.PeerID
 				subvols[idx].Bricks[bidx].VgName = vg.Name
+				subvols[idx].Bricks[bidx].DevicePath = "/dev/" + vg.Name + "/" + b.LvName

 				zones[vg.Zone] = struct{}{}
 				numBricksAllocated++
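The Sprintf patterns above pin down the naming scheme for everything the planner provisions. A small runnable example of the names derived for subvolume 1, brick 2 of a volume gv0, assuming bricksMountRoot is /bricks (the real root comes from configuration):

	package main

	import "fmt"

	func main() {
		root, name, i, j := "/bricks", "gv0", 1, 2
		fmt.Printf("path:     %s/%s-s%d-b%d/brick\n", root, name, i, j) // /bricks/gv0-s1-b2/brick
		fmt.Printf("mountdir: %s/%s-s%d-b%d\n", root, name, i, j)       // /bricks/gv0-s1-b2
		fmt.Printf("thinpool: tp-%s-s%d-b%d\n", name, i, j)             // tp-gv0-s1-b2
		fmt.Printf("lv:       brick-%s-s%d-b%d\n", name, i, j)          // brick-gv0-s1-b2
	}

Combined with the VG chosen in PlanBricks, the brick's device path becomes /dev/<vg>/brick-gv0-s1-b2, which is what txnPrepareBricks formats and mounts below.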
5 changes: 3 additions & 2 deletions glusterd2/commands/volumes/volume-create.go
@@ -20,7 +20,7 @@ import (

 const (
 	maxMetadataSizeLimit = 4096
-	minVolumeSize        = 10
+	minVolumeSize        = 20
 )

 func applyDefaults(req *api.VolCreateReq) {
@@ -123,7 +123,7 @@ func volumeCreateHandler(w http.ResponseWriter, r *http.Request) {

 	if req.SnapshotReserveFactor < 1 {
 		restutils.SendHTTPError(ctx, w, http.StatusBadRequest,
-			errors.New("invalid Snapshot Reserve Factor"))
+			errors.New("invalid snapshot reserve factor"))
 		return
 	}

@@ -168,6 +168,7 @@ func volumeCreateHandler(w http.ResponseWriter, r *http.Request) {
 			DoFunc:   "vol-create.PrepareBricks",
 			UndoFunc: "vol-create.UndoPrepareBricks",
 			Nodes:    nodes,
+			Skip:     (req.Size == 0),
 		},
 		{
 			DoFunc: "vol-create.CreateVolinfo",
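The new Skip field turns the PrepareBricks/UndoPrepareBricks step into a no-op when req.Size is zero, i.e. when the caller supplied bricks explicitly rather than requesting auto-provisioning; presumably the transaction framework omits any step whose Skip condition is true.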
135 changes: 55 additions & 80 deletions glusterd2/commands/volumes/volume-smartvol-txn.go
@@ -2,21 +2,22 @@ package volumecommands

 import (
 	"os"
-	"path"

 	"github.com/gluster/glusterd2/glusterd2/gdctx"
 	"github.com/gluster/glusterd2/glusterd2/transaction"
 	"github.com/gluster/glusterd2/pkg/api"
 	"github.com/gluster/glusterd2/plugins/device/deviceutils"

-	config "github.com/spf13/viper"
+	log "github.com/sirupsen/logrus"
 )

 func txnPrepareBricks(c transaction.TxnCtx) error {
 	var req api.VolCreateReq
 	if err := c.Get("req", &req); err != nil {
-		c.Logger().WithError(err).WithField(
-			"key", "req").Debug("Failed to get key from store")
+		c.Logger().WithFields(log.Fields{
+			"error": err,
+			"key":   "req",
+		}).Error("failed to get key from store")
 		return err
 	}

@@ -26,91 +27,72 @@ func txnPrepareBricks(c transaction.TxnCtx) error {
 			continue
 		}

-		brickMountDir := path.Dir(b.Path)
-
 		// Create Mount directory
-		err := os.MkdirAll(brickMountDir, os.ModeDir|os.ModePerm)
+		err := os.MkdirAll(b.Mountdir, os.ModeDir|os.ModePerm)
 		if err != nil {
-			c.Logger().WithError(err).
-				WithField("path", brickMountDir).
-				Error("Failed to create brick mount directory")
+			c.Logger().WithFields(log.Fields{
+				"error": err,
+				"path":  b.Mountdir,
+			}).Error("failed to create brick mount directory")
 			return err
 		}

 		// Thin Pool Creation
 		err = deviceutils.CreateTP(b.VgName, b.TpName, b.TpSize, b.TpMetadataSize)
 		if err != nil {
-			c.Logger().WithError(err).
-				WithField("vg-name", b.VgName).
-				WithField("tp-name", b.TpName).
-				WithField("tp-size", b.TpSize).
-				WithField("tp-meta-size", b.TpMetadataSize).
-				Error("Thinpool Creation failed")
+			c.Logger().WithFields(log.Fields{
+				"error":        err,
+				"vg-name":      b.VgName,
+				"tp-name":      b.TpName,
+				"tp-size":      b.TpSize,
+				"tp-meta-size": b.TpMetadataSize,
+			}).Error("thinpool creation failed")
 			return err
 		}

 		// LV Creation
 		err = deviceutils.CreateLV(b.VgName, b.TpName, b.LvName, b.Size)
 		if err != nil {
-			c.Logger().WithError(err).
-				WithField("vg-name", b.VgName).
-				WithField("tp-name", b.TpName).
-				WithField("lv-name", b.LvName).
-				WithField("size", b.Size).
-				Error("lvcreate failed")
+			c.Logger().WithFields(log.Fields{
+				"error":   err,
+				"vg-name": b.VgName,
+				"tp-name": b.TpName,
+				"lv-name": b.LvName,
+				"size":    b.Size,
+			}).Error("lvcreate failed")
 			return err
 		}

-		dev := "/dev/" + b.VgName + "/" + b.LvName
 		// Make Filesystem
-		err = deviceutils.MakeXfs(dev)
+		err = deviceutils.MakeXfs(b.DevicePath)
 		if err != nil {
-			c.Logger().WithError(err).WithField("dev", dev).Error("mkfs.xfs failed")
+			c.Logger().WithError(err).WithField("dev", b.DevicePath).Error("mkfs.xfs failed")
 			return err
 		}

 		// Mount the Created FS
-		err = deviceutils.BrickMount(dev, brickMountDir)
+		err = deviceutils.BrickMount(b.DevicePath, b.Mountdir)
 		if err != nil {
-			c.Logger().WithError(err).
-				WithField("dev", dev).
-				WithField("path", brickMountDir).
-				Error("brick mount failed")
+			c.Logger().WithFields(log.Fields{
+				"error": err,
+				"dev":   b.DevicePath,
+				"path":  b.Mountdir,
+			}).Error("brick mount failed")
 			return err
 		}

 		// Create a directory in Brick Mount
 		err = os.MkdirAll(b.Path, os.ModeDir|os.ModePerm)
 		if err != nil {
-			c.Logger().WithError(err).
-				WithField("path", b.Path).
-				Error("Failed to create brick directory in mount")
+			c.Logger().WithFields(log.Fields{
+				"error": err,
+				"path":  b.Path,
+			}).Error("failed to create brick directory in mount")
 			return err
 		}

 		// Update current Vg free size
 		deviceutils.UpdateDeviceFreeSize(gdctx.MyUUID.String(), b.VgName)
-
-		// Persist mount points in custom fstab file
-		// On Glusterd2 restart, all bricks should be
-		// mounted using mount -a -T <fstab-file>
-		fstabFile := config.GetString("localstatedir") + "/fstab"
-		err = deviceutils.FstabAddMount(fstabFile, deviceutils.FstabMount{
-			Device:           dev,
-			MountPoint:       brickMountDir,
-			FilesystemFormat: "xfs",
-			MountOptions:     "rw,inode64,noatime,nouuid",
-			DumpValue:        "1",
-			FsckOption:       "2",
-		})
-		if err != nil {
-			c.Logger().WithError(err).
-				WithField("fstab", fstabFile).
-				WithField("device", dev).
-				WithField("mount", brickMountDir).
-				Error("Failed to add entry to fstab file")
-			return err
-		}
 	}
 }

@@ -120,8 +102,10 @@ func txnPrepareBricks(c transaction.TxnCtx) error {
 func txnUndoPrepareBricks(c transaction.TxnCtx) error {
 	var req api.VolCreateReq
 	if err := c.Get("req", &req); err != nil {
-		c.Logger().WithError(err).WithField(
-			"key", "req").Debug("Failed to get key from store")
+		c.Logger().WithFields(log.Fields{
+			"error": err,
+			"key":   "req",
+		}).Error("failed to get key from store")
 		return err
 	}

@@ -132,42 +116,33 @@ func txnUndoPrepareBricks(c transaction.TxnCtx) error {
 			continue
 		}

-		brickMountDir := path.Dir(b.Path)
-
 		// UnMount the Brick
-		err := deviceutils.BrickUnmount(brickMountDir)
-		if err != nil {
-			c.Logger().WithError(err).
-				WithField("path", brickMountDir).
-				Error("brick unmount failed")
-		}
-
-		// Remove entry from fstab if available
-		fstabFile := config.GetString("localstatedir") + "/fstab"
-		err = deviceutils.FstabRemoveMount(fstabFile, brickMountDir)
+		err := deviceutils.BrickUnmount(b.Mountdir)
 		if err != nil {
-			c.Logger().WithError(err).
-				WithField("fstab", fstabFile).
-				WithField("mount", brickMountDir).
-				Error("Failed to remove entry from fstab file")
+			c.Logger().WithFields(log.Fields{
+				"error": err,
+				"path":  b.Mountdir,
+			}).Error("brick unmount failed")
 		}

 		// Remove LV
 		err = deviceutils.RemoveLV(b.VgName, b.LvName)
 		if err != nil {
-			c.Logger().WithError(err).
-				WithField("vg-name", b.VgName).
-				WithField("lv-name", b.LvName).
-				Error("lv remove failed")
+			c.Logger().WithFields(log.Fields{
+				"error":   err,
+				"vg-name": b.VgName,
+				"lv-name": b.LvName,
+			}).Error("lv remove failed")
 		}

 		// Remove Thin Pool
 		err = deviceutils.RemoveLV(b.VgName, b.TpName)
 		if err != nil {
-			c.Logger().WithError(err).
-				WithField("vg-name", b.VgName).
-				WithField("tp-name", b.TpName).
-				Error("thin pool remove failed")
+			c.Logger().WithFields(log.Fields{
+				"error":   err,
+				"vg-name": b.VgName,
+				"tp-name": b.TpName,
+			}).Error("thinpool remove failed")
 		}

 		// Update current Vg free size
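Note that both the do and undo paths drop the custom-fstab bookkeeping (FstabAddMount, FstabRemoveMount, and the mount -a -T <fstab-file> comment). Instead of persisting mounts to a file, the mount parameters now travel with each brick (Mountdir, DevicePath, MntOpts), and MountLocalBricks below replays them on startup.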
4 changes: 4 additions & 0 deletions glusterd2/main.go
@@ -14,6 +14,7 @@ import (
 	"github.com/gluster/glusterd2/glusterd2/peer"
 	"github.com/gluster/glusterd2/glusterd2/servers"
 	"github.com/gluster/glusterd2/glusterd2/store"
+	gdutils "github.com/gluster/glusterd2/glusterd2/utils"
 	"github.com/gluster/glusterd2/glusterd2/xlator"
 	"github.com/gluster/glusterd2/pkg/errors"
 	"github.com/gluster/glusterd2/pkg/logging"
@@ -127,6 +128,9 @@ func main() {
 	// Restart previously running daemons
 	daemon.StartAllDaemons()

+	// Mount all Local Bricks
+	gdutils.MountLocalBricks()
+
 	// Use the main goroutine as signal handling loop
 	sigCh := make(chan os.Signal)
 	signal.Notify(sigCh)
58 changes: 58 additions & 0 deletions glusterd2/utils/mount.go
@@ -0,0 +1,58 @@
+package utils
+
+import (
+	"github.com/gluster/glusterd2/glusterd2/volume"
+	"github.com/gluster/glusterd2/pkg/utils"
+
+	log "github.com/sirupsen/logrus"
+)
+
+// MountLocalBricks mounts bricks of auto provisioned volumes
+func MountLocalBricks() error {
+	volumes, err := volume.GetVolumes()
+	if err != nil {
+		return err
+	}
+
+	// TODO: Get Snapshot Volumes as well
+
+	if len(volumes) == 0 {
+		return nil
+	}
+
+	// Get list of mounted dirs
+	mtabEntries, err := volume.GetMounts()
+	if err != nil {
+		log.WithError(err).Error("failed to get list of mounts")
+		return err
+	}
+
+	mounts := make(map[string]struct{})
+
+	for _, entry := range mtabEntries {
+		mounts[entry.MntDir] = struct{}{}
+	}
+
+	for _, v := range volumes {
+		for _, b := range v.GetLocalBricks() {
+			// Mount all local Bricks if they are auto provisioned
+			if b.MountInfo.DevicePath != "" {
+				if _, exists := mounts[b.MountInfo.Mountdir]; exists {
+					continue
+				}
+
+				err := utils.ExecuteCommandRun("mount", "-o", b.MountInfo.MntOpts, b.MountInfo.DevicePath, b.MountInfo.Mountdir)
+				if err != nil {
+					log.WithFields(log.Fields{
+						"error":  err,
+						"volume": v.Name,
+						"dev":    b.MountInfo.DevicePath,
+						"path":   b.MountInfo.Mountdir,
+					}).Error("brick mount failed")
+				}
+			}
+		}
+	}
+
+	return nil
+}
