Skip to content
This repository has been archived by the owner on Mar 26, 2020. It is now read-only.

Commit

Permalink
Auto distribute count based on Max brick size
Browse files Browse the repository at this point in the history
While creating an auto-provisioned volume, support was added to automatically
calculate the distribute count based on the max brick size specified in
the request.

For example, the command below creates a 2x3 (Distributed-Replicate) volume:

```
glustercli volume create gv1 --replica 3 --size 1G \
        --max-brick-size 512M
```

Fixes: #999
Signed-off-by: Aravinda VK <avishwan@redhat.com>
  • Loading branch information
aravindavk committed Dec 13, 2018
1 parent 05fee9f commit 56dc388
Show file tree
Hide file tree
Showing 6 changed files with 144 additions and 21 deletions.
79 changes: 79 additions & 0 deletions e2e/smartvol_ops_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -412,6 +412,83 @@ func testSmartVolumeDistributeDisperse(t *testing.T) {
checkZeroLvs(r)
}

// testSmartVolumeAutoDistributeReplicate verifies that the distribute
// count of a replicated smart volume is derived automatically from
// MaxBrickSize when no explicit distribute count is supplied.
func testSmartVolumeAutoDistributeReplicate(t *testing.T) {
	r := require.New(t)

	volname := formatVolName(t.Name())

	// A max-brick-size below the supported minimum must be rejected.
	req := api.VolCreateReq{
		Name:               volname,
		Size:               40 * gutils.MiB,
		ReplicaCount:       3,
		MaxBrickSize:       10 * gutils.MiB,
		SubvolZonesOverlap: true,
	}
	vol, err := client.VolumeCreate(req)
	r.NotNil(err)

	// 40 MiB volume with 20 MiB max bricks => two replica subvolumes.
	req = api.VolCreateReq{
		Name:               volname,
		Size:               40 * gutils.MiB,
		ReplicaCount:       3,
		MaxBrickSize:       20 * gutils.MiB,
		SubvolZonesOverlap: true,
	}
	vol, err = client.VolumeCreate(req)
	r.Nil(err)

	r.Len(vol.Subvols, 2)
	r.Equal("Distributed-Replicate", vol.Type.String())
	r.Len(vol.Subvols[0].Bricks, 3)
	r.Len(vol.Subvols[1].Bricks, 3)

	r.Nil(client.VolumeDelete(volname))
	checkZeroLvs(r)

	// Max-brick-size larger than the requested volume size => a single
	// plain Replicate subvolume, no distribution.
	req = api.VolCreateReq{
		Name:               volname,
		Size:               20 * gutils.MiB,
		ReplicaCount:       3,
		MaxBrickSize:       30 * gutils.MiB,
		SubvolZonesOverlap: true,
	}
	vol, err = client.VolumeCreate(req)
	r.Nil(err)

	r.Len(vol.Subvols, 1)
	r.Equal("Replicate", vol.Type.String())
	r.Len(vol.Subvols[0].Bricks, 3)

	r.Nil(client.VolumeDelete(volname))
	checkZeroLvs(r)
}

// testSmartVolumeAutoDistributeDisperse verifies automatic distribute
// count calculation for disperse volumes: an 80 MiB request with a
// 20 MiB max brick size must yield two disperse subvolumes.
func testSmartVolumeAutoDistributeDisperse(t *testing.T) {
	r := require.New(t)

	volname := formatVolName(t.Name())

	req := api.VolCreateReq{
		Name:               volname,
		Size:               80 * gutils.MiB,
		DisperseCount:      3,
		MaxBrickSize:       20 * gutils.MiB,
		SubvolZonesOverlap: true,
	}
	vol, err := client.VolumeCreate(req)
	r.Nil(err)

	r.Len(vol.Subvols, 2)
	r.Equal("Distributed-Disperse", vol.Type.String())
	r.Len(vol.Subvols[0].Bricks, 3)
	r.Len(vol.Subvols[1].Bricks, 3)

	r.Nil(client.VolumeDelete(volname))
	checkZeroLvs(r)
}

func editDevice(t *testing.T) {
r := require.New(t)
peerList, err := client.Peers()
Expand Down Expand Up @@ -531,6 +608,8 @@ func TestSmartVolume(t *testing.T) {
t.Run("Smartvol Disperse Volume", testSmartVolumeDisperse)
t.Run("Smartvol Distributed-Replicate Volume", testSmartVolumeDistributeReplicate)
t.Run("Smartvol Distributed-Disperse Volume", testSmartVolumeDistributeDisperse)
t.Run("Smartvol Auto Distributed-Replicate Volume", testSmartVolumeAutoDistributeReplicate)
t.Run("Smartvol Auto Distributed-Disperse Volume", testSmartVolumeAutoDistributeDisperse)
t.Run("Replace Brick", testReplaceBrick)
t.Run("Edit device", editDevice)

Expand Down
7 changes: 7 additions & 0 deletions glustercli/cmd/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,9 @@ func formatPID(pid int) string {
}

func sizeToBytes(value string) (uint64, error) {
if value == "" {
return 0, nil
}
sizeParts := validSizeFormat.FindStringSubmatch(value)
if len(sizeParts) == 0 {
return 0, errors.New("invalid size format")
Expand All @@ -52,6 +55,10 @@ func sizeToBytes(value string) (uint64, error) {
size = sizeValue * utils.KiB
case "KB":
size = sizeValue * utils.KB
case "M", "MiB":
size = sizeValue * utils.MiB
case "MB":
size = sizeValue * utils.MB
case "G", "GiB":
size = sizeValue * utils.GiB
case "GB":
Expand Down
8 changes: 8 additions & 0 deletions glustercli/cmd/volume-create.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ var (
flagCreateSnapshotReserveFactor float64 = 1
flagCreateSubvolZoneOverlap bool
flagAverageFileSize string
flagCreateMaxBrickSize string

volumeCreateCmd = &cobra.Command{
Use: "create <volname> [<brick> [<brick>]...|--size <size>]",
Expand Down Expand Up @@ -85,6 +86,7 @@ func init() {
volumeCreateCmd.Flags().Float64Var(&flagCreateSnapshotReserveFactor, "snapshot-reserve-factor", 1, "Snapshot Reserve Factor")
volumeCreateCmd.Flags().BoolVar(&flagCreateSubvolZoneOverlap, "subvols-zones-overlap", false, "Brick belonging to other Sub volume can be created in the same zone")
volumeCreateCmd.Flags().StringVar(&flagAverageFileSize, "average-file-size", "1M", "Average size of the files")
volumeCreateCmd.Flags().StringVar(&flagCreateMaxBrickSize, "max-brick-size", "", "Max brick size for auto distribute count")

volumeCmd.AddCommand(volumeCreateCmd)
}
Expand All @@ -102,10 +104,16 @@ func smartVolumeCreate(cmd *cobra.Command, args []string) {
failure("Invalid File Size specified", nil, 1)
}

maxBrickSize, err := sizeToBytes(flagCreateMaxBrickSize)
if err != nil {
failure("Invalid Max Brick size Size specified", nil, 1)
}

req := api.VolCreateReq{
Name: args[0],
Transport: flagCreateTransport,
Size: size,
MaxBrickSize: maxBrickSize,
ReplicaCount: flagCreateReplicaCount,
ArbiterCount: flagCreateArbiterCount,
AverageFileSize: avgFileSize,
Expand Down
55 changes: 39 additions & 16 deletions glusterd2/bricksplanner/planner.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,20 @@ import (
"errors"
"fmt"
"path"
"strconv"

"github.com/gluster/glusterd2/glusterd2/volume"
"github.com/gluster/glusterd2/pkg/api"
"github.com/gluster/glusterd2/pkg/lvmutils"
gutils "github.com/gluster/glusterd2/pkg/utils"

config "github.com/spf13/viper"
)

const (
	// minBrickSize is the smallest max-brick-size accepted when the
	// distribute count is calculated automatically (20 MiB).
	minBrickSize = 20 * gutils.MiB
)

func handleReplicaSubvolReq(req *api.VolCreateReq) error {
if req.ReplicaCount < 2 {
return nil
Expand Down Expand Up @@ -66,20 +72,6 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {
var err error
bricksMountRoot := path.Join(config.GetString("rundir"), "/bricks")

// If Distribute count is zero then automatically decide
// the distribute count based on available size in each device
// TODO: Auto find the distribute count
numSubvols := 1
if req.DistributeCount > 0 {
numSubvols = req.DistributeCount
}

// User input will be in Bytes
subvolSize := req.Size
if numSubvols > 1 {
subvolSize = subvolSize / uint64(numSubvols)
}

// Default Subvol Type
req.SubvolType = "distribute"

Expand All @@ -95,6 +87,37 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {
return nil, err
}

if req.MaxBrickSize > 0 && req.MaxBrickSize < minBrickSize {
return nil, errors.New("invalid max-brick-size, Minimum size required is " + strconv.Itoa(minBrickSize))
}

// If max Brick size is specified then decide distribute
// count and Volume Size based on Volume Type
if req.MaxBrickSize > 0 && req.Size > req.MaxBrickSize {
// In case of replica and distribute, brick size is equal to
// subvolume size, In case of disperse volume
// subvol size = brick size * disperse-data-count
maxSubvolSize := req.MaxBrickSize
if req.DisperseDataCount > 0 {
maxSubvolSize = req.MaxBrickSize * uint64(req.DisperseDataCount)
}
req.DistributeCount = int(req.Size / maxSubvolSize)
if req.Size%maxSubvolSize > 0 {
req.DistributeCount++
}
}

numSubvols := 1
if req.DistributeCount > 0 {
numSubvols = req.DistributeCount
}

// User input will be in Bytes
subvolSize := req.Size
if numSubvols > 1 {
subvolSize = subvolSize / uint64(numSubvols)
}

subvolplanner, exists := subvolPlanners[req.SubvolType]
if !exists {
return nil, errors.New("subvolume type not supported")
Expand All @@ -121,8 +144,8 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {
BrickDirSuffix: "/brick",
TpName: fmt.Sprintf("tp_%s_s%d_b%d", req.Name, i+1, j+1),
LvName: fmt.Sprintf("brick_%s_s%d_b%d", req.Name, i+1, j+1),
Size: eachBrickSize,
TpSize: eachBrickTpSize,
Size: lvmutils.NormalizeSize(eachBrickSize),
TpSize: lvmutils.NormalizeSize(eachBrickTpSize),
TpMetadataSize: lvmutils.GetPoolMetadataSize(eachBrickTpSize),
FsType: "xfs",
MntOpts: "rw,inode64,noatime,nouuid",
Expand Down
1 change: 1 addition & 0 deletions pkg/api/volume_req.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ type VolCreateReq struct {
Metadata map[string]string `json:"metadata,omitempty"`
Flags map[string]bool `json:"flags,omitempty"`
Size uint64 `json:"size"`
MaxBrickSize uint64 `json:"max-brick-size,omitempty"`
DistributeCount int `json:"distribute,omitempty"`
ReplicaCount int `json:"replica,omitempty"`
ArbiterCount int `json:"arbiter,omitempty"`
Expand Down
15 changes: 10 additions & 5 deletions pkg/lvmutils/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,16 +103,12 @@ func GetPoolMetadataSize(poolsize uint64) uint64 {
// Minimum metadata size required is 0.5% and Max upto 16GB ~ 17179869184 Bytes

metadataSize := uint64(float64(poolsize) * 0.005)
rem := metadataSize % 512
if rem > 0 {
metadataSize += (512 - rem)
}

if metadataSize > maxMetadataSize {
metadataSize = maxMetadataSize
}

return metadataSize
return NormalizeSize(metadataSize)
}

// CreateTP creates LVM Thin Pool
Expand Down Expand Up @@ -345,3 +341,12 @@ func ExtendThinpool(expansionTpSizePerBrick uint64, vgName string, tpName string
err := utils.ExecuteCommandRun("lvextend", fmt.Sprintf("-L+%dB", expansionTpSizePerBrick), fmt.Sprintf("/dev/%s/%s", vgName, tpName))
return err
}

// NormalizeSize rounds size down to the nearest multiple of 512 bytes;
// values already aligned are returned unchanged.
//
// NOTE(review): GetPoolMetadataSize previously rounded its result UP to
// the next 512-byte boundary before this helper was introduced — confirm
// that rounding down still satisfies the minimum metadata-size
// requirement.
func NormalizeSize(size uint64) uint64 {
	return size - size%512
}

0 comments on commit 56dc388

Please sign in to comment.