This repository has been archived by the owner on Mar 26, 2020. It is now read-only.

Merge branch 'master' of https://github.com/gluster/glusterd2 into group
Conflicts:
	pkg/errors/error.go
rishubhjain committed May 23, 2018
2 parents 3ed7136 + b528b9d commit 62273e4
Showing 32 changed files with 376 additions and 219 deletions.
1 change: 1 addition & 0 deletions doc/endpoints.md

Some generated files are not rendered by default.

12 changes: 6 additions & 6 deletions doc/quick-start-user-guide.md
@@ -83,9 +83,9 @@ INFO[2017-08-28T16:03:58+05:30] started GlusterD SunRPC server ip

Now you have two nodes running glusterd2.

> NOTE: Ensure that firewalld is configured (or stopped) to let traffic on ports ` before attaching a peer.
> NOTE: Ensure that firewalld is configured (or stopped) to let traffic on ports ` before adding a peer.
## Attach peer
## Add peer

Glusterd2 natively provides only a ReST API for clients to perform management operations. A CLI is provided which interacts with glusterd2 using the [ReST APIs](https://github.com/gluster/glusterd2/wiki/ReST-API).

@@ -99,7 +99,7 @@ $ cat addpeer.json
"addresses": ["192.168.56.102"]
}
```
`addresses` takes a list of addresses by which the new host can be added. These can be FQDNs, short-names or IP addresses. Note that if you want to attach multiple peers, use the API below to attach each peer one at a time.
`addresses` takes a list of addresses by which the new host can be added. These can be FQDNs, short-names or IP addresses. Note that if you want to add multiple peers, use the API below to add each peer one at a time.

Send an HTTP request to `node1` to add `node2` as peer:

@@ -109,7 +109,7 @@ $ curl -X POST http://192.168.56.101:24007/v1/peers --data @addpeer.json -H 'Con

or using glustercli:

$ glustercli peer probe 192.168.56.102
$ glustercli peer add 192.168.56.102

You will get the Peer ID of the newly added peer in the response.
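
For reference, the same request can be made from Go with only the standard library. This is a minimal sketch, not part of the guide's tooling; it assumes the quick-start's addresses and the default port 24007:

```
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Same payload as addpeer.json above.
	body := []byte(`{"addresses": ["192.168.56.102"]}`)
	resp, err := http.Post("http://192.168.56.101:24007/v1/peers",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(out)) // the response body carries the new peer's ID
}
```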

@@ -123,7 +123,7 @@ $ curl -X GET http://192.168.56.101:24007/v1/peers

or by using the glustercli:

$ glustercli pool list
$ glustercli peer list

Note the UUIDs in the response. We will use them in the volume create request below.
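
To pick the UUIDs out programmatically, here is a hedged sketch that decodes the peer-list response in Go; the JSON field names are assumptions based on the table columns shown by glustercli, not a confirmed schema:

```
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// Assumed shape of a peer entry ("id", "name"); verify against the
// actual /v1/peers response before relying on these tags.
type peer struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

func main() {
	resp, err := http.Get("http://192.168.56.101:24007/v1/peers")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var peers []peer
	if err := json.NewDecoder(resp.Body).Decode(&peers); err != nil {
		log.Fatal(err)
	}
	for _, p := range peers {
		fmt.Println(p.ID, p.Name) // UUIDs to use in the volume create request
	}
}
```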

@@ -199,4 +199,4 @@ Verify that `glusterfsd` process is running on both nodes.

* Issues with 2 node clusters
* Restarting glusterd2 does not restore the cluster
* Peer detach doesn't work
* Peer remove doesn't work
6 changes: 3 additions & 3 deletions e2e/peer_ops_test.go
@@ -27,13 +27,13 @@ func TestAddRemovePeer(t *testing.T) {

client := initRestclient(g1.ClientAddress)

_, err2 := client.PeerProbe(g2.PeerAddress)
_, err2 := client.PeerAdd(g2.PeerAddress)
r.Nil(err2)

time.Sleep(6 * time.Second)

// add peer: ask g1 to add g3 as peer
_, err3 := client.PeerProbe(g3.PeerAddress)
_, err3 := client.PeerAdd(g3.PeerAddress)
r.Nil(err3)

time.Sleep(6 * time.Second)
@@ -44,6 +44,6 @@ func TestAddRemovePeer(t *testing.T) {
r.Len(peers, 3)

// remove peer: ask g1 to remove g2 as peer
err5 := client.PeerDetach(g2.PeerID())
err5 := client.PeerRemove(g2.PeerID())
r.Nil(err5)
}
37 changes: 37 additions & 0 deletions e2e/volume_ops_test.go
@@ -100,6 +100,9 @@ func testVolumeCreate(t *testing.T) {
},
},
},
Metadata: map[string]string{
"owner": "gd2test",
},
Force: true,
}
_, err := client.VolumeCreate(createReq)
@@ -148,6 +151,40 @@ func testVolumeStop(t *testing.T) {

func testVolumeList(t *testing.T) {
r := require.New(t)
var matchingQueries []map[string]string
var nonMatchingQueries []map[string]string

matchingQueries = append(matchingQueries, map[string]string{
"key": "owner",
"value": "gd2test",
})
matchingQueries = append(matchingQueries, map[string]string{
"key": "owner",
})
matchingQueries = append(matchingQueries, map[string]string{
"value": "gd2test",
})
for _, filter := range matchingQueries {
volumes, err := client.Volumes("", filter)
r.Nil(err)
r.Len(volumes, 1)
}

nonMatchingQueries = append(nonMatchingQueries, map[string]string{
"key": "owner",
"value": "gd2-test",
})
nonMatchingQueries = append(nonMatchingQueries, map[string]string{
"key": "owners",
})
nonMatchingQueries = append(nonMatchingQueries, map[string]string{
"value": "gd2tests",
})
for _, filter := range nonMatchingQueries {
volumes, err := client.Volumes("", filter)
r.Nil(err)
r.Len(volumes, 0)
}

volumes, err := client.Volumes("")
r.Nil(err)
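The matching and non-matching pairs in this test pin down the filter semantics: a filter may carry `key`, `value`, or both, and a volume matches only if its metadata agrees on every part supplied. A hedged client-side sketch of that rule, assuming volume metadata is a plain string map (an illustration of the expected behaviour, not the server's actual implementation):

```
// matchesFilter mirrors the semantics exercised by the test above:
// an empty filter matches everything; "key" alone requires the key to
// exist; "value" alone requires some metadata value to equal it; both
// together require that exact key/value pair.
func matchesFilter(metadata map[string]string, filter map[string]string) bool {
	key, hasKey := filter["key"]
	value, hasValue := filter["value"]
	switch {
	case hasKey && hasValue:
		v, ok := metadata[key]
		return ok && v == value
	case hasKey:
		_, ok := metadata[key]
		return ok
	case hasValue:
		for _, v := range metadata {
			if v == value {
				return true
			}
		}
		return false
	default:
		return true
	}
}
```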
7 changes: 7 additions & 0 deletions glustercli/cmd/bitrot.go
@@ -100,6 +100,8 @@ var bitrotScrubThrottleCmd = &cobra.Command{
option = append(option, "bit-rot.scrub-throttle")
option = append(option, args[1])

// Mark the option being set as advanced
flagSetAdv = true
err := volumeOptionJSONHandler(cmd, volname, option)
if err != nil {
if verbose {
@@ -125,6 +127,8 @@ var bitrotScrubFrequencyCmd = &cobra.Command{
option = append(option, "bit-rot.scrub-freq")
option = append(option, args[1])

// Mark the option being set as advanced
flagSetAdv = true
err := volumeOptionJSONHandler(cmd, volname, option)
if err != nil {
if verbose {
@@ -152,6 +156,9 @@ var bitrotScrubCmd = &cobra.Command{
case scrubPause, scrubResume:
option = append(option, "bit-rot.scrub-state")
option = append(option, args[1])

// Mark the option being set as advanced
flagSetAdv = true
err := volumeOptionJSONHandler(cmd, volname, option)
if err != nil {
if verbose {
60 changes: 27 additions & 33 deletions glustercli/cmd/peer.go
@@ -14,86 +14,80 @@ import (

const (
helpPeerCmd = "Gluster Peer Management"
helpPeerProbeCmd = "probe peer specified by <HOSTNAME>"
helpPeerDetachCmd = "detach peer specified by <HOSTNAME or PeerID>"
helpPeerAddCmd = "add peer specified by <HOSTNAME>"
helpPeerRemoveCmd = "remove peer specified by <HOSTNAME or PeerID>"
helpPeerStatusCmd = "list status of peers"
helpPoolListCmd = "list all the nodes in the pool (including localhost)"
helpPeerListCmd = "list all the nodes in the pool (including localhost)"
)

var (
// Peer Detach Command Flags
flagPeerDetachForce bool
// Peer Remove Command Flags
flagPeerRemoveForce bool
)

func init() {
peerCmd.AddCommand(peerProbeCmd)
peerCmd.AddCommand(peerAddCmd)

peerDetachCmd.Flags().BoolVarP(&flagPeerDetachForce, "force", "f", false, "Force")
peerRemoveCmd.Flags().BoolVarP(&flagPeerRemoveForce, "force", "f", false, "Force")

peerCmd.AddCommand(peerDetachCmd)
peerCmd.AddCommand(peerRemoveCmd)

peerCmd.AddCommand(peerStatusCmd)

poolCmd.AddCommand(poolListCmd)
peerCmd.AddCommand(peerListCmd)

RootCmd.AddCommand(peerCmd)
RootCmd.AddCommand(poolCmd)
}

var peerCmd = &cobra.Command{
Use: "peer",
Short: helpPeerCmd,
}

var poolCmd = &cobra.Command{
Use: "pool",
Short: helpPeerCmd,
}

var peerProbeCmd = &cobra.Command{
Use: "probe <HOSTNAME>",
Short: helpPeerProbeCmd,
var peerAddCmd = &cobra.Command{
Use: "add <HOSTNAME>",
Short: helpPeerAddCmd,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
hostname := cmd.Flags().Args()[0]
peer, err := client.PeerProbe(hostname)
peer, err := client.PeerAdd(hostname)
if err != nil {
if verbose {
log.WithFields(log.Fields{
"host": hostname,
"error": err.Error(),
}).Error("peer probe failed")
}).Error("peer add failed")
}
failure("Peer probe failed", err, 1)
failure("Peer add failed", err, 1)
}
fmt.Println("Peer probe successful")
fmt.Println("Peer add successful")
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"ID", "Name", "Peer Addresses"})
table.Append([]string{peer.ID.String(), peer.Name, strings.Join(peer.PeerAddresses, ",")})
table.Render()
},
}

var peerDetachCmd = &cobra.Command{
Use: "detach <HOSTNAME or PeerID>",
Short: helpPeerDetachCmd,
var peerRemoveCmd = &cobra.Command{
Use: "remove <HOSTNAME or PeerID>",
Short: helpPeerRemoveCmd,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
hostname := cmd.Flags().Args()[0]
peerID, err := getPeerID(hostname)
if err == nil {
err = client.PeerDetach(peerID)
err = client.PeerRemove(peerID)
}
if err != nil {
if verbose {
log.WithFields(log.Fields{
"host": hostname,
"error": err.Error(),
}).Error("peer detach failed")
}).Error("peer remove failed")
}
failure("Peer detach failed", err, 1)
failure("Peer remove failed", err, 1)
}
fmt.Println("Peer detach success")
fmt.Println("Peer remove success")
},
}

@@ -108,9 +102,9 @@ func peerStatusHandler(cmd *cobra.Command) {
failure("Failed to get Peers list", err, 1)
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"ID", "Name", "Peer Addresses"})
table.SetHeader([]string{"ID", "Name", "Peer Addresses", "Online"})
for _, peer := range peers {
table.Append([]string{peer.ID.String(), peer.Name, strings.Join(peer.PeerAddresses, ",")})
table.Append([]string{peer.ID.String(), peer.Name, strings.Join(peer.PeerAddresses, ","), formatBoolYesNo(peer.Online)})
}
table.Render()
}
@@ -124,9 +118,9 @@ var peerStatusCmd = &cobra.Command{
},
}

var poolListCmd = &cobra.Command{
var peerListCmd = &cobra.Command{
Use: "list",
Short: helpPoolListCmd,
Short: helpPeerListCmd,
Args: cobra.NoArgs,
Run: func(cmd *cobra.Command, args []string) {
peerStatusHandler(cmd)
8 changes: 8 additions & 0 deletions glustercli/cmd/utils.go
@@ -0,0 +1,8 @@
package cmd

func formatBoolYesNo(value bool) string {
	if value {
		return "yes"
	}
	return "no"
}
34 changes: 29 additions & 5 deletions glustercli/cmd/volume.go
Original file line number Diff line number Diff line change
@@ -41,6 +41,10 @@ var (
flagExpandCmdReplicaCount int
flagExpandCmdForce bool

// Filter Volume Info/List command flags
flagCmdFilterKey string
flagCmdFilterValue string

// Edit Command Flags
flagCmdMetadataKey string
flagCmdMetadataValue string
@@ -62,10 +66,14 @@ func init() {
volumeCmd.AddCommand(volumeGetCmd)
volumeCmd.AddCommand(volumeResetCmd)

volumeInfoCmd.Flags().StringVar(&flagCmdFilterKey, "key", "", "Filter by metadata key")
volumeInfoCmd.Flags().StringVar(&flagCmdFilterValue, "value", "", "Filter by metadata value")
volumeCmd.AddCommand(volumeInfoCmd)

volumeCmd.AddCommand(volumeStatusCmd)

volumeListCmd.Flags().StringVar(&flagCmdFilterKey, "key", "", "Filter by metadata Key")
volumeListCmd.Flags().StringVar(&flagCmdFilterValue, "value", "", "Filter by metadata value")
volumeCmd.AddCommand(volumeListCmd)

// Volume Expand
@@ -254,13 +262,16 @@ func volumeInfoDisplayNumbricks(vol api.VolumeGetResp) {
}

func volumeInfoDisplay(vol api.VolumeGetResp) {

fmt.Println()
fmt.Println("Volume Name:", vol.Name)
fmt.Println("Type:", vol.Type)
fmt.Println("Volume ID:", vol.ID)
fmt.Println("State:", vol.State)
fmt.Println("Transport-type:", vol.Transport)
fmt.Println("Options:")
for key, value := range vol.Options {
fmt.Printf(" %s: %s\n", key, value)
}
volumeInfoDisplayNumbricks(vol)
for sIdx, subvol := range vol.Subvols {
for bIdx, brick := range subvol.Bricks {
Expand All @@ -281,8 +292,21 @@ func volumeInfoHandler2(cmd *cobra.Command, isInfo bool) error {
volname = cmd.Flags().Args()[0]
}
if volname == "" {
vols, err = client.Volumes("")
if flagCmdFilterKey == "" && flagCmdFilterValue == "" {
vols, err = client.Volumes("")
} else if flagCmdFilterKey != "" && flagCmdFilterValue == "" {
vols, err = client.Volumes("", map[string]string{"key": flagCmdFilterKey})
} else if flagCmdFilterKey == "" && flagCmdFilterValue != "" {
vols, err = client.Volumes("", map[string]string{"value": flagCmdFilterValue})
} else if flagCmdFilterKey != "" && flagCmdFilterValue != "" {
vols, err = client.Volumes("", map[string]string{"key": flagCmdFilterKey,
"value": flagCmdFilterValue,
})
}
} else {
if flagCmdFilterKey != "" || flagCmdFilterValue != "" {
return errors.New("Invalid command. Cannot give filter arguments when providing volname")
}
vols, err = client.Volumes(volname)
}

@@ -306,7 +330,7 @@ func volumeInfoHandler2(cmd *cobra.Command, isInfo bool) error {
}

var volumeInfoCmd = &cobra.Command{
Use: "info",
Use: "info [<volname> |--key <key>|--value <value>|--key <key> --value <value>]",
Short: helpVolumeInfoCmd,
Args: cobra.RangeArgs(0, 1),
Run: func(cmd *cobra.Command, args []string) {
@@ -323,7 +347,7 @@ var volumeInfoCmd = &cobra.Command{
}

var volumeListCmd = &cobra.Command{
Use: "list",
Use: "list [--key <key>|--value <value>|--key <key> --value <value>]",
Short: helpVolumeListCmd,
Args: cobra.RangeArgs(0, 1),
Run: func(cmd *cobra.Command, args []string) {
@@ -455,6 +479,6 @@ var volumeEditCmd = &cobra.Command{
}
failure("Failed to edit metadata", err, 1)
}
fmt.Printf("Metadata edit successfull\n")
fmt.Printf("Metadata edit successful\n")
},
}
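
For API consumers, the new `--key`/`--value` flags correspond to the optional filter map accepted by `client.Volumes`. A fragment, assuming an initialized glusterd2 REST client as in the e2e tests (`client := initRestclient(...)`):

```
	// List only volumes whose metadata has owner=gd2test; the filter
	// keys "key" and "value" mirror the new CLI flags.
	filter := map[string]string{"key": "owner", "value": "gd2test"}
	vols, err := client.Volumes("", filter)
	if err != nil {
		log.Fatal(err)
	}
	for _, vol := range vols {
		fmt.Println(vol.Name)
	}
```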
