This repository has been archived by the owner on Mar 26, 2020. It is now read-only.

Merge branch 'master' of https://github.com/gluster/glusterd2 into remove-brick

rishubhjain committed Jun 18, 2018
2 parents 4707158 + 9dda1d5, commit bdce89d

Showing 13 changed files with 111 additions and 107 deletions.

e2e/georep_test.go (2 changes: 1 addition & 1 deletion)

@@ -17,7 +17,7 @@ func TestGeorepCreateDelete(t *testing.T) {
     r.Nil(err)
     defer teardownCluster(gds)

-    brickDir, err := ioutil.TempDir("", "TestGeorepCreate")
+    brickDir, err := ioutil.TempDir(baseWorkdir, t.Name())
     r.Nil(err)
     defer os.RemoveAll(brickDir)

e2e/glustershd_test.go (2 changes: 1 addition & 1 deletion)

@@ -16,7 +16,7 @@ func TestSelfHealInfo(t *testing.T) {
     r.Nil(err)
     defer teardownCluster(gds)

-    brickDir, err := ioutil.TempDir("", t.Name())
+    brickDir, err := ioutil.TempDir(baseWorkdir, t.Name())
     r.Nil(err)
     defer os.RemoveAll(brickDir)

e2e/quota_enable.go (2 changes: 1 addition & 1 deletion)

@@ -20,7 +20,7 @@ func testQuotaEnable(t *testing.T) {
     r.Nil(err)
     defer teardownCluster(gds)

-    brickDir, err := ioutil.TempDir("", t.Name())
+    brickDir, err := ioutil.TempDir(baseWorkdir, t.Name())
     r.Nil(err)
     defer os.RemoveAll(brickDir)

e2e/restart_test.go (2 changes: 1 addition & 1 deletion)

@@ -17,7 +17,7 @@ func TestRestart(t *testing.T) {
     r.Nil(err)
     r.True(gd.IsRunning())

-    dir, err := ioutil.TempDir("", "")
+    dir, err := ioutil.TempDir(baseWorkdir, t.Name())
     r.Nil(err)
     defer os.RemoveAll(dir)

e2e/smartvol_ops_test.go (10 changes: 5 additions & 5 deletions)

@@ -226,15 +226,15 @@ func TestSmartVolume(t *testing.T) {

     client = initRestclient(gds[0].ClientAddress)

-    tmpDir, err = ioutil.TempDir("", t.Name())
+    devicesDir, err := ioutil.TempDir(baseWorkdir, t.Name())
     r.Nil(err)
-    t.Logf("Using temp dir: %s", tmpDir)
+    t.Logf("Using temp dir: %s", devicesDir)

     // Device Setup
     // Around 150MB will be reserved during pv/vg creation, create device with more size
-    r.Nil(prepareLoopDevice(baseWorkdir+"/gluster_dev1.img", "1", "400M"))
-    r.Nil(prepareLoopDevice(baseWorkdir+"/gluster_dev2.img", "2", "400M"))
-    r.Nil(prepareLoopDevice(baseWorkdir+"/gluster_dev3.img", "3", "400M"))
+    r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev1.img", "1", "400M"))
+    r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev2.img", "2", "400M"))
+    r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev3.img", "3", "400M"))

     _, err = client.DeviceAdd(gds[0].PeerID(), "/dev/gluster_loop1")
     r.Nil(err)

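The change repeated across the e2e test files above is the same: per-test scratch directories are created under the suite's shared baseWorkdir and named after the test, instead of under the system temp directory, so everything a test creates lives in one tree the suite owns and can wipe. A minimal standalone sketch of the pattern; the value of baseWorkdir here is a made-up stand-in for the variable defined in the e2e package:

package main

import (
    "fmt"
    "io/ioutil"
    "os"
)

// baseWorkdir is a made-up stand-in; the real variable lives in glusterd2's
// e2e package and points at the suite's shared work directory.
var baseWorkdir = "/tmp/gd2_e2e"

func main() {
    if err := os.MkdirAll(baseWorkdir, 0755); err != nil {
        panic(err)
    }

    // Mirrors ioutil.TempDir(baseWorkdir, t.Name()) from the tests above:
    // the per-test directory is created under the shared workdir rather
    // than under the system temp directory.
    brickDir, err := ioutil.TempDir(baseWorkdir, "TestExample")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(brickDir)

    fmt.Println("test artifacts go under:", brickDir)
}
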
e2e/utils_test.go (14 changes: 11 additions & 3 deletions)

@@ -26,6 +26,7 @@ type gdProcess struct {
     ClientAddress string `toml:"clientaddress"`
     PeerAddress   string `toml:"peeraddress"`
     Workdir       string `toml:"workdir"`
+    LocalStateDir string `toml:"localstatedir"`
     Rundir        string `toml:"rundir"`
     uuid          string
 }

@@ -41,7 +42,7 @@ func (g *gdProcess) Stop() error {
     return g.Cmd.Process.Kill()
 }

-func (g *gdProcess) UpdateDirs() {
+func (g *gdProcess) updateDirs() {
     g.Workdir = path.Clean(g.Workdir)
     if !path.IsAbs(g.Workdir) {
         g.Workdir = path.Join(baseWorkdir, g.Workdir)

@@ -50,6 +51,10 @@ func (g *gdProcess) UpdateDirs() {
     if !path.IsAbs(g.Rundir) {
         g.Rundir = path.Join(baseWorkdir, g.Rundir)
     }
+    g.LocalStateDir = path.Clean(g.LocalStateDir)
+    if !path.IsAbs(g.LocalStateDir) {
+        g.LocalStateDir = path.Join(baseWorkdir, g.LocalStateDir)
+    }
 }

 func (g *gdProcess) EraseWorkdir() error {

@@ -117,7 +122,10 @@ func spawnGlusterd(configFilePath string, cleanStart bool) (*gdProcess, error) {
         return nil, err
     }

-    g.UpdateDirs()
+    // The config files in e2e/config contain relative paths, convert them
+    // to absolute paths.
+    g.updateDirs()
+
     if cleanStart {
         g.EraseWorkdir() // cleanup leftovers from previous test
     }

@@ -133,6 +141,7 @@ func spawnGlusterd(configFilePath string, cleanStart bool) (*gdProcess, error) {
     g.Cmd = exec.Command(path.Join(binDir, "glusterd2"),
         "--config", absConfigFilePath,
         "--workdir", g.Workdir,
+        "--localstatedir", g.LocalStateDir,
         "--rundir", g.Rundir,
         "--logdir", path.Join(g.Workdir, "log"),
         "--logfile", "glusterd2.log")

@@ -320,7 +329,6 @@ func loopDevicesCleanup(t *testing.T) error {
     cleanupAllBrickMounts(t)
     cleanupAllGlusterVgs(t)
     cleanupAllGlusterPvs(t)
-    cleanupAllGlusterPvs(t)

     // Cleanup device files
     devicefiles, err := filepath.Glob(baseWorkdir + "/*.img")

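The utils_test.go change teaches the test harness about a localstatedir setting: gdProcess gains a LocalStateDir field read from the TOML config, updateDirs resolves it the same way as Workdir and Rundir, and the value is passed to glusterd2 via --localstatedir. A small self-contained sketch of just that resolution rule; resolveDir is a hypothetical name, and the real method mutates the gdProcess fields in place:

package main

import (
    "fmt"
    "path"
)

const baseWorkdir = "/tmp/gd2_e2e" // stand-in for the e2e suite's base directory

// resolveDir shows the rule updateDirs applies to Workdir, Rundir and
// LocalStateDir: clean the configured path and anchor relative paths
// under baseWorkdir.
func resolveDir(dir string) string {
    dir = path.Clean(dir)
    if !path.IsAbs(dir) {
        dir = path.Join(baseWorkdir, dir)
    }
    return dir
}

func main() {
    fmt.Println(resolveDir("w1/localstate")) // /tmp/gd2_e2e/w1/localstate
    fmt.Println(resolveDir("/var/lib/gd2"))  // absolute paths are kept as-is
}
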
glustercli/cmd/volume.go (24 changes: 24 additions & 0 deletions)

@@ -26,6 +26,7 @@ const (
     helpVolumeInfoCmd = "Get Gluster Volume Info"
     helpVolumeListCmd = "List all Gluster Volumes"
     helpVolumeStatusCmd = "Get Gluster Volume Status"
+    helpVolumeSizeCmd = "Get Gluster Volume Size Usage"
     helpVolumeExpandCmd = "Expand a Gluster Volume"
     helpVolumeEditCmd = "Edit metadata (key-value pairs) of a volume. Glusterd2 will not interpret these key and value in any way"
 )

@@ -67,6 +68,7 @@ func init() {

     volumeCmd.AddCommand(volumeGetCmd)
     volumeCmd.AddCommand(volumeResetCmd)
+    volumeCmd.AddCommand(volumeSizeCmd)

     volumeInfoCmd.Flags().StringVar(&flagCmdFilterKey, "key", "", "Filter by metadata key")
     volumeInfoCmd.Flags().StringVar(&flagCmdFilterValue, "value", "", "Filter by metadata value")

@@ -432,6 +434,28 @@ var volumeStatusCmd = &cobra.Command{
     },
 }

+var volumeSizeCmd = &cobra.Command{
+    Use:   "size",
+    Short: helpVolumeSizeCmd,
+    Args:  cobra.ExactArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+        volname := cmd.Flags().Args()[0]
+        vol, err := client.VolumeStatus(volname)
+        if err != nil {
+            if verbose {
+                log.WithFields(log.Fields{
+                    "error": err.Error(),
+                }).Error("error getting volume size")
+            }
+            failure("Error getting Volume size", err, 1)
+        }
+        fmt.Println("Volume:", volname)
+        fmt.Printf("Capacity: %d bytes\n", vol.Size.Capacity)
+        fmt.Printf("Used: %d bytes\n", vol.Size.Used)
+        fmt.Printf("Free: %d bytes\n", vol.Size.Free)
+    },
+}
+
 var volumeExpandCmd = &cobra.Command{
     Use: "add-brick",
     Short: helpVolumeExpandCmd,

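The new size subcommand does not hit a new endpoint; it reuses client.VolumeStatus and prints the Capacity, Used and Free fields of the response's Size member. Invocation would presumably look like glustercli volume size <volname>. Below is a toy sketch of just the formatting step, with SizeInfo standing in for the real api type and hard-coded numbers in place of a live VolumeStatus call:

package main

import "fmt"

// SizeInfo loosely mirrors the size fields the new subcommand reads from the
// volume status response; the real type lives in glusterd2's api package.
type SizeInfo struct {
    Capacity uint64
    Used     uint64
    Free     uint64
}

// printVolumeSize reproduces the output format of volumeSizeCmd above.
func printVolumeSize(volname string, size SizeInfo) {
    fmt.Println("Volume:", volname)
    fmt.Printf("Capacity: %d bytes\n", size.Capacity)
    fmt.Printf("Used: %d bytes\n", size.Used)
    fmt.Printf("Free: %d bytes\n", size.Free)
}

func main() {
    // Hypothetical numbers standing in for client.VolumeStatus("testvol").Size.
    printVolumeSize("testvol", SizeInfo{Capacity: 1 << 30, Used: 1 << 28, Free: (1 << 30) - (1 << 28)})
}
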
glusterd2/commands/volumes/volume-shrink.go (89 changes: 19 additions & 70 deletions)

@@ -12,7 +12,8 @@ import (
     "github.com/gluster/glusterd2/glusterd2/transaction"
     "github.com/gluster/glusterd2/glusterd2/volume"
     "github.com/gluster/glusterd2/pkg/api"
-    "github.com/gluster/glusterd2/plugins/rebalance"
+    customerror "github.com/gluster/glusterd2/pkg/errors"
+    rebalance "github.com/gluster/glusterd2/plugins/rebalance"
     rebalanceapi "github.com/gluster/glusterd2/plugins/rebalance/api"

     "github.com/gorilla/mux"

@@ -26,7 +27,7 @@ func registerVolShrinkStepFuncs() {
 }

 func startRebalance(c transaction.TxnCtx) error {
-    var rinfo rebalanceapi.RebalanceInfo
+    var rinfo rebalanceapi.RebalInfo
     err := c.Get("rinfo", &rinfo)
     if err != nil {
         return err

@@ -47,7 +48,7 @@ func validateVolumeShrinkReq(req api.VolShrinkReq) error {

     for _, brick := range req.Bricks {
         if dupEntry[brick.PeerID+filepath.Clean(brick.Path)] == true {
-            return errors.ErrDuplicateBrickPath
+            return customerror.ErrDuplicateBrickPath
         }
         dupEntry[brick.PeerID+filepath.Clean(brick.Path)] = true

@@ -66,7 +67,7 @@ func volumeShrinkHandler(w http.ResponseWriter, r *http.Request) {

     var req api.VolShrinkReq
     if err := restutils.UnmarshalRequest(r, &req); err != nil {
-        restutils.SendHTTPError(ctx, w, http.StatusUnprocessableEntity, err.Error(), api.ErrCodeDefault)
+        restutils.SendHTTPError(ctx, w, http.StatusUnprocessableEntity, err)
         return
     }

@@ -75,54 +76,6 @@ func volumeShrinkHandler(w http.ResponseWriter, r *http.Request) {
         return
     }

-    volinfo, err := volume.GetVolume(volname)
-    if err != nil {
-        restutils.SendHTTPError(ctx, w, http.StatusNotFound, err.Error(), api.ErrCodeDefault)
-        return
-    }
-
-    for index := range req.Bricks {
-        for _, b := range req.Bricks {
-            isPresent = false
-            for _, brick := range volinfo.Subvols[index].Bricks {
-                if brick.PeerID.String() == b.PeerID && brick.Path == filepath.Clean(b.Path) {
-                    flag = true
-                    break
-                }
-            }
-            if !isPresent {
-                restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "One or more brick is not part of given volume")
-                return
-            }
-        }
-    }
-
-    switch volinfo.Type {
-    case volume.Distribute:
-    case volume.Replicate:
-    case volume.DistReplicate:
-        if len(req.Bricks)%volinfo.Subvols[0].ReplicaCount != 0 {
-            err := errors.New("wrong number of bricks to remove")
-            restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err.Error(), api.ErrCodeDefault)
-            return
-        }
-    default:
-        err := errors.New("not implemented: " + volinfo.Type.String())
-        restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err.Error(), api.ErrCodeDefault)
-        return
-
-    }
-
-    nodes, err := req.Nodes()
-    if err != nil {
-        logger.WithError(err).Error("could not prepare node list")
-        restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
-        return
-    }
-
-    txn := transaction.NewTxn(ctx)
-    defer txn.Cleanup()
-
     txn, err := transaction.NewTxnWithLocks(ctx, volname)
     if err != nil {
         status, err := restutils.ErrToStatusCode(err)

@@ -133,16 +86,16 @@ func volumeShrinkHandler(w http.ResponseWriter, r *http.Request) {

     volinfo, err := volume.GetVolume(volname)
     if err != nil {
-        restutils.SendHTTPError(ctx, w, http.StatusNotFound, err.Error(), api.ErrCodeDefault)
+        restutils.SendHTTPError(ctx, w, http.StatusNotFound, err)
         return
     }

     for index := range req.Bricks {
         for _, b := range req.Bricks {
-            isPresent = false
+            isPresent := false
             for _, brick := range volinfo.Subvols[index].Bricks {
                 if brick.PeerID.String() == b.PeerID && brick.Path == filepath.Clean(b.Path) {
-                    flag = true
+                    isPresent = true
                     break
                 }
             }

@@ -177,7 +130,6 @@ func volumeShrinkHandler(w http.ResponseWriter, r *http.Request) {
     }

     txn.Steps = []*transaction.Step{
-        lock,
         {
             DoFunc: "vol-shrink.UpdateVolinfo",
             Nodes:  []uuid.UUID{gdctx.MyUUID},

@@ -190,8 +142,6 @@ func volumeShrinkHandler(w http.ResponseWriter, r *http.Request) {
             DoFunc: "vol-shrink.StartRebalance",
             Nodes:  nodes,
         },
-
-        unlock,
     }

     decommissionedSubvols, err := findDecommissioned(req.Bricks, volinfo)

@@ -205,29 +155,28 @@ func volumeShrinkHandler(w http.ResponseWriter, r *http.Request) {
     // It seems that there is no other way to include this information in the rebalance volfile right now.
     volinfo.Options["distribute.decommissioned-bricks"] = strings.TrimSpace(decommissionedSubvols)

-    var rinfo rebalanceapi.RebalanceInfo
-    var commit uint64
+    var rinfo rebalanceapi.RebalInfo
     rinfo.Volname = volname
     rinfo.RebalanceID = uuid.NewRandom()
-    rinfo.Cmd = rebalanceapi.GfDefragCmdStartForce
-    rinfo.Status = rebalanceapi.GfDefragStatusNotStarted
-    rinfo.CommitHash = rebalance.SetCommitHash(commit)
+    rinfo.Cmd = rebalanceapi.CmdStartForce
+    rinfo.State = rebalanceapi.NotStarted
+    rinfo.CommitHash = rebalance.SetCommitHash()
     if err := txn.Ctx.Set("rinfo", rinfo); err != nil {
-        restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err.Error(), api.ErrCodeDefault)
+        restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
         return
     }

     if err := txn.Ctx.Set("volinfo", volinfo); err != nil {
-        restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err.Error(), api.ErrCodeDefault)
+        restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
         return
     }

     if err = txn.Do(); err != nil {
         logger.WithError(err).Error("remove bricks start transaction failed")
         if err == transaction.ErrLockTimeout {
-            restutils.SendHTTPError(ctx, w, http.StatusConflict, err.Error(), api.ErrCodeDefault)
+            restutils.SendHTTPError(ctx, w, http.StatusConflict, err)
         } else {
-            restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err.Error(), api.ErrCodeDefault)
+            restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
         }
         return
     }

@@ -239,21 +188,21 @@ func findDecommissioned(bricks []api.BrickReq, volinfo *volume.Volinfo) (string, error) {
 func findDecommissioned(bricks []api.BrickReq, volinfo *volume.Volinfo) (string, error) {
     brickSet := make(map[string]bool)
     for _, brick := range bricks {
-        u := uuid.Parse(brick.NodeID)
+        u := uuid.Parse(brick.PeerID)
         if u == nil {
             return "", errors.New("Invalid nodeid")
         }
         path, err := filepath.Abs(brick.Path)
         if err != nil {
             return "", err
         }
-        brickSet[brick.NodeID+":"+path] = true
+        brickSet[brick.PeerID+":"+path] = true
     }

     var subvolMap = make(map[string]int)
     for _, subvol := range volinfo.Subvols {
         for _, b := range subvol.Bricks {
-            if brickSet[b.NodeID.String()+":"+b.Path] {
+            if brickSet[b.PeerID.String()+":"+b.Path] {
                 if count, ok := subvolMap[subvol.Name]; !ok {
                     subvolMap[subvol.Name] = 1
                 } else {

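For context on the findDecommissioned changes above (NodeID renamed to PeerID): the function indexes the requested bricks by "<peer-id>:<absolute-path>" and then counts, per subvolume, how many of that subvolume's bricks appear in the request. The tail of the function is cut off in this view, but presumably a subvolume whose every brick is being removed is what ends up in the distribute.decommissioned-bricks option. A compact standalone sketch of that matching logic under that assumption, using plain string structs instead of the api and volume types:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// Minimal stand-ins for the api/volume types used by findDecommissioned.
type brickReq struct{ PeerID, Path string }
type brick struct{ PeerID, Path string }
type subvol struct {
    Name   string
    Bricks []brick
}

// decommissionedSubvols mirrors the visible part of findDecommissioned:
// index requested bricks by "<peer-id>:<abs-path>", count matches per
// subvolume, and (assumed, since the function tail is not shown) report the
// subvolumes whose bricks are all being removed.
func decommissionedSubvols(reqBricks []brickReq, subvols []subvol) string {
    brickSet := make(map[string]bool)
    for _, b := range reqBricks {
        p, _ := filepath.Abs(b.Path) // error ignored for the sketch
        brickSet[b.PeerID+":"+p] = true
    }

    var names []string
    for _, sv := range subvols {
        count := 0
        for _, b := range sv.Bricks {
            if brickSet[b.PeerID+":"+b.Path] {
                count++
            }
        }
        if count > 0 && count == len(sv.Bricks) {
            names = append(names, sv.Name)
        }
    }
    return strings.TrimSpace(strings.Join(names, " "))
}

func main() {
    subvols := []subvol{
        {Name: "testvol-dht-0", Bricks: []brick{{"peer1", "/bricks/b1"}, {"peer2", "/bricks/b2"}}},
        {Name: "testvol-dht-1", Bricks: []brick{{"peer1", "/bricks/b3"}, {"peer2", "/bricks/b4"}}},
    }
    req := []brickReq{{"peer1", "/bricks/b3"}, {"peer2", "/bricks/b4"}}
    fmt.Println(decommissionedSubvols(req, subvols)) // testvol-dht-1
}
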
pkg/api/brickutils.go (17 changes: 17 additions & 0 deletions)

@@ -41,3 +41,20 @@ func (req *VolExpandReq) Nodes() ([]uuid.UUID, error) {
     }
     return nodes, nil
 }
+
+// Nodes extracts list of Peer IDs from Volume Shrink request
+func (req *VolShrinkReq) Nodes() ([]uuid.UUID, error) {
+    var nodesMap = make(map[string]bool)
+    var nodes []uuid.UUID
+    for _, brick := range req.Bricks {
+        if _, ok := nodesMap[brick.PeerID]; !ok {
+            nodesMap[brick.PeerID] = true
+            u := uuid.Parse(brick.PeerID)
+            if u == nil {
+                return nil, fmt.Errorf("Failed to parse peer ID: %s", brick.PeerID)
+            }
+            nodes = append(nodes, u)
+        }
+    }
+    return nodes, nil
+}

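The new VolShrinkReq.Nodes helper mirrors the VolExpandReq one above it: it walks req.Bricks, keeps one entry per distinct PeerID, and fails if an ID does not parse as a UUID. A quick standalone illustration of the same dedup-and-parse loop; brickReq is a local stand-in for api.BrickReq, and the uuid import assumes the pborman/uuid package, whose Parse returns nil on bad input as in the code above:

package main

import (
    "fmt"

    "github.com/pborman/uuid"
)

type brickReq struct{ PeerID, Path string }

// nodesFromBricks mirrors VolShrinkReq.Nodes: one uuid.UUID per distinct
// PeerID, with an error for IDs that do not parse.
func nodesFromBricks(bricks []brickReq) ([]uuid.UUID, error) {
    seen := make(map[string]bool)
    var nodes []uuid.UUID
    for _, b := range bricks {
        if seen[b.PeerID] {
            continue
        }
        seen[b.PeerID] = true
        u := uuid.Parse(b.PeerID)
        if u == nil {
            return nil, fmt.Errorf("failed to parse peer ID: %s", b.PeerID)
        }
        nodes = append(nodes, u)
    }
    return nodes, nil
}

func main() {
    id := uuid.NewRandom().String()
    nodes, err := nodesFromBricks([]brickReq{{id, "/bricks/b1"}, {id, "/bricks/b2"}})
    fmt.Println(len(nodes), err) // 1 <nil>
}
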