Skip to content

Commit

Permalink
pool: skip updating crush rules for stretch clusters
Browse files Browse the repository at this point in the history
Pools in stretch clusters must all specify the same
CRUSH rule. No pools can use a different rule. When there
is a change in the device class, we do not even expect to update
the crush rules in a stretch cluster. Different device classes
are not supported in stretch clusters, and it's expected to be
a homogeneous environment. Therefore, skip all crush rule updates
in stretch clusters.

Signed-off-by: Travis Nielsen <tnielsen@redhat.com>
(cherry picked from commit b92bbba)
(cherry picked from commit cde2c00)
  • Loading branch information
travisn committed Jul 10, 2024
1 parent 487b95a commit 2eea810
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 0 deletions.
5 changes: 5 additions & 0 deletions pkg/daemon/ceph/client/pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -463,6 +463,11 @@ func createReplicatedPoolForApp(context *clusterd.Context, clusterInfo *ClusterI
}

func updatePoolCrushRule(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, pool cephv1.NamedPoolSpec) error {
if clusterSpec.IsStretchCluster() {
logger.Debugf("skipping crush rule update for pool %q in a stretch cluster", pool.Name)
return nil
}

if pool.FailureDomain == "" && pool.DeviceClass == "" {
logger.Debugf("skipping check for failure domain and deviceClass on pool %q as it is not specified", pool.Name)
return nil
Expand Down
23 changes: 23 additions & 0 deletions pkg/daemon/ceph/client/pool_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -251,10 +251,12 @@ func TestUpdateFailureDomain(t *testing.T) {
currentFailureDomain := "rack"
currentDeviceClass := "default"
testCrushRuleName := "test_rule"
cephCommandCalled := false
executor := &exectest.MockExecutor{}
context := &clusterd.Context{Executor: executor}
executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
logger.Infof("Command: %s %v", command, args)
cephCommandCalled = true
if args[1] == "pool" {
if args[2] == "get" {
assert.Equal(t, "mypool", args[3])
Expand Down Expand Up @@ -318,6 +320,27 @@ func TestUpdateFailureDomain(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "mypool_zone", newCrushRule)
})

t.Run("stretch cluster skips crush rule update", func(t *testing.T) {
p := cephv1.NamedPoolSpec{
Name: "mypool",
PoolSpec: cephv1.PoolSpec{
FailureDomain: "zone",
Replicated: cephv1.ReplicatedSpec{Size: 3},
EnableCrushUpdates: true,

Check failure on line 330 in pkg/daemon/ceph/client/pool_test.go

View workflow job for this annotation

GitHub Actions / golangci-lint

unknown field EnableCrushUpdates in struct literal of type "github.com/rook/rook/pkg/apis/ceph.rook.io/v1".PoolSpec (typecheck)

Check failure on line 330 in pkg/daemon/ceph/client/pool_test.go

View workflow job for this annotation

GitHub Actions / unittests

unknown field EnableCrushUpdates in struct literal of type "github.com/rook/rook/pkg/apis/ceph.rook.io/v1".PoolSpec
},
}
clusterSpec := &cephv1.ClusterSpec{
Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.MonZoneSpec{{Name: "zone1"}, {Name: "zone2"}, {Name: "zone3", Arbiter: true}}}},
Storage: cephv1.StorageScopeSpec{},
}
newCrushRule = ""
cephCommandCalled = false
err := updatePoolCrushRule(context, AdminTestClusterInfo("mycluster"), clusterSpec, p)
assert.NoError(t, err)
assert.Equal(t, "", newCrushRule)
assert.False(t, cephCommandCalled)
})
}

func TestExtractPoolDetails(t *testing.T) {
Expand Down

0 comments on commit 2eea810

Please sign in to comment.