Skip to content

Commit

Permalink
test: Cover TestCheckRedisCluster
Browse files Browse the repository at this point in the history
Signed-off-by: Shubham Gupta <iamshubhamgupta2001@gmail.com>
  • Loading branch information
shubham-cmyk committed Mar 31, 2024
1 parent e58ab50 commit b5fe6e6
Show file tree
Hide file tree
Showing 2 changed files with 128 additions and 18 deletions.
30 changes: 15 additions & 15 deletions k8sutils/redis.go
Original file line number Diff line number Diff line change
Expand Up @@ -165,8 +165,9 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg
pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
if err != nil {
logger.Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name)
} else {
cmd = append(cmd, "-a", pass)
}
cmd = append(cmd, "-a", pass)
}

cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, leaderPod.PodName)...)
Expand All @@ -185,7 +186,10 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter
leaderCounts := cr.Spec.GetReplicaCounts("leader")
followerPerLeader := followerCounts / leaderCounts

nodes := checkRedisCluster(ctx, client, logger, cr)
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
defer redisClient.Close()

Check warning on line 190 in k8sutils/redis.go

View check run for this annotation

Codecov / codecov/patch

k8sutils/redis.go#L189-L190

Added lines #L189 - L190 were not covered by tests

nodes := checkRedisCluster(ctx, redisClient, logger)

Check warning on line 192 in k8sutils/redis.go

View check run for this annotation

Codecov / codecov/patch

k8sutils/redis.go#L192

Added line #L192 was not covered by tests
for followerIdx := 0; followerIdx <= int(followerCounts)-1; {
for i := 0; i < int(followerPerLeader) && followerIdx <= int(followerCounts)-1; i++ {
followerPod := RedisDetails{
Expand Down Expand Up @@ -222,18 +226,10 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter
}

// checkRedisCluster will check the redis cluster have sufficient nodes or not
func checkRedisCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) [][]string {
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
defer redisClient.Close()
cmd := redis.NewStringCmd(ctx, "cluster", "nodes")
err := redisClient.Process(ctx, cmd)
func checkRedisCluster(ctx context.Context, redisClient *redis.Client, logger logr.Logger) [][]string {
output, err := redisClient.ClusterNodes(ctx).Result()
if err != nil {
logger.Error(err, "Redis command failed with this error")
}

output, err := cmd.Result()
if err != nil {
logger.Error(err, "Redis command failed with this error")
logger.Error(err, "Error in getting Redis cluster nodes")
}
logger.V(1).Info("Redis cluster nodes are listed", "Output", output)

Expand Down Expand Up @@ -298,8 +294,10 @@ func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, lo

// CheckRedisNodeCount will check the count of redis nodes
func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeType string) int32 {
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
defer redisClient.Close()

Check warning on line 298 in k8sutils/redis.go

View check run for this annotation

Codecov / codecov/patch

k8sutils/redis.go#L297-L298

Added lines #L297 - L298 were not covered by tests
var redisNodeType string
clusterNodes := checkRedisCluster(ctx, client, logger, cr)
clusterNodes := checkRedisCluster(ctx, redisClient, logger)

Check warning on line 300 in k8sutils/redis.go

View check run for this annotation

Codecov / codecov/patch

k8sutils/redis.go#L300

Added line #L300 was not covered by tests
count := len(clusterNodes)

switch nodeType {
Expand All @@ -326,7 +324,9 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logge

// CheckRedisClusterState will check the redis cluster state
func CheckRedisClusterState(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) int {
clusterNodes := checkRedisCluster(ctx, client, logger, cr)
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
defer redisClient.Close()
clusterNodes := checkRedisCluster(ctx, redisClient, logger)

Check warning on line 329 in k8sutils/redis.go

View check run for this annotation

Codecov / codecov/patch

k8sutils/redis.go#L327-L329

Added lines #L327 - L329 were not covered by tests
count := 0
for _, node := range clusterNodes {
if strings.Contains(node[2], "fail") || strings.Contains(node[7], "disconnected") {
Expand Down
116 changes: 113 additions & 3 deletions k8sutils/redis_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -353,15 +353,26 @@ func TestGetRedisTLSArgs(t *testing.T) {

func TestCreateRedisReplicationCommand(t *testing.T) {
logger := logr.Discard()
type secret struct {
name string
namespace string
key string
}
tests := []struct {
name string
redisCluster *redisv1beta2.RedisCluster
name string
redisCluster *redisv1beta2.RedisCluster
secret
leaderPod RedisDetails
followerPod RedisDetails
expectedCommand []string
}{
{
name: "Test case with cluster version v7",
secret: secret{
name: "redis-password-secret",
namespace: "default",
key: "password",
},
redisCluster: &redisv1beta2.RedisCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "redis-cluster",
Expand Down Expand Up @@ -397,6 +408,47 @@ func TestCreateRedisReplicationCommand(t *testing.T) {
"-a", "password",
},
},
{
name: "Test case with cluster version v7 failed to get password",
secret: secret{
name: "wrong-name",
namespace: "default",
key: "password",
},
redisCluster: &redisv1beta2.RedisCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "redis-cluster",
Namespace: "default",
},
Spec: redisv1beta2.RedisClusterSpec{
Size: ptr.To(int32(3)),
KubernetesConfig: redisv1beta2.KubernetesConfig{
KubernetesConfig: api.KubernetesConfig{
ExistingPasswordSecret: &api.ExistingPasswordSecret{
Name: ptr.To("redis-password-secret"),
Key: ptr.To("password"),
},
},
},
ClusterVersion: ptr.To("v7"),
Port: ptr.To(6379),
},
},
leaderPod: RedisDetails{
PodName: "redis-cluster-leader-0",
Namespace: "default",
},
followerPod: RedisDetails{
PodName: "redis-cluster-follower-0",
Namespace: "default",
},
expectedCommand: []string{
"redis-cli", "--cluster", "add-node",
"redis-cluster-follower-0.redis-cluster-follower-headless.default.svc:6379",
"redis-cluster-leader-0.redis-cluster-leader-headless.default.svc:6379",
"--cluster-slave",
},
},
{
name: "Test case without cluster version v7",
redisCluster: &redisv1beta2.RedisCluster{
Expand Down Expand Up @@ -431,7 +483,7 @@ func TestCreateRedisReplicationCommand(t *testing.T) {
pods := mock_utils.CreateFakeObjectWithPodIPs(tt.redisCluster)
var secret []runtime.Object
if tt.redisCluster.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
secret = mock_utils.CreateFakeObjectWithSecret(*tt.redisCluster.Spec.KubernetesConfig.ExistingPasswordSecret.Name, tt.redisCluster.GetNamespace(), *tt.redisCluster.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
secret = mock_utils.CreateFakeObjectWithSecret(tt.secret.name, tt.secret.namespace, tt.secret.key)
}

var objects []runtime.Object
Expand Down Expand Up @@ -682,3 +734,61 @@ func Test_checkRedisServerRole(t *testing.T) {
})
}
}

// TestCheckRedisCluster verifies that checkRedisCluster splits the raw
// CLUSTER NODES output into per-node field slices, and that it yields a
// nil result when the underlying Redis command returns an error.
func TestCheckRedisCluster(t *testing.T) {
	logger := logr.Discard() // log output is not inspected by these cases

	cases := []struct {
		name               string
		expectError        error
		clusterNodesOutput string
		expectedResult     [][]string
	}{
		{
			name: "Detailed cluster nodes output",
			clusterNodesOutput: `07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004@31004,hostname4 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected
67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002@31002,hostname2 master - 0 1426238316232 2 connected 5461-10922
292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003@31003,hostname3 master - 0 1426238318243 3 connected 10923-16383
6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005@31005,hostname5 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected
824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006@31006,hostname6 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected
e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,hostname1 myself,master - 0 0 1 connected 0-5460`,
			expectedResult: [][]string{
				{"07c37dfeb235213a872192d90877d0cd55635b91", "127.0.0.1:30004@31004,hostname4", "slave", "e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca", "0", "1426238317239", "4", "connected"},
				{"67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1", "127.0.0.1:30002@31002,hostname2", "master", "-", "0", "1426238316232", "2", "connected", "5461-10922"},
				{"292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f", "127.0.0.1:30003@31003,hostname3", "master", "-", "0", "1426238318243", "3", "connected", "10923-16383"},
				{"6ec23923021cf3ffec47632106199cb7f496ce01", "127.0.0.1:30005@31005,hostname5", "slave", "67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1", "0", "1426238316232", "5", "connected"},
				{"824fe116063bc5fcf9f4ffd895bc17aee7731ac3", "127.0.0.1:30006@31006,hostname6", "slave", "292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f", "0", "1426238317741", "6", "connected"},
				{"e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca", "127.0.0.1:30001@31001,hostname1", "myself,master", "-", "0", "0", "1", "connected", "0-5460"},
			},
		},
		{
			name:           "Error from ClusterNodes",
			expectError:    redis.ErrClosed,
			expectedResult: nil,
		},
	}

	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			client, clientMock := redismock.NewClientMock()

			// Program the mock with either a canned CLUSTER NODES payload
			// or a forced error, depending on the case under test.
			expectation := clientMock.ExpectClusterNodes()
			if tt.expectError != nil {
				expectation.SetErr(tt.expectError)
			} else {
				expectation.SetVal(tt.clusterNodesOutput)
			}

			got := checkRedisCluster(context.TODO(), client, logger)

			if tt.expectError != nil {
				assert.Nil(t, got)
			} else {
				assert.ElementsMatch(t, tt.expectedResult, got)
			}

			// Every programmed expectation must have been consumed.
			if err := clientMock.ExpectationsWereMet(); err != nil {
				t.Errorf("there were unfulfilled expectations: %s", err)
			}
		})
	}
}

0 comments on commit b5fe6e6

Please sign in to comment.