From b5fe6e6d9bc6636d054f74db991603788164094d Mon Sep 17 00:00:00 2001
From: Shubham Gupta
Date: Sun, 31 Mar 2024 22:06:34 +0530
Subject: [PATCH] test: Cover TestCheckRedisCluster

Signed-off-by: Shubham Gupta
---
 k8sutils/redis.go      |  30 +++++------
 k8sutils/redis_test.go | 116 +++++++++++++++++++++++++++++++++++++++--
 2 files changed, 128 insertions(+), 18 deletions(-)

diff --git a/k8sutils/redis.go b/k8sutils/redis.go
index bb983084c..525677a2e 100644
--- a/k8sutils/redis.go
+++ b/k8sutils/redis.go
@@ -165,8 +165,9 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg
 		pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
 		if err != nil {
 			logger.Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name)
+		} else {
+			cmd = append(cmd, "-a", pass)
 		}
-		cmd = append(cmd, "-a", pass)
 	}
 
 	cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, leaderPod.PodName)...)
@@ -185,7 +186,10 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter
 	leaderCounts := cr.Spec.GetReplicaCounts("leader")
 	followerPerLeader := followerCounts / leaderCounts
 
-	nodes := checkRedisCluster(ctx, client, logger, cr)
+	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+	defer redisClient.Close()
+
+	nodes := checkRedisCluster(ctx, redisClient, logger)
 	for followerIdx := 0; followerIdx <= int(followerCounts)-1; {
 		for i := 0; i < int(followerPerLeader) && followerIdx <= int(followerCounts)-1; i++ {
 			followerPod := RedisDetails{
@@ -222,18 +226,10 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter
 }
 
 // checkRedisCluster will check the redis cluster have sufficient nodes or not
-func checkRedisCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) [][]string {
-	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
-	defer redisClient.Close()
-	cmd := redis.NewStringCmd(ctx, "cluster", "nodes")
-	err := redisClient.Process(ctx, cmd)
+func checkRedisCluster(ctx context.Context, redisClient *redis.Client, logger logr.Logger) [][]string {
+	output, err := redisClient.ClusterNodes(ctx).Result()
 	if err != nil {
-		logger.Error(err, "Redis command failed with this error")
-	}
-
-	output, err := cmd.Result()
-	if err != nil {
-		logger.Error(err, "Redis command failed with this error")
+		logger.Error(err, "Error in getting Redis cluster nodes")
 	}
 
 	logger.V(1).Info("Redis cluster nodes are listed", "Output", output)
@@ -298,8 +294,10 @@ func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, lo
 
 // CheckRedisNodeCount will check the count of redis nodes
 func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeType string) int32 {
+	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+	defer redisClient.Close()
 	var redisNodeType string
-	clusterNodes := checkRedisCluster(ctx, client, logger, cr)
+	clusterNodes := checkRedisCluster(ctx, redisClient, logger)
 	count := len(clusterNodes)
 
 	switch nodeType {
@@ -326,7 +324,9 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logge
 
 // CheckRedisClusterState will check the redis cluster state
 func CheckRedisClusterState(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) int {
-	clusterNodes := checkRedisCluster(ctx, client, logger, cr)
+	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+	defer redisClient.Close()
+	clusterNodes := checkRedisCluster(ctx, redisClient, logger)
 	count := 0
 	for _, node := range clusterNodes {
 		if strings.Contains(node[2], "fail") || strings.Contains(node[7], "disconnected") {
diff --git a/k8sutils/redis_test.go b/k8sutils/redis_test.go
index 884acdce9..54a3596ed 100644
--- a/k8sutils/redis_test.go
+++ b/k8sutils/redis_test.go
@@ -353,15 +353,26 @@ func TestGetRedisTLSArgs(t *testing.T) {
 
 func TestCreateRedisReplicationCommand(t *testing.T) {
 	logger := logr.Discard()
+	type secret struct {
+		name      string
+		namespace string
+		key       string
+	}
 	tests := []struct {
-		name            string
-		redisCluster    *redisv1beta2.RedisCluster
+		name         string
+		redisCluster *redisv1beta2.RedisCluster
+		secret
 		leaderPod       RedisDetails
 		followerPod     RedisDetails
 		expectedCommand []string
 	}{
 		{
 			name: "Test case with cluster version v7",
+			secret: secret{
+				name:      "redis-password-secret",
+				namespace: "default",
+				key:       "password",
+			},
 			redisCluster: &redisv1beta2.RedisCluster{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "redis-cluster",
@@ -397,6 +408,47 @@ func TestCreateRedisReplicationCommand(t *testing.T) {
 				"-a", "password",
 			},
 		},
+		{
+			name: "Test case with cluster version v7 failed to get password",
+			secret: secret{
+				name:      "wrong-name",
+				namespace: "default",
+				key:       "password",
+			},
+			redisCluster: &redisv1beta2.RedisCluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "redis-cluster",
+					Namespace: "default",
+				},
+				Spec: redisv1beta2.RedisClusterSpec{
+					Size: ptr.To(int32(3)),
+					KubernetesConfig: redisv1beta2.KubernetesConfig{
+						KubernetesConfig: api.KubernetesConfig{
+							ExistingPasswordSecret: &api.ExistingPasswordSecret{
+								Name: ptr.To("redis-password-secret"),
+								Key:  ptr.To("password"),
+							},
+						},
+					},
+					ClusterVersion: ptr.To("v7"),
+					Port:           ptr.To(6379),
+				},
+			},
+			leaderPod: RedisDetails{
+				PodName:   "redis-cluster-leader-0",
+				Namespace: "default",
+			},
+			followerPod: RedisDetails{
+				PodName:   "redis-cluster-follower-0",
+				Namespace: "default",
+			},
+			expectedCommand: []string{
+				"redis-cli", "--cluster", "add-node",
+				"redis-cluster-follower-0.redis-cluster-follower-headless.default.svc:6379",
+				"redis-cluster-leader-0.redis-cluster-leader-headless.default.svc:6379",
+				"--cluster-slave",
+			},
+		},
 		{
 			name: "Test case without cluster version v7",
 			redisCluster: &redisv1beta2.RedisCluster{
@@ -431,7 +483,7 @@ func TestCreateRedisReplicationCommand(t *testing.T) {
 			pods := mock_utils.CreateFakeObjectWithPodIPs(tt.redisCluster)
 			var secret []runtime.Object
 			if tt.redisCluster.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-				secret = mock_utils.CreateFakeObjectWithSecret(*tt.redisCluster.Spec.KubernetesConfig.ExistingPasswordSecret.Name, tt.redisCluster.GetNamespace(), *tt.redisCluster.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+				secret = mock_utils.CreateFakeObjectWithSecret(tt.secret.name, tt.secret.namespace, tt.secret.key)
 			}
 
 			var objects []runtime.Object
@@ -682,3 +734,61 @@ func Test_checkRedisServerRole(t *testing.T) {
 		})
 	}
 }
+
+func TestCheckRedisCluster(t *testing.T) {
+	logger := logr.Discard() // Discard logs
+
+	tests := []struct {
+		name               string
+		expectError        error
+		clusterNodesOutput string
+		expectedResult     [][]string
+	}{
+		{
+			name: "Detailed cluster nodes output",
+			clusterNodesOutput: `07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004@31004,hostname4 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected
+67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002@31002,hostname2 master - 0 1426238316232 2 connected 5461-10922
+292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003@31003,hostname3 master - 0 1426238318243 3 connected 10923-16383
+6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005@31005,hostname5 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected
+824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006@31006,hostname6 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected
+e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,hostname1 myself,master - 0 0 1 connected 0-5460`,
+			expectedResult: [][]string{
+				{"07c37dfeb235213a872192d90877d0cd55635b91", "127.0.0.1:30004@31004,hostname4", "slave", "e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca", "0", "1426238317239", "4", "connected"},
+				{"67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1", "127.0.0.1:30002@31002,hostname2", "master", "-", "0", "1426238316232", "2", "connected", "5461-10922"},
+				{"292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f", "127.0.0.1:30003@31003,hostname3", "master", "-", "0", "1426238318243", "3", "connected", "10923-16383"},
+				{"6ec23923021cf3ffec47632106199cb7f496ce01", "127.0.0.1:30005@31005,hostname5", "slave", "67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1", "0", "1426238316232", "5", "connected"},
+				{"824fe116063bc5fcf9f4ffd895bc17aee7731ac3", "127.0.0.1:30006@31006,hostname6", "slave", "292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f", "0", "1426238317741", "6", "connected"},
+				{"e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca", "127.0.0.1:30001@31001,hostname1", "myself,master", "-", "0", "0", "1", "connected", "0-5460"},
+			},
+		},
+		{
+			name:           "Error from ClusterNodes",
+			expectError:    redis.ErrClosed,
+			expectedResult: nil,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			db, mock := redismock.NewClientMock()
+
+			if tc.expectError != nil {
+				mock.ExpectClusterNodes().SetErr(tc.expectError)
+			} else {
+				mock.ExpectClusterNodes().SetVal(tc.clusterNodesOutput)
+			}
+			result := checkRedisCluster(context.TODO(), db, logger)
+
+			if tc.expectError != nil {
+				assert.Nil(t, result)
+			} else {
+				assert.ElementsMatch(t, tc.expectedResult, result)
+			}
+
+			// Ensure all expectations are met
+			if err := mock.ExpectationsWereMet(); err != nil {
+				t.Errorf("there were unfulfilled expectations: %s", err)
+			}
+		})
+	}
+}