diff --git a/.github/workflows/operator-ci.yaml b/.github/workflows/operator-ci.yaml
index f9322b585..f32f67757 100644
--- a/.github/workflows/operator-ci.yaml
+++ b/.github/workflows/operator-ci.yaml
@@ -86,7 +86,6 @@ jobs:
         uses: golangci/golangci-lint-action@v3
         with:
           version: v1.54.0
-          args: --timeout=5m0s ./... --verbose --out-format=github-actions
 
   container_quality_dockerfile_lint:
     needs: [gofmt, govet]
diff --git a/.golangci.yml b/.golangci.yml
index 48fc1e0d6..a72eb7d4d 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -25,6 +25,10 @@ issues:
 run:
   timeout: 5m
   go-version: "1.21"
+output:
+  format: colored-line-number
+  sort-results: true
+test: false
 
 # Exclude third-party packages and go.mod from the lint checks
 exclude: |
diff --git a/controllers/rediscluster_controller.go b/controllers/rediscluster_controller.go
index 634560949..8a43c5840 100644
--- a/controllers/rediscluster_controller.go
+++ b/controllers/rediscluster_controller.go
@@ -80,21 +80,21 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 
         // check whether the redis is leader or not?
         // if not true then make it leader pod
-        if !(k8sutils.VerifyLeaderPod(ctx, instance)) {
+        if !(k8sutils.VerifyLeaderPod(ctx, r.K8sClient, r.Log, instance)) {
             // lastLeaderPod is slaving right now Make it the master Pod
             // We have to bring a manual failover here to make it a leaderPod
             // clusterFailover should also include the clusterReplicate since we have to map the followers to new leader
-            k8sutils.ClusterFailover(ctx, instance)
+            k8sutils.ClusterFailover(ctx, r.K8sClient, r.Log, instance)
         }
 
         // Step 1 Reshard the Cluster
-        k8sutils.ReshardRedisCluster(instance)
+        k8sutils.ReshardRedisCluster(r.K8sClient, r.Log, instance)
         // Step 2 Remove the Follower Node
-        k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, instance)
+        k8sutils.RemoveRedisFollowerNodesFromCluster(ctx, r.K8sClient, r.Log, instance)
         // Step 3 Remove the Leader Node
-        k8sutils.RemoveRedisNodeFromCluster(ctx, instance)
+        k8sutils.RemoveRedisNodeFromCluster(ctx, r.K8sClient, r.Log, instance)
         // Step 4 Rebalance the cluster
-        k8sutils.RebalanceRedisCluster(instance)
+        k8sutils.RebalanceRedisCluster(r.K8sClient, r.Log, instance)
         return ctrl.Result{RequeueAfter: time.Second * 100}, nil
     }
 
@@ -185,34 +185,34 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
     }
 
     reqLogger.Info("Creating redis cluster by executing cluster creation commands", "Leaders.Ready", strconv.Itoa(int(redisLeaderInfo.Status.ReadyReplicas)), "Followers.Ready", strconv.Itoa(int(redisFollowerInfo.Status.ReadyReplicas)))
-    if k8sutils.CheckRedisNodeCount(ctx, instance, "") != totalReplicas {
-        leaderCount := k8sutils.CheckRedisNodeCount(ctx, instance, "leader")
+    if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "") != totalReplicas {
+        leaderCount := k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "leader")
         if leaderCount != leaderReplicas {
             reqLogger.Info("Not all leader are part of the cluster...", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas)
             if leaderCount <= 2 {
-                k8sutils.ExecuteRedisClusterCommand(ctx, instance)
+                k8sutils.ExecuteRedisClusterCommand(ctx, r.K8sClient, r.Log, instance)
             } else {
                 if leaderCount < leaderReplicas {
                     // Scale up the cluster
                     // Step 2 : Add Redis Node
-                    k8sutils.AddRedisNodeToCluster(ctx, instance)
+                    k8sutils.AddRedisNodeToCluster(ctx, r.K8sClient, r.Log, instance)
                     // Step 3 Rebalance the cluster using the empty masters
-                    k8sutils.RebalanceRedisClusterEmptyMasters(instance)
+                    k8sutils.RebalanceRedisClusterEmptyMasters(r.K8sClient, r.Log, instance)
                 }
             }
         } else {
             if followerReplicas > 0 && redisFollowerInfo.Status.ReadyReplicas == followerReplicas {
                 reqLogger.Info("All leader are part of the cluster, adding follower/replicas", "Leaders.Count", leaderCount, "Instance.Size", leaderReplicas, "Follower.Replicas", followerReplicas)
-                k8sutils.ExecuteRedisReplicationCommand(ctx, instance)
+                k8sutils.ExecuteRedisReplicationCommand(ctx, r.K8sClient, r.Log, instance)
             } else {
                 reqLogger.Info("no follower/replicas configured, skipping replication configuration", "Leaders.Count", leaderCount, "Leader.Size", leaderReplicas, "Follower.Replicas", followerReplicas)
             }
         }
     } else {
         reqLogger.Info("Redis leader count is desired")
-        if int(totalReplicas) > 1 && k8sutils.CheckRedisClusterState(ctx, instance) >= int(totalReplicas)-1 {
+        if int(totalReplicas) > 1 && k8sutils.CheckRedisClusterState(ctx, r.K8sClient, r.Log, instance) >= int(totalReplicas)-1 {
             reqLogger.Info("Redis leader is not desired, executing failover operation")
-            err = k8sutils.ExecuteFailoverOperation(ctx, instance)
+            err = k8sutils.ExecuteFailoverOperation(ctx, r.K8sClient, r.Log, instance)
             if err != nil {
                 return ctrl.Result{RequeueAfter: time.Second * 10}, err
             }
@@ -221,8 +221,8 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
     }
 
     // Check If there is No Empty Master Node
-    if k8sutils.CheckRedisNodeCount(ctx, instance, "") == totalReplicas {
-        k8sutils.CheckIfEmptyMasters(ctx, instance)
+    if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "") == totalReplicas {
+        k8sutils.CheckIfEmptyMasters(ctx, r.K8sClient, r.Log, instance)
     }
 
     reqLogger.Info("Will reconcile redis cluster operator in again 10 seconds")
diff --git a/controllers/redisreplication_controller.go b/controllers/redisreplication_controller.go
index 193cc3de9..c424c85d5 100644
--- a/controllers/redisreplication_controller.go
+++ b/controllers/redisreplication_controller.go
@@ -80,11 +80,11 @@ func (r *RedisReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Req
 
     reqLogger.Info("Creating redis replication by executing replication creation commands", "Replication.Ready", strconv.Itoa(int(redisReplicationInfo.Status.ReadyReplicas)))
 
-    if len(k8sutils.GetRedisNodesByRole(ctx, instance, "master")) > int(leaderReplicas) {
+    if len(k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "master")) > int(leaderReplicas) {
 
-        masterNodes := k8sutils.GetRedisNodesByRole(ctx, instance, "master")
-        slaveNodes := k8sutils.GetRedisNodesByRole(ctx, instance, "slave")
-        err := k8sutils.CreateMasterSlaveReplication(ctx, instance, masterNodes, slaveNodes)
+        masterNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "master")
+        slaveNodes := k8sutils.GetRedisNodesByRole(ctx, r.K8sClient, r.Log, instance, "slave")
+        err := k8sutils.CreateMasterSlaveReplication(ctx, r.K8sClient, r.Log, instance, masterNodes, slaveNodes)
         if err != nil {
             return ctrl.Result{RequeueAfter: time.Second * 60}, err
         }
diff --git a/controllers/redissentinel_controller.go b/controllers/redissentinel_controller.go
index 18619a174..3a88dcc74 100644
--- a/controllers/redissentinel_controller.go
+++ b/controllers/redissentinel_controller.go
@@ -55,7 +55,7 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques
     }
 
     // Create Redis Sentinel
-    err = k8sutils.CreateRedisSentinel(ctx, instance)
+    err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, r.Log, instance)
     if err != nil {
         return ctrl.Result{}, err
     }
diff --git a/k8sutils/cluster-scaling.go b/k8sutils/cluster-scaling.go
index b191b63f5..8304d0060 100644
--- a/k8sutils/cluster-scaling.go
+++ b/k8sutils/cluster-scaling.go
@@ -6,15 +6,16 @@ import (
     "strings"
 
     redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
+    "github.com/go-logr/logr"
     redis "github.com/redis/go-redis/v9"
+    "k8s.io/client-go/kubernetes"
 )
 
 // Reshard the redis Cluster
-func ReshardRedisCluster(cr *redisv1beta2.RedisCluster) {
+func ReshardRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
     ctx := context.TODO()
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
     var cmd []string
-    currentRedisCount := CheckRedisNodeCount(ctx, cr, "leader")
+    currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
 
     // Transfer Pod details
     transferPOD := RedisDetails{
@@ -35,7 +36,7 @@ func ReshardRedisCluster(cr *redisv1beta2.RedisCluster) {
     }
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
@@ -48,17 +49,17 @@ func ReshardRedisCluster(cr *redisv1beta2.RedisCluster) {
     //--cluster-from --cluster-to --cluster-slots --cluster-yes
 
     // Remove Node
-    removeNodeID := getRedisNodeID(ctx, cr, removePOD)
+    removeNodeID := getRedisNodeID(ctx, client, logger, cr, removePOD)
     cmd = append(cmd, "--cluster-from")
     cmd = append(cmd, removeNodeID)
 
     // Transfer Node
-    transferNodeID := getRedisNodeID(ctx, cr, transferPOD)
+    transferNodeID := getRedisNodeID(ctx, client, logger, cr, transferPOD)
     cmd = append(cmd, "--cluster-to")
     cmd = append(cmd, transferNodeID)
 
     // Cluster Slots
-    slot := getRedisClusterSlots(ctx, cr, removeNodeID)
+    slot := getRedisClusterSlots(ctx, client, logger, cr, removeNodeID)
     cmd = append(cmd, "--cluster-slots")
     cmd = append(cmd, slot)
 
@@ -73,11 +74,10 @@ func ReshardRedisCluster(cr *redisv1beta2.RedisCluster) {
     executeCommand(cr, cmd, cr.ObjectMeta.Name+"-leader-0")
 }
 
-func getRedisClusterSlots(ctx context.Context, cr *redisv1beta2.RedisCluster, nodeID string) string {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func getRedisClusterSlots(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeID string) string {
     totalSlots := 0
 
-    redisClient := configureRedisClient(cr, cr.ObjectMeta.Name+"-leader-0")
+    redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
     defer redisClient.Close()
     redisClusterInfo, err := redisClient.ClusterNodes(ctx).Result()
     if err != nil {
@@ -114,20 +114,18 @@ func getRedisClusterSlots(ctx context.Context, cr *redisv1beta2.RedisCluster, no
 }
 
 // getRedisNodeID would return nodeID of a redis node by passing pod
-func getRedisNodeID(ctx context.Context, cr *redisv1beta2.RedisCluster, pod RedisDetails) string {
-    var client *redis.Client
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-    client = configureRedisClient(cr, pod.PodName)
-    defer client.Close()
+func getRedisNodeID(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, pod RedisDetails) string {
+    redisClient := configureRedisClient(client, logger, cr, pod.PodName)
+    defer redisClient.Close()
 
-    pong, err := client.Ping(ctx).Result()
+    pong, err := redisClient.Ping(ctx).Result()
     if err != nil || pong != "PONG" {
         logger.Error(err, "Failed to ping Redis server")
         return ""
     }
 
     cmd := redis.NewStringCmd(ctx, "cluster", "myid")
-    err = client.Process(ctx, cmd)
+    err = redisClient.Process(ctx, cmd)
     if err != nil {
         logger.Error(err, "Redis command failed with this error")
         return ""
@@ -143,8 +141,7 @@ func getRedisNodeID(ctx context.Context, cr *redisv1beta2.RedisCluster, pod Redi
 }
 
 // Rebalance the Redis Cluster using the Empty Master Nodes
-func RebalanceRedisClusterEmptyMasters(cr *redisv1beta2.RedisCluster) {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func RebalanceRedisClusterEmptyMasters(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
     // cmd = redis-cli --cluster rebalance <redis>:<port> --cluster-use-empty-masters -a <pass>
     var cmd []string
     pod := RedisDetails{
@@ -162,7 +159,7 @@ func RebalanceRedisClusterEmptyMasters(cr *redisv1beta2.RedisCluster) {
     cmd = append(cmd, "--cluster-use-empty-masters")
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
@@ -176,29 +173,27 @@ func RebalanceRedisClusterEmptyMasters(cr *redisv1beta2.RedisCluster) {
     executeCommand(cr, cmd, cr.ObjectMeta.Name+"-leader-1")
 }
 
-func CheckIfEmptyMasters(ctx context.Context, cr *redisv1beta2.RedisCluster) {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-    totalRedisLeaderNodes := CheckRedisNodeCount(ctx, cr, "leader")
+func CheckIfEmptyMasters(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
+    totalRedisLeaderNodes := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
 
     for i := 0; i < int(totalRedisLeaderNodes); i++ {
         pod := RedisDetails{
             PodName:   cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(i),
             Namespace: cr.Namespace,
         }
-        podNodeID := getRedisNodeID(ctx, cr, pod)
-        podSlots := getRedisClusterSlots(ctx, cr, podNodeID)
+        podNodeID := getRedisNodeID(ctx, client, logger, cr, pod)
+        podSlots := getRedisClusterSlots(ctx, client, logger, cr, podNodeID)
 
         if podSlots == "0" || podSlots == "" {
             logger.V(1).Info("Found Empty Redis Leader Node", "pod", pod)
-            RebalanceRedisClusterEmptyMasters(cr)
+            RebalanceRedisClusterEmptyMasters(client, logger, cr)
             break
         }
     }
 }
 
 // Rebalance Redis Cluster Would Rebalance the Redis Cluster without using the empty masters
-func RebalanceRedisCluster(cr *redisv1beta2.RedisCluster) {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func RebalanceRedisCluster(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
     // cmd = redis-cli --cluster rebalance <redis>:<port> -a <pass>
     var cmd []string
     pod := RedisDetails{
@@ -214,7 +209,7 @@ func RebalanceRedisCluster(cr *redisv1beta2.RedisCluster) {
     }
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
@@ -229,10 +224,9 @@ func RebalanceRedisCluster(cr *redisv1beta2.RedisCluster) {
 }
 
 // Add redis cluster node would add a node to the existing redis cluster using redis-cli
-func AddRedisNodeToCluster(ctx context.Context, cr *redisv1beta2.RedisCluster) {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func AddRedisNodeToCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
     var cmd []string
-    activeRedisNode := CheckRedisNodeCount(ctx, cr, "leader")
+    activeRedisNode := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
 
     newPod := RedisDetails{
         PodName:   cr.ObjectMeta.Name + "-leader-" + strconv.Itoa(int(activeRedisNode)),
@@ -254,7 +248,7 @@ func AddRedisNodeToCluster(ctx context.Context, cr *redisv1beta2.RedisCluster) {
     }
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
@@ -269,10 +263,8 @@ func AddRedisNodeToCluster(ctx context.Context, cr *redisv1beta2.RedisCluster) {
 }
 
 // getAttachedFollowerNodeIDs would return a slice of redis followers attached to a redis leader
-func getAttachedFollowerNodeIDs(ctx context.Context, cr *redisv1beta2.RedisCluster, masterNodeID string) []string {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-
-    redisClient := configureRedisClient(cr, cr.ObjectMeta.Name+"-leader-0")
+func getAttachedFollowerNodeIDs(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, masterNodeID string) []string {
+    redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
     defer redisClient.Close()
     redisClusterInfo, err := redisClient.ClusterNodes(ctx).Result()
     if err != nil {
@@ -299,10 +291,9 @@ func getAttachedFollowerNodeIDs(ctx context.Context, cr *redisv1beta2.RedisClust
 }
 
 // Remove redis follower node would remove all follower nodes of last leader node using redis-cli
-func RemoveRedisFollowerNodesFromCluster(ctx context.Context, cr *redisv1beta2.RedisCluster) {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func RemoveRedisFollowerNodesFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
     var cmd []string
-    currentRedisCount := CheckRedisNodeCount(ctx, cr, "leader")
+    currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
 
     existingPod := RedisDetails{
         PodName:   cr.ObjectMeta.Name + "-leader-0",
@@ -316,7 +307,7 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, cr *redisv1beta2.R
     cmd = []string{"redis-cli"}
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
@@ -325,8 +316,8 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, cr *redisv1beta2.R
     }
     cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...)
 
-    lastLeaderPodNodeID := getRedisNodeID(ctx, cr, lastLeaderPod)
-    followerNodeIDs := getAttachedFollowerNodeIDs(ctx, cr, lastLeaderPodNodeID)
+    lastLeaderPodNodeID := getRedisNodeID(ctx, client, logger, cr, lastLeaderPod)
+    followerNodeIDs := getAttachedFollowerNodeIDs(ctx, client, logger, cr, lastLeaderPodNodeID)
 
     cmd = append(cmd, "--cluster", "del-node")
     if *cr.Spec.ClusterVersion == "v7" {
@@ -345,10 +336,9 @@ func RemoveRedisFollowerNodesFromCluster(ctx context.Context, cr *redisv1beta2.R
 }
 
 // Remove redis cluster node would remove the last node from the existing redis cluster using redis-cli
-func RemoveRedisNodeFromCluster(ctx context.Context, cr *redisv1beta2.RedisCluster) {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func RemoveRedisNodeFromCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
     var cmd []string
-    currentRedisCount := CheckRedisNodeCount(ctx, cr, "leader")
+    currentRedisCount := CheckRedisNodeCount(ctx, client, logger, cr, "leader")
 
     existingPod := RedisDetails{
         PodName:   cr.ObjectMeta.Name + "-leader-0",
@@ -367,11 +357,11 @@ func RemoveRedisNodeFromCluster(ctx context.Context, cr *redisv1beta2.RedisClust
         cmd = append(cmd, getRedisServerIP(existingPod)+":6379")
     }
 
-    removePodNodeID := getRedisNodeID(ctx, cr, removePod)
+    removePodNodeID := getRedisNodeID(ctx, client, logger, cr, removePod)
     cmd = append(cmd, removePodNodeID)
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
@@ -382,18 +372,17 @@ func RemoveRedisNodeFromCluster(ctx context.Context, cr *redisv1beta2.RedisClust
     cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...)
 
     logger.V(1).Info("Redis cluster leader remove command is", "Command", cmd)
-    if getRedisClusterSlots(ctx, cr, removePodNodeID) != "0" {
+    if getRedisClusterSlots(ctx, client, logger, cr, removePodNodeID) != "0" {
         logger.V(1).Info("Skipping execution remove leader not empty", "cmd", cmd)
     }
     executeCommand(cr, cmd, cr.ObjectMeta.Name+"-leader-0")
 }
 
 // verifyLeaderPod returns true if the pod is leader/master
-func VerifyLeaderPod(ctx context.Context, cr *redisv1beta2.RedisCluster) bool {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-    podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, cr, "leader"))-1)
+func VerifyLeaderPod(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool {
+    podName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1)
 
-    redisClient := configureRedisClient(cr, podName)
+    redisClient := configureRedisClient(client, logger, cr, podName)
     defer redisClient.Close()
     info, err := redisClient.Info(ctx, "replication").Result()
     if err != nil {
@@ -412,11 +401,9 @@ func VerifyLeaderPod(ctx context.Context, cr *redisv1beta2.RedisCluster) bool {
     return false
 }
 
-func ClusterFailover(ctx context.Context, cr *redisv1beta2.RedisCluster) {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-    slavePodName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, cr, "leader"))-1)
+func ClusterFailover(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
+    slavePodName := cr.Name + "-leader-" + strconv.Itoa(int(CheckRedisNodeCount(ctx, client, logger, cr, "leader"))-1)
     // cmd = redis-cli cluster failover -a <pass>
-
     var cmd []string
     pod := RedisDetails{
         PodName:   slavePodName,
@@ -432,7 +419,7 @@ func ClusterFailover(ctx context.Context, cr *redisv1beta2.RedisCluster) {
     }
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
diff --git a/k8sutils/finalizers_test.go b/k8sutils/finalizers_test.go
index d8175ced9..ad67c0247 100644
--- a/k8sutils/finalizers_test.go
+++ b/k8sutils/finalizers_test.go
@@ -5,6 +5,8 @@ import (
     "fmt"
     "testing"
 
+    // "time"
+
     "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
     "github.com/go-logr/logr/testr"
     "github.com/stretchr/testify/assert"
@@ -12,8 +14,12 @@ import (
     k8serrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
+
+    // "k8s.io/apimachinery/pkg/types"
+    // utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     k8sClientFake "k8s.io/client-go/kubernetes/fake"
     "k8s.io/utils/pointer"
+    // ctrlClientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
 // func TestHandleRedisFinalizer(t *testing.T) {
diff --git a/k8sutils/redis-sentinel.go b/k8sutils/redis-sentinel.go
index 23d9ce509..42e96b38b 100644
--- a/k8sutils/redis-sentinel.go
+++ b/k8sutils/redis-sentinel.go
@@ -7,9 +7,11 @@ import (
 
     commonapi "github.com/OT-CONTAINER-KIT/redis-operator/api"
     redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
+    "github.com/go-logr/logr"
     corev1 "k8s.io/api/core/v1"
     v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/client-go/kubernetes"
 )
 
 // RedisSentinelSTS is an interface to call Redis Statefulset function
@@ -32,7 +34,7 @@ type RedisReplicationObject struct {
 }
 
 // Redis Sentinel Create the Redis Sentinel Setup
-func CreateRedisSentinel(ctx context.Context, cr *redisv1beta2.RedisSentinel) error {
+func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel) error {
     prop := RedisSentinelSTS{
         RedisStateFulType: "sentinel",
         Affinity:          cr.Spec.Affinity,
@@ -45,7 +47,7 @@ func CreateRedisSentinel(ctx context.Context, cr *redisv1beta2.RedisSentinel) er
         prop.ExternalConfig = cr.Spec.RedisSentinelConfig.AdditionalSentinelConfig
     }
 
-    return prop.CreateRedisSentinelSetup(ctx, cr)
+    return prop.CreateRedisSentinelSetup(ctx, client, logger, cr)
 }
 
@@ -59,10 +61,8 @@ func CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel) error {
 }
 
 // Create Redis Sentinel Cluster Setup
-func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cr *redisv1beta2.RedisSentinel) error {
-
+func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel) error {
     stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType
-    logger := statefulSetLogger(cr.Namespace, stateFulName)
     labels := getRedisLabels(stateFulName, sentinel, service.RedisStateFulType, cr.ObjectMeta.Labels)
     annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
     objectMetaInfo := generateObjectMetaInformation(stateFulName, cr.Namespace, labels, annotations)
@@ -72,7 +72,7 @@ func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cr
         generateRedisSentinelParams(cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity),
         redisSentinelAsOwner(cr),
         generateRedisSentinelInitContainerParams(cr),
-        generateRedisSentinelContainerParams(ctx, cr, service.ReadinessProbe, service.LivenessProbe),
+        generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe),
         cr.Spec.Sidecars,
     )
 
@@ -141,8 +141,7 @@ func generateRedisSentinelInitContainerParams(cr *redisv1beta2.RedisSentinel) in
 }
 
 // Create Redis Sentinel Statefulset Container Params
-func generateRedisSentinelContainerParams(ctx context.Context, cr *redisv1beta2.RedisSentinel, readinessProbeDef *commonapi.Probe, livenessProbeDef *commonapi.Probe) containerParameters {
-
+func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, readinessProbeDef *commonapi.Probe, livenessProbeDef *commonapi.Probe) containerParameters {
     trueProperty := true
     falseProperty := false
     containerProp := containerParameters{
@@ -151,7 +150,7 @@ func generateRedisSentinelContainerParams(ctx context.Context, cr *redisv1beta2.
         ImagePullPolicy: cr.Spec.KubernetesConfig.ImagePullPolicy,
         Resources:       cr.Spec.KubernetesConfig.Resources,
         SecurityContext: cr.Spec.SecurityContext,
-        AdditionalEnvVariable: getSentinelEnvVariable(ctx, cr),
+        AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, logger, cr),
     }
     if cr.Spec.EnvVars != nil {
         containerProp.EnvVars = cr.Spec.EnvVars
@@ -240,8 +239,7 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2.
 }
 
-func getSentinelEnvVariable(ctx context.Context, cr *redisv1beta2.RedisSentinel) *[]corev1.EnvVar {
-
+func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel) *[]corev1.EnvVar {
     if cr.Spec.RedisSentinelConfig == nil {
         return &[]corev1.EnvVar{}
     }
@@ -253,7 +251,7 @@ func getSentinelEnvVariable(ctx context.Context, cr *redisv1beta2.RedisSentinel)
         },
         {
             Name:  "IP",
-            Value: getRedisReplicationMasterIP(ctx, cr),
+            Value: getRedisReplicationMasterIP(ctx, client, logger, cr),
         },
         {
             Name:  "PORT",
@@ -281,8 +279,7 @@ func getSentinelEnvVariable(ctx context.Context, cr *redisv1beta2.RedisSentinel)
 }
 
-func getRedisReplicationMasterIP(ctx context.Context, cr *redisv1beta2.RedisSentinel) string {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel) string {
     dClient, err := GenerateK8sDynamicClient(GenerateK8sConfig)
     if err != nil {
         logger.Error(err, "Failed to generate dynamic client")
@@ -322,7 +319,7 @@ func getRedisReplicationMasterIP(ctx context.Context, cr *redisv1beta2.RedisSent
         return ""
     }
 
-    masterPods := GetRedisNodesByRole(ctx, &replicationInstance, "master")
+    masterPods := GetRedisNodesByRole(ctx, client, logger, &replicationInstance, "master")
 
     if len(masterPods) == 0 {
         realMasterPod = ""
@@ -331,7 +328,7 @@ func getRedisReplicationMasterIP(ctx context.Context, cr *redisv1beta2.RedisSent
     } else if len(masterPods) == 1 {
         realMasterPod = masterPods[0]
     } else {
-        realMasterPod = checkAttachedSlave(ctx, &replicationInstance, masterPods)
+        realMasterPod = checkAttachedSlave(ctx, client, logger, &replicationInstance, masterPods)
     }
 
     realMasterInfo := RedisDetails{
diff --git a/k8sutils/redis-sentinel_test.go b/k8sutils/redis-sentinel_test.go
index 883bc78d5..9aedb48b4 100644
--- a/k8sutils/redis-sentinel_test.go
+++ b/k8sutils/redis-sentinel_test.go
@@ -8,6 +8,7 @@ import (
 
     common "github.com/OT-CONTAINER-KIT/redis-operator/api"
     redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
+    "github.com/go-logr/logr"
     "github.com/stretchr/testify/assert"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
@@ -182,7 +183,7 @@ func Test_generateRedisSentinelContainerParams(t *testing.T) {
         t.Fatalf("Failed to unmarshal file %s: %v", path, err)
     }
 
-    actual := generateRedisSentinelContainerParams(context.TODO(), input, nil, nil)
+    actual := generateRedisSentinelContainerParams(context.TODO(), nil, logr.Logger{}, input, nil, nil)
     assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual)
 }
 
diff --git a/k8sutils/redis.go b/k8sutils/redis.go
index dda5aabef..a34a7934a 100644
--- a/k8sutils/redis.go
+++ b/k8sutils/redis.go
@@ -14,6 +14,7 @@ import (
     redis "github.com/redis/go-redis/v9"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
     "k8s.io/client-go/tools/remotecommand"
 )
@@ -92,13 +93,12 @@ func CreateMultipleLeaderRedisCommand(cr *redisv1beta2.RedisCluster) []string {
 }
 
 // ExecuteRedisClusterCommand will execute redis cluster creation command
-func ExecuteRedisClusterCommand(ctx context.Context, cr *redisv1beta2.RedisCluster) {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func ExecuteRedisClusterCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
     var cmd []string
     replicas := cr.Spec.GetReplicaCounts("leader")
     switch int(replicas) {
     case 1:
-        err := executeFailoverCommand(ctx, cr, "leader")
+        err := executeFailoverCommand(ctx, client, logger, cr, "leader")
         if err != nil {
             logger.Error(err, "error executing failover command")
         }
@@ -108,7 +108,7 @@ func ExecuteRedisClusterCommand(ctx context.Context, cr *redisv1beta2.RedisClust
     }
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
@@ -133,8 +133,7 @@ func getRedisTLSArgs(tlsConfig *redisv1beta2.TLSConfig, clientHost string) []str
 }
 
 // createRedisReplicationCommand will create redis replication creation command
-func createRedisReplicationCommand(cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, leaderPod RedisDetails, followerPod RedisDetails) []string {
     cmd := []string{"redis-cli", "--cluster", "add-node"}
     if *cr.Spec.ClusterVersion == "v7" {
         cmd = append(cmd, getRedisHostname(followerPod, cr, "follower")+":6379")
@@ -146,7 +145,7 @@ func createRedisReplicationCommand(cr *redisv1beta2.RedisCluster, leaderPod Redi
     cmd = append(cmd, "--cluster-slave")
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
@@ -159,14 +158,13 @@ func createRedisReplicationCommand(cr *redisv1beta2.RedisCluster, leaderPod Redi
 }
 
 // ExecuteRedisReplicationCommand will execute the replication command
-func ExecuteRedisReplicationCommand(ctx context.Context, cr *redisv1beta2.RedisCluster) {
+func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) {
     var podIP string
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
     followerCounts := cr.Spec.GetReplicaCounts("follower")
     leaderCounts := cr.Spec.GetReplicaCounts("leader")
     followerPerLeader := followerCounts / leaderCounts
 
-    nodes := checkRedisCluster(ctx, cr)
+    nodes := checkRedisCluster(ctx, client, logger, cr)
     for followerIdx := 0; followerIdx <= int(followerCounts)-1; {
         for i := 0; i < int(followerPerLeader) && followerIdx <= int(followerCounts)-1; i++ {
             followerPod := RedisDetails{
@@ -180,8 +178,8 @@ func ExecuteRedisReplicationCommand(ctx context.Context, cr *redisv1beta2.RedisC
             podIP = getRedisServerIP(followerPod)
             if !checkRedisNodePresence(cr, nodes, podIP) {
                 logger.V(1).Info("Adding node to cluster.", "Node.IP", podIP, "Follower.Pod", followerPod)
-                cmd := createRedisReplicationCommand(cr, leaderPod, followerPod)
-                redisClient := configureRedisClient(cr, followerPod.PodName)
+                cmd := createRedisReplicationCommand(client, logger, cr, leaderPod, followerPod)
+                redisClient := configureRedisClient(client, logger, cr, followerPod.PodName)
                 pong, err := redisClient.Ping(ctx).Result()
                 redisClient.Close()
                 if err != nil {
@@ -203,13 +201,11 @@ func ExecuteRedisReplicationCommand(ctx context.Context, cr *redisv1beta2.RedisC
 }
 
 // checkRedisCluster will check the redis cluster has sufficient nodes or not
-func checkRedisCluster(ctx context.Context, cr *redisv1beta2.RedisCluster) [][]string {
-    var client *redis.Client
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-    client = configureRedisClient(cr, cr.ObjectMeta.Name+"-leader-0")
-    defer client.Close()
+func checkRedisCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) [][]string {
+    redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+    defer redisClient.Close()
     cmd := redis.NewStringCmd(ctx, "cluster", "nodes")
-    err := client.Process(ctx, cmd)
+    err := redisClient.Process(ctx, cmd)
     if err != nil {
         logger.Error(err, "Redis command failed with this error")
     }
@@ -231,14 +227,13 @@ func checkRedisCluster(ctx context.Context, cr *redisv1beta2.RedisCluster) [][]s
 }
 
 // ExecuteFailoverOperation will execute redis failover operations
-func ExecuteFailoverOperation(ctx context.Context, cr *redisv1beta2.RedisCluster) error {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-    err := executeFailoverCommand(ctx, cr, "leader")
+func ExecuteFailoverOperation(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) error {
+    err := executeFailoverCommand(ctx, client, logger, cr, "leader")
     if err != nil {
         logger.Error(err, "Redis command failed for leader nodes")
         return err
     }
-    err = executeFailoverCommand(ctx, cr, "follower")
+    err = executeFailoverCommand(ctx, client, logger, cr, "follower")
     if err != nil {
         logger.Error(err, "Redis command failed for follower nodes")
         return err
@@ -247,13 +242,12 @@ func ExecuteFailoverOperation(ctx context.Context, cr *redisv1beta2.RedisCluster
 }
 
 // executeFailoverCommand will execute failover command
-func executeFailoverCommand(ctx context.Context, cr *redisv1beta2.RedisCluster, role string) error {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, role string) error {
     replicas := cr.Spec.GetReplicaCounts(role)
     podName := fmt.Sprintf("%s-%s-", cr.ObjectMeta.Name, role)
     for podCount := 0; podCount <= int(replicas)-1; podCount++ {
         logger.V(1).Info("Executing redis failover operations", "Redis Node", podName+strconv.Itoa(podCount))
-        client := configureRedisClient(cr, podName+strconv.Itoa(podCount))
+        client := configureRedisClient(client, logger, cr, podName+strconv.Itoa(podCount))
         defer client.Close()
         cmd := redis.NewStringCmd(ctx, "cluster", "reset")
         err := client.Process(ctx, cmd)
@@ -282,10 +276,9 @@ func executeFailoverCommand(ctx context.Context, cr *redisv1beta2.RedisCluster,
 }
 
 // CheckRedisNodeCount will check the count of redis nodes
-func CheckRedisNodeCount(ctx context.Context, cr *redisv1beta2.RedisCluster, nodeType string) int32 {
+func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeType string) int32 {
     var redisNodeType string
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-    clusterNodes := checkRedisCluster(ctx, cr)
+    clusterNodes := checkRedisCluster(ctx, client, logger, cr)
     count := len(clusterNodes)
 
     switch nodeType {
@@ -311,11 +304,9 @@ func CheckRedisNodeCount(ctx context.Context, cr *redisv1beta2.RedisCluster, nod
 }
 
 // CheckRedisClusterState will check the redis cluster state
-func CheckRedisClusterState(ctx context.Context, cr *redisv1beta2.RedisCluster) int {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-    clusterNodes := checkRedisCluster(ctx, cr)
+func CheckRedisClusterState(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) int {
+    clusterNodes := checkRedisCluster(ctx, client, logger, cr)
     count := 0
-
     for _, node := range clusterNodes {
         if strings.Contains(node[2], "fail") || strings.Contains(node[7], "disconnected") {
             count++
@@ -326,34 +317,33 @@ func CheckRedisClusterState(ctx context.Context, cr *redisv1beta2.RedisCluster) 
 }
 
 // configureRedisClient will configure the Redis Client
-func configureRedisClient(cr *redisv1beta2.RedisCluster, podName string) *redis.Client {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func configureRedisClient(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, podName string) *redis.Client {
     redisInfo := RedisDetails{
         PodName:   podName,
         Namespace: cr.Namespace,
     }
-    var client *redis.Client
+    var redisClient *redis.Client
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
-        client = redis.NewClient(&redis.Options{
+        redisClient = redis.NewClient(&redis.Options{
             Addr:      getRedisServerIP(redisInfo) + ":6379",
             Password:  pass,
             DB:        0,
-            TLSConfig: getRedisTLSConfig(cr, redisInfo),
+            TLSConfig: getRedisTLSConfig(client, logger, cr, redisInfo),
         })
     } else {
-        client = redis.NewClient(&redis.Options{
+        redisClient = redis.NewClient(&redis.Options{
            Addr:      getRedisServerIP(redisInfo) + ":6379",
             Password:  "",
             DB:        0,
-            TLSConfig: getRedisTLSConfig(cr, redisInfo),
+            TLSConfig: getRedisTLSConfig(client, logger, cr, redisInfo),
         })
     }
-    return client
+    return redisClient
 }
 
 // executeCommand will execute the commands in pod
@@ -448,39 +438,37 @@ func generateRedisManagerLogger(namespace, name string) logr.Logger {
 }
 
 // configureRedisClient will configure the Redis Client
-func configureRedisReplicationClient(cr *redisv1beta2.RedisReplication, podName string) *redis.Client {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func configureRedisReplicationClient(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, podName string) *redis.Client {
     redisInfo := RedisDetails{
         PodName:   podName,
         Namespace: cr.Namespace,
     }
-    var client *redis.Client
+    var redisClient *redis.Client
 
     if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
-        pass, err := getRedisPassword(cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+        pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
         if err != nil {
             logger.Error(err, "Error in getting redis password")
         }
-        client = redis.NewClient(&redis.Options{
+        redisClient = redis.NewClient(&redis.Options{
             Addr:      getRedisServerIP(redisInfo) + ":6379",
             Password:  pass,
             DB:        0,
-            TLSConfig: getRedisReplicationTLSConfig(cr, redisInfo),
+            TLSConfig: getRedisReplicationTLSConfig(client, logger, cr, redisInfo),
         })
     } else {
-        client = redis.NewClient(&redis.Options{
+        redisClient = redis.NewClient(&redis.Options{
             Addr:      getRedisServerIP(redisInfo) + ":6379",
             Password:  "",
             DB:        0,
-            TLSConfig: getRedisReplicationTLSConfig(cr, redisInfo),
+            TLSConfig: getRedisReplicationTLSConfig(client, logger, cr, redisInfo),
         })
     }
-    return client
+    return redisClient
 }
 
 // Get Redis nodes by its role i.e. master, slave and sentinel
-func GetRedisNodesByRole(ctx context.Context, cr *redisv1beta2.RedisReplication, redisRole string) []string {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
+func GetRedisNodesByRole(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, redisRole string) []string {
     statefulset, err := GetStatefulSet(cr.Namespace, cr.Name)
     if err != nil {
         logger.Error(err, "Failed to Get the Statefulset of the", "custom resource", cr.Name, "in namespace", cr.Namespace)
@@ -492,7 +480,7 @@ func GetRedisNodesByRole(ctx context.Context, cr *redisv1beta2.RedisReplication,
 
     for i := 0; i < int(replicas); i++ {
         podName := statefulset.Name + "-" + strconv.Itoa(i)
-        podRole := checkRedisServerRole(ctx, cr, podName)
+        podRole := checkRedisServerRole(ctx, client, logger, cr, podName)
         if podRole == redisRole {
             pods = append(pods, podName)
         }
@@ -502,10 +490,8 @@ func GetRedisNodesByRole(ctx context.Context, cr *redisv1beta2.RedisReplication,
 }
 
 // Check the Redis Server Role i.e. master, slave and sentinel
-func checkRedisServerRole(ctx context.Context, cr *redisv1beta2.RedisReplication, podName string) string {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-
-    redisClient := configureRedisReplicationClient(cr, podName)
+func checkRedisServerRole(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, podName string) string {
+    redisClient := configureRedisReplicationClient(client, logger, cr, podName)
     defer redisClient.Close()
     info, err := redisClient.Info(ctx, "replication").Result()
     if err != nil {
@@ -525,13 +511,10 @@ func checkRedisServerRole(ctx context.Context, cr *redisv1beta2.RedisReplication
 }
 
 // checkAttachedSlave would return redis pod name which has slave
-func checkAttachedSlave(ctx context.Context, cr *redisv1beta2.RedisReplication, masterPods []string) string {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-
+func checkAttachedSlave(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string) string {
     for _, podName := range masterPods {
-
         connected_slaves := ""
-        redisClient := configureRedisReplicationClient(cr, podName)
+        redisClient := configureRedisReplicationClient(client, logger, cr, podName)
         defer redisClient.Close()
         info, err := redisClient.Info(ctx, "replication").Result()
         if err != nil {
@@ -558,11 +541,9 @@ func checkAttachedSlave(ctx context.Context, cr *redisv1beta2.RedisReplication, 
 }
 
-func CreateMasterSlaveReplication(ctx context.Context, cr *redisv1beta2.RedisReplication, masterPods []string, slavePods []string) error {
-    logger := generateRedisManagerLogger(cr.Namespace, cr.ObjectMeta.Name)
-
+func CreateMasterSlaveReplication(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, masterPods []string, slavePods []string) error {
     var realMasterPod string
-    realMasterPod = checkAttachedSlave(ctx, cr, masterPods)
+    realMasterPod = checkAttachedSlave(ctx, client, logger, cr, masterPods)
 
     if len(slavePods) < 1 {
         realMasterPod = masterPods[0]
@@ -580,7 +561,7 @@ func CreateMasterSlaveReplication(ctx context.Context, cr *redisv1beta2.RedisRep
 
     for i := 0; i < len(masterPods); i++ {
         if masterPods[i] != realMasterPod {
-            redisClient := configureRedisReplicationClient(cr, masterPods[i])
+            redisClient := configureRedisReplicationClient(client, logger, cr, masterPods[i])
             defer redisClient.Close()
             logger.V(1).Info("Setting the", "pod", masterPods[i], "to slave of", realMasterPod)
             err := redisClient.SlaveOf(ctx, realMasterPodIP, "6379").Err()
diff --git a/k8sutils/secrets.go b/k8sutils/secrets.go
index 4636b54b0..40f3853fe 100644
--- a/k8sutils/secrets.go
+++ b/k8sutils/secrets.go
@@ -4,24 +4,20 @@ import (
     "context"
     "crypto/tls"
     "crypto/x509"
+    "errors"
     "strings"
 
     redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
     "github.com/go-logr/logr"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/kubernetes"
     logf "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
 var log = logf.Log.WithName("controller_redis")
 
-// getRedisPassword method will return the redis password
-func getRedisPassword(namespace, name, secretKey string) (string, error) {
-    logger := secretLogger(namespace, name)
-    client, err := GenerateK8sClient(GenerateK8sConfig)
-    if err != nil {
-        logger.Error(err, "Could not generate kubernetes client")
-        return "", err
-    }
+// getRedisPassword method will return the redis password from the secret
+func getRedisPassword(client kubernetes.Interface, logger logr.Logger, namespace, name, secretKey string) (string, error) {
     secretName, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
     if err != nil {
         logger.Error(err, "Failed in getting existing secret for redis")
@@ -35,110 +31,89 @@ func getRedisPassword(namespace, name, secretKey string) (string, error) {
     return "", nil
 }
 
-func secretLogger(namespace string, name string) logr.Logger {
-    reqLogger := log.WithValues("Request.Secret.Namespace", namespace, "Request.Secret.Name", name)
-    return reqLogger
-}
-
-func getRedisTLSConfig(cr *redisv1beta2.RedisCluster, redisInfo RedisDetails) *tls.Config {
-    client, err := GenerateK8sClient(GenerateK8sConfig)
-    if err != nil {
-        return nil
-    }
+func getRedisTLSConfig(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, redisInfo RedisDetails) *tls.Config {
     if cr.Spec.TLS != nil {
-        reqLogger := log.WithValues("Request.Namespace", cr.Namespace, "Request.Name", cr.ObjectMeta.Name)
-        secretName, err := client.CoreV1().Secrets(cr.Namespace).Get(context.TODO(), cr.Spec.TLS.Secret.SecretName, metav1.GetOptions{})
+        secret, err := client.CoreV1().Secrets(cr.Namespace).Get(context.TODO(), cr.Spec.TLS.Secret.SecretName, metav1.GetOptions{})
         if err != nil {
-            reqLogger.Error(err, "Failed in getting TLS secret for redis")
+            logger.Error(err, "Failed in getting TLS secret for redis cluster")
+            logger.V(1).Error(err, "Failed in getting TLS secret for redis cluster", "secretName", cr.Spec.TLS.Secret.SecretName, "namespace", cr.Namespace, "redisClusterName", cr.Name)
+            return nil
         }
 
-        var (
-            tlsClientCert         []byte
-            tlsClientKey          []byte
-            tlsCaCertificate      []byte
-            tlsCaCertificates     *x509.CertPool
-            tlsClientCertificates []tls.Certificate
-        )
-        for key, value := range secretName.Data {
-            if key == cr.Spec.TLS.CaKeyFile || key == "ca.crt" {
-                tlsCaCertificate = value
-            } else if key == cr.Spec.TLS.CertKeyFile || key == "tls.crt" {
-                tlsClientCert = value
-            } else if key == cr.Spec.TLS.KeyFile || key == "tls.key" {
-                tlsClientKey = value
-            }
+        tlsClientCert, certExists := secret.Data["tls.crt"]
+        tlsClientKey, keyExists := secret.Data["tls.key"]
+        tlsCaCertificate, caExists := secret.Data["ca.crt"]
+
+        if !certExists || !keyExists || !caExists {
+            logger.Error(errors.New("required TLS keys are missing in the secret"), "Missing TLS keys in the secret")
+            return nil
         }
 
         cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey)
         if err != nil {
-            reqLogger.Error(err, "Couldn't load TLS client key pair")
+            logger.Error(err, "Couldn't load TLS client key pair")
+            logger.V(1).Error(err, "Couldn't load TLS client key pair", "secretName", cr.Spec.TLS.Secret.SecretName, "namespace", cr.Namespace, "redisClusterName", cr.Name)
+            return nil
         }
-        tlsClientCertificates = append(tlsClientCertificates, cert)
 
-        tlsCaCertificates = x509.NewCertPool()
+        tlsCaCertificates := x509.NewCertPool()
         ok := tlsCaCertificates.AppendCertsFromPEM(tlsCaCertificate)
         if !ok {
-            reqLogger.V(1).Info("Failed to load CA Certificates from Secret")
+            logger.Error(errors.New("failed to load CA Certificates from secret"), "Invalid CA Certificates")
+            logger.V(1).Error(err, "Invalid CA Certificates", "secretName", cr.Spec.TLS.Secret.SecretName, "namespace", cr.Namespace, "redisClusterName", cr.Name)
+            return nil
         }
 
         return &tls.Config{
-            Certificates: tlsClientCertificates,
+            Certificates: []tls.Certificate{cert},
             ServerName:   redisInfo.PodName,
             RootCAs:      tlsCaCertificates,
-            MinVersion:   2,
-            ClientAuth:   0,
+            MinVersion:   tls.VersionTLS12,
+            ClientAuth:   tls.NoClientCert,
         }
     }
     return nil
 }
 
-func getRedisReplicationTLSConfig(cr *redisv1beta2.RedisReplication, redisInfo RedisDetails) *tls.Config {
-    client, err := GenerateK8sClient(GenerateK8sConfig)
-    if err != nil {
-        return nil
-    }
+func getRedisReplicationTLSConfig(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication, redisInfo RedisDetails) *tls.Config {
     if cr.Spec.TLS != nil {
-        reqLogger := log.WithValues("Request.Namespace", cr.Namespace, "Request.Name", cr.ObjectMeta.Name)
-        secretName, err := client.CoreV1().Secrets(cr.Namespace).Get(context.TODO(), cr.Spec.TLS.Secret.SecretName, metav1.GetOptions{})
+        secret, err := client.CoreV1().Secrets(cr.Namespace).Get(context.TODO(), cr.Spec.TLS.Secret.SecretName, metav1.GetOptions{})
         if err != nil {
-            reqLogger.Error(err, "Failed in getting TLS secret for redis")
+            logger.Error(err, "Failed in getting TLS secret for redis replication")
+            logger.V(1).Error(err, "Failed in getting TLS secret for redis replication", "secretName", cr.Spec.TLS.Secret.SecretName, "namespace", cr.Namespace, "redisReplicationName", cr.Name)
+            return nil
         }
 
-        var (
-            tlsClientCert         []byte
-            tlsClientKey          []byte
-            tlsCaCertificate      []byte
-            tlsCaCertificates     *x509.CertPool
-            tlsClientCertificates []tls.Certificate
-        )
-        for key, value := range secretName.Data {
-            if key == cr.Spec.TLS.CaKeyFile || key == "ca.crt" {
-                tlsCaCertificate = value
-            } else if key == cr.Spec.TLS.CertKeyFile || key == "tls.crt" {
-                tlsClientCert = value
-            } else if key == cr.Spec.TLS.KeyFile || key == "tls.key" {
-                tlsClientKey = value
-            }
+        tlsClientCert, certExists := secret.Data["tls.crt"]
+        tlsClientKey, keyExists := secret.Data["tls.key"]
+        tlsCaCertificate, caExists := secret.Data["ca.crt"]
+
+        if !certExists || !keyExists || !caExists {
+            logger.Error(errors.New("required TLS keys are missing in the secret"), "Missing TLS keys in the secret")
+            return nil
         }
 
         cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey)
         if err != nil {
-            reqLogger.Error(err, "Couldn't load TLS client key pair")
+            logger.Error(err, "Couldn't load TLS client key pair")
+            logger.V(1).Error(err, "Couldn't load TLS client key pair", "secretName", cr.Spec.TLS.Secret.SecretName, "namespace", cr.Namespace, "redisReplicationName", cr.Name)
+            return nil
        }
-        tlsClientCertificates = append(tlsClientCertificates, cert)
 
-        tlsCaCertificates = x509.NewCertPool()
+        tlsCaCertificates := x509.NewCertPool()
         ok := tlsCaCertificates.AppendCertsFromPEM(tlsCaCertificate)
         if !ok {
-            reqLogger.V(1).Info("Failed to load CA Certificates from Secret")
+            logger.Error(errors.New("failed to load CA Certificates from secret"), "Invalid CA Certificates")
+            logger.V(1).Error(err, "Invalid CA Certificates", "secretName", cr.Spec.TLS.Secret.SecretName, "namespace", cr.Namespace, "redisReplicationName", cr.Name)
+            return nil
         }
 
         return &tls.Config{
-            Certificates: tlsClientCertificates,
+            Certificates: []tls.Certificate{cert},
             ServerName:   redisInfo.PodName,
             RootCAs:      tlsCaCertificates,
-            MinVersion:   2,
-            ClientAuth:   0,
+            MinVersion:   tls.VersionTLS12,
+            ClientAuth:   tls.NoClientCert,
         }
     }
     return nil
 }
diff --git a/k8sutils/secrets_test.go b/k8sutils/secrets_test.go
new file mode 100644
index 000000000..e8ae83fe3
--- /dev/null
+++ b/k8sutils/secrets_test.go
@@ -0,0 +1,382 @@
+package k8sutils
+
+import (
+    "os"
+    "path/filepath"
+    "testing"
+
+    common "github.com/OT-CONTAINER-KIT/redis-operator/api"
+    redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
+    "github.com/go-logr/logr/testr"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    k8sClientFake "k8s.io/client-go/kubernetes/fake"
+)
+
+func Test_getRedisPassword(t *testing.T) {
+    tests := []struct {
+        name        string
+        setup       func() *k8sClientFake.Clientset
+        namespace   string
+        secretName  string
+        secretKey   string
+        expected    string
+        expectedErr bool
+    }{
+        {
+            name: "successful retrieval",
+            setup: func() *k8sClientFake.Clientset {
+                secret := &corev1.Secret{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "redis-secret",
+                        Namespace: "default",
+                    },
+                    Data: map[string][]byte{
+                        "password": []byte("secret-password"),
+                    },
+                }
+                client := k8sClientFake.NewSimpleClientset(secret.DeepCopyObject())
+                return client
+            },
+            namespace:   "default",
+            secretName:  "redis-secret",
+            secretKey:   "password",
+            expected:    "secret-password",
+            expectedErr: false,
+        },
+        {
+            name: "secret not found",
+            setup: func() *k8sClientFake.Clientset {
+                client := k8sClientFake.NewSimpleClientset()
+                return client
+            },
+            namespace:   "default",
+            secretName:  "non-existent",
+            secretKey:   "password",
+            expected:    "",
+            expectedErr: true,
+        },
+        {
+            name: "secret exists but key is missing",
+            setup: func() *k8sClientFake.Clientset {
+                secret := &corev1.Secret{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "redis-secret",
+                        Namespace: "default",
+                    },
+                    Data: map[string][]byte{
+                        "anotherKey": []byte("some-value"),
+                    },
+                }
+                client := k8sClientFake.NewSimpleClientset(secret.DeepCopyObject())
+                return client
+            },
+            namespace:   "default",
+            secretName:  "redis-secret",
+            secretKey:   "missingKey",
+            expected:    "",
+            expectedErr: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            client := tt.setup()
+            logger := testr.New(t)
+            got, err := getRedisPassword(client, logger, tt.namespace, tt.secretName, tt.secretKey)
+
+            if tt.expectedErr {
+                require.Error(t, err, "Expected an error but didn't get one")
+            } else {
+                require.NoError(t, err, "Expected no error but got one")
+                assert.Equal(t, tt.expected, got, "Expected and actual values do not match")
+            }
+        })
+    }
+}
+
+func Test_getRedisTLSConfig(t *testing.T) {
+    tests := []struct {
+        name         string
+        setup        func() *k8sClientFake.Clientset
+        redisCluster *redisv1beta2.RedisCluster
+        redisInfo    RedisDetails
+        expectTLS    bool
+    }{
+        {
+            name: "TLS enabled and successful configuration",
+            setup: func() *k8sClientFake.Clientset {
+                tlsSecret := &corev1.Secret{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "redis-tls-secret",
+                        Namespace: "default",
+                    },
+                    Data: map[string][]byte{
+                        "ca.crt":  helperReadFile(filepath.Join("..", "tests", "testdata", "secrets", "ca.crt")),
+                        "tls.crt": helperReadFile(filepath.Join("..", "tests", "testdata", "secrets", "tls.crt")),
+                        "tls.key": helperReadFile(filepath.Join("..", "tests", "testdata", "secrets", "tls.key")),
+                    },
+                }
+                client := k8sClientFake.NewSimpleClientset(tlsSecret)
+                return client
+            },
+            redisCluster: &redisv1beta2.RedisCluster{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "redis-cluster",
+                    Namespace: "default",
+                },
+                Spec: redisv1beta2.RedisClusterSpec{
+                    TLS: &redisv1beta2.TLSConfig{
+                        TLSConfig: common.TLSConfig{
+                            CaKeyFile:   "ca.crt",
+                            CertKeyFile: "tls.crt",
+                            KeyFile:     "tls.key",
+                            Secret: corev1.SecretVolumeSource{
+                                SecretName: "redis-tls-secret",
+                            },
+                        },
+                    },
+                },
+            },
+            redisInfo: RedisDetails{
+                PodName:   "redis-pod",
+                Namespace: "default",
+            },
+            expectTLS: true,
+        },
+        {
+            name: "TLS enabled but secret not found",
+            setup: func() *k8sClientFake.Clientset {
+                client := k8sClientFake.NewSimpleClientset()
+                return client
+            },
+            redisCluster: &redisv1beta2.RedisCluster{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "redis-cluster",
+                    Namespace: "default",
+                },
+                Spec: redisv1beta2.RedisClusterSpec{
+                    TLS: &redisv1beta2.TLSConfig{
+                        TLSConfig: common.TLSConfig{
+                            CaKeyFile:   "ca.crt",
+                            CertKeyFile: "tls.crt",
+                            KeyFile:     "tls.key",
+                            Secret: corev1.SecretVolumeSource{
+                                SecretName: "redis-tls-secret",
+                            },
+                        },
+                    },
+                },
+            },
+            redisInfo: RedisDetails{
+                PodName:   "redis-pod",
+                Namespace: "default",
+            },
+            expectTLS: false,
+        },
+        {
+            name: "TLS enabled but incomplete secret",
+            setup: func() *k8sClientFake.Clientset {
+                tlsSecret := &corev1.Secret{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "redis-tls-secret",
+                        Namespace: "default",
+                    },
+                    Data: map[string][]byte{
+                        "ca.crt": helperReadFile(filepath.Join("..", "tests", "testdata", "secrets", "ca.crt")),
+                        // Missing tls.crt and tls.key
+                    },
+                }
+                client := k8sClientFake.NewSimpleClientset(tlsSecret)
+                return client
+            },
+            redisCluster: &redisv1beta2.RedisCluster{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "redis-cluster",
+                    Namespace: "default",
+                },
+                Spec: redisv1beta2.RedisClusterSpec{
+                    TLS: &redisv1beta2.TLSConfig{
+                        TLSConfig: common.TLSConfig{
+                            CaKeyFile:   "ca.crt",
+                            CertKeyFile: "tls.crt",
+                            KeyFile:     "tls.key",
+                            Secret: corev1.SecretVolumeSource{
+                                SecretName: "redis-tls-secret",
+                            },
+                        },
+                    },
+                },
+            },
+            redisInfo: RedisDetails{
+                PodName:   "redis-pod",
+                Namespace: "default",
+            },
+            expectTLS: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            client := tt.setup()
+            logger := testr.New(t)
+            tlsConfig := getRedisTLSConfig(client, logger, tt.redisCluster, tt.redisInfo)
+
+            if tt.expectTLS {
+                require.NotNil(t, tlsConfig, "Expected TLS configuration but got nil")
+                require.NotEmpty(t, tlsConfig.Certificates, "TLS Certificates should not be empty")
+                require.NotNil(t, tlsConfig.RootCAs, "Root CAs should not be nil")
+            } else {
+                assert.Nil(t, tlsConfig, "Expected no TLS configuration but got one")
+            }
+        })
+    }
+}
+
+func Test_getRedisReplicationTLSConfig(t *testing.T) {
+    tests := []struct {
+        name             string
+        setup            func() *k8sClientFake.Clientset
+        redisReplication *redisv1beta2.RedisReplication
+        redisInfo        RedisDetails
+        expectTLS        bool
+    }{
+        {
+            name: "TLS enabled and successful configuration",
+            setup: func() *k8sClientFake.Clientset {
+                tlsSecret := &corev1.Secret{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "redis-tls-secret",
+                        Namespace: "default",
+                    },
+                    Data: map[string][]byte{
+                        "ca.crt":  helperReadFile(filepath.Join("..", "tests", "testdata", "secrets", "ca.crt")),
+                        "tls.crt": helperReadFile(filepath.Join("..", "tests", "testdata", "secrets", "tls.crt")),
+                        "tls.key": helperReadFile(filepath.Join("..", "tests", "testdata", "secrets", "tls.key")),
+                    },
+                }
+                client := k8sClientFake.NewSimpleClientset(tlsSecret)
+                return client
+            },
+            redisReplication: &redisv1beta2.RedisReplication{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "redis-cluster",
+                    Namespace: "default",
+                },
+                Spec: redisv1beta2.RedisReplicationSpec{
+                    TLS: &redisv1beta2.TLSConfig{
+                        TLSConfig: common.TLSConfig{
+                            CaKeyFile:   "ca.crt",
+                            CertKeyFile: "tls.crt",
+                            KeyFile:     "tls.key",
+                            Secret: corev1.SecretVolumeSource{
+                                SecretName: "redis-tls-secret",
+                            },
+                        },
+                    },
+                },
+            },
+            redisInfo: RedisDetails{
+                PodName:   "redis-pod",
+                Namespace: "default",
+            },
+            expectTLS: true,
+        },
+        {
+            name: "TLS enabled but secret not found",
+            setup: func() *k8sClientFake.Clientset {
+                client := k8sClientFake.NewSimpleClientset()
+                return client
+            },
+            redisReplication: &redisv1beta2.RedisReplication{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "redis-cluster",
+                    Namespace: "default",
+                },
+                Spec: redisv1beta2.RedisReplicationSpec{
+                    TLS: &redisv1beta2.TLSConfig{
+                        TLSConfig: common.TLSConfig{
+                            CaKeyFile:   "ca.crt",
+                            CertKeyFile: "tls.crt",
+                            KeyFile:     "tls.key",
+                            Secret: corev1.SecretVolumeSource{
+                                SecretName: "redis-tls-secret",
+                            },
+                        },
+                    },
+                },
+            },
+            redisInfo: RedisDetails{
+                PodName:   "redis-pod",
+                Namespace: "default",
+            },
+            expectTLS: false,
+        },
+        {
+            name: "TLS enabled but incomplete secret",
+            setup: func() *k8sClientFake.Clientset {
+                tlsSecret := &corev1.Secret{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "redis-tls-secret",
+                        Namespace: "default",
+                    },
+                    Data: map[string][]byte{
+                        "ca.crt": helperReadFile(filepath.Join("..", "tests", "testdata", "secrets", "ca.crt")),
+                        // Missing tls.crt and tls.key
+                    },
+                }
+                client := k8sClientFake.NewSimpleClientset(tlsSecret)
+                return client
+            },
+            redisReplication: &redisv1beta2.RedisReplication{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "redis-cluster",
+                    Namespace: "default",
+                },
+                Spec: redisv1beta2.RedisReplicationSpec{
+                    TLS: &redisv1beta2.TLSConfig{
+                        TLSConfig: common.TLSConfig{
+                            CaKeyFile:   "ca.crt",
+                            CertKeyFile: "tls.crt",
+                            KeyFile:     "tls.key",
+                            Secret: corev1.SecretVolumeSource{
+                                SecretName: "redis-tls-secret",
+                            },
+                        },
+                    },
+                },
+            },
+            redisInfo: RedisDetails{
+                PodName:   "redis-pod",
+                Namespace: "default",
+            },
+            expectTLS: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            client := tt.setup()
+            logger := testr.New(t)
+            tlsConfig := getRedisReplicationTLSConfig(client, logger, tt.redisReplication, tt.redisInfo)
+
+            if tt.expectTLS {
+                require.NotNil(t, tlsConfig, "Expected TLS configuration but got nil")
+                require.NotEmpty(t, tlsConfig.Certificates, "TLS Certificates should not be empty")
+                require.NotNil(t, tlsConfig.RootCAs, "Root CAs should not be nil")
+            } else {
+                assert.Nil(t, tlsConfig, "Expected no TLS configuration but got one")
+            }
+        })
+    }
+}
+
+func helperReadFile(filename string) []byte {
+    data, err := os.ReadFile(filename)
+    if err != nil {
+        panic(err)
+    }
+    return data
+}
diff --git a/k8sutils/status.go b/k8sutils/status.go
index 3cc32353d..d652ad7c7 100644
--- a/k8sutils/status.go
+++ b/k8sutils/status.go
@@ -27,6 +27,10 @@ func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, status status.Redis
     cr.Status.ReadyFollowerReplicas = readyFollowerReplicas
 
     client, err := GenerateK8sDynamicClient(GenerateK8sConfig)
+    if err != nil {
+        logger.Error(err, "Failed to generate k8s dynamic client")
+        return err
+    }
     gvr := schema.GroupVersionResource{
         Group:    "redis.redis.opstreelabs.in",
         Version:  "v1beta2",
diff --git a/tests/testdata/secrets/ca.crt b/tests/testdata/secrets/ca.crt
new file mode 100644
index 000000000..483acebbe
--- /dev/null
+++ b/tests/testdata/secrets/ca.crt
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIC/zCCAeegAwIBAgIULZgkEyKSTUyz9TPluGHMKp3kXMkwDQYJKoZIhvcNAQEL
+BQAwDzENMAsGA1UEAwwETXlDQTAeFw0yMzExMTAyMjQzNDdaFw0yNDExMDkyMjQz
+NDdaMA8xDTALBgNVBAMMBE15Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQCdvMDZNyA9XM7TLcWzV3GfT8obuQgrCPyAEREEngprCMe1JjtilYCAthZD
+062FG6KtQYf5Ph+TCpxrm1x8cdbHBOMoogjJ3tOrKgg/sDqDIBJY5qqeoti1Sps5
+UGLlw8fmgAQIJ3Jie9dlgYtk7HkEFSfuHqHhozsInHkrHDhFAyYNKRf/UwNZvkcB
+2st8SJJaC56Pxn5SP97kfBUc0K9dwsFIo5l8y/WWJ31M3CLNuIzBP6JgGdIHq8Fe
+pvW3YtWOBRptjse5soZrXW3I6k1SqkkWMM6dvSJobmN5uarn2fFfQI+kYpX1Hx67
+I9sUYeDUVDyeVDyE9y5U7q3kLacNAgMBAAGjUzBRMB0GA1UdDgQWBBROkKWbx7zB
+6KAGXCmh0SGNsgZeaTAfBgNVHSMEGDAWgBROkKWbx7zB6KAGXCmh0SGNsgZeaTAP
+BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBMvROzFeMPm52B8A9f
+pinakVzXbLlQnhmYvevnIZ6jXKCxycp0Ompu/TqqyfpKJ0gJChPCVF2nWHkI6suR
+FYI9m3ZXKUqafYlienFbJXmJxWt9V+jt4kBbIja+/ETdl8Vmbc7MIQ7eZS6XMJa9
+tkEB4yCOj72p3DmC/bJSa4zp0ng/AoATA7GPmm+l1hEMDLd+mtosMtev0DWDyF0j
+acrlBvRv4Eq6kFuBBzcPqKC2DuFWIU/ZCBkwn2tScPEf0UjZUXl74k9Taz37Av9Z
+KQByaF94pYf/7kPRIn2Zw0xXRTJ0wD9bi4Q6YfnN/ntrcs1CNKWb4MMbU2gnz0RN
+zCjC
+-----END CERTIFICATE-----
diff --git a/tests/testdata/secrets/tls.crt b/tests/testdata/secrets/tls.crt
new file mode 100644
index 000000000..b60b071aa
--- /dev/null
+++ b/tests/testdata/secrets/tls.crt
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICqTCCAZECFCMWUrMjKUyph+9nVeNaq9ulIuSFMA0GCSqGSIb3DQEBCwUAMA8x
+DTALBgNVBAMMBE15Q0EwHhcNMjMxMTEwMjI0NDEyWhcNMjQxMTA5MjI0NDEyWjAT
+MREwDwYDVQQDDAhNeVNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBANsG8xaUF22cRbBZaMCjcuC3D8HHFPmmJI6xCF/MVg9+0afVG9uGpjpcOa8Q
+wPD9MRHx8+abAUpR2JwWeYzCwLem+oY+gNPpSzshNUimlonrFnhq3d9OKpLHPC56
+5z1SUctBKlEqHvsV+Y+0xto2tXNDfdrjlKm+DaydFfe9G9BUaZGtjqljD2Yq95Qn
+7Qf8bL2Sc5TLSztg/BH+zJmSsC8t4O4qs3iolaZ4kpCrDu4vy2X2V9ybqFPNCFyp
+wZB1jLq2RzCcdxFUfYm0a83+LDvqgezXRwB29ZGsbTtb/L/HFkwIt+tqNeVWw+hI
+HzRZ7lqI5W+8CpiM2xtJ8qpgs20CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEARjOw
+dKDN9fIC8pabrZaXUPUFHwaB3jKrv5B/k4IkI+7VMz7t1mQ7fP+Ryuyinws3VpLh
+DEeMyZ+a4gzcMvuxz9RL/p6B9gIVpebknfmTNZPqvSCg2kY9HCTUpsh5AawaGInN
+iZ2Iw/MKLJEVxOxNRPzeKE6bHqvO6dijUGrB++XugrGVwMQjMZsJuu6bN7CYRgsA
+OoRKohDkhaKsW/RkGIYDIxqm+zsPv69PTALlUXJtgnlkvNc6F5Fd5pwuW8TJ/wLe
+eyQM56RaW2OInIOO4ehJkgRnLdukkECvYEwlZD3RgGFvUIGZFZXDS7FvpzZaUmdz
+XtNWJjYUsc8d8iljTQ==
+-----END CERTIFICATE-----
diff --git a/tests/testdata/secrets/tls.key b/tests/testdata/secrets/tls.key
new file mode 100644
index 000000000..485f0189c
--- /dev/null
+++ b/tests/testdata/secrets/tls.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDbBvMWlBdtnEWw
+WWjAo3Lgtw/BxxT5piSOsQhfzFYPftGn1RvbhqY6XDmvEMDw/TER8fPmmwFKUdic
+FnmMwsC3pvqGPoDT6Us7ITVIppaJ6xZ4at3fTiqSxzwueuc9UlHLQSpRKh77FfmP
+tMbaNrVzQ33a45Spvg2snRX3vRvQVGmRrY6pYw9mKveUJ+0H/Gy9knOUy0s7YPwR
+/syZkrAvLeDuKrN4qJWmeJKQqw7uL8tl9lfcm6hTzQhcqcGQdYy6tkcwnHcRVH2J
+tGvN/iw76oHs10cAdvWRrG07W/y/xxZMCLfrajXlVsPoSB80We5aiOVvvAqYjNsb
+SfKqYLNtAgMBAAECggEAM82ifBIdsm6WKv4SoRFnj8+sWeQoyV0q41bqyKGvLy4L
+D1t/ob3ongAHIqlfQQBZdUmZKs85kGboSQ6lxA2iAC3trgeld7mDciJKFHtWOpuQ
+Ln30KSc3OY0G5mVqQN4x+1VX2WeReUh6xKr4p07uPqtVXoqaNEV/vXZP5k7jf7Ov
+UHYKkyOvv4Zo+W+0JUrgTi9LO64OMPHkArHmZbaYvLensyoGH64yXWr9x16mQhaV
+gBuXNzYHJchdVN51FyaF9xmNzFFD9j6kKjm5FiTuUbuw5NMwgNKuj9eRy5Cqf+ao
+JkjEUZHq4aXmj/1VwAJ9c9ra0eaxUiVgbN8dZGycXwKBgQDzZe416Jh+pYGaTCyl
+aac6uyNiMwTHDvjj5r/faa33leGYmn9rZrcQyNdh7LNkU4NTJnpl5ZlLGtny4wMd
+MiKIXT2RwjL/WdRbuGjoYF9MJpWVB9Hu3c+UkeL7F5qppZ3sVk3oPG4dW3gA10pL
+we7m+MQfVbJwLL0HxV0isHLxowKBgQDmXf+f8udN72xBtf9MybXs8tMx70KCX43X
+b0RK5Qt76DYWQSjen3XVPKvIEOfPMMlcvxDNm+v0byMTif0E8Q4jCFVTY3pEhK4p
+nXiL2FgXD6AcS0rWJBI0JHUl4otCfqF+5ouGgXBoYdLNHBM+I+hALn2r2qzbMJEu
+QhbS38a3rwKBgG6+sysuIKyHK7gD7tB4iRFs8oWMxyC3TEGNzUGe+PvM42+m5FD+
+1E67w1wX8eu1H+ymdkyEskH8/qvH5LPVCudW3VvDq6aJvdjZyEnrB5FDgQ3lF/0C
+SZ/E3Sz4KXQFGhzdi+ceD9AlvS1Mx84+eC/5gmrreBwYDw5JG75b4IDXAoGBAIqT
+GtUdqhRQpN92WmfXos8xVuff1DNWxZ1FemBPHbRggECs26fnZltqTq9ftAIHh+l5
+qeL1G2ADhqcXR5O0adubBLDP01nqMlYoOr5s2rislpTOmers8eJZ1/p9J2ZNhFow
+1teHf5Xa2pK1g8HKmcgZ71D0jyyfL4YTDWW2ZPRTAoGBAOZeKTvyjpYHDtV4fRbZ
+WTwyMda9qjnQvDovPcm5ci1SEjJUCusJKNwoZiU9/xuU1P9t234qmNsmQV5Tpplg
+aH139Yw8kz7u9z1o2ajO6rG7rQ5qUgYXHgEhIoRnkCfx4uekeaO5lgdwIDR0zC1I
+BfJBeduDgVHEyYxEU78tprfc
+-----END PRIVATE KEY-----