Commit
fix some of the logging issues i saw earlier and simplify things a bit
Caio Begotti committed Dec 16, 2019
1 parent 91e88e4 commit adfae0c
Showing 3 changed files with 123 additions and 81 deletions.
2 changes: 1 addition & 1 deletion cmd/plugin/cli/root.go
@@ -53,7 +53,7 @@ $ kubectl pod-dive elasticsearch-curator-1576112400-97htk -n logging`,

 		/*
 			if namespace == nil || *namespace == "" {
-				log.Instructions("No namespace given with -n/--namespace, this implies cluster scope!")
+				log.Info("No namespace given with -n/--namespace, this implies cluster scope!")
 			}
 		*/

4 changes: 2 additions & 2 deletions pkg/logger/logger.go
@@ -13,7 +13,7 @@ func NewLogger() *Logger {
 	return &Logger{}
 }
 
-func (l *Logger) Info(msg string, args ...interface{}) {
+func (l *Logger) Notice(msg string, args ...interface{}) {
 	c := color.New(color.FgHiCyan)
 	c.Println(fmt.Sprintf(msg, args...))
 }
@@ -23,7 +23,7 @@ func (l *Logger) Error(err error) {
 	c.Println(fmt.Sprintf("%#v", err))
 }
 
-func (l *Logger) Instructions(msg string, args ...interface{}) {
+func (l *Logger) Info(msg string, args ...interface{}) {
 	white := color.New(color.FgHiWhite)
 	white.Println(fmt.Sprintf(msg, args...))
 }
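
The net effect of the rename: Notice is now the high-visibility cyan printer, while Info is the plain white printer used for the tree output. A minimal sketch of a call site after this commit — the import path is illustrative, inferred from the repository layout, not confirmed by this diff:

package main

import "github.com/caiobegotti/pod-dive/pkg/logger" // assumed module path

func main() {
	log := logger.NewLogger()

	// cyan: headline-style messages the user should notice
	log.Notice("Diving after pod %s:", "example-pod")

	// white: regular informational/tree output
	log.Info("[namespace] ├─┬─ %s", "default")
}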
198 changes: 120 additions & 78 deletions pkg/plugin/plugin.go
@@ -44,7 +44,7 @@ import (
 func RunPlugin(configFlags *genericclioptions.ConfigFlags, outputChan chan string) error {
 	config, err := configFlags.ToRESTConfig()
 	if err != nil {
-		return errors.Wrap(err, "Failed to read kubeconfig")
+		return errors.Wrap(err, "Failed to read kubeconfig, exiting.")
 	}
 
 	clientset, err := kubernetes.NewForConfig(config)
@@ -56,34 +56,41 @@ func RunPlugin(configFlags *genericclioptions.ConfigFlags, outputChan chan strin

 	podName := <-outputChan
 	podFieldSelector := "metadata.name=" + podName
-	log.Instructions("Diving after pod %s:", podName)
+	log.Info("Diving after pod %s:", podName)
 
 	// BEGIN tree separator
-	log.Instructions("")
+	log.Info("")
 
 	// seek the whole cluster, in all namespaces, for the pod name
-	podFind, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{FieldSelector: podFieldSelector})
+	podFind, err := clientset.CoreV1().Pods("").List(
+		metav1.ListOptions{FieldSelector: podFieldSelector})
 	if err != nil || len(podFind.Items) == 0 {
-		return errors.Wrap(err, "Failed to list cluster pods, set a config context or verify the API server.")
+		return errors.Wrap(err,
+			"Failed to list cluster pods, set a config context or verify the API server.")
 	}
 
 	// we can save one API call here, making it much faster and smaller, hopefully
-	// podObject, err := clientset.CoreV1().Pods(podFind.Items[0].Namespace).Get(podFind.Items[0].Name, metav1.GetOptions{})
+	// podObject, err := clientset.CoreV1().Pods(podFind.Items[0].Namespace).Get(
+	// 	podFind.Items[0].Name, metav1.GetOptions{})
 	// if err != nil {
-	// 	return errors.Wrap(err, "Failed to get pod info")
-	// }
+	// 	return errors.Wrap(err, "Failed to get pod info")
+	// }
 	podObject := podFind.Items[0]
 
 	// basically to create the ascii tree of siblings below
-	nodeObject, err := clientset.CoreV1().Nodes().Get(podObject.Spec.NodeName, metav1.GetOptions{})
+	nodeObject, err := clientset.CoreV1().Nodes().Get(
+		podObject.Spec.NodeName, metav1.GetOptions{})
 	if err != nil {
-		return errors.Wrap(err, "Failed to get nodes info")
+		return errors.Wrap(err,
+			"Failed to get nodes info, verify the connection to their pool.")
 	}
 
 	nodeFieldSelector := "spec.nodeName=" + nodeObject.Name
-	nodePods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{FieldSelector: nodeFieldSelector})
+	nodePods, err := clientset.CoreV1().Pods("").List(
+		metav1.ListOptions{FieldSelector: nodeFieldSelector})
 	if err != nil {
-		return errors.Wrap(err, "Failed to get sibling pods info")
+		return errors.Wrap(err,
+			"Failed to get sibling pods info, API server could not be reached.")
 	}
 
 	// this will be used to show whether the pod is running inside a master node or not
@@ -111,81 +118,116 @@ func RunPlugin(configFlags *genericclioptions.ConfigFlags, outputChan chan strin
 	// of each level... at least currently it's quite doable to strip them out with
 	// sed as they are always grouped by either [] or () so the actual tree is intact
 	if nodeLabels["kubernetes.io/role"] == "master" {
-		log.Instructions("[node] %s [%s, %s]", podObject.Spec.NodeName, nodeLabels["kubernetes.io/role"], nodeCondition)
+		log.Info("[node] %s [%s, %s]",
+			podObject.Spec.NodeName,
+			nodeLabels["kubernetes.io/role"],
+			nodeCondition)
 	} else {
-		log.Instructions("[node] %s [%s]", podObject.Spec.NodeName, nodeCondition)
+		log.Info("[node] %s [%s]",
+			podObject.Spec.NodeName,
+			nodeCondition)
 	}
+	// FIXME: if ReplicaSet, go over it all again
+	// FIXME: put everything outside getownerreferences()
+	// FIXME: log.Info("%s", strings.ToLower(podObject.Status.Phase))
-	for _, existingOwnerRef := range podObject.GetOwnerReferences() {
-		log.Instructions("[namespace] ├─┬─ %s", podObject.Namespace)
-		log.Instructions("[type] │ └─┬─ %s", strings.ToLower(existingOwnerRef.Kind))
-		log.Instructions("[workload] │ └─┬─ %s [N replicas]", existingOwnerRef.Name)
-		log.Instructions("[pod] │ └─┬─ %s [%s]", podObject.GetName(), podObject.Status.Phase)
-
-		for num, val := range podObject.Status.ContainerStatuses {
-			if num == 0 {
-				// print header if start of the tree
-				if num == len(podObject.Status.ContainerStatuses)-1 {
-					// terminate ascii tree if this is the last item
-					if val.RestartCount == 1 {
-						// with singular
-						log.Instructions("[containers] │ └─── %s [%d restart]", val.Name, val.RestartCount)
-					} else {
-						// with plural
-						log.Instructions("[containers] │ └─── %s [%d restarts]", val.Name, val.RestartCount)
-					}
-				} else {
-					// connect the ascii tree with next link
-					if val.RestartCount == 1 {
-						log.Instructions("[containers] │ ├─── %s [%d restart]", val.Name, val.RestartCount)
-					} else {
-						log.Instructions("[containers] │ ├─── %s [%d restarts]", val.Name, val.RestartCount)
-					}
-				}
-			} else {
-				// clean tree space for N items
-				if num == len(podObject.Status.ContainerStatuses)-1 {
-					if len(podObject.Spec.InitContainers) == 0 {
-						if val.RestartCount == 1 {
-							log.Instructions(" │ └─── %s [%d restart]", val.Name, val.RestartCount)
-						} else {
-							log.Instructions(" │ └─── %s [%d restarts]", val.Name, val.RestartCount)
-						}
-					} else {
-						if val.RestartCount == 1 {
-							log.Instructions(" │ ├─── %s [%d restart]", val.Name, val.RestartCount)
-						} else {
-							log.Instructions(" │ ├─── %s [%d restarts]", val.Name, val.RestartCount)
-						}
-					}
-				} else {
-					if val.RestartCount == 1 {
-						log.Instructions(" │ ├─── %s [%d restart]", val.Name, val.RestartCount)
-					} else {
-						log.Instructions(" │ ├─── %s [%d restarts]", val.Name, val.RestartCount)
-					}
-				}
-			}
-		}
-
-		// no need to manually link init containers as there will
-		// always be at least one container inside the pod above
-		// so they can all be appended here in the ascii tree safely
-		for num, val := range podObject.Status.InitContainerStatuses {
-			if num == len(podObject.Status.InitContainerStatuses)-1 {
-				if val.RestartCount == 1 {
-					log.Instructions(" │ └─── %s [init, %d restart]", val.Name, val.RestartCount)
-				} else {
-					log.Instructions(" │ └─── %s [init, %d restarts]", val.Name, val.RestartCount)
-				}
-			} else {
-				if val.RestartCount == 1 {
-					log.Instructions(" │ ├─── %s [init, %d restart]", val.Name, val.RestartCount)
-				} else {
-					log.Instructions(" │ ├─── %s [init, %d restarts]", val.Name, val.RestartCount)
-				}
-			}
-		}
-	}
+	log.Info("[namespace] ├─┬─ %s", podObject.Namespace)
+
+	if podObject.GetOwnerReferences() == nil {
+		log.Info("[type] │ └─┬─ pod")
+		log.Info("[workload] │ └─┬─ [no replica set]")
+	} else {
+		for _, existingOwnerRef := range podObject.GetOwnerReferences() {
+			if strings.ToLower(existingOwnerRef.Kind) == "replicaset" {
+				rsObject, err := clientset.AppsV1().ReplicaSets(
+					podObject.GetNamespace()).Get(
+					existingOwnerRef.Name,
+					metav1.GetOptions{})
+				if err != nil {
+					return errors.Wrap(err,
+						"Failed to retrieve replica sets data, AppsV1 API was not available.")
+				}
+
+				log.Info("[type] │ └─┬─ %s", strings.ToLower(existingOwnerRef.Kind))
+				if rsObject.Status.Replicas == 1 {
+					log.Info("[workload] │ └─┬─ %s [%d replica]",
+						existingOwnerRef.Name,
+						rsObject.Status.Replicas)
+				} else {
+					log.Info("[workload] │ └─┬─ %s [%d replicas]",
+						existingOwnerRef.Name,
+						rsObject.Status.Replicas)
+				}
+			} else {
+				log.Info("[type] │ └─┬─ %s", strings.ToLower(existingOwnerRef.Kind))
+				log.Info("[workload] │ └─┬─ %s [? replicas]", existingOwnerRef.Name)
+			}
+		}
+	}
+
+	// we have to convert v1.PodPhase to string first, before we lowercase it
+	log.Info("[pod] │ └─┬─ %s [%s]",
+		podObject.GetName(),
+		strings.ToLower(string(podObject.Status.Phase)))
+
+	for num, val := range podObject.Status.ContainerStatuses {
+		if num == 0 {
+			// print header if start of the tree
+			if num == len(podObject.Status.ContainerStatuses)-1 {
+				// terminate ascii tree if this is the last item
+				if val.RestartCount == 1 {
+					// with singular
+					log.Info("[containers] │ └─── %s [%d restart]", val.Name, val.RestartCount)
+				} else {
+					// with plural
+					log.Info("[containers] │ └─── %s [%d restarts]", val.Name, val.RestartCount)
+				}
+			} else {
+				// connect the ascii tree with next link
+				if val.RestartCount == 1 {
+					log.Info("[containers] │ ├─── %s [%d restart]", val.Name, val.RestartCount)
+				} else {
+					log.Info("[containers] │ ├─── %s [%d restarts]", val.Name, val.RestartCount)
+				}
+			}
+		} else {
+			// clean tree space for N items
+			if num == len(podObject.Status.ContainerStatuses)-1 {
+				if len(podObject.Spec.InitContainers) == 0 {
+					if val.RestartCount == 1 {
+						log.Info(" │ └─── %s [%d restart]", val.Name, val.RestartCount)
+					} else {
+						log.Info(" │ └─── %s [%d restarts]", val.Name, val.RestartCount)
+					}
+				} else {
+					if val.RestartCount == 1 {
+						log.Info(" │ ├─── %s [%d restart]", val.Name, val.RestartCount)
+					} else {
+						log.Info(" │ ├─── %s [%d restarts]", val.Name, val.RestartCount)
+					}
+				}
+			} else {
+				if val.RestartCount == 1 {
+					log.Info(" │ ├─── %s [%d restart]", val.Name, val.RestartCount)
+				} else {
+					log.Info(" │ ├─── %s [%d restarts]", val.Name, val.RestartCount)
+				}
+			}
+		}
+	}
+
+	// no need to manually link init containers as there will
+	// always be at least one container inside the pod above
+	// so they can all be appended here in the ascii tree safely
+	for num, val := range podObject.Status.InitContainerStatuses {
+		if num == len(podObject.Status.InitContainerStatuses)-1 {
+			if val.RestartCount == 1 {
+				log.Info(" │ └─── %s [init, %d restart]", val.Name, val.RestartCount)
+			} else {
+				log.Info(" │ └─── %s [init, %d restarts]", val.Name, val.RestartCount)
+			}
+		} else {
+			if val.RestartCount == 1 {
+				log.Info(" │ ├─── %s [init, %d restart]", val.Name, val.RestartCount)
+			} else {
+				log.Info(" │ ├─── %s [init, %d restarts]", val.Name, val.RestartCount)
+			}
+		}
+	}
@@ -206,27 +248,27 @@ func RunPlugin(configFlags *genericclioptions.ConfigFlags, outputChan chan strin
 	for num, val := range siblingsPods {
 		if num == 0 {
 			if num == len(siblingsPods)-1 {
-				log.Instructions("[siblings] └─── %s", val)
+				log.Info("[siblings] └─── %s", val)
 			} else {
-				log.Instructions("[siblings] ├─── %s", val)
+				log.Info("[siblings] ├─── %s", val)
 			}
 		} else {
 			if num == len(siblingsPods)-1 {
-				log.Instructions(" └─── %s", val)
+				log.Info(" └─── %s", val)
 			} else {
-				log.Instructions(" ├─── %s", val)
+				log.Info(" ├─── %s", val)
 			}
 		}
 	}
 
 	// END tree separator
-	log.Instructions("")
+	log.Info("")
 
 	// basic reasons for pods not being in a running state
 	for _, containerStatuses := range podObject.Status.ContainerStatuses {
 		if containerStatuses.LastTerminationState.Waiting != nil {
-			log.Instructions("Stuck:")
-			log.Instructions("  %s %s [code %s]",
+			log.Info("Stuck:")
+			log.Info("  %s %s [code %s]",
 				containerStatuses.Name,
 				strings.ToLower(containerStatuses.LastTerminationState.Waiting.Reason),
 				containerStatuses.LastTerminationState.Waiting.Message)
@@ -235,9 +277,9 @@ func RunPlugin(configFlags *genericclioptions.ConfigFlags, outputChan chan strin

 		if containerStatuses.LastTerminationState.Terminated != nil {
 			if containerStatuses.LastTerminationState.Terminated.Reason != "Completed" {
-				log.Instructions("Terminations:")
+				log.Info("Terminations:")
 
-				log.Instructions("  %s %s [code %d]",
+				log.Info("  %s %s [code %d]",
 					containerStatuses.Name,
 					strings.ToLower(containerStatuses.LastTerminationState.Terminated.Reason),
 					containerStatuses.LastTerminationState.Terminated.ExitCode)
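
For context on the calls being wrapped throughout this file: the plugin finds the pod by listing across all namespaces with a field selector, then walks the node, owner references, and container statuses to draw the tree. A standalone sketch of that lookup pattern under the same assumptions as this commit — client-go from before v0.18, where List does not yet take a context.Context, and an illustrative kubeconfig source (RunPlugin builds its config from genericclioptions instead):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// illustrative: load ~/.kube/config directly
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// an empty namespace ("") lists across the whole cluster; the field
	// selector makes the API server filter pods by exact name
	podFind, err := clientset.CoreV1().Pods("").List(
		metav1.ListOptions{FieldSelector: "metadata.name=example-pod"})
	if err != nil || len(podFind.Items) == 0 {
		panic("pod not found")
	}

	pod := podFind.Items[0]
	fmt.Println(pod.Namespace, pod.Spec.NodeName, pod.Status.Phase)
}

The same ListOptions pattern drives the sibling listing above, with the selector "spec.nodeName=" + nodeObject.Name instead of a pod name.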
