https://sysdig.com/?s=What%E2%80%99s+new+in+Kubernetes++
https://kubernetes.io/docs/reference/kubectl/cheatsheet/
kubectl top no
kubectl top po
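If your kubectl supports --sort-by on top, rank the biggest consumers directly:
kubectl top po -A --sort-by=memory
kubectl top no --sort-by=cpu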
kubectl get pod <my-pod-name> -ojson | jq '.status.containerStatuses[] | { "image": .image, "imageID": .imageID }'
kubectl logs <my-pod> --previous
kubectl get po -v=6
kubectl get secret <my-secret> -n<my-namespace> -ojsonpath='{.data.token}' | base64 -d
kubectl get secret <my-secret> -ojsonpath='{.data.jmxremote\.password}' | base64 -d
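A variant to decode every key of a secret at once (assumes jq >= 1.6 for @base64d):
kubectl get secret <my-secret> -ojson | jq '.data | map_values(@base64d)'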
kubectl get secrets <my-secret> -ojson -n<my-src-namespace> | jq 'del(.metadata.uid, .metadata.resourceVersion, .metadata.creationTimestamp) | .metadata.namespace = "<my-dest-namespace>"' | kubectl create -f -
kubectl get deploy,sts,cm,secret,pvc,svc -oname -lrelease=<my-helm-release> | while read name; do kubectl delete $name; done
kubectl wait po <my-po> --for=condition=Ready
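The same wait works on a selector with a timeout, e.g. for all pods of a release:
kubectl wait po -lrelease=<my-helm-release> --for=condition=Ready --timeout=120s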
watch kubectl get po -lrelease=<my-helm-release>
Record a timestamp before deploying, then list the pods created after it:
DEPLOYMENT_STARTDATE=$(jq -n 'now')
kubectl get po -lrelease=<my-helm-release> -ojson | jq -r --argjson deployment_startdate $DEPLOYMENT_STARTDATE '.items[] | select((.metadata.creationTimestamp | fromdate) > $deployment_startdate) | .metadata.name'
kubectl get po -ojson | jq -r '.items[] | select(.spec.containers[].env[]?.valueFrom.secretKeyRef.key=="<MY_VAR_ENV_NAME>") | .metadata.name'
kubectl get deploy -ojson | jq -r '.items[] | select(.spec.template.spec.containers[].env[]?.valueFrom.secretKeyRef.key=="<MY_VAR_ENV_NAME>") | .metadata.name'
kubectl set env deployment/registry STORAGE_DIR=/local
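A trailing dash removes a variable, and --list shows the current ones:
kubectl set env deployment/registry STORAGE_DIR-
kubectl set env deployment/registry --list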
kubectl rollout restart deploy <my-deploy>
kubectl auth can-i create pods/exec
kubectl get cj -oname | while read name; do kubectl patch $name -p '{"spec":{"suspend":true}}'; done
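And the reverse, to resume them all:
kubectl get cj -oname | while read name; do kubectl patch $name -p '{"spec":{"suspend":false}}'; done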
kubectl get job <my-job> -o json | jq 'del(.spec.selector)' | jq 'del(.spec.template.metadata.labels)' | kubectl replace --force -f -
NS=mynamespace
kubectl -n $NS patch pvc $(kubectl -n $NS get pvc --no-headers | grep Terminating | awk '{print $1}') -p '{"metadata":{"finalizers":null}}'
kubectl get deploy -lrelease=<my-helm-release> -ojson | jq -r '.items[].spec.template.spec.containers[0].image'
kubectl get deploy -ojson | jq -r '.. | .image? // empty' | sort -u
kubectl get po -owide -A | grep -v 'Running\|Completed'
kubectl get po --field-selector=status.phase=Failed -A -owide
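Once inspected, failed pods can be cleaned up with the same field selector:
kubectl delete po --field-selector=status.phase=Failed -A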
kubectl get po -A -ojson | jq -r '.items[] | select(.status.reason=="Evicted") | .metadata.namespace + " " + .spec.nodeName + " " + (.spec.priority|tostring)+ " " + .metadata.name + " : " + .status.message' | sort -k2,2 -k3nr
kubectl get po -ojson | jq -r '.items[] | select(.spec.affinity.podAntiAffinity!=null) | .metadata.name'
kubectl get po -ojson | jq -r '.items[] | select(.status.qosClass=="Guaranteed") | .metadata.name'
kubectl get pc -ojson | jq -r '.items[] | .metadata.name + " : " + (.value|tostring)' | sort -k3nr
kubectl get po -ojson | jq -r '.items[] | .metadata.namespace + " : " + (.spec.nodeName // "-") + " : " + .metadata.name + " : " + (.spec.priorityClassName // "-") + " : " + (.spec.priority|tostring)' | sort -k9nr -k5
kubectl get po --sort-by='.status.containerStatuses[0].restartCount'
kubectl get po --sort-by=.status.startTime
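Sorting events the same way helps correlate pod churn with cluster activity:
kubectl get events -A --sort-by=.lastTimestamp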
kubectl get po -A -ojson | jq -r '.items[] | select(.status.containerStatuses[0].lastState.terminated.reason=="OOMKilled") | .metadata.namespace + " " + (.status.containerStatuses[0].restartCount|tostring) + " " + .metadata.name' | sort -k1,1r -k2nr
kubectl get po -ojson -A | jq -r '.items[] | select(.spec.containers[].securityContext.privileged==true) | .metadata.namespace + " : " + .metadata.name'
kubectl get po -ojson -A | jq -r '.items[] | select(.spec.hostIPC==true) | .metadata.namespace + " : " + .metadata.name'
kubectl get po -ojson -A | jq -r '.items[] | select(.spec.hostNetwork==true) | .metadata.namespace + " : " + .metadata.name'
kubectl get po -ojson -A | jq -r '.items[] | select(.spec.hostPID==true) | .metadata.namespace + " : " + .metadata.name'
KUBE_API_URL=<KUBE API URL>
for ep in version healthz livez readyz; do curl -k "$KUBE_API_URL/$ep?verbose"; done
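If the endpoints require auth, kubectl can send the request with your kubeconfig credentials:
kubectl get --raw '/readyz?verbose'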
kubectl get clusterrolebindings -o json | jq '.items[] | select(.subjects? // [] | any(.kind == "User" and .name == "system:anonymous" or .kind == "Group" and .name == "system:unauthenticated"))'
kubectl delete pod <pod> --grace-period=0 --force
kubectl get po -owide | grep 'Terminating' | awk -F ' ' '{print $1}' | while read name; do kubectl delete po $name --grace-period=0 --force; done
kubectl get jobs -ojson | jq -r '.items[] | select(.metadata.annotations["helm.sh/hook"] and (.metadata.annotations["helm.sh/hook"]|contains("pre")) and .metadata.labels.release=="<my-helm-release>") | .metadata.name'
kubectl get po -ojson | jq -r '.items[] | select(.metadata.ownerReferences[].name == "<my-hook-name>") | .metadata.name'
kubectl get jobs -ojson | jq -r '.items[] | select(.metadata.annotations["helm.sh/hook"] and .status.succeeded==1) | .metadata.name' | while read name; do kubectl delete jobs $name ; done
kubectl get no -ojson | jq -r '.items[] | select(.spec.taints != null and any(.spec.taints[]; .key | contains("pressure"))) | .metadata.name + " : " + ([.spec.taints[].key] | join(","))'
kubectl get no --no-headers | awk '{print $1}' | xargs -I {} sh -c 'echo {}; kubectl describe node {} | grep Allocated -A 5 | grep -ve Event -ve Allocated -ve percent -ve -- ; echo'
for i in {01..12}; do echo dbk-k8s-worker-dev-${i}v; kubectl describe node dbk-k8s-worker-dev-${i}v|grep -A6 'Allocated resources:'; done
kubectl drain <node> --ignore-daemonsets --force --delete-emptydir-data
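Once maintenance is done, make the node schedulable again:
kubectl uncordon <node>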
ctr --namespace k8s.io i ls
ctr --namespace k8s.io i push -u <user>:<password> <image>
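ctr can also export an image to a tarball (syntax as of recent containerd versions):
ctr --namespace k8s.io i export <my-image.tar> <image>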
https://helm.sh/docs/intro/cheatsheet/
helm template . --output-dir=output-dir
helm status <release> --show-resources
helm get values <release> -a
helm get manifest <release>
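To spot failed or pending releases across the whole cluster:
helm ls -A -a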
flux get all -A
flux get all -n <namespace> --status-selector="ready=false"
flux tree ks <kustomization> -n<namespace>
flux reconcile ks <kustomization> -n<namespace> --with-source
flux reconcile hr <release> -n<namespace> --with-source
flux diff ks <kustomization> --path=<local path to kustomization> -n<namespace>
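To pause reconciliation while debugging, then hand control back to Flux:
flux suspend hr <release> -n<namespace>
flux resume hr <release> -n<namespace>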
Search for drift in reconciliations (the Helm revision number of a release keeps increasing when Flux repeatedly corrects an unwanted drift):
flux get hr -ndev --no-header | awk '{print $1}' | while read name; do helm history $name --max 1 -ndev; done
Search for replica counts specified in Helm manifests (when replicas is not set in the manifest, a release can be scaled without any drift being detected):
flux get hr -ndev --no-header | awk '{print $1}' | while read name; do bash -c "echo $name && helm get manifest $name -ndev | grep replica"; done
https://console.cloud.google.com/gcr/images/google-containers/GLOBAL
https://github.com/ahmetb/kubectl-aliases
https://github.com/ashleyschuett/kubernetes-secret-decode
kubectl get secret my-secret -o yaml | ksd
https://github.com/junegunn/fzf
kubectl get po | fzf
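For example, pick a pod interactively and open a shell in it:
kubectl exec -it $(kubectl get po -oname | fzf) -- sh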
https://github.com/wercker/stern
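For example, tail all pods of a release (assuming a stern version with --selector and --since):
stern -l release=<my-helm-release> --since 15m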
https://engineering.opsgenie.com/advanced-kubernetes-objects-53f5e9bc0c28
https://thenewstack.io/taking-kubernetes-api-spin/
https://medium.com/@cotton_ori/how-to-terminate-a-side-car-container-in-kubernetes-job-2468f435ca99
- https://efekahraman.github.io/2018/04/docker-awareness-in-java
- https://blog.csanchez.org/2017/05/31/running-a-jvm-in-a-container-without-getting-killed/
- https://blogs.oracle.com/java-platform-group/java-se-support-for-docker-cpu-and-memory-limits
- https://banzaicloud.com/blog/java-resource-limits/
https://applatix.com/case-docker-docker-kubernetes-part-2/
https://cloudowski.com/articles/three-qos-classes-in-kubernetes/
https://github.com/helm/charts/tree/master/stable/prometheus-operator
https://github.com/coreos/prometheus-operator/tree/master/Documentation
https://sysdig.com/blog/kubernetes-monitoring-prometheus-operator-part3/
https://github.com/cloudworkz/kube-eagle