Kubernetes
Authentication
Create Secret TLS
kubectl create secret tls my-tls-cert --key /path/to/tls.key --cert /path/to/tls.crt
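The secret can then be referenced from an Ingress tls section; a minimal sketch, assuming a hypothetical myapp Service and hostname:
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp
spec:
  tls:
  - hosts:
    - myapp.example.com
    secretName: my-tls-cert    # the secret created above
  rules:
  - host: myapp.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: myapp
            port:
              number: 80
EOF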
Query the master to validate the token
# curl https://k8s-master-01/api --header "Authorization: Bearer $(cat token)" --insecure -m 5
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.83.55:443"
    }
  ]
}
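On clusters running v1.24 or newer, the bearer token file used above can be minted for a service account with kubectl create token (the ServiceAccount name is an assumption):
kubectl create token default > token    # short-lived token for the "default" ServiceAccount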
Cloud Provider
VMware
- vSphere Cloud Provider
- Configuring vSphere Volumes for Kubernetes
- PersistentVolume Storage Options on vSphere
- vSAN as Persistent Storage for a Kubernetes Cluster
- Deploying a Kubernetes Cluster on vSphere with CSI and CPI
- Dynamic Provisioning and StorageClass API
- LAB GUIDE - KUBERNETES AND STORAGE WITH THE VSPHERE CLOUD PROVIDER - STEP BY STEP
Create a Cluster
CronJob
kubectl create job --from=cronjob/<name of cronjob> <name of job>
kubectl create job --from=cronjob/pgdump pgdump-manual-001
Events
Get events sorted by their last-seen timestamp.
kubectl get events --sort-by='.lastTimestamp'
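To surface only problems, the same sort combines with a field selector:
kubectl get events -A --field-selector type=Warning --sort-by='.lastTimestamp'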
Helm
Kubelet
PID CGROUP limit in kubelet
$ kubectl proxy --port=8001 &
Dump configuration from the configz endpoint.
$ NODE_NAME="the-name-of-the-node-you-are-reconfiguring"
$ curl -sSL "http://localhost:8001/api/v1/nodes/${NODE_NAME}/proxy/configz" | jq '.kubeletconfig|.kind="KubeletConfiguration"|.apiVersion="kubelet.config.k8s.io/v1beta1"' > kubelet_configz_${NODE_NAME}
Edit the dumped file (the jq filter above already sets the kind and apiVersion keys).
Add the podPidsLimit key (an integer):
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"staticPodPath": "/etc/kubernetes/manifests",
"syncFrequency": "1m0s",
"PodPidsLimit": "300",
"fileCheckFrequency": "20s",
"httpCheckFrequency": "20s",
"address": "0.0.0.0",
"port": 10250,
"tlsCertFile": "/etc/kubernetes/certs/kubeletserver.crt",
"tlsPrivateKeyFile": "/etc/kubernetes/certs/kubeletserver.key",
"tlsCipherSuites": [
.
.
.
Add the new ConfigMap to the cluster:
$ kubectl -n kube-system create configmap my-node-config --from-file=kubelet=kubelet_configz_${NODE_NAME} --append-hash -o yaml
Edit the node and add the "configSource" key to its spec (note: dynamic kubelet configuration via configSource was removed in Kubernetes 1.24; on newer clusters edit the kubelet config file on the node instead):
$ kubectl edit node ${NODE_NAME}
YAML
configSource:
configMap:
name: CONFIG_MAP_NAME # replace CONFIG_MAP_NAME with the name of the ConfigMap
namespace: kube-system
kubeletConfigKey: kubelet
Check the configuration status for the node:
$ kubectl get no ${NODE_NAME} -o json | jq '.status.config'
OUTPUT:
{
"active": {
"configMap": {
"kubeletConfigKey": "kubelet",
"name": "my-node-config-xxxxxxxxxx",
"namespace": "kube-system",
"resourceVersion": "0000000000",
"uid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
},
"assigned": {
"configMap": {
"kubeletConfigKey": "kubelet",
"name": "my-node-config-xxxxxxxxxx",
"namespace": "kube-system",
"resourceVersion": "0000000000",
"uid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
},
"lastKnownGood": {
"configMap": {
"kubeletConfigKey": "kubelet",
"name": "my-node-config-xxxxxxxxxx",
"namespace": "kube-system",
"resourceVersion": "0000000000",
"uid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
}
}
Docker Approach
$ docker run -it --pids-limit 100 <Image_ID>
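To confirm the limit took effect, read the pids controller from inside the container; a quick check assuming cgroup v1 (under cgroup v2 the file is /sys/fs/cgroup/pids.max):
docker run --rm --pids-limit 100 ubuntu cat /sys/fs/cgroup/pids/pids.max
# expected output: 100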
Links
- https://kubernetes.io/
- kubectl Cheat Sheet
- https://training.play-with-kubernetes.com/
- https://labs.play-with-k8s.com/
- Kubernetes Failure Stories
- https://crossplane.io/
- Matchbox Kubernetes network boot
- Debugging network stalls on Kubernetes
- https://kubernetes.io/docs/home/
Certification
- https://www.cncf.io/certification/expert/cka/
- https://kubernetes.io/docs/tasks/
- https://github.com/arush-sal/cka-practice-environment
- https://github.com/kelseyhightower/kubernetes-the-hard-way
- https://jvns.ca/blog/2017/08/05/how-kubernetes-certificates-work/
Micro Clusters
Kind
curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.17.0/kind-linux-amd64
chmod +x ./kind
sudo mv ./kind /usr/local/bin/kind
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: worker
- role: control-plane
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP
EOF
kind get kubeconfig > $HOME/.kube/configs/kind
kcfg kind    # kcfg is a local helper that switches the active kubeconfig; plain "export KUBECONFIG=$HOME/.kube/configs/kind" works too
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
kubectl create ns ingress
helm install ingress -n ingress ingress-nginx/ingress-nginx
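A quick smoke test for the ingress path, using the KUARD demo image and the host ports mapped in the kind config above (names and image tag are assumptions):
kubectl create deployment kuard --image=gcr.io/kuar-demo/kuard-amd64:blue
kubectl expose deployment kuard --port=8080
kubectl create ingress kuard --class=nginx --rule="localhost/*=kuard:8080"
curl http://localhost/    # should answer via ingress-nginx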
Metrics
- https://gist.github.com/sanketsudake/a089e691286bf2189bfedf295222bd43
Add '--kubelet-insecure-tls' to the metrics-server deployment:
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.4/components.yaml
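One way to add the flag without hand-editing the manifest is a JSON patch against the deployment (assuming the stock components.yaml install in kube-system):
kubectl -n kube-system patch deployment metrics-server --type=json \
  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--kubelet-insecure-tls"}]'
kubectl top nodes    # should report usage once the rollout finishes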
Network
Ingress
- https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
- https://docs.microsoft.com/pt-br/azure/aks/ingress-static-ip
Ingress Class
Troubleshoot - ingress does not contain a valid IngressClass
- Nginx Ingress - Breaking change, Ingress.class now required
Error
$ stern -n ingress ingress-nginx-controller
7 store.go:361] "Ignoring ingress because of error while validating ingress class" ingress="myapp/myapp" error="ingress does not contain a valid IngressClass"
Solution
$ kubectl -n myapp edit ingress myapp
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myapp
annotations:
nginx.ingress.kubernetes.io/proxy-buffer-size: "32k"
nginx.ingress.kubernetes.io/affinity: "cookie"
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/proxy-body-size: "512m"
kubernetes.io/ingress.class: "nginx"
...
...
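The kubernetes.io/ingress.class annotation is deprecated; on networking.k8s.io/v1 the same fix can be applied through spec.ingressClassName, e.g. with a merge patch:
kubectl -n myapp patch ingress myapp --type=merge -p '{"spec":{"ingressClassName":"nginx"}}'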
Access pod by hostname
FQDN:
auto-generated-name.my-svc.my-namespace.svc.cluster.local
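That record only exists when the pod sets hostname and subdomain matching a headless Service; a minimal sketch with hypothetical names:
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: my-svc
spec:
  clusterIP: None        # headless: required for per-pod DNS records
  selector:
    app: demo
  ports:
  - port: 80
---
apiVersion: v1
kind: Pod
metadata:
  name: demo
  labels:
    app: demo
spec:
  hostname: demo
  subdomain: my-svc      # must match the headless Service name
  containers:
  - name: web
    image: nginx
EOF
# resolves in-cluster as demo.my-svc.default.svc.cluster.local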
LoadBalancer
- Azure LoadBalancer
Nodes
Requests and limits from describe
# Print the request/limit percentage column from a node's describe output.
# $1 = base64-encoded "Allocated resources" lines, $2 = cpu|memory, $3 = request|limit
function GET_USAGE() {
  if [ $# -ne 3 ]
  then
    echo "ERROR: expected 3 arguments" >&2
    return 1
  fi
  case $3 in
    request)
      COL=3
      ;;
    limit)
      COL=5
      ;;
    *)
      echo "ERROR: third argument must be request or limit" >&2
      return 1
      ;;
  esac
  echo $1 | base64 -d | grep $2 | awk '{print $'$COL'}'
}
FIRST=0
kubectl get nodes --no-headers |\
awk '{print $1}' |\
while read NODE
do
  echo $NODE >&2
  if [ $FIRST -eq 0 ]
  then
    echo NODE CPU_REQUEST MEMORY_REQUEST CPU_LIMIT MEMORY_LIMIT
    FIRST=1
  fi
  # -w0 keeps the base64 on one line so it survives word splitting below
  DATA=$(kubectl describe node $NODE | egrep "^  cpu|^  memory" | grep "%" | base64 -w0)
  echo \
    $NODE \
    $(GET_USAGE $DATA cpu request) \
    $(GET_USAGE $DATA memory request) \
    $(GET_USAGE $DATA cpu limit) \
    $(GET_USAGE $DATA memory limit)
done |\
column -t
OUTPUT
NODE        CPU_REQUEST  MEMORY_REQUEST  CPU_LIMIT  MEMORY_LIMIT
node000000  (20%)        (14%)           (90%)      (30%)
node000002  (30%)        (49%)           (95%)      (50%)
node000003  (50%)        (38%)           (96%)      (60%)
Drain
kubectl drain --ignore-daemonsets --delete-emptydir-data <node-name>
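Drained nodes stay cordoned; once maintenance is done, re-enable scheduling:
kubectl uncordon <node-name>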
Auto Drain
Used to speed up node upgrades: the loop below keeps draining cordoned nodes that still run the old kubelet version and force-deletes pods stuck in eviction.
#!/bin/bash
# Keeps draining cordoned nodes that still run the given (old) kubelet version
# and deletes pods whose eviction does not finish within the drain timeout.
function AUTO_DRAIN_NODE() {
  if [ $# -eq 0 ]
  then
    echo "Missing argument"
    echo "$0 <version>"
    exit 1
  else
    while true
    do
      date
      # show pods that are not fully ready yet (and not Completed)
      sleep 0.5;kubectl get pods -o wide -A --no-headers | egrep -v "1/1|2/2|3/3|4/4|5/5|6/6|Completed"
      echo \
        pods out=$(sleep 0.5;kubectl get pods -o wide -A --no-headers | egrep -v "1/1|2/2|3/3|4/4|5/5|6/6|Completed" | wc -l) \
        nodes on old version=$(sleep 0.5;kubectl get nodes --no-headers | grep -w $1 | wc -l)
      sleep 5
      # drain every cordoned node that still runs the old version
      sleep 0.5;kubectl get nodes | \
        grep $1 | \
        grep SchedulingDisabled | \
        awk '{print $1}' | \
        while read NODE
        do
          echo Draining node $NODE
          sleep 0.5;kubectl drain --ignore-daemonsets --delete-emptydir-data --skip-wait-for-delete-timeout=0 --timeout=10s $NODE 2>/dev/null | \
            grep "evicting pod" | \
            sort -u | \
            awk '{print $3}' | \
            while IFS=/ read NS POD
            do
              # evictions that did not finish in time: delete the pod directly
              echo Deleting POD $POD from namespace $NS \($NODE\)
              sleep 0.5;kubectl -n $NS delete pod $POD
            done
        done
    done
  fi
}
AUTO_DRAIN_NODE $1
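Usage sketch, assuming the script is saved as auto-drain.sh; the argument is the old version string exactly as it appears in the VERSION column of kubectl get nodes:
bash auto-drain.sh v1.27.7    # v1.27.7 is a placeholder version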
Run
kubectl run -it -n namespace ubuntu-sandbox --rm --image=ubuntu -- bash
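For a shell on the node itself rather than a sandbox pod, kubectl debug can run a pod in the node's host namespaces (the node name is a placeholder):
kubectl debug node/node000000 -it --image=ubuntu
# inside the debug pod the node's root filesystem is mounted at /host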
Taint
Add a taint:
kubectl taint nodes <node-name> key1=value1:NoSchedule
Remove it again (note the trailing '-'):
kubectl taint nodes <node-name> key1=value1:NoSchedule-
Persistent Storage
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: task-pv-claim
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 3Gi
kind: PersistentVolume
apiVersion: v1
metadata:
name: task-pv-storage
labels:
type: local
spec:
storageClassName: manual   # must match the PVC's storageClassName so the claim can bind
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/mnt/data"
kubectl get pv task-pv-storage
kind: Pod
apiVersion: v1
metadata:
name: task-pv-pod
spec:
volumes:
- name: task-pv-storage
persistentVolumeClaim:
claimName: task-pv-claim   # the PVC name, not the PV name
containers:
- name: task-pv-container
image: nginx
ports:
- containerPort: 80
name: "http-server"
volumeMounts:
- mountPath: "/usr/share/nginx/html"
name: task-pv-storage
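A quick probe that the claim bound and the mount is writable (not part of the original manifests):
kubectl get pvc task-pv-claim     # STATUS should be Bound
kubectl exec task-pv-pod -- sh -c 'echo ok > /usr/share/nginx/html/index.html'
kubectl exec task-pv-pod -- cat /usr/share/nginx/html/index.html    # prints: ok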
PODs
- https://coreos.com/rkt/
- https://www.docker.com/
- https://hub.docker.com/
Get Pods sorted
kubectl get pods -A --sort-by=.status.startTime
kubectl get pods -A --sort-by=.metadata.creationTimestamp
Port Forward
kubectl port-forward pod/kuard2 8000:8080
kubectl port-forward service/servicename 8000:8080
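The forward binds to 127.0.0.1 by default; to reach it from another machine, bind all interfaces:
kubectl port-forward --address 0.0.0.0 service/servicename 8000:8080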
Node affinity
spec:
.
.
.
strategy:
.
.
.
template:
metadata:
.
.
.
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: agentpool
operator: In
values:
- svc01
- svc02
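The agentpool key only matches if the nodes actually carry that label; checking and setting it by hand (pool names are assumptions; on AKS the label is pre-set per agent pool):
kubectl get nodes -L agentpool                  # show the label as a column
kubectl label node node000000 agentpool=svc01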
Scale
- What types of pods can prevent CA from removing a node?
kubectl scale --replicas=3 deployment/foo
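For load-based scaling instead of a fixed count, the same deployment can be given an HPA (thresholds are placeholders; requires metrics-server):
kubectl autoscale deployment foo --min=2 --max=10 --cpu-percent=80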
Host Affinity
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- cluster-nodepoolname-vminstance00.domain
- cluster-nodepoolname-vminstance01.domain
- cluster-nodepoolname-vminstance02.domain
- cluster-nodepoolname-vminstance03.domain
By agentpool
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: agentpool
operator: In
values:
- ssd
Tools
- KubeOMatic
- fabric8 with helm
- Krew, the plugin manager for kubectl command-line tool
- KUARD
- Kubectl Node Shell
- NGINX
- K9S
  - Github k9s
- Rancher
- STERN
- Tekton
  - Tutorial
API
- Spring Cloud Kubernetes