Kubernetes

De Wiki Clusterlab.com.br
Ir para navegação Ir para pesquisar

Authentication

Get service accounts

#kubectl get serviceaccounts

NAME      SECRETS   AGE
default   1         6d
python    1         30m

Create service account

#kubectl create serviceaccount jenkins

Get service account details

#kubectl get serviceaccount jenkins -o yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: 2018-05-10T18:21:22Z
  name: jenkins
  namespace: default
  resourceVersion: "1210298"
  selfLink: /api/v1/namespaces/default/serviceaccounts/jenkins
  uid: 1acdecf6-547e-cd34-ab56-000d3a0984c9
secrets:
- name: jenkins-token-3f5ty

Create Secret TLS

kubectl create secret tls my-tls-cert --key /path/to/tls.key --cert /path/to/tls.crt

Get secret

#kubectl get secret jenkins-token-3f5ty -o yaml
apiVersion: v1
data:
  ca.crt: U1J0dXFmTUc4a0FTdGpmQTBOekdSN1Q2ZzVnc2YzcTFCQk1IRDNlRGZlc3Y3TUw5WlBJc1FKTnp5
aDZpc1phTlQ1Mk1ya2JTX21KbmQ5Z3FYNmxYYmN1bTlKOFlNV3Z5NUdOY05uVnJmeVVtMFJOYXB4
RFNXTkZuUGxfc011QjZiUVU2Ync4M3Z2N1FlSjE0X0EyOFhsRW5qeU5mbDdoVzcxR1ZZS2RVNnpr
TkwwX0RJanB0M1ZPbDlnVXVoMF81X2tmeGVibEYxYnpRSXdGQ2JBMWJfMmpUbGJpY0NnRXY2eDd5
Z05XQ0dXV1MxWEI4SkI4YW5PbVpPeHBSWFpQcDZ4YVdJb3R1cUtUbkpvUnFBQ2tadjNtblE5TEU1
V0lsWlUxWVNScFB2cldkY3ZqVDVBeHJ4M1ZGWVNDNXd3eFp2djBQcXN2RkxtVFhfanhodGFwYUhj
dVFQUGVYUVRpM1lUMklNMVJUeVdESzd5OGJlZEFnT0FSSVRzY1F2N3RqbE9jZTVDOEE5YnhLQVNK
Z0JscVZlcE4xc2loRVVrekhTZHBsVUZpS2pQcFZFSXc4UTc0S0VNMVRpNDFBbXkzV2NKNGNwSXYw
QkVRRDROX2lGUmVaZXVycERuSndOVm1Ya0dBbXZ2RTdiTXBDYzZ0UWZVWk5tazBtTnY1WmJGM1Vp
ZG9YMXhwbEc3QmRoTkpudDBvM3lnMlVVSWxWNTdjSmxMZ2trNXJvTHYxNzJmeWNvb1hJYWxvR3JG
WW5hQXpFUmFJdnNLTWlnanQwTlpCSVpZOGVldjBKeDBnaHZUZnhIQ2twZjJRTG9PZ2VWcEJYbnNs
MHBwR294TDljdENTYUdnRVNWRXAzZ1k3T1QyNkJYQVdnQzhGUVE5VDJrTktCU0hrM0IzS0o1MDZZ
MG45dFZBRWpRVDdDQnpYVDI1Vnk2VTJYU3VhMUY2Y01uYzZtTFl3VmNndXRncmVvNmszWDN6UElU
dWlraDZhN2ZXWEQwY2NIenFTb1FhVUhJQlRfWTlIbEROTlppN2J4QTNCZkZNcEltZG04WEZyUmpE
R0lpOVJyNGFRSVlBRVpXb29VNlp4cklRNHd1Z01udlNNeG5PblZVVkRPSlVLU3F2eFF0UzlYM1Fw
VmlXSktwcFhla3puQWxXeTJNZm5PQW1MSzhoVm80cGxRX0Z0bDNoR0YyN1Njd0NmSlhJMFI5RXhD
NEdiZ0U2RDFDZURrenl3UU1XeHpxc0xMNTY3R0RLQ1ZTMFVWVEJDQzhPUHY2YlRVMGdCZVBIVTFl
ZHZ6ZDAydU1GVzNZTnVKcTU0TFZSV3BVTEVzTTZwMgo=
  token: eXhjUzZsakI5dHlqNHBkbjl3ZDU4bVhvbkVaMzhmcHRrcmdtT1dDY2R6QlZyVTVlOVpZUnZKRHIx
NG1zWDUzdkxzaFBFX0habXpZVTh3VHBoMFJLaUdLR0lUeXdHMXRjOE1GQTBKMUVVd0J0N2NWV0RL
UlpoZDMzWTRTNFU4anlZN0xjMzRaeXhhbjVka1lZMWd6Z2NzeXRCNTNRVEtEc244eWtvT3IxbjYx
VVQ2Yl9iMjRHTF93blR1QTlsSU9sMTZabkxXazBzZ1VicVlOZWFETUpteEUxYUtnb1RlaVBZakda
eF9TZWNFemxzaTBXd3BzWGx5WU42VG5GclQwdF9XeDFoN3pYcnNFMXZTV1lFN29XSWVRMk9qeTV2
ZE5icWpra2NTX2dvbXRBbW91RUt3ckFKTEhDOTJ6dVlvcFVXT1ljQnk5UENDQldWelg0UVdDX2ZG
bW5kams4WF9VbzliaVFVS0JhRFlqeTkyRXZDbXl4aEhsY003SDJFWXBMbUJCOUMybGFqWFFVYUxs
emJiWmJrcVlBa1BtRmFXaG1QX2JTMnVZV0RKNjFfc3Z2MUY4RzdjeG9RR2dIeHJIUkV1bVV0Znpr
WFY1Zk1BOWYwV25sN01OY2h3WGtNNXlhbVFla29qOFBxQm1xWkNUQndFWklUckpRUFdpX3BuVFpI
REtGNFhxTEJIeDFOTDRldEs4TzZVMjhTN21VelJ5dHcyREtHb2RQTG4zRV83MGh5YUVXVUtEbE55
Vnp0djFqNHZ4ZzNYazQ2bkt2WF83N2YwekZBV3JtcHV2YUphMGxTOUtPa3ZVMkdLMHJzVWJyb2ls
YXdiMDRPdzN3RDMyY2h3RHFEaDRiUW1hYldkSUpXQUt4RHVjVnluOVc5OF9LWEwyajhMUHU0YTVl
azdIVW9UekRGYmN2TXFxTHJ1STlmaG5zZWVZbkpWc05LOHhhZnFxb0JZODNrMlpZOWVkY2UyRGtf
Z3pTN1ZiQjVHdzlrZEtuRXB1RWFzamE2VjhDOUpiZTVPS2U3THNYUkFLazVBVGNnVExCV2VmYXZs
eENfdTF5MWRnVDlqd2o1TFMyR1JxczU5eng3Z3UzQ2tRS3ViZHdRajZONmZWYVp2UnBqTjEwNjkz
RlJJTWIwYkpyU3VRX2d3YjA4cGJ5TzFiRlU2TU9kRHNqaUNuYlRfV3V5Q2pTdEdjMmNvclpKNk1M
aFcxZUFkRHlaVzBJc3I3TUo5RUFmM1FxdjFVNXdsZ3B1eEZyUFFSVGRlUnE5NGhDODRkTmVKdFEy
OFJtWEI2TXJJb2lEa2x4b2w1QzRZdHRGNHhNdG1rSAo=
kind: Secret
metadata:
  annotations:
    kubernetes.io/service-account.name: jenkins
    kubernetes.io/service-account.uid: 1acdecf6-547e-cd34-ab56-000d3a0984c9
  creationTimestamp: 2018-05-10T18:21:22Z
  name: jenkins-token-3f5ty
  namespace: default
  resourceVersion: "1210297"
  selfLink: /api/v1/namespaces/default/secrets/jenkins-token-3f5ty
  uid: 1acdecf6-547e-cd34-ab56-000d3a0984c9
type: kubernetes.io/service-account-token

Save token into a file

#echo "eXhjUzZsakI5dHlqNHBkbjl3ZDU4bVhvbkVaMzhmcHRrcmdtT1dDY2R6QlZyVTVlOVpZUnZKRHIx
NG1zWDUzdkxzaFBFX0habXpZVTh3VHBoMFJLaUdLR0lUeXdHMXRjOE1GQTBKMUVVd0J0N2NWV0RL
UlpoZDMzWTRTNFU4anlZN0xjMzRaeXhhbjVka1lZMWd6Z2NzeXRCNTNRVEtEc244eWtvT3IxbjYx
VVQ2Yl9iMjRHTF93blR1QTlsSU9sMTZabkxXazBzZ1VicVlOZWFETUpteEUxYUtnb1RlaVBZakda
eF9TZWNFemxzaTBXd3BzWGx5WU42VG5GclQwdF9XeDFoN3pYcnNFMXZTV1lFN29XSWVRMk9qeTV2
ZE5icWpra2NTX2dvbXRBbW91RUt3ckFKTEhDOTJ6dVlvcFVXT1ljQnk5UENDQldWelg0UVdDX2ZG
bW5kams4WF9VbzliaVFVS0JhRFlqeTkyRXZDbXl4aEhsY003SDJFWXBMbUJCOUMybGFqWFFVYUxs
emJiWmJrcVlBa1BtRmFXaG1QX2JTMnVZV0RKNjFfc3Z2MUY4RzdjeG9RR2dIeHJIUkV1bVV0Znpr
WFY1Zk1BOWYwV25sN01OY2h3WGtNNXlhbVFla29qOFBxQm1xWkNUQndFWklUckpRUFdpX3BuVFpI
REtGNFhxTEJIeDFOTDRldEs4TzZVMjhTN21VelJ5dHcyREtHb2RQTG4zRV83MGh5YUVXVUtEbE55
Vnp0djFqNHZ4ZzNYazQ2bkt2WF83N2YwekZBV3JtcHV2YUphMGxTOUtPa3ZVMkdLMHJzVWJyb2ls
YXdiMDRPdzN3RDMyY2h3RHFEaDRiUW1hYldkSUpXQUt4RHVjVnluOVc5OF9LWEwyajhMUHU0YTVl
azdIVW9UekRGYmN2TXFxTHJ1STlmaG5zZWVZbkpWc05LOHhhZnFxb0JZODNrMlpZOWVkY2UyRGtf
Z3pTN1ZiQjVHdzlrZEtuRXB1RWFzamE2VjhDOUpiZTVPS2U3THNYUkFLazVBVGNnVExCV2VmYXZs
eENfdTF5MWRnVDlqd2o1TFMyR1JxczU5eng3Z3UzQ2tRS3ViZHdRajZONmZWYVp2UnBqTjEwNjkz
RlJJTWIwYkpyU3VRX2d3YjA4cGJ5TzFiRlU2TU9kRHNqaUNuYlRfV3V5Q2pTdEdjMmNvclpKNk1M
aFcxZUFkRHlaVzBJc3I3TUo5RUFmM1FxdjFVNXdsZ3B1eEZyUFFSVGRlUnE5NGhDODRkTmVKdFEy
OFJtWEI2TXJJb2lEa2x4b2w1QzRZdHRGNHhNdG1rSAo=" | base64 -d > token

Query the master to validate the token

#curl https://k8s-master-01/api --header "Authorization: Bearer $(cat token)" --insecure -m 5

{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.83.55:443"
    }
  ]
}

Add permission to the user jenkins

#kubectl describe clusterrolebindings cluster-admin

Basic Commands

List

PODs on ALL Namespaces

kubectl get pods --all-namespaces

All Namespaces

kubectl get namespaces

Remote execution/console login

kubectl exec -it podname -n namespace -- bash

Cloud Provider

VMWARE

Create a Cluster

CronJob

kubectl create job --from=cronjob/<name of cronjob> <name of job>
kubectl create job --from=cronjob/pgdump pgdump-manual-001

Helm

Kubelet

PID CGROUP limit in kubelet

$ kubectl proxy --port=8001 &

Dump configuration from the configz endpoint.

$ NODE_NAME="the-name-of-the-node-you-are-reconfiguring"
$ curl -sSL "http://localhost:8001/api/v1/nodes/${NODE_NAME}/proxy/configz" | jq '.kubeletconfig|.kind="KubeletConfiguration"|.apiVersion="kubelet.config.k8s.io/v1beta1"' > kubelet_configz_${NODE_NAME}

Edit the dumped file and add the "kind" and "apiVersion" keys.
Add the "podPidsLimit" key.

{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "staticPodPath": "/etc/kubernetes/manifests",
  "syncFrequency": "1m0s",
  "podPidsLimit": 300,
  "fileCheckFrequency": "20s",
  "httpCheckFrequency": "20s",
  "address": "0.0.0.0",
  "port": 10250,
  "tlsCertFile": "/etc/kubernetes/certs/kubeletserver.crt",
  "tlsPrivateKeyFile": "/etc/kubernetes/certs/kubeletserver.key",
  "tlsCipherSuites": [
.
.
.

Add new configmap to cluster

$ kubectl -n kube-system create configmap my-node-config --from-file=kubelet=kubelet_configz_${NODE_NAME} --append-hash -o yaml

Edit node and add "configSource" key in spec

$ kubectl edit node ${NODE_NAME}

YAML

configSource:
  configMap:
    name: CONFIG_MAP_NAME # replace CONFIG_MAP_NAME with the name of the ConfigMap
    namespace: kube-system
    kubeletConfigKey: kubelet

Check configuration for node

$ kubectl get no ${NODE_NAME} -o json | jq '.status.config'

OUTPUT:

{
  "active": {
    "configMap": {
      "kubeletConfigKey": "kubelet",
      "name": "my-node-config-xxxxxxxxxx",
      "namespace": "kube-system",
      "resourceVersion": "0000000000",
      "uid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    }
  },
  "assigned": {
    "configMap": {
      "kubeletConfigKey": "kubelet",
      "name": "my-node-config-xxxxxxxxxx",
      "namespace": "kube-system",
      "resourceVersion": "0000000000",
      "uid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    }
  },
  "lastKnownGood": {
    "configMap": {
      "kubeletConfigKey": "kubelet",
      "name": "my-node-config-xxxxxxxxxx",
      "namespace": "kube-system",
      "resourceVersion": "0000000000",
      "uid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    }
  }
}

DOCKER Approach

$ docker run -it --pids-limit 100 <Image_ID>

Links

Certification

Micro Clusters

Kind

# Install the kind binary (v0.17.0, Linux amd64) into /usr/local/bin.
curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.17.0/kind-linux-amd64
chmod +x ./kind
sudo mv ./kind /usr/local/bin/kind

# Create a two-node cluster (one worker, one control-plane).  The
# control-plane node gets the ingress-ready=true label and maps host
# ports 80/443 into the node so an ingress controller can be reached
# from the host.
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: worker
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
EOF
# Save the cluster kubeconfig and switch to it.
# NOTE(review): kcfg looks like a local helper/alias for selecting a
# kubeconfig — confirm it exists in your shell environment.
kind get kubeconfig > $HOME/.kube/configs/kind
kcfg kind
# Install ingress-nginx via Helm into its own namespace.
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
kubectl create ns ingress
helm install ingress -n ingress ingress-nginx/ingress-nginx

Metrics

add '--kubelet-insecure-tls' to the metrics-server deployment:

kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.4/components.yaml

Network

Ingress

Ingress Class

Troubleshoot - ingress does not contain a valid IngressClass

Error

$ stern -n ingress ingress-nginx-controller
7 store.go:361] "Ignoring ingress because of error while validating ingress class" ingress="myapp/myapp" error="ingress does not contain a valid IngressClass"

Solution

$ kubectl -n myapp edit ingress myapp
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp
  annotations:
    nginx.ingress.kubernetes.io/proxy-buffer-size: "32k"
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-body-size: "512m"
    kubernetes.io/ingress.class: "nginx"
  ...
  ...

Access pod by hostname

FQDN:
auto-generated-name.my-svc.my-namespace.svc.cluster.local

LoadBalancer

Nodes

Request and limits from describe

# Extract one resource column from base64-encoded `kubectl describe node`
# output.
#   $1 - base64-encoded "Allocated resources" cpu/memory lines
#   $2 - resource to match (cpu | memory)
#   $3 - which figure to print: "request" (field 3) or "limit" (field 5)
# Prints the selected field (e.g. "(25%)"); returns 1 on bad arguments.
function GET_USAGE() {
    if [ $# -ne 3 ]
    then
        echo ERRO ARGS
        # 'break' is only valid inside a loop; a function must return.
        return 1
    fi
    case $3 in
        request)
            COL=3
            ;;
        limit)
            COL=5
            ;;
        *)
            echo ERROR request/limit
            return 1
            ;;
    esac
    # Quote "$1" so wrapped (multi-line) base64 decodes intact.
    echo "$1" | base64 -d | grep "$2" | awk -v col="$COL" '{print $col}'
}
# Print one row per node with CPU/memory request and limit percentages,
# parsed from `kubectl describe node` via GET_USAGE, aligned by column.
FIRST=0
kubectl get nodes --no-headers |\
awk '{print $1}' |\
while read NODE
do
    # Progress goes to stderr so it is not swallowed by `column -t`.
    echo "$NODE" >&2
    if [ "$FIRST" -eq 0 ]
    then
        echo NODE CPU_REQUEST MEMORY_REQUEST CPU_LIMIT  MEMORY_LIMIT
        FIRST=1
    fi
    # -w0 keeps the base64 on a single line; without it the wrapped
    # output word-splits when $DATA is expanded, so GET_USAGE receives
    # the wrong argument count.
    DATA=$(kubectl describe node "$NODE" | egrep "^  cpu|^  memory" | grep "%" | base64 -w0)
    echo \
        "$NODE" \
        $(GET_USAGE "$DATA" cpu request) \
        $(GET_USAGE "$DATA" memory request) \
        $(GET_USAGE "$DATA" cpu limit) \
        $(GET_USAGE "$DATA" memory limit)
done |\
column -t

OUTPUT

NODE        CPU_REQUEST  MEMORY_REQUEST  CPU_LIMIT  MEMORY_LIMIT
node000000  (20%)        (14%)           (90%)      (30%)
node000002  (30%)        (49%)           (95%)      (50%)
node000003  (50%)        (38%)           (96%)      (60%)

Drain

kubectl drain --ignore-daemonsets --delete-emptydir-data <node-name>

Auto Drain

Used to speed up node upgrades

#!/bin/bash

# Repeatedly drains every cordoned (SchedulingDisabled) node whose name
# matches $1 (intended to be the target version/pool substring), then
# force-deletes the pods the drain reported as "evicting" so the drain
# does not wait out graceful termination.  Loops forever; stop with ^C.
# NOTE(review): the leading "sleep 0.5;" on the kubectl calls appears to
# be deliberate API-server rate limiting — confirm before removing.
function AUTO_DRAIN_NODE() {
    if [ $# -eq 0 ]
    then
        echo Falta de argumentos
        echo $0 \<version\>
        exit 1
    else
        while true
        do
            date
sleep 0.5;kubectl get pods  -o wide -A --no-headers | egrep -v "1/1|2/2|3/3|4/4|5/5|6/6|Completed"
            # Status line: pods not fully Ready, and nodes still matching $1.
            echo \
pods out=$(sleep 0.5;kubectl get pods  -o wide -A --no-headers | egrep -v "1/1|2/2|3/3|4/4|5/5|6/6|Completed"  | wc -l) \
nodes not outdated=$(sleep 0.5;kubectl get nodes --no-headers | grep -w $1 | wc -l)
            sleep 5
            # Only nodes already cordoned by the upgrade are drained.
            sleep 0.5;kubectl get nodes | \
            grep $1 | \
            grep SchedulingDisabled | \
            awk '{print $1}'| \
            while read NODE
            do
            echo Draining node $NODE
                # Short timeout on purpose: collect the "evicting pod"
                # list, then delete those pods directly below.
                sleep 0.5;kubectl drain --ignore-daemonsets --delete-emptydir-data --skip-wait-for-delete-timeout=0 --timeout=10s  $NODE 2>/dev/null| \
                grep "evicting pod" | \
                sort -u  | \
                awk '{print $3}'| \
                while IFS=/ read NS POD
                do
                echo Deleting POD $POD from namespace $NS \($NODE\)
                    sleep 0.5;kubectl -n $NS delete pod $POD
                done
            done
        done
    fi
}
AUTO_DRAIN_NODE $1

Run

kubectl run -it -n namespace ubuntu-sandbox --rm --image=ubuntu -- bash

Taint

kubectl taint nodes <node-name> key1=value1:NoSchedule-

Persistent Storage

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: task-pv-claim
spec:
  # Must match the PV's storageClassName below or the claim never binds.
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
# The two manifests were concatenated without a "---" document
# separator, which is not a valid multi-document YAML stream.
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: task-pv-storage
  labels:
    type: local
spec:
  # Was "managed-standard", which could not satisfy the PVC above
  # (it requests storageClassName: manual).
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"

kubectl get pv task-pv-storage

---
kind: Pod
apiVersion: v1
metadata:
  name: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        # Must name the PVC (task-pv-claim); the original referenced the
        # PV name task-pv-storage, which a Pod cannot claim directly.
        claimName: task-pv-claim
  containers:
    - name: task-pv-container
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage

PODs

Port Forward

kubectl port-forward pod/kuard2 8000:8080
kubectl port-forward service/servicename 8000:8080

Node affinity

spec:
  .
  .
  .
  strategy:
  .
  .
  .
  template:
    metadata:
      .
      .
      .
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: agentpool
                operator: In
                values:
                - svc01
                - svc02

Scale

kubectl scale --replicas=3 deployment/foo    

Host Affinity

spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: kubernetes.io/hostname
            operator: In
            values:
            - cluster-nodepoolname-vmisntance00.domain
            - cluster-nodepoolname-vmisntance01.domain
            - cluster-nodepoolname-vmisntance02.domain
            - cluster-nodepoolname-vmisntance03.domain

By agentpool

spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: agentpool
            operator: In
            values:
            - ssd

Tools


API