Kubernetes
Useful conveniences
alias k=kubectl
# for generating config files. usage:
# bash: k create deployment nginx --image=nginx:alpine $cfg > deploy.yaml
export cfg="--dry-run=client -o yaml"
# zsh: k create deployment nginx --image=nginx:alpine cfg > deploy.yaml
alias -g cfg="--dry-run=client -o yaml"
# bash: k delete deployment nginx $now
export now="--force --grace-period=0"
# zsh: k delete deployment nginx now
alias -g now="--force --grace-period=0"
# yq: parse yaml, colorize output. Install via package manager.
helm show values bitnami/apache | yq e
# Send a curl or wget request from within a temporary pod
function req() {
  kubectl run tmp --image=nginx:alpine --rm -i --restart=Never -- sh -c "${1}"
}
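Example usage (some-svc is a hypothetical Service reachable from the current namespace; busybox wget is used since it ships with alpine):
req "wget -qO- -T 2 http://some-svc:80"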
# Quick overview of resources in the current namespace
k get pod,deploy,svc
k get all
k describe svc
Namespaces
k get namespaces
k get ns
k create ns new-namespace
k delete ns new-namespace
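To avoid typing -n on every command, the current context can be pointed at a namespace:
k config set-context --current --namespace=new-namespace
k config view --minify | grep namespace: # verify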
Pods
k get pods
# No resources found in default namespace.
k run pod-name --image=nginx
# pod/pod-name created
k get pods
# NAME       READY   STATUS              RESTARTS   AGE
# pod-name   0/1     ContainerCreating   0          3s
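A few follow-up commands for the pod created above (the last one uses the now alias/variable defined earlier):
k describe pod pod-name # events and container state
k logs pod-name # container stdout
k delete pod pod-name now # zsh global alias; bash: k delete pod pod-name $now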
Jobs
k -n neptune create job new-job --image=busybox:1.31.0 cfg \
  -- sh -c "sleep 2 && echo done" > job.yaml
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  name: new-job
  namespace: neptune
spec:
  template:
    metadata:
      creationTimestamp: null
    spec:
      containers:
      - command:
        - sh
        - -c
        - sleep 2 && echo done
        image: busybox:1.31.0
        name: new-job
        resources: {}
      restartPolicy: Never
status: {}
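One way to create the Job from the generated file and confirm it completed:
k create -f job.yaml
k -n neptune get job,pod
k -n neptune logs job/new-job # expect: done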
CronJobs
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox:1.28
            imagePullPolicy: IfNotPresent
            command:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
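Roughly the same CronJob can be generated imperatively (a sketch, reusing the cfg alias from above):
k create cronjob hello --image=busybox:1.28 --schedule="* * * * *" cfg \
  -- /bin/sh -c "date; echo Hello from the Kubernetes cluster" > cronjob.yaml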
ServiceAccount
k get serviceaccounts
Using the ServiceAccount in a Deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: neptune-10ab
  name: neptune-10ab
  namespace: neptune
spec:
  replicas: 1
  selector:
    matchLabels:
      app: neptune-10ab
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: neptune-10ab
    spec:
      serviceAccountName: neptune-sa-v2 # added to generated yaml
      containers:
      - image: httpd:2.4-alpine
        name: httpd
        resources: {}
status: {}
Defining the ServiceAccount:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: neptune-sa-v2
  namespace: neptune
automountServiceAccountToken: false
# ...
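The ServiceAccount itself can be generated imperatively (automountServiceAccountToken is then added by editing the YAML):
k -n neptune create sa neptune-sa-v2 cfg > sa.yaml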
Probes
readinessProbe and livenessProbe can be associated with a given container, to
wait for readiness and poll for health, respectively.
Key options: initialDelaySeconds and periodSeconds
docs: Liveness, Readiness, Startup Probes
ReadinessProbe
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod6
  name: pod6
spec:
  containers:
  - name: pod6
    image: busybox:1.31.0
    command:
    - sh
    - -c
    - touch /tmp/ready && sleep 1d
    resources: {}
    readinessProbe:
      exec:
        command:
        - sh
        - -c
        - cat /tmp/ready
      initialDelaySeconds: 5
      periodSeconds: 10
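Whether the probe passes is visible in the READY column and the pod's events:
k get pod pod6 # READY stays 0/1 until the probe succeeds
k describe pod pod6 | grep -i -A3 readiness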
LivenessProbe
Liveness probes can be configured to execute a command or to make an HTTP, TCP, or gRPC request:
# . . .
spec:
  containers:
  - name: pod6
    image: busybox:1.31.0
    command:
    - sh
    - -c
    - touch /tmp/ready && sleep 1d
    resources: {}
    livenessProbe:
      tcpSocket:
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10
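A sketch of the httpGet variant mentioned above, placed under a container just like the tcpSocket example (path and port are illustrative):
livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
  initialDelaySeconds: 5
  periodSeconds: 10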
Deployments
k get deploy,pod
Rollouts
k rollout history deploy deployment-name
k rollout history deploy deployment-name --revision 2
k rollout undo deploy deployment-name
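Typical workflow around these rollout commands (deployment and image names are placeholders):
k create deployment deployment-name --image=nginx:1.21
k scale deploy deployment-name --replicas=3
k set image deploy deployment-name nginx=nginx:1.22 # creates revision 2
k rollout status deploy deployment-name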
Secrets
k -n neptune get sa # get overview
k -n neptune get secrets # shows all secrets of namespace
k -n neptune get secrets -oyaml | grep annotations -A 1 # shows secrets with first annotation
k -n neptune describe secret neptune-secret-1 # shows data keys and sizes (values stay base64-encoded)
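To actually decode a value, read it from .data and pipe it through base64 (assumes the secret has a key named user):
k -n neptune get secret neptune-secret-1 -o jsonpath='{.data.user}' | base64 -d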
Generic
k -n moon create secret generic secret1 --from-literal user=test --from-literal pass=pwd
apiVersion: v1
data:
  pass: cHdk
  user: dGVzdA==
kind: Secret
metadata:
  creationTimestamp: null
  name: secret1
  namespace: moon
Volume and ENV Secrets
apiVersion: v1
kind: Pod
metadata:
  # . . .
spec:
  volumes:
  - name: secret2-volume
    secret:
      secretName: secret2 # volume secrets
  containers:
  - name: secret-handler
    # . . .
    volumeMounts:
    - name: secret2-volume
      mountPath: /tmp/secret2
    env:
    # . . .
    - name: SECRET
      valueFrom:
        secretKeyRef:
          name: secret1
          key: user
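Once such a pod is running, both forms can be checked from inside it (pod name is a placeholder):
k exec <pod-name> -- env | grep SECRET
k exec <pod-name> -- ls /tmp/secret2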
Helm
List releases
helm -n mercury ls
helm -n mercury ls -a # show all releases, including pending or failed ones
Uninstall a release
helm -n mercury uninstall internal-issue-report-apiv1
Upgrading a release
helm repo list
# NAME      URL
# bitnami   https://charts.bitnami.com/bitnami
helm repo update
# Hang tight while we grab the latest from your chart repositories...
# ...Successfully got an update from the "bitnami" chart repository
# Update Complete. ⎈Happy Helming!⎈
helm search repo nginx
# NAME            CHART VERSION   APP VERSION   DESCRIPTION
# bitnami/nginx   9.5.2           1.21.1        Chart for the nginx server
helm -n mercury upgrade internal-issue-report-apiv2 bitnami/nginx
# Release "internal-issue-report-apiv2" has been upgraded. Happy Helming!
# NAME: internal-issue-report-apiv2
# LAST DEPLOYED: Tue Aug 31 17:40:42 2021
# NAMESPACE: mercury
# STATUS: deployed
# REVISION: 2
# TEST SUITE: None
# ...
Install a release with custom values:
helm -n mercury install internal-issue-report-apache bitnami/apache --set replicaCount=2
# NAME: internal-issue-report-apache
# LAST DEPLOYED: Tue Aug 31 17:57:23 2021
# NAMESPACE: mercury
# STATUS: deployed
# REVISION: 1
# TEST SUITE: None
# ...
helm -n mercury install internal-issue-report-apache bitnami/apache \
--set replicaCount=2 \
--set image.debug=true
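The user-supplied values of a release can be checked afterwards:
helm -n mercury get values internal-issue-report-apache
# USER-SUPPLIED VALUES:
# replicaCount: 2
# ...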
Logging Sidecar
Refers to an associated container that shares a volume with a primary container, on which
logs are stored. The primary container writes the logs and the sidecar reads them and
prints them to stdout, making them viewable via kubectl logs.
apiVersion: apps/v1
kind: Deployment
# . . .
spec:
  # . . .
  template:
    # . . .
    spec:
      # . . .
      volumes:
      - name: logs
        emptyDir: {}
      containers:
      - name: cleaner-con
        # . . .
        # [ writes to /var/log/cleaner/cleaner.log ]
        # . . .
        volumeMounts:
        - name: logs
          mountPath: /var/log/cleaner
      - name: logger-con
        # . . .
        command: ["sh", "-c", "tail -f /var/log/cleaner/cleaner.log"]
        volumeMounts:
        - name: logs
          mountPath: /var/log/cleaner
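The sidecar's output is then read like any other container's by naming it with -c (pod name is a placeholder):
k logs <cleaner-pod> -c logger-con -f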
Init Containers
A container that runs to completion before the app containers start, doing prep work for them.
apiVersion: apps/v1
kind: Deployment
# . . .
spec:
  # . . .
  template:
    # . . .
    spec:
      # . . .
      volumes:
      - name: logs
        emptyDir: {}
      initContainers:
      - name: init-con
        image: busybox:1.31.0
        command: # . . .
        volumeMounts:
        - name: logs
          mountPath: /tmp/web-content
      containers:
      # . . .
ClusterIP, NodePort
k -n pluto expose pod project-plt-6cc-api --name project-plt-6cc-svc --port 3333 --target-port 80
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    project: plt-6cc-api
  name: project-plt-6cc-svc
  namespace: pluto
spec:
  ports:
  - port: 3333
    protocol: TCP
    targetPort: 80
  selector:
    project: plt-6cc-api
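The generated Service uses the default type, ClusterIP. A NodePort variant only needs a type field (the nodePort value is illustrative and must fall in the node port range, 30000-32767 by default):
spec:
  type: NodePort
  ports:
  - port: 3333
    protocol: TCP
    targetPort: 80
    nodePort: 30333
  selector:
    project: plt-6cc-api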
NetworkPolicy
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: np1
  namespace: venus
spec:
  podSelector:
    matchLabels:
      id: frontend
  policyTypes:
  - Egress
  egress:
  - to: # 1st egress rule
    - podSelector: # allow egress only to pods with api label
        matchLabels:
          id: api
  - ports: # 2nd egress rule
    - port: 53 # allow DNS UDP
      protocol: UDP
    - port: 53 # allow DNS TCP
      protocol: TCP
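A rough way to verify the policy from one of the frontend pods (pod and service names are hypothetical, and the image must provide wget):
k -n venus exec <frontend-pod> -- wget -qO- -T 2 http://<api-svc> # allowed by the 1st rule
k -n venus exec <frontend-pod> -- wget -qO- -T 2 http://<other-svc> # should time out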
Resource Management
CPU and memory requests and limits can be defined per container.
# . . .
containers:
- name: app
  image: image-name:v3
  resources:
    requests:
      memory: "64Mi"
      cpu: "250m"
    limits:
      memory: "128Mi"
      cpu: "500m"
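Requests and limits can also be set on an existing workload, and live usage inspected (k top requires metrics-server):
k set resources deploy deployment-name --requests=cpu=250m,memory=64Mi --limits=cpu=500m,memory=128Mi
k top pod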
Labels, Annotations
k -n sun get pod -l type=runner # only pods with label type=runner
k -n sun label pod -l type=worker protected=true # add protected=true to pods with type=worker
k -n sun label pod -l type=runner protected=true # add protected=true to pods with type=runner
k -n sun label pod -l "type in (worker,runner)" protected=true
k -n sun annotate pod -l protected=true protected="do not delete this pod"
k -n sun get pod --show-labels
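Labels and annotations are removed by appending a dash to the key:
k -n sun label pod -l type=runner protected-
k -n sun annotate pod -l type=runner protected-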