Getting started with Kubernetes

Frankie Fan
6 min read · Oct 16, 2020


Kubernetes (K8s) is an open-source system for automating deployment, scaling, and management of containerized applications.

It groups containers that make up an application into logical units for easy management and discovery. Kubernetes builds upon 15 years of experience of running production workloads at Google, combined with best-of-breed ideas and practices from the community.

Install on a Linux cluster

Compile Kubernetes

git clone https://github.com/kubernetes/kubernetes.git
cd kubernetes
make release
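The release build runs in Docker and takes a while; when it finishes, the tarballs should appear under _output/ in the repo (make quick-release is a faster single-platform variant, if you only need local artifacts):

#Check the build artifacts (path assumes a default build)
ls _output/release-tars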

Install master node

#Add repos
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF

#Set SELinux in permissive mode (effectively disabling it)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

#Install
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable --now kubelet

cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
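These bridge sysctls only take effect when the br_netfilter kernel module is loaded, so load it first if sysctl --system reports missing keys:

#Load the bridge netfilter module required by the settings above
modprobe br_netfilter
lsmod | grep br_netfilter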

#Initialize the control plane
sudo kubeadm init --pod-network-cidr=192.168.0.0/16

#Install the Calico network plugin
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

#Copy kubernetes config to user folder
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
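kubectl should now work for this user; check that the node goes Ready and the Calico pods come up:

kubectl get nodes
kubectl get pods -n kube-system -o wide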

Install slave node

#Run on the master node to get the join command
sudo kubeadm token create --print-join-command

#Copy the join command and run it on the slave node
kubeadm join <master-ip>:6443 --token 2kjo2d.8qno0vzvbgabp1e8 --discovery-token-ca-cert-hash sha256:2f1ebea7d7369a2d18b58f2926573e193e13ef7525649df1740ddb87963e1315
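Back on the master node, the new node should appear and reach Ready after a minute or so:

kubectl get nodes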

Install metrics server

git clone https://github.com/kodekloudhub/kubernetes-metrics-server.git
kubectl create -f kubernetes-metrics-server/
kubectl top nodes
kubectl top pods

Install Prometheus

kubectl apply --filename https://raw.githubusercontent.com/giantswarm/kubernetes-prometheus/master/manifests-all.yaml
#You may want to download manifests-all.yaml and customize it yourself, e.g. to add Grafana configuration
#Then create a Traefik ingress YAML for the UI and apply it, as sketched below
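A minimal sketch of such an ingress, assuming the Giant Swarm manifests create a prometheus service on port 9090 in a monitoring namespace (check with kubectl get svc -n monitoring and adjust names and ports to match):

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: prometheus-ingress
  namespace: monitoring
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: [domain]
    http:
      paths:
      - path: /
        backend:
          serviceName: prometheus
          servicePort: 9090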

Install Helm

#Download the Helm tar.gz from https://github.com/helm/helm/releases (this guide uses Helm v2)
tar -zxvf helm-v2.14.1-linux-amd64.tar.gz
sudo cp linux-amd64/helm /usr/local/bin
helm init
helm version
kubectl get pod -n kube-system -l app=helm
kubectl create -f rbac.yml
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
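The rbac.yml used above isn't shown in the post; the usual Tiller RBAC manifest (as in the Helm v2 docs) creates a tiller service account and binds it to cluster-admin, which is acceptable for a test cluster:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: tiller
  namespace: kube-system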

Traefik

Create Traefik toml

defaultEntryPoints = ["http", "https"]

# Skip certificate verification when Traefik proxies to HTTPS backends inside the cluster
insecureSkipVerify = true

[entryPoints]
  [entryPoints.http]
  address = ":80"
    [entryPoints.http.redirect]
    entryPoint = "https"
  [entryPoints.https]
  address = ":443"
    [entryPoints.https.tls]
      [[entryPoints.https.tls.certificates]]
      certFile = "/etc/kubernetes/ssl/cert.pem"
      keyFile = "/etc/kubernetes/ssl/privkey.pem"

[metrics]
  [metrics.prometheus]
  entryPoint = "traefik"
  buckets = [0.1, 0.3, 1.2, 5.0]

Create Traefik yaml

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      tolerations:
      - operator: "Exists"
      nodeSelector:
        node: master
      volumes:
      - name: ssl
        secret:
          secretName: traefik-cert
      - name: config
        configMap:
          name: traefik-conf
      containers:
      - image: traefik
        name: traefik-ingress-lb
        volumeMounts:
        - mountPath: "/etc/kubernetes/ssl"
          name: "ssl"
        - mountPath: "/home/ec2-user/kube/traefik"
          name: "config"
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --configfile=/home/ec2-user/kube/traefik/traefik.toml
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - protocol: TCP
    port: 80
    name: web
  - protocol: TCP
    port: 443
    name: https
  - protocol: TCP
    port: 8080
    name: admin
  type: NodePort
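Once the RBAC objects and the traefik-cert secret / traefik-conf config map from the following sections exist, save the manifests above as traefik.yml (the file name the renewal steps below assume), apply, and check the pod:

kubectl create -f traefik.yml
kubectl -n kube-system get pods -l k8s-app=traefik-ingress-lb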

Create Traefik rbac

apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system

Create Traefik web-ui ingress

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: [domain]
    http:
      paths:
      - backend:
          serviceName: traefik-ingress-service
          servicePort: 8080

Create Traefik HTTPS cert

sudo -s
cd /etc/letsencrypt/live/[domain]
#Issue or renew the Let's Encrypt cert; for a DNS-01 challenge, add the TXT record in your domain provider's console
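#For example with certbot (assumed here; any ACME client that supports DNS-01 works):
certbot certonly --manual --preferred-challenges dns -d [domain]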
kubectl create secret generic traefik-cert --from-file=privkey.pem --from-file=cert.pem -n kube-system
cp cert.pem privkey.pem /etc/kubernetes/ssl/
#exit and go to the traefik k8s scripts folder
kubectl create configmap traefik-conf --from-file=traefik.toml -n kube-system
kubectl get cm -n kube-system

Renew Traefik HTTPS cert

kubectl delete secret traefik-cert -n kube-system
kubectl delete configmap traefik-conf -n kube-system
kubectl delete -f traefik.yml
#Re-run the above "Create Traefik HTTPS cert" steps
kubectl create -f traefik.yml

Kubernetes Dashboard for local access

Create service account

#admin-user.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

Create role binding

#admin-user-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

Apply yaml

#Apply service account
kubectl create -f admin-user.yaml

#Apply role binding
kubectl create -f admin-user-role-binding.yaml

Retrieve login token

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

Install dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml

#Create a proxy so the dashboard is reachable from localhost only
kubectl proxy
#Or bind to all interfaces and accept any host (insecure; avoid on public machines)
kubectl proxy --address='0.0.0.0' --accept-hosts='^*$'
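With the proxy running, the dashboard login page is typically served at a URL of this form (the path assumes the dashboard lives in kube-system, as in this guide):

#http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/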

Kubernetes Dashboard for public access

Create certificates

mkdir $HOME/certs
cd $HOME/certs
#Generate a key, strip its passphrase, and create a self-signed certificate
openssl genrsa -out dashboard.key 2048
openssl rsa -in dashboard.key -out dashboard.key
openssl req -sha256 -new -key dashboard.key -out dashboard.csr -subj '/CN=localhost'
openssl x509 -req -sha256 -days 365 -in dashboard.csr -signkey dashboard.key -out dashboard.crt
#Store the certs in the secret name the dashboard expects
kubectl -n kube-system create secret generic kubernetes-dashboard-certs --from-file=$HOME/certs

Install dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

#Wait and check if the replica set is fulfilled
kubectl -n kube-system get rs

Create PSP

kubectl -n kube-system create -f - <<EOF
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: dashboard
spec:
  privileged: false
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
  - '*'
EOF

Create role

kubectl -n kube-system create role psp:dashboard --verb=use --resource=podsecuritypolicy --resource-name=dashboard

Bind role and account

kubectl -n kube-system create rolebinding kubernetes-dashboard-policy --role=psp:dashboard --serviceaccount=kube-system:kubernetes-dashboard
kubectl --as=system:serviceaccount:kube-system:kubernetes-dashboard -n kube-system auth can-i use podsecuritypolicy/dashboard

Expose service on NodePort

#Edit the kubernetes-dashboard service and change the following options:
#  spec.type: from ClusterIP to NodePort
#  spec.ports[0].nodePort: set to the port you want the dashboard exposed on
kubectl -n kube-system edit service kubernetes-dashboard
kubectl -n kube-system get services

Access the dashboard publicly

#Configure the DNS record in your domain provider's console, then browse to the NodePort (30104 in this example)
https://[domain]:30104/

Expose Traefik ingress

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: dashboard-ingress
  namespace: kube-system
spec:
  rules:
  - host: [domain]
    http:
      paths:
      - path: /
        backend:
          serviceName: kubernetes-dashboard
          servicePort: 443

Scenarios

Run a busybox container

kubectl run -i --tty --image busybox test --restart=Never --rm /bin/sh
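The shell is removed when you exit (--rm); it is handy for poking at the cluster network, e.g. checking in-cluster DNS:

#Inside the busybox shell
nslookup kubernetes.default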

Use a Docker Hub secret in pods

#docker login writes credentials to ~/.docker/config.json ($HOME is used below so the path expands reliably)
docker login
kubectl create secret generic docker-hub --from-file=.dockerconfigjson=$HOME/.docker/config.json --type=kubernetes.io/dockerconfigjson --namespace=test

#Create a Deployment whose pods use the secret, like below
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: test-server
  namespace: test
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: test-server
    spec:
      containers:
      - name: test-server
        image: demo/test-server
      imagePullSecrets:
      - name: docker-hub

Default storage class

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2
reclaimPolicy: Retain
mountOptions:
- debug
volumeBindingMode: Immediate
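As written, this class is not actually marked as the cluster default; Kubernetes only treats a StorageClass as default (i.e. used by PVCs that omit storageClassName) when it carries the is-default-class annotation:

metadata:
  name: standard
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"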

Create MongoDB with auth settings for two databases

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: dev-mongodb
  namespace: test
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: dev-mongodb
    spec:
      hostname: mongodb
      containers:
      - name: dev-mongodb
        image: hustakin/mongo-auth2:latest
        env:
        - name: MONGODB_ADMIN_USER
          value: admin
        - name: MONGODB_ADMIN_PASS
          value: "111"
        - name: MONGODB_APP1_DATABASE
          value: db1
        - name: MONGODB_APP1_USER
          value: user1
        - name: MONGODB_APP1_PASS
          value: pass1
        - name: MONGODB_APP2_DATABASE
          value: db2
        - name: MONGODB_APP2_USER
          value: user2
        - name: MONGODB_APP2_PASS
          value: pass2
        ports:
        - containerPort: 27017
          protocol: TCP
          name: db
        volumeMounts:
        - mountPath: /data/db
          name: dev-mongo-persistent-storage
        livenessProbe:
          exec:
            command:
            - mongo
            - --eval
            - "db.adminCommand('ping')"
          failureThreshold: 3
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - mongo
            - --eval
            - "db.adminCommand('ping')"
          failureThreshold: 3
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
      imagePullSecrets:
      - name: docker-hub
      volumes:
      - name: dev-mongo-persistent-storage
        persistentVolumeClaim:
          claimName: dev-local-data-pvc
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mongodb
  name: mongodb
  namespace: test
  annotations:
    traefik.ingress.kubernetes.io/affinity: "true"
    traefik.ingress.kubernetes.io/session-cookie-name: "sticky"
spec:
  type: ClusterIP
  ports:
  - protocol: TCP
    port: 27017
    name: db
  selector:
    app: dev-mongodb
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: dev-local-data-pv
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /mnt/data
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: node
          operator: In
          values:
          - db
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: dev-local-data-pvc
  namespace: test
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: local-storage
  resources:
    requests:
      storage: 50Gi
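To sanity-check the deployment, port-forward the service and connect with the mongo shell (assumed installed locally) using the admin credentials defined above:

kubectl -n test get pods -l app=dev-mongodb
kubectl -n test port-forward svc/mongodb 27017:27017
#In another terminal:
mongo --host 127.0.0.1 -u admin -p 111 --authenticationDatabase admin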

Useful commands
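
A few kubectl commands that come up constantly when operating a cluster like this one:

kubectl get nodes -o wide
kubectl get pods --all-namespaces
kubectl describe pod <pod> -n <namespace>
kubectl logs -f <pod> -n <namespace>
kubectl exec -it <pod> -n <namespace> -- /bin/sh
kubectl get events -n <namespace> --sort-by=.metadata.creationTimestamp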
