Installing a High-Availability Kubernetes Cluster with Kubeadm

Kubernetes?

Why Kubernetes High Availability?

vi /etc/hosts
...
10.61.61.10 k8s-LB
10.61.61.11 k8s-master1
10.61.61.12 k8s-master2
10.61.61.13 k8s-master3
10.61.61.14 k8s-worker1
10.61.61.15 k8s-worker2
...
sudo -i
ssh-keygen
grep master /etc/hosts | awk '{print $2}' > target.txt
for node in $(cat target.txt); do ssh-copy-id root@$node; done
for node in $(cat target.txt); do ssh root@$node hostname; done
''output
...
k8s-master1
k8s-master2
k8s-master3
...
sudo apt -y update; sudo apt -y upgrade; sudo apt -y update;
sudo apt install -y docker.io; sudo docker version
sudo systemctl enable docker
sudo systemctl start docker
sudo systemctl status docker
sudo apt install -y apt-transport-https; curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF > kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo mv kubernetes.list /etc/apt/sources.list.d/kubernetes.list
sudo apt update; sudo apt install -y kubectl kubelet kubeadm
sudo swapon -s
sudo swapoff /dev/xxx
sudo swapon -s
sudo apt update; sudo apt upgrade -y; sudo apt install haproxy -y
sudo vim /etc/haproxy/haproxy.cfg
...
frontend kubernetes
bind 10.61.61.10:6443
option tcplog
mode tcp
default_backend kubernetes-master-nodes
backend kubernetes-master-nodes
mode tcp
balance roundrobin
option tcp-check
server k8s-master1 10.61.61.11:6443 check fall 3 rise 2
server k8s-master2 10.61.61.12:6443 check fall 3 rise 2
server k8s-master3 10.61.61.13:6443 check fall 3 rise 2
frontend https_frontend_kubernetes
bind 10.61.61.10:443
option tcplog
mode tcp
default_backend backend_k8s_nodes
backend backend_k8s_nodes
mode tcp
balance roundrobin
option tcp-check
server k8s-master1 10.61.61.11:6443 check fall 3 rise 2
server k8s-master2 10.61.61.12:6443 check fall 3 rise 2
server k8s-master3 10.61.61.13:6443 check fall 3 rise 2
...
haproxy -c -V -f /etc/haproxy/haproxy.cfg
''output
...
Configuration file is valid
...
sudo systemctl restart haproxy
nc -v 10.61.61.10 6443
''output
...
Connection to 10.61.61.10 6443 port [tcp/*] succeeded!
...
kubeadm init --pod-network-cidr=10.244.0.0/16 --control-plane-endpoint "IP_LOADBALANCER:6443" --upload-certs
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
kubectl get pod -n kube-system -w
''note: just wait until all pods are up.
kubeadm join IP_LOADBALANCER:6443 --token [TOKEN] \
--discovery-token-ca-cert-hash [TOKEN-ca-cert-hash] \
--control-plane --certificate-key [certificate-key]
kubectl get nodes
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
''note: Run this so that you can execute kubectl commands — listing nodes, or any other kubectl command — on master2 & master3 as well.
git clone https://github.com/riowiraldhani/kubernetes
cd kubernetes/
ls
./createdeployments.sh
''output:
...
''list deployment
...
- ./exposedeployments.sh
''output:
...
''list service with nodePort
...
- verification
curl localhost:30606
...
Hello, world!
Version: 2.0.0
Hostname: helloapp-2-5ccf4846b5-pnwss
...
kubectl apply -f helloapp-ingress.yaml
kubectl get ing --all-namespaces
kubectl apply -f ingress-controller.yaml
kubectl get svc -n ingress-nginx
kubectl edit svc -n ingress-nginx ingress-nginx-controller
...
spec:
clusterIP: 10.101.62.144
externalIPs:
- IP_LOADBALANCER
...
''note: add externalIPs
kubectl get ingress helloapp-ingress
''note: now ingress helloapp-ingress have Address
kubectl get ingress --all-namespaces
kubectl get svc -n ingress-nginx
''note: now you have a service with a hostname. But you need to map the hostname to your master node's IP in /etc/hosts. Example:
...
10.61.61.11 k8s-master1 helloword-v1.info
...
curl helloword-v1.info:[NodePort]
''output
...
Hello, world!
Version: 1.0.0
Hostname: helloapp-1-759f7597c5-8sf2r
...
''note: if you want to expose or access this service via the load balancer node's IP, you need to add config to haproxy.
''on node k8s-LB
sudo vi /etc/haproxy/haproxy.cfg
...
frontend nginx
bind 10.61.61.10:5000
mode http
default_backend backend_nginx
backend backend_nginx
balance roundrobin
server k8s1 10.61.61.11:30606
server k8s2 10.61.61.12:30606
server k8s3 10.61.61.13:30606
frontend nginx-1
bind 10.61.61.10:5001
mode http
default_backend backend_nginx-1
backend backend_nginx-1
balance roundrobin
server k8s1 10.61.61.11:30734
server k8s2 10.61.61.12:30734
server k8s3 10.61.61.13:30734
...
haproxy -c -V -f /etc/haproxy/haproxy.cfg
sudo systemctl restart haproxy
''note: ports 30606 & 30734 are NodePorts; you can get them with the command kubectl get svc by looking at the PORT(S) column.
''on node k8s-LB and the other nodes
curl 10.61.61.10:5000
...
Hello, world!
Version: 1.0.0
Hostname: helloapp-1-759f7597c5-8sf2r
...
curl 10.61.61.10:5001
...
<html><body><h1>It works!</h1></body></html>
...

Get the Medium app

A button that says 'Download on the App Store', and if clicked it will lead you to the iOS App store
A button that says 'Get it on, Google Play', and if clicked it will lead you to the Google Play store