Installation

INSTALLATION

This section presents a procedure for installing Kubernetes (k8s) with one control plane node and two worker nodes. Ubuntu 20.04.1 LTS was used for all machines.

Set hostnames on control plane and nodes

Set hostname on control plane node

# hostnamectl needs root; use sudo for consistency with the other commands.
sudo hostnamectl set-hostname k8s-control

Set hostname on worker1

# hostnamectl needs root; use sudo for consistency with the other commands.
sudo hostnamectl set-hostname k8s-worker1

Set hostname on worker2

# hostnamectl needs root; use sudo for consistency with the other commands.
sudo hostnamectl set-hostname k8s-worker2

Configure /etc/hosts on all nodes

On each node add the same entries to /etc/hosts, in the format: {PRIVATE_IP} {HOSTNAME}

<control plane node private IP> k8s-control
<worker node 1 private IP> k8s-worker1
<worker node 2 private IP> k8s-worker2

Configure containerd

On all nodes, set up containerd. You will need to load some kernel modules and modify some system settings as part of this process.

# Kernel modules required for container networking:
#   overlay      - overlay filesystem used by the containerd snapshotter
#   br_netfilter - lets iptables see bridged (pod-to-pod) traffic
cat << EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

# Load them immediately; the file above only takes effect on the next boot.
sudo modprobe overlay
sudo modprobe br_netfilter

# Sysctl settings required by Kubernetes:
#   bridge-nf-call-ip(6)tables - pass bridged traffic through ip(6)tables
#   ip_forward                 - allow forwarding of pod traffic between interfaces
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Apply the settings now without a reboot.
sudo sysctl --system

Install containerd

Install and configure containerd

sudo apt-get update && sudo apt-get install -y containerd
sudo mkdir -p /etc/containerd
# Generate containerd's default configuration as a starting point.
sudo containerd config default | sudo tee /etc/containerd/config.toml
# kubelet >= 1.22 defaults to the systemd cgroup driver, so containerd must use
# it too. The default config ships 'SystemdCgroup = false'; the original sed
# flipped true -> false (a no-op on the default, and wrong in intent), leaving
# mismatched cgroup drivers that make pods crash-loop. Set it to true instead.
sudo sed -i 's|SystemdCgroup = false|SystemdCgroup = true|g' /etc/containerd/config.toml
sudo systemctl restart containerd

Disable swap

On all nodes, disable swap.

# Disable swap now (kubelet refuses to start with swap enabled by default)...
sudo swapoff -a
# ...and keep it disabled across reboots by commenting out swap entries in fstab.
sudo sed -i '/\sswap\s/ s/^/#/' /etc/fstab

Install needed packages on master and nodes

sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl gpg
# NOTE: the legacy apt.kubernetes.io / packages.cloud.google.com repositories
# were frozen in 2023 and removed in 2024, so installs from them now fail.
# Use the community-owned pkgs.k8s.io repository instead; it is versioned per
# minor release, so the URL below pins the v1.26 package line.
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.26/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.26/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
# Package revisions in pkgs.k8s.io use the '-1.1' suffix instead of the old '-00'.
sudo apt-get install -y kubelet=1.26.0-1.1 kubeadm=1.26.0-1.1 kubectl=1.26.0-1.1
# Hold the packages so unattended upgrades cannot skew cluster component versions.
sudo apt-mark hold kubelet kubeadm kubectl

Initialize cluster

On the control plane node only, initialize the cluster and set up kubectl access

# Initialize the control plane. --pod-network-cidr must match the CIDR expected
# by the network add-on installed later (192.168.0.0/16 is Calico's default).
sudo kubeadm init --pod-network-cidr 192.168.0.0/16 --kubernetes-version 1.26.0
# Give the current (non-root) user kubectl access via the admin kubeconfig.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Verify the installation

# 'k' is used as shorthand for kubectl throughout this guide; define it once
# (it was never defined in the original, so these commands would fail).
alias k=kubectl
k version
k get nodes

Install network add-on

Install the Calico network add-on (CIDR 192.168.0.0/16)

# Install the Calico operator and its default custom resources
# (pod CIDR 192.168.0.0/16, matching the kubeadm init above).
k create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/tigera-operator.yaml
k create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/custom-resources.yaml
# Optional: allow regular pods to schedule on the control plane node.
# The trailing '-' removes the taint; the 'master-' variant only exists on
# older releases, so that second command may print an ignorable error.
k taint nodes --all node-role.kubernetes.io/control-plane-
k taint nodes --all node-role.kubernetes.io/master-

Install Weave network add-on (CIDR 10.32.0.0/12)

# Install only ONE network add-on per cluster (Calico, Weave, or flannel).
k apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml

Install flannel network add-on (CIDR 10.244.0.0/16)

# flannel expects the cluster to have been initialized with
# --pod-network-cidr 10.244.0.0/16 (not the 192.168.0.0/16 used for Calico).
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml

Add nodes to the cluster as root user

Get the join command (this command is also printed during kubeadm init; feel free to simply copy it from there).

# Prints a ready-to-run 'kubeadm join ...' command with a freshly created token.
kubeadm token create --print-join-command

Copy the join command from the control plane node. Run it on each worker node as root (i.e. with sudo ).

# Placeholder: paste the full join command printed on the control plane node.
sudo kubeadm join ...

Install HA k8s cluster(2M+1W)

In this case we will need a LoadBalancer in front of 2 master nodes.

# HA init: clients reach the API servers through the load balancer endpoint.
# --upload-certs stores the control-plane certificates in the cluster so the
# second master can fetch them when it joins. Pinned to 1.26.0 for consistency
# with the packages installed above (the original said 1.25.0, which
# mismatches the installed kubelet/kubeadm 1.26.0).
sudo kubeadm init --control-plane-endpoint "${LB_IP}:${LB_PORT}" --upload-certs --pod-network-cidr 192.168.0.0/16 --kubernetes-version 1.26.0

Install and configure haproxy

Install haproxy on LB/Worker

apt-get install -y haproxy
# Append a TCP (layer 4) proxy for the Kubernetes API servers.
# LOADBALANCER, MASTER_1 and MASTER_2 must be set in the environment before
# running this: the unquoted EOF delimiter expands them in the heredoc.
cat <<EOF >> /etc/haproxy/haproxy.cfg
frontend kubernetes
    bind ${LOADBALANCER}:6443
    option tcplog
    mode tcp
    default_backend kubernetes-master-nodes

backend kubernetes-master-nodes
    mode tcp
    balance roundrobin
    option tcp-check
    server master-1 ${MASTER_1}:6443 check fall 3 rise 2
    server master-2 ${MASTER_2}:6443 check fall 3 rise 2
EOF
systemctl enable --now haproxy

Add control-plane boxes as root user

Get the join command (this command is also printed during kubeadm init; feel free to simply copy it from there).

# NOTE: despite the name, this stores the ENTIRE join command, not just the token.
export TOKEN=$(kubeadm token create --print-join-command)

Append to this command the output of the following command:

# 'kubeadm certs certificate-key' only GENERATES a random key; it never uploads
# the control-plane certificates, so a join using that key would fail.
# Re-upload the certs and capture the key actually registered with the cluster
# (it is printed on the last line of the command's output).
export CERT=$(sudo kubeadm init phase upload-certs --upload-certs | tail -1)

final command will look like

# The certificate key must come from an actual upload-certs run: a key from
# 'kubeadm certs certificate-key' alone is never registered with the cluster,
# so the control-plane join would fail to download the shared certificates.
echo "$(kubeadm token create --print-join-command) --control-plane --certificate-key $(sudo kubeadm init phase upload-certs --upload-certs | tail -1)"

Enable encryption at rest

Generate an encryption key:

# 32 random bytes, base64-encoded, as required by the aescbc provider.
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)

Create the encryption-config.yaml encryption config file:

mkdir -p /etc/kubernetes/enc
# 'kind: EncryptionConfig' with 'apiVersion: v1' is the long-removed pre-1.13
# format; kube-apiserver 1.26 only accepts EncryptionConfiguration from
# apiserver.config.k8s.io/v1 and would fail to start with the old kind.
# Provider order matters: aescbc first means new writes are encrypted, while
# the identity fallback still allows reading pre-existing plaintext data.
cat > /etc/kubernetes/enc/encryption-config.yaml <<EOF
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF

Configure kube-apiserver to use it by editing /etc/kubernetes/manifests/kube-apiserver.yaml:

spec:
  containers:
  - command:
    - kube-apiserver
    ...
    # Must point at the file created above: the original referenced enc.yaml,
    # but the file is named encryption-config.yaml (apiserver would not start).
    - --encryption-provider-config=/etc/kubernetes/enc/encryption-config.yaml  # <-- add this line
    volumeMounts:
    ...
    - name: enc                           # <-- add this line
      mountPath: /etc/kubernetes/enc      # <-- add this line
      readOnly: true                      # <-- add this line ('readOnly' is case-sensitive; 'readonly' is rejected)
    ...
  volumes:
  ...
  - name: enc                             # <-- add this line
    hostPath:                             # <-- add this line
      path: /etc/kubernetes/enc           # <-- add this line
      type: DirectoryOrCreate             # <-- add this line

and restart kube-apiserver

Install metallb lb

Install

# MetalLB provides LoadBalancer-type Services on bare-metal clusters.
k apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.7/config/manifests/metallb-native.yaml

Define IPAddressPool and L2Advertisement for the LB

# IPAddressPool: the address range MetalLB may assign to LoadBalancer Services.
# An L2Advertisement with an empty spec announces all pools via ARP/NDP.
cat <<EOF | k apply -f -
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: example
  namespace: metallb-system
spec:
  addresses:
  - 192.168.10.0/24
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: empty
  namespace: metallb-system
EOF

Verify installation

# get all pods
> k get pod -n kube-system
NAME                                      READY   STATUS    RESTARTS        AGE
calico-kube-controllers-6b77fff45-l52v8   1/1     Running   0               26m
calico-node-g65g9                         1/1     Running   2 (3h41m ago)   4d2h
calico-node-tp2k9                         1/1     Running   2 (3h41m ago)   4d2h
calico-node-wrxx8                         1/1     Running   2 (3h41m ago)   4d2h
coredns-64897985d-2q8sn                   1/1     Running   0               26m
coredns-64897985d-vnj6z                   1/1     Running   0               42m
etcd-k8s-control                          1/1     Running   0               45m
kube-apiserver-k8s-control                1/1     Running   0               45m
kube-controller-manager-k8s-control       1/1     Running   0               45m
kube-proxy-4c8vq                          1/1     Running   0               46m
kube-proxy-kvpz4                          1/1     Running   0               46m
kube-proxy-x86lh                          1/1     Running   0               46m
kube-scheduler-k8s-control                1/1     Running   0               45m
metrics-server-574849569f-kcfpw           1/1     Running   0               22m
metrics-server-847dcc659d-d8bn5           0/1     Running   0               22m

# get all nodes
> k get nodes
NAME          STATUS   ROLES           AGE    VERSION
k8s-control   Ready    control-plane   4d2h   v1.24.0
k8s-worker1   Ready    <none>          4d2h   v1.24.0
k8s-worker2   Ready    <none>          4d2h   v1.24.0


# check secret encryption
> k create secret generic secret1 -n default --from-literal=mykey=mydata
# Read the secret straight from etcd on the control plane node. With encryption
# enabled, the stored value begins with 'k8s:enc:aescbc:v1:key1' instead of
# plaintext. 'hd' is hexdump (package bsdmainutils on Ubuntu).
sudo ETCDCTL_API=3 etcdctl \
   --cacert=/etc/kubernetes/pki/etcd/ca.crt   \
   --cert=/etc/kubernetes/pki/etcd/server.crt \
   --key=/etc/kubernetes/pki/etcd/server.key  \
   get /registry/secrets/default/secret1 | hd