Maycke's IT Guides

#################################### KUBERNETES ###########################################
~Kubernetes is an open-source system for automating the deployment, scaling, and
~ management of containerized applications.

~script on which part of this guide was based:
https://www.itsgeekhead.com/tuts/kubernetes-129-ubuntu-22-04-3/ 


####### basic kubernetes configuration:~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

# IMPORTANT: the Pods' network IP range cannot overlap with any other network IP range
#  on your device!

### server preparation:~~~~~~~~~~~~~~~~~~~~~
## if you don't have a DNS server configured to support the nodes (for example, in a
##  homelab), use the command below to add the server/node name to the file
##  /etc/hosts:
printf "\n10.5.5.5 server_name\n\n" >> /etc/hosts
#in this example, the cluster will have only one node (the control-plane): a server
# named 'server_name' with the IP 10.5.5.5
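# (optional) confirm the new entry resolves ('server_name' is the example name used above):
getent hosts server_name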

## add these kernel modules (without them, the containerd runtime cannot create and
##  manage containers on the system):
printf "overlay\nbr_netfilter\n" >> /etc/modules-load.d/containerd.conf

## load those modules into the running kernel (another option would be to reboot the server now):
modprobe overlay
modprobe br_netfilter
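# (optional) confirm both modules are loaded:
lsmod | grep -E 'overlay|br_netfilter'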

## add this configuration file (you may choose another file name if you wish) with this
##  content to the directory /etc/sysctl.d, to allow network traffic on the kubernetes
##  network so that packets can reach the Pods:
printf "net.bridge.bridge-nf-call-iptables = 1\nnet.ipv4.ip_forward = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\n" >> /etc/sysctl.d/99-kubernetes-cri.conf

## apply the previous options to the system (rebooting it would also work):
sysctl --system
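# (optional) confirm the three settings took effect:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward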

### containerd config:~~~~~~~~~~~~~~~~~~~~~
## download containerd (this guide uses version 1.7.16) and save it in the /tmp directory:
wget https://github.com/containerd/containerd/releases/download/v1.7.16/containerd-1.7.16-linux-amd64.tar.gz -P /tmp/
## extract it to the /usr/local directory:
tar Cxzvf /usr/local /tmp/containerd-1.7.16-linux-amd64.tar.gz
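# (optional) confirm the binaries were extracted (this assumes /usr/local/bin is in your PATH):
containerd --version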

## download the systemd unit file that allows containerd to run as a service, saving
##  it to the directory /etc/systemd/system:
wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service -P /etc/systemd/system/
## enable the containerd service:
systemctl daemon-reload
systemctl enable --now containerd
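# (optional) confirm the service is active:
systemctl is-active containerd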

## download and install runc, the low-level container runtime used by containerd:
wget https://github.com/opencontainers/runc/releases/download/v1.1.12/runc.amd64 -P /tmp/
install -m 755 /tmp/runc.amd64 /usr/local/sbin/runc
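# (optional) confirm runc is installed:
runc --version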

## download the CNI plugins and extract them to /opt/cni/bin:
wget https://github.com/containernetworking/plugins/releases/download/v1.4.1/cni-plugins-linux-amd64-v1.4.1.tgz -P /tmp/
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin /tmp/cni-plugins-linux-amd64-v1.4.1.tgz

## generate the configuration file for containerd:
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml

## switch the containerd configuration to use the systemd cgroup driver
##  ('SystemdCgroup') expected by kubernetes:
nano /etc/containerd/config.toml             #set 'SystemdCgroup' to 'true'
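# (optional) a non-interactive alternative to the edit above; a sketch that assumes the
#  default config.toml generated above contains a single 'SystemdCgroup = false' line:
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml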

## restart containerd:
systemctl restart containerd

### install kubernetes:~~~~~~~~~~~~~~~~~~~~~
## disable swap:
swapoff -a
nano /etc/fstab                             #comment out the line containing 'swap'
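# (optional) a non-interactive alternative to the edit above; a sketch that assumes a
#  standard swap entry in /etc/fstab (double-check the file afterwards):
sed -i '/\sswap\s/ s/^#*/#/' /etc/fstab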

## install the dependencies:
apt update
apt install -y apt-transport-https ca-certificates curl gpg

## download the public key of the kubernetes repository (if the directory
##  /etc/apt/keyrings does not exist on your distribution, create it first with
##  'mkdir -p /etc/apt/keyrings'):
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg

## add the kubernetes repository to apt's sources:
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
chmod 644 /etc/apt/sources.list.d/kubernetes.list

## update the repositories and install kubernetes:
apt update
apt install -y kubelet kubeadm kubectl 

## hold these packages to prevent automatic updates and keep their versions compatible with each other:
apt-mark hold kubelet kubeadm kubectl
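# (optional) confirm the three packages are on hold:
apt-mark showhold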

## double-check the swap configuration and make sure the swap total is 0:
free -m

# confirm the version installed:
kubectl version --client
# response I got from that command:
#Client Version: v1.30.0
#Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3


#----------------- START of the config ONLY for CONTROL-PLANE ---------------#
#### use the NEXT COMMANDS ONLY on the control node / master node / CONTROL-PLANE
####  of your cluster!!!

### initialize the cluster; the CIDR is the Pod network and CANNOT overlap with any
###  other network on your device (the internal network used in this guide was
###  10.5.5.0/24):
kubeadm init --pod-network-cidr 10.10.0.0/16 --kubernetes-version 1.30.0 --node-name server_name

### the output of the command "kubeadm init…" includes a "kubeadm join…" command
###  to be run on the other servers that will join the cluster as worker nodes
###  (if there are any...)
### the output also shows the next command to be run here, which is required before
###  using the new cluster. If you are logged in as root, this is the command:
export KUBECONFIG=/etc/kubernetes/admin.conf
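# if you are logged in as a regular user instead, the 'kubeadm init' output suggests
#  copying the admin config into your home directory:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config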

# now, if you run the command below, the nodes of this cluster will be listed, but
#  you will see they show the status 'NotReady'. That is because we still need to
#  deploy a container networking solution in the cluster:
kubectl get nodes

### deploy the container networking solution. The solution chosen in this guide is
###  Calico, which is installed in 2 parts:
## apply the manifest for the first part (the Tigera operator):
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/tigera-operator.yaml
## download the manifest for the second part (the Calico custom resources):
wget https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/custom-resources.yaml
## edit the downloaded file:
nano custom-resources.yaml      #switch the CIDR to match the one used in the 'kubeadm init' command
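# for reference, the part of custom-resources.yaml to change looks roughly like this
#  (a sketch of the v3.27 manifest; the default cidr is 192.168.0.0/16, shown here
#  already switched to the value used by 'kubeadm init' in this guide):
#   spec:
#     calicoNetwork:
#       ipPools:
#       - blockSize: 26
#         cidr: 10.10.0.0/16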
## apply the manifest for the second part:
kubectl create -f custom-resources.yaml

### monitor the pods until all of them are 'Running':
watch kubectl get pods --all-namespaces
# (optional) or check a static view of the pods:
kubectl get pods --all-namespaces

### by default, the cluster won't allow Pods to be scheduled on the control-plane for
###  safety reasons, therefore you need to remove the taints on the control-plane
###  to be able to start pods on it (in case you intend to do that...):
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
#the command below removes the legacy 'master' taint; on recent kubernetes versions
# it may report that the taint was not found, which can be ignored:
kubectl taint nodes --all node-role.kubernetes.io/master-
#since in this example we'll only have this one node, removing the taint is indispensable!

### confirm that you have a node in the cluster (and the node's status is now 'Ready'):
kubectl get nodes -o wide

# (optional) obtain the 'kubeadm join…' command again, to use on other nodes:
kubeadm token create --print-join-command
#use the 'kubeadm join…' command ONLY on other nodes!!!

# (optional) after running 'kubeadm join...' on other nodes, they will
#  show up in the list produced by the command 'kubectl get nodes'. To label
#  them as worker nodes, use:
kubectl label node NODE_NAME node-role.kubernetes.io/worker=worker
#----------------- END of the config ONLY for CONTROL-PLANE---------------#


# IMPORTANT: for a homelab, having only 1 control-plane node is ok, but for a real
#  production environment it is advisable to have more of them, based on the size
#  of your cluster, always keeping more worker nodes than control-plane nodes, so
#  that if you lose a control-plane node (if it fails), you'll still have other
#  control-plane nodes online and won't lose your configuration!
#########################################################################################

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ EXTRAS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


##### DEPLOYMENT TEST~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# follow the instructions at the link below to run a test deployment and see
#  your kubernetes working!:
https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
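
# (optional) a minimal smoke test, a sketch using the public nginx image ('my-nginx'
#  is just an example name):
kubectl create deployment my-nginx --image=nginx
kubectl get deployments
kubectl get pods
# clean up afterwards:
kubectl delete deployment my-nginx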


##### How to make KUBERNETES work after REBOOT in a 1-NODE-CLUSTER~~~~~~~~~~~~~~~~~~~~~~~~
## NOTE: data edited/saved in a Pod will be lost; create a persistent volume if you wish
##  to keep changes after reboots (that will not be covered here)!!!

### after rebooting the server/control-plane node in a 1-node-cluster, to make kubernetes 
###  work again, follow these steps:
# (optional) check kubelet status and confirm it is active:
systemctl status kubelet

# (optional) double-check the swap configuration and make sure the swap total is 0:
free -m

## repeat the command run AFTER "kubeadm init..." for the user you chose; if it was root:
export KUBECONFIG=/etc/kubernetes/admin.conf

# (optional) check containerd status and confirm it is active:
systemctl status containerd

## confirm the node is 'Ready':
kubectl get nodes --output=wide

# (optional) investigate errors, if you got any during the steps above:
journalctl -u kubelet.service
journalctl
#########################################################################################

~~~~~~~~~~Kubernetes Master Node Script:~~~~~~~~~~

ATTENTION: Always read a script before you run it!!!


To apply a basic Kubernetes Master Node (control-plane) configuration script, run the following command line as ROOT in your server's terminal:

     wget -nc https://www.maycke.com.br/guides/raw/kubernetes.sh && chmod 700 kubernetes.sh && ./kubernetes.sh && rm kubernetes.sh && export KUBECONFIG=/etc/kubernetes/admin.conf
#########################################################################################