Step-by-step guide to creating a Kubernetes cluster on a RHEL 7 server
Install preparation
Disable swap
# Disable swap immediately (kubelet refuses to start while swap is enabled)
sudo swapoff -a
# Make it permanent: edit /etc/fstab and remove (or comment out) the swap line,
vi /etc/fstab
# /dev/mapper/rhel00-swap swap swap defaults 0 0
Update network configuration
# Update /etc/hosts so the node's static IP resolves to its hostname
# (kubeadm uses the hostname when registering the node)
vi /etc/hosts
# Example (line to add inside /etc/hosts):
# 192.168.0.100 rhel7-k8s
Install traffic control tool (RHEL 8 only)
# If your server is RHEL 8, install the traffic-control tool (used by kube-proxy)
sudo dnf install -y iproute-tc
# Create the .conf file to load the required kernel modules at bootup:
#   overlay      - overlayfs storage driver used by the container runtime
#   br_netfilter - makes bridged (pod) traffic visible to iptables
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load both modules now, without waiting for a reboot
sudo modprobe overlay
sudo modprobe br_netfilter
# Set up required sysctl params, these persist across reboots.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# Apply the sysctl settings immediately
sudo sysctl --system
Open required ports
# Open the ports Kubernetes needs (https://kubernetes.io/docs/reference/ports-and-protocols/)
# Kubernetes API server — required for 'kubeadm join' and remote kubectl access
sudo firewall-cmd --zone=public --add-port=6443/tcp --permanent
# etcd client and server (peer) communication
sudo firewall-cmd --zone=public --add-service=etcd-client --permanent
sudo firewall-cmd --zone=public --add-service=etcd-server --permanent
# kubelet API
sudo firewall-cmd --zone=public --add-port=10250/tcp --permanent
# kube-scheduler
sudo firewall-cmd --zone=public --add-port=10259/tcp --permanent
# kube-controller-manager
sudo firewall-cmd --zone=public --add-port=10257/tcp --permanent
# NodePort Services
sudo firewall-cmd --zone=public --add-port=30000-32767/tcp --permanent
# apply changes
sudo firewall-cmd --reload
Install container runtime (CRI)
# Install CRI-O — pick the VERSION that matches the Kubernetes minor version
# you intend to install (here 1.24)
export VERSION=1.24
export OS=CentOS_7 # RHEL8 -> CentOS_8
# Writing under /etc/yum.repos.d requires root, so run curl via sudo;
# quote the URLs so the variable expansions are safe
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${OS}/devel:kubic:libcontainers:stable.repo"
sudo curl -L -o "/etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo" "https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:${VERSION}/${OS}/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo"
sudo yum install -y cri-o
Enable CRI-O service
# Reload systemd unit files so the new crio unit is picked up
sudo systemctl daemon-reload
# 'enable --now' both enables the service at boot AND starts it immediately,
# so a separate 'systemctl start crio' is not needed
sudo systemctl enable --now crio
Installing kubeadm, kubelet and kubectl
# NOTE: the legacy packages.cloud.google.com yum repos were deprecated and shut
# down; use the community-owned pkgs.k8s.io repos instead. They serve one repo
# per minor version (available for v1.24 and newer) — match the CRI-O version.
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.24/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.24/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl
EOF
# Set SELinux in permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Install required tools (--disableexcludes overrides the 'exclude=' guard above,
# which exists to prevent accidental upgrades later)
sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
# Enable and start kubelet now — it will crash-loop until 'kubeadm init'
# provides its configuration; that is expected
sudo systemctl enable --now kubelet
Create kubernetes cluster with kubeadm
Option 1: Install pod network add-on (Calico)
# Bootstrap the control plane with Calico's default pod CIDR
sudo kubeadm init --pod-network-cidr=192.168.0.0/16
# Example output (commented so this section is safe to copy-paste):
#   Your Kubernetes control-plane has initialized successfully!
#   To start using your cluster, you need to run the following as a regular user:
# Set up kubectl access for your regular user
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u)":"$(id -g)" "$HOME/.kube/config"
# Apply calico configuration: the Tigera operator first, then the
# custom resources that trigger the actual Calico install.
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml
# Remove the control-plane taints so pods can schedule on this node
# (single-node clusters only; the trailing '-' means "remove this taint")
kubectl taint nodes --all node-role.kubernetes.io/master:NoSchedule-
kubectl taint nodes --all node-role.kubernetes.io/control-plane:NoSchedule-
# Verify if your node is ready
kubectl get nodes -o wide
Option 2: Install pod network add-on (Flannel)
# NOTE: no host package is needed — flannel runs as a DaemonSet deployed by
# the manifest below, so the old 'yum install flannel' step is unnecessary
# (and the package is not in the standard RHEL repos anyway).
# Init the cluster with flannel's default pod CIDR
sudo kubeadm init --pod-network-cidr=10.244.0.0/16
# Apply the flannel manifest (the repo moved from coreos/flannel to flannel-io/flannel)
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# Remove control-plane taints so pods can schedule on this single node
kubectl taint nodes --all node-role.kubernetes.io/control-plane- node-role.kubernetes.io/master-
# Verify if your node is ready
kubectl get nodes -o wide
# Example output (commented so this section is safe to copy-paste):
# NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
# rhel7-k8s Ready control-plane 2m48s v1.24.3 192.168.0.111 <none> Red Hat Enterprise Linux 3.10.0-1160.76.1.el7.x86_64 cri-o://1.24.2