Single-node Kubernetes cluster using Vagrant.

main
Erik Brakkee 4 weeks ago
parent 45880cbfa5
commit 1f86f1a3e6

Vagrantfile
@@ -0,0 +1,44 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

#NUM_MASTER_NODE = ENV['MASTERS'].to_i
#NUM_WORKER_NODE = ENV['WORKERS'].to_i
NUM_MASTER_NODE = 1
NUM_WORKER_NODE = 0

Vagrant.configure("2") do |config|
  # The most common configuration options are documented and commented below.
  # For a complete reference, please see the online documentation at
  # https://docs.vagrantup.com.

  # Every Vagrant development environment requires a box. You can search for
  # boxes at https://vagrantcloud.com/search.
  #config.vm.box = "generic/ubuntu2004"
  config.vm.box = "generic/ubuntu2004" # "generic/debian12"

  config.vm.provider :libvirt do |libvirt|
    libvirt.default_prefix = ""
    libvirt.memory = 3096
    libvirt.cpus = 2
  end

  config.vm.provider "virtualbox" do |vbox|
    vbox.memory = 3096
    vbox.cpus = 2
  end

  (1..NUM_MASTER_NODE).each do |i|
    config.vm.define "master#{i}" do |node|
      node.vm.hostname = "master#{i}"
      node.vm.provision "shell", path: "disable-swap.sh"
      node.vm.provision "shell", path: "setup-k8s-node.sh"
      node.vm.provision "shell", path: "setup-master.sh"
      #node.vm.network "private_network", ip: IP_NW + "#{1 + i}"
    end
  end

  (1..NUM_WORKER_NODE).each do |i|
    config.vm.define "worker#{i}" do |node|
      node.vm.hostname = "worker#{i}"
      node.vm.provision "shell", path: "disable-swap.sh"
    end
  end
end
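
The commented-out ENV lines at the top hint at making the node counts configurable from the environment. A possible variant (not part of this commit) that restores that behaviour with defaults when the variables are unset:

# Hypothetical variant: read node counts from MASTERS/WORKERS, falling back to 1/0.
NUM_MASTER_NODE = (ENV['MASTERS'] || 1).to_i
NUM_WORKER_NODE = (ENV['WORKERS'] || 0).to_i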

disable-swap.sh
@@ -0,0 +1,4 @@
# kubelet requires swap off
swapoff -a
# keep swap off after reboot
sudo sed -i '/\bswap\b/ s/^\(.*\)$/#\1/g' /etc/fstab
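
To verify the provisioner did its job, something like the following can be run inside the VM (a suggestion, not part of the script):

swapon --show          # should print nothing
free -h | grep -i swap # should report 0B of swap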

setup-k8s-node.sh
@@ -0,0 +1,87 @@
#!/bin/bash
# Container runtime: containerd with the systemd cgroup driver enabled.
apt update
apt install containerd -y
systemctl stop containerd
mkdir -p /etc/containerd
containerd config default | sed 's/SystemdCgroup.*=.*false/SystemdCgroup = true/g' > /etc/containerd/config.toml
systemctl start containerd
systemctl enable containerd
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
apt-get install -y apt-transport-https ca-certificates curl gpg
mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
VERSION=1.29.2-1.1
sudo apt-get install -y kubelet=$VERSION kubeadm=$VERSION kubectl=$VERSION
sudo apt-mark hold kubelet kubeadm kubectl
apt-get install apt-file apparmor-utils auditd etcd-client jq strace -y
# kube-bench
curl -L https://github.com/aquasecurity/kube-bench/releases/download/v0.7.2/kube-bench_0.7.2_linux_amd64.deb -o kube-bench_0.7.2_linux_amd64.deb
dpkg -i kube-bench_0.7.2_linux_amd64.deb
# trivy
sudo apt-get install -y wget apt-transport-https gnupg lsb-release
wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | gpg --dearmor | sudo tee /usr/share/keyrings/trivy.gpg > /dev/null
echo "deb [signed-by=/usr/share/keyrings/trivy.gpg] https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" | sudo tee -a /etc/apt/sources.list.d/trivy.list
sudo apt-get update
sudo apt-get install -y trivy
# falco
curl -fsSL https://falco.org/repo/falcosecurity-packages.asc | \
sudo gpg --dearmor -o /usr/share/keyrings/falco-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/falco-archive-keyring.gpg] https://download.falco.org/packages/deb stable main" | \
tee -a /etc/apt/sources.list.d/falcosecurity.list
apt-get update -y
# non-eBPF
#apt install -y dkms make linux-headers-$(uname -r)
# If you use falcoctl driver loader to build the eBPF probe locally you need also clang toolchain
#apt install -y clang llvm
# You can install also the dialog package if you want it
#apt install -y dialog
apt-get install -y falco
mkdir -p /usr/share/falco/plugins
curl -Lo /tmp/falcorules.tar.gz https://download.falco.org/rules/falco-incubating-rules-3.0.1.tar.gz
(
cd /etc/falco/rules.d
tar xvfz /tmp/falcorules.tar.gz
)
# The generic/ubuntu2004 box ships a 5.4 kernel, which predates Falco's modern
# eBPF probe requirements, so fall back to the classic eBPF driver there;
# on other boxes use the modern eBPF probe.
if grep -q ubuntu /etc/os-release
then
    falcoctl driver config --type ebpf
    falcoctl driver install
    systemctl start falco-bpf
    systemctl enable falco-bpf
else
    falcoctl driver config --type modern_ebpf
    #falcoctl artifact install k8saudit-rules
    systemctl enable falco-modern-bpf
    systemctl restart falco-modern-bpf
fi
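
A few smoke tests after this script has run (suggestions, not part of the script):

kubeadm version && kubectl version --client   # pinned 1.29.2 tools
systemctl is-active containerd                # container runtime up
kube-bench version
trivy --version
systemctl status falco-bpf --no-pager         # falco-modern-bpf on non-Ubuntu boxes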

setup-master.sh
@@ -0,0 +1,69 @@
#!/bin/bash
VERSION=1.29.2
kubeadm config images pull --kubernetes-version $VERSION
kubeadm init --kubernetes-version $VERSION
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
echo "Waiting until cluster available..."
while ! kubectl get nodes >& /dev/null
do
echo .
sleep 1
done
#kubectl taint node master1 node-role.kubernetes.io/control-plane-
# Use a quoted heredoc so the single quotes in the sed expression survive intact.
cat <<'EOF' >> ~/.bashrc
. /etc/bash_completion
. <( kubectl completion bash )
. <( kubectl completion bash | sed 's/kubectl/k/g' )
alias k=kubectl
EOF
echo "Waiting for kube-proxy..."
while ! kubectl get pods -n kube-system | grep kube-proxy | grep Running >& /dev/null
do
echo .
sleep 1
done
echo "Installing network plugin..."
while ! kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml >& /dev/null
do
echo .
sleep 1
done
echo "Waiting until coredns is running..."
while ! kubectl get pods -n kube-system | grep coredns | grep Running >& /dev/null
do
echo .
sleep 1
done
kubectl taint node "$HOSTNAME" node-role.kubernetes.io/control-plane-
echo "Installing OPA gatekeeper"
kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/v3.15.0/deploy/gatekeeper.yaml
kubectl scale -n gatekeeper-system deployment gatekeeper-controller-manager --replicas 1
echo "Done setting up master"
exit 0
# Note: everything below the 'exit 0' above is unreachable; it is an alternative
# CNI setup (Calico instead of Weave) kept for reference.
curl https://raw.githubusercontent.com/projectcalico/calico/v3.27.2/manifests/calico.yaml -O
while ! kubectl apply -f calico.yaml
do
    sleep 1
done
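
Once the provisioner finishes, the single node should be Ready and schedulable (the control-plane taint is removed). A quick check from a root shell on the master (a suggestion, not part of the script):

kubectl get nodes -o wide
kubectl get pods -A
kubectl get pods -n gatekeeper-system   # Gatekeeper controller scaled to one replica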

@@ -0,0 +1,11 @@
# Host preparation (RHEL 9 family): install the libvirt hypervisor stack, Vagrant
# from the HashiCorp repo, and the vagrant-libvirt plugin (libvirt-devel from the
# CRB repo is needed to build the plugin).
sudo dnf group install -y "virtualization hypervisor"
sudo dnf group install -y "virtualization tools"
sudo systemctl enable --now libvirtd
sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo
sudo dnf install -y vagrant
sudo dnf config-manager --set-enabled crb
sudo dnf install -y libvirt-devel
vagrant plugin install vagrant-libvirt
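
With the plugin in place, bringing the cluster up is the usual Vagrant workflow (a sketch; box, resources and provisioning come from the Vagrantfile above):

# run from the directory containing the Vagrantfile
vagrant up --provider=libvirt
vagrant ssh master1
sudo -i kubectl get nodes   # provisioning copied admin.conf to /root/.kube/config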