diff --git a/single-node-kubernetes/Vagrantfile b/single-node-kubernetes/Vagrantfile
new file mode 100644
index 0000000..da74103
--- /dev/null
+++ b/single-node-kubernetes/Vagrantfile
@@ -0,0 +1,44 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+#NUM_MASTER_NODE = ENV['MASTERS'].to_i
+#NUM_WORKER_NODE = ENV['WORKERS'].to_i
+
+NUM_MASTER_NODE = 1
+NUM_WORKER_NODE = 0
+
+Vagrant.configure("2") do |config|
+  # The most common configuration options are documented and commented below.
+  # For a complete reference, please see the online documentation at
+  # https://docs.vagrantup.com.
+
+  # Every Vagrant development environment requires a box. You can search for
+  # boxes at https://vagrantcloud.com/search.
+  #config.vm.box = "generic/ubuntu2004"
+  config.vm.box = "generic/ubuntu2004" # "generic/debian12"
+  config.vm.provider :libvirt do |libvirt|
+    libvirt.default_prefix = ""
+    libvirt.memory = 3096
+    libvirt.cpus = 2
+  end
+  config.vm.provider "virtualbox" do |vbox|
+    vbox.memory = 3096
+    vbox.cpus = 2
+  end
+
+  (1..NUM_MASTER_NODE).each do |i|
+    config.vm.define "master#{i}" do |node|
+      node.vm.hostname = "master#{i}"
+      node.vm.provision "shell", path: "disable-swap.sh"
+      node.vm.provision "shell", path: "setup-k8s-node.sh"
+      node.vm.provision "shell", path: "setup-master.sh"
+      #node.vm.network "private_network", ip: IP_NW + "#{1+ i}"
+    end
+  end
+  (1..NUM_WORKER_NODE).each do |i|
+    config.vm.define "worker#{i}" do |node|
+      node.vm.hostname = "worker#{i}"
+      node.vm.provision "shell", path: "disable-swap.sh"
+    end
+  end
+end
diff --git a/single-node-kubernetes/disable-swap.sh b/single-node-kubernetes/disable-swap.sh
new file mode 100644
index 0000000..7fea093
--- /dev/null
+++ b/single-node-kubernetes/disable-swap.sh
@@ -0,0 +1,4 @@
+# kubelet requires swap off
+swapoff -a
+# keep swap off after reboot
+sudo sed -i '/\bswap\b/ s/^\(.*\)$/#\1/g' /etc/fstab
diff --git a/single-node-kubernetes/setup-k8s-node.sh b/single-node-kubernetes/setup-k8s-node.sh
new file mode 100755
index 0000000..bd4d491
--- /dev/null
+++ b/single-node-kubernetes/setup-k8s-node.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+apt update
+apt install containerd -y
+systemctl stop containerd
+# make sure the config directory exists before writing config.toml
+mkdir -p /etc/containerd
+# switch containerd to the systemd cgroup driver expected by kubeadm
+containerd config default | sed 's/SystemdCgroup.*=.*false/SystemdCgroup = true/g' > /etc/containerd/config.toml
+systemctl start containerd
+systemctl enable containerd
+
+# trivy
+wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | gpg --dearmor | sudo tee /usr/share/keyrings/trivy.gpg > /dev/null
+echo "deb [signed-by=/usr/share/keyrings/trivy.gpg] https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" | sudo tee -a /etc/apt/sources.list.d/trivy.list
+sudo apt-get update
+sudo apt-get install -y trivy
+
+# falco
+curl -fsSL https://falco.org/repo/falcosecurity-packages.asc | \
+  sudo gpg --dearmor -o /usr/share/keyrings/falco-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/falco-archive-keyring.gpg] https://download.falco.org/packages/deb stable main" | \
+  tee -a /etc/apt/sources.list.d/falcosecurity.list
+apt-get update -y
+
+# non-eBPF
+#apt install -y dkms make linux-headers-$(uname -r)
+# If you use falcoctl driver loader to build the eBPF probe locally, you also need the clang toolchain
+#apt install -y clang llvm
+# You can also install the dialog package if you want it
+#apt install -y dialog
+apt-get install -y falco
+
+mkdir -p /usr/share/falco/plugins
+
+curl -Lo /tmp/falcorules.tar.gz https://download.falco.org/rules/falco-incubating-rules-3.0.1.tar.gz
+(
+  cd /etc/falco/rules.d
+  tar xvfz /tmp/falcorules.tar.gz
+)
+
+if grep -q ubuntu /etc/os-release
+then
+  falcoctl driver config --type ebpf
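+  # The classic eBPF driver is presumably used here because the generic/ubuntu2004
+  # box ships a 5.4 kernel, which predates the BTF support (roughly kernel 5.8+)
+  # required by Falco's modern eBPF probe used in the else branch below.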
+  falcoctl driver install
+  systemctl start falco-bpf
+  systemctl enable falco-bpf
+else
+  falcoctl driver config --type modern_ebpf
+  #falcoctl artifact install k8saudit-rules
+  systemctl enable falco-modern-bpf
+  systemctl restart falco-modern-bpf
+fi
diff --git a/single-node-kubernetes/setup-master.sh b/single-node-kubernetes/setup-master.sh
new file mode 100755
index 0000000..4ee3942
--- /dev/null
+++ b/single-node-kubernetes/setup-master.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+VERSION=1.29.2
+kubeadm config images pull --kubernetes-version $VERSION
+kubeadm init --kubernetes-version $VERSION
+mkdir -p $HOME/.kube
+cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+
+echo "Waiting until the cluster is available..."
+while ! kubectl get nodes >& /dev/null
+do
+  echo .
+  sleep 1
+done
+
+#kubectl taint node master1 node-role.kubernetes.io/control-plane-
+
+# kubectl completion and a "k" alias for interactive shells
+echo '
+. /etc/bash_completion
+. <( kubectl completion bash )
+. <( kubectl completion bash | sed s/kubectl/k/g )
+alias k=kubectl
+' >> ~/.bashrc
+
+echo "Waiting for kube-proxy..."
+while ! kubectl get pods -n kube-system | grep kube-proxy | grep Running >& /dev/null
+do
+  echo .
+  sleep 1
+done
+
+echo "Installing network plugin..."
+while ! kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml >& /dev/null
+do
+  echo .
+  sleep 1
+done
+
+echo "Waiting until coredns is running..."
+while ! kubectl get pods -n kube-system | grep coredns | grep Running >& /dev/null
+do
+  echo .
+  sleep 1
+done
+
+# allow workloads on the control-plane node (single-node cluster)
+kubectl taint node "$HOSTNAME" node-role.kubernetes.io/control-plane-
+
+echo "Installing OPA Gatekeeper..."
+kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/v3.15.0/deploy/gatekeeper.yaml
+kubectl scale -n gatekeeper-system deployment gatekeeper-controller-manager --replicas 1
+
+echo "Done setting up master"
+
+exit 0
+
+# unreachable: alternative Calico CNI install, kept for reference
+curl https://raw.githubusercontent.com/projectcalico/calico/v3.27.2/manifests/calico.yaml -O
+while ! kubectl apply -f calico.yaml
+do
+  sleep 1
+done
diff --git a/single-node-kubernetes/setup.md b/single-node-kubernetes/setup.md
new file mode 100644
index 0000000..5168e91
--- /dev/null
+++ b/single-node-kubernetes/setup.md
@@ -0,0 +1,11 @@
+sudo dnf group install -y "virtualization hypervisor"
+sudo dnf group install -y "virtualization tools"
+sudo systemctl enable --now libvirtd
+
+sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo
+sudo dnf install -y vagrant
+sudo dnf config-manager --set-enabled crb
+sudo dnf install -y libvirt-devel
+vagrant plugin install vagrant-libvirt
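
setup-master.sh calls kubeadm and kubectl, but the part of setup-k8s-node.sh that installs kubelet, kubeadm and kubectl is not visible in the patch above. A minimal sketch of that step, assuming the upstream pkgs.k8s.io community repository for the v1.29 series (the repository choice, key path and sysctl file names are assumptions, not taken from this patch):

# sketch only, not part of the patch: kubeadm prerequisites and packages (v1.29 assumed)
cat <<EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
sysctl --system
apt-get install -y apt-transport-https ca-certificates curl gpg
mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' > /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl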
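
Once the host packages from setup.md are in place, a rough bring-up and smoke test could look like the following; the master1 name, the libvirt/VirtualBox provider blocks and the root kubeconfig location come from the Vagrantfile and setup-master.sh, while the exact command sequence is a suggested workflow rather than part of the patch:

cd single-node-kubernetes
vagrant up --provider=libvirt   # the Vagrantfile also carries a VirtualBox provider block
vagrant ssh master1
sudo -i                         # setup-master.sh copied admin.conf to root's ~/.kube/config
kubectl get nodes
kubectl get pods -A             # weave, coredns and gatekeeper pods should reach Running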