Log the node name for important operations.

Terminate the application when the pod watch stops.
Dockerfile and docker compose file added.
helm chart added
This commit is contained in:
Erik Brakkee 2025-03-02 23:33:17 +01:00
parent e8d4adaf53
commit be5ceb1ee5
6 changed files with 131 additions and 45 deletions

23
Dockerfile Normal file
View File

@ -0,0 +1,23 @@
# Build stage: compile the fetcher binary with a static (CGO-free) build.
FROM alpine:3.20.3 AS builder
# --no-cache avoids baking a stale apk index into the layer.
RUN apk add --no-cache go
RUN mkdir -p /opt/fetcher/bin
WORKDIR /opt/fetcher
ENV CGO_ENABLED=0
ENV GOTOOLCHAIN=auto
# Copy module files first so the download layer is cached unless they change.
COPY go.mod go.sum /opt/fetcher/
RUN go mod download
COPY cmd /opt/fetcher/cmd/
RUN go build -o bin ./cmd/...

# Runtime stage: minimal image with only CA certificates and the binary.
FROM alpine:3.20.3
RUN apk add --no-cache ca-certificates
COPY --from=builder /opt/fetcher/bin/fetcher /opt/fetcher/bin/
ENTRYPOINT ["/opt/fetcher/bin/fetcher"]

View File

@ -8,7 +8,6 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"os"
"regexp"
"strings"
"time"
)
@ -28,54 +27,30 @@ func NewFetcher(clientset *kubernetes.Clientset, config *Config, watcher *Watche
}
func (fetcher *Fetcher) canonicalizeImageName(image string) string {
pattern := `^(?:(?P<registry>[a-zA-Z0-9][-a-zA-Z0-9.]*[a-zA-Z0-9](?::[0-9]+)?)/)?(?P<repository>(?:[a-z0-9]+(?:(?:[._]|__|[-]+)[a-z0-9]+)*(?:/[a-z0-9]+(?:(?:[._]|__|[-]+)[a-z0-9]+)*)*)?)?(?::(?P<tag>[\w][\w.-]{0,127}))?(?:@(?P<digest>[a-z][a-z0-9]*(?:[+.-][a-z][a-z0-9]*)*:[a-zA-Z0-9*+,-./:;=@_]{32,}))?$`
re := regexp.MustCompile(pattern)
matches := re.FindStringSubmatch(image)
subexpNames := re.SubexpNames()
if matches == nil {
panic(fmt.Errorf("Invalid image reference: %s\n", image))
parts := strings.Split(image, "/")
if len(parts) < 1 {
panic(fmt.Errorf("Could not disect image name '%s'", image))
}
result := make(map[string]string)
for i, name := range subexpNames {
if i != 0 && name != "" && i < len(matches) {
result[name] = matches[i]
}
if len(parts) == 1 {
parts = []string{"docker.io", "library", parts[0]}
}
klog.V(3).Infof("Image: %s\n", image)
klog.V(3).Infof(" Registry: %s\n", result["registry"])
klog.V(3).Infof(" Repository: %s\n", result["repository"])
klog.V(3).Infof(" Tag: %s\n", result["tag"])
klog.V(3).Infof(" Digest: %s\n", result["digest"])
registry := result["registry"]
repository := result["repository"]
tag := result["tag"]
digest := result["digest"]
// Check if image has a tag
if digest == "" && tag == "" {
tag = "latest"
if !strings.Contains(parts[len(parts)-1], ":") {
parts[len(parts)-1] = parts[len(parts)-1] + ":latest"
}
// Check if image has a host
if registry == "" {
registry := ""
if strings.Contains(parts[0], ".") {
registry = parts[0]
parts = parts[1:]
} else {
registry = "docker.io"
}
// Handle the case when remainder doesn't specify library but it's not a docker.io official image
if registry == "docker.io" && !strings.Contains(repository, "/") {
repository = "library/" + repository
if registry == "docker.io" && len(parts) == 1 {
parts[0] = "library/" + parts[0]
}
fullimage := registry + "/" + repository
if tag != "" {
fullimage += ":" + tag
}
if digest != "" {
fullimage += "@" + digest
}
return fullimage
return registry + "/" + strings.Join(parts, "/")
}
func (fetcher *Fetcher) wasReady(pod *v1.Pod, controllers map[string]bool) bool {
@ -135,6 +110,11 @@ func (fetcher *Fetcher) getContainers(clientset *kubernetes.Clientset) map[strin
func (fetcher *Fetcher) pullAndPin() error {
nodeName := os.Getenv("NODE_NAME")
if nodeName == "" {
nodeName = "UNKNOWN"
}
// Create the image manager
containerd, err := NewContainerd(fetcher.config.SocketPath, fetcher.config.ContainerdNamespace)
if err != nil {
@ -154,7 +134,7 @@ func (fetcher *Fetcher) pullAndPin() error {
// unpin images that are not used
for container, pinned := range imgs {
if !containers[container] && pinned {
klog.Infof("Unpinning %s\n", container)
klog.Infof("%s: Unpinning %s\n", nodeName, container)
err := containerd.Unpin(container)
if err != nil {
klog.Warningf(" error: %v", err)
@ -165,7 +145,7 @@ func (fetcher *Fetcher) pullAndPin() error {
// Pull images that are used
for container := range containers {
if _, found := imgs[container]; !found {
klog.Infof("Pulling %s\n", container)
klog.Infof("%s: Pulling %s\n", nodeName, container)
err := containerd.Pull(container)
if err != nil {
klog.Warningf("error: %v", err)
@ -181,7 +161,7 @@ func (fetcher *Fetcher) pullAndPin() error {
// Pin images that are used and present
for container := range containers {
if pinned, found := imgs[container]; found && !pinned {
klog.Infof("Pinning %s\n", container)
klog.Infof("%s: Pinning %s\n", nodeName, container)
err := containerd.Pin(container)
if err != nil {
klog.Warningf(" error: %v", err)

View File

@ -95,8 +95,10 @@ func (watcher *Watcher) watchPods(
_, controller := cache.NewInformerWithOptions(options)
stop := make(chan struct{})
defer close(stop)
go controller.Run(stop)
go func() {
controller.Run(stop)
panic(fmt.Errorf("Watching for pod changes stopped"))
}()
// Wait for the cache to sync
if !cache.WaitForCacheSync(stop, controller.HasSynced) {
panic(fmt.Errorf("failed to sync cache"))

10
compose.yaml Normal file
View File

@ -0,0 +1,10 @@
services:
kube-fetcher:
image: $REGISTRY/kube-fetcher:1.0.0
build:
context: .
dockerfile: Dockerfile

View File

@ -0,0 +1,39 @@
---
# DaemonSet: runs one kube-fetcher pod on every node so that each node's
# local containerd instance can have images pulled and pinned on it.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-fetcher
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels:
      app: kube-fetcher
  template:
    metadata:
      labels:
        app: kube-fetcher
    spec:
      # Service account bound to the cluster-wide pod/node read role.
      serviceAccountName: kube-fetcher
      containers:
        - name: fetcher
          image: cat.wamblee.org/kube-fetcher:1.0.0
          imagePullPolicy: Always
          env:
            # Name of the node this pod is scheduled on, taken from the
            # downward API; read by the fetcher process via os.Getenv.
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          # Privileged root is used to access the host containerd socket.
          # NOTE(review): consider whether a narrower security context would
          # suffice for socket access — TODO confirm.
          securityContext:
            privileged: true
            runAsUser: 0
          args:
            - --ready-duration=1m
            - --v=3
          volumeMounts:
            - mountPath: /run/containerd/containerd.sock
              name: containerd-sock
      volumes:
        # Host containerd socket, mounted so the fetcher can pull/pin
        # images directly on the node.
        - name: containerd-sock
          hostPath:
            path: /run/containerd/containerd.sock
            type: Socket

32
helm/templates/sa.yaml Normal file
View File

@ -0,0 +1,32 @@
---
# ServiceAccount: identity used by the kube-fetcher DaemonSet pods.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-fetcher
  namespace: {{ .Release.Namespace }}
---
# ClusterRole: read-only access to pods (including watch) and nodes
# across all namespaces.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kube-fetcher
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list"]
---
# ClusterRoleBinding to connect the ServiceAccount to the ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-fetcher
subjects:
  - kind: ServiceAccount
    name: kube-fetcher
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: kube-fetcher
  apiGroup: rbac.authorization.k8s.io