diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index e4b1c6f2c..3eb0f6c7f 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -106,9 +106,12 @@ jobs: kubectl cluster-info - - name: Test + - name: Unit Tests run: make test + - name: E2E Tests + run: make e2e + - name: Generate Tags id: generate_tag run: | diff --git a/.gitignore b/.gitignore index 73da63e55..5beaa6281 100644 --- a/.gitignore +++ b/.gitignore @@ -9,11 +9,13 @@ _gopath/ .vscode vendor dist -Reloader +/reloader +/Reloader !**/chart/reloader *.tgz styles/ site/ /mkdocs.yml yq -bin \ No newline at end of file +bin +*.test diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..8644bc04f --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,74 @@ +version: "2" + +run: + go: "1.25" + timeout: 5m + allow-parallel-runners: true + +linters: + default: none + enable: + # Core linters + - errcheck + - govet + - staticcheck + - ineffassign + - unused + + # Code quality + - revive + - misspell + - unconvert + - unparam + - nakedret + - copyloopvar + + # Bug prevention + - bodyclose + - durationcheck + - errorlint + + # Test framework + - ginkgolinter + + settings: + revive: + rules: + - name: comment-spacings + - name: import-shadowing + + govet: + enable-all: true + disable: + - shadow + - fieldalignment + + errcheck: + check-type-assertions: true + exclude-functions: + - (io.Closer).Close + - (*os.File).Close + + nakedret: + max-func-lines: 30 + + exclusions: + generated: lax + rules: + - linters: + - errcheck + path: _test\.go + paths: + - third_party$ + - vendor$ + +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/stakater/Reloader + exclusions: + generated: lax diff --git a/.goreleaser.yml b/.goreleaser.yml index 08953b788..b49ad2293 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,5 +1,7 @@ builds: -- env: +- main: ./cmd/reloader + binary: reloader 
+ env: - CGO_ENABLED=0 goos: - windows @@ -11,6 +13,11 @@ builds: - arm - arm64 - ppc64le + ldflags: + - -s -w + - -X github.com/stakater/Reloader/internal/pkg/metadata.Version={{.Version}} + - -X github.com/stakater/Reloader/internal/pkg/metadata.Commit={{.Commit}} + - -X github.com/stakater/Reloader/internal/pkg/metadata.BuildDate={{.Date}} archives: - name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" snapshot: diff --git a/Dockerfile b/Dockerfile index 53cc26d8b..0391463c5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,9 +23,8 @@ COPY go.sum go.sum RUN go mod download # Copy the go source -COPY main.go main.go +COPY cmd/ cmd/ COPY internal/ internal/ -COPY pkg/ pkg/ # Build RUN CGO_ENABLED=0 \ @@ -34,10 +33,11 @@ RUN CGO_ENABLED=0 \ GOPROXY=${GOPROXY} \ GOPRIVATE=${GOPRIVATE} \ GO111MODULE=on \ - go build -ldflags="-s -w -X github.com/stakater/Reloader/pkg/common.Version=${VERSION} \ - -X github.com/stakater/Reloader/pkg/common.Commit=${COMMIT} \ - -X github.com/stakater/Reloader/pkg/common.BuildDate=${BUILD_DATE}" \ - -installsuffix 'static' -mod=mod -a -o manager ./ + go build -ldflags="-s -w \ + -X github.com/stakater/Reloader/internal/pkg/metadata.Version=${VERSION} \ + -X github.com/stakater/Reloader/internal/pkg/metadata.Commit=${COMMIT} \ + -X github.com/stakater/Reloader/internal/pkg/metadata.BuildDate=${BUILD_DATE}" \ + -installsuffix 'static' -mod=mod -a -o manager ./cmd/reloader # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 435973004..b33a79908 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,6 +1,7 @@ ARG BUILDER_IMAGE ARG BASE_IMAGE +# First stage: Build the binary (using the standard Dockerfile as builder) FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE} AS SRC FROM ${BASE_IMAGE:-registry.access.redhat.com/ubi9/ubi:latest} AS ubi diff 
--git a/Makefile b/Makefile index 8444e1f76..be013e41c 100644 --- a/Makefile +++ b/Makefile @@ -20,10 +20,17 @@ BUILD= GOCMD = go GOFLAGS ?= $(GOFLAGS:) -LDFLAGS = GOPROXY ?= GOPRIVATE ?= +# Version information for ldflags +GIT_COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") +BUILD_DATE ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ") +LDFLAGS = -s -w \ + -X github.com/stakater/Reloader/internal/pkg/metadata.Version=$(VERSION) \ + -X github.com/stakater/Reloader/internal/pkg/metadata.Commit=$(GIT_COMMIT) \ + -X github.com/stakater/Reloader/internal/pkg/metadata.BuildDate=$(BUILD_DATE) + ## Location to install dependencies to LOCALBIN ?= $(shell pwd)/bin $(LOCALBIN): @@ -31,18 +38,9 @@ $(LOCALBIN): ## Tool Binaries KUBECTL ?= kubectl -KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION) -CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION) -ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) -GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) YQ ?= $(LOCALBIN)/yq ## Tool Versions -KUSTOMIZE_VERSION ?= v5.3.0 -CONTROLLER_TOOLS_VERSION ?= v0.14.0 -ENVTEST_VERSION ?= release-0.17 -GOLANGCI_LINT_VERSION ?= v2.6.1 - YQ_VERSION ?= v4.27.5 YQ_DOWNLOAD_URL = "https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(ARCH)" @@ -57,53 +55,19 @@ $(YQ): @chmod +x $(YQ) @echo "yq downloaded successfully to $(YQ)." -.PHONY: kustomize -kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. -$(KUSTOMIZE): $(LOCALBIN) - $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) - -.PHONY: controller-gen -controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. -$(CONTROLLER_GEN): $(LOCALBIN) - $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) - -.PHONY: envtest -envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. 
-$(ENVTEST): $(LOCALBIN) - $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) - -.PHONY: golangci-lint -golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. -$(GOLANGCI_LINT): $(LOCALBIN) - $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) - -# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist -# $1 - target path with name of binary (ideally with version) -# $2 - package url which can be installed -# $3 - specific version of package -define go-install-tool -@[ -f $(1) ] || { \ -set -e; \ -package=$(2)@$(3) ;\ -echo "Downloading $${package}" ;\ -GOBIN=$(LOCALBIN) go install $${package} ;\ -mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\ -} -endef - default: build test install: "$(GOCMD)" mod download run: - go run ./main.go + go run ./cmd/reloader build: - "$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}" + "$(GOCMD)" build ${GOFLAGS} -ldflags '${LDFLAGS}' -o "${BINARY}" ./cmd/reloader -lint: golangci-lint ## Run golangci-lint on the codebase - $(GOLANGCI_LINT) run ./... +lint: ## Run golangci-lint on the codebase + go tool golangci-lint run ./... build-image: docker buildx build \ @@ -140,7 +104,11 @@ manifest: docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH) test: - "$(GOCMD)" test -timeout 1800s -v ./... + "$(GOCMD)" test -timeout 1800s -v -short ./cmd/... ./internal/... + +.PHONY: docker-build +docker-build: ## Build Docker image + $(CONTAINER_RUNTIME) build -t $(IMG) -f Dockerfile . 
stop: @docker stop "${BINARY}" @@ -151,8 +119,8 @@ apply: deploy: binary-image push apply .PHONY: k8s-manifests -k8s-manifests: $(KUSTOMIZE) ## Generate k8s manifests using Kustomize from 'manifests' folder - $(KUSTOMIZE) build ./deployments/kubernetes/ -o ./deployments/kubernetes/reloader.yaml +k8s-manifests: ## Generate k8s manifests using Kustomize from 'manifests' folder + go tool kustomize build ./deployments/kubernetes/ -o ./deployments/kubernetes/reloader.yaml .PHONY: update-manifests-version update-manifests-version: ## Generate k8s manifests using Kustomize from 'manifests' folder diff --git a/cmd/reloader/main.go b/cmd/reloader/main.go new file mode 100644 index 000000000..83b78c4dc --- /dev/null +++ b/cmd/reloader/main.go @@ -0,0 +1,227 @@ +package main + +import ( + "context" + "fmt" + "net/http" + _ "net/http/pprof" + "os" + "os/signal" + "syscall" + "time" + + "github.com/go-logr/logr" + "github.com/go-logr/zerologr" + "github.com/rs/zerolog" + "github.com/spf13/cobra" + "k8s.io/client-go/discovery" + controllerruntime "sigs.k8s.io/controller-runtime" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/metadata" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/openshift" +) + +// Environment variable names for pod identity in HA mode. +const ( + podNameEnv = "POD_NAME" + podNamespaceEnv = "POD_NAMESPACE" +) + +// cfg holds the configuration for this reloader instance. 
+var cfg *config.Config + +func main() { + if err := newReloaderCommand().Execute(); err != nil { + os.Exit(1) + } +} + +func newReloaderCommand() *cobra.Command { + cfg = config.NewDefault() + + cmd := &cobra.Command{ + Use: "reloader", + Short: "A watcher for your Kubernetes cluster", + RunE: run, + } + + config.BindFlags(cmd.PersistentFlags(), cfg) + return cmd +} + +func run(cmd *cobra.Command, args []string) error { + if err := config.ApplyFlags(cfg); err != nil { + return fmt.Errorf("applying flags: %w", err) + } + + if err := cfg.Validate(); err != nil { + return fmt.Errorf("validating config: %w", err) + } + + if cfg.EnableHA { + if err := validateHAEnvs(); err != nil { + return err + } + cfg.LeaderElection.Identity = os.Getenv(podNameEnv) + if cfg.LeaderElection.Namespace == "" { + cfg.LeaderElection.Namespace = os.Getenv(podNamespaceEnv) + } + } + + log, err := configureLogging(cfg.LogFormat, cfg.LogLevel) + if err != nil { + return fmt.Errorf("configuring logging: %w", err) + } + + controllerruntime.SetLogger(log) + + log.Info("Starting Reloader") + + if cfg.WatchedNamespace != "" { + log.Info("watching single namespace", "namespace", cfg.WatchedNamespace) + } else { + log.Info("watching all namespaces") + } + + if len(cfg.NamespaceSelectors) > 0 { + log.Info("namespace-selector is set", "selectors", cfg.NamespaceSelectorStrings) + } + + if len(cfg.ResourceSelectors) > 0 { + log.Info("resource-label-selector is set", "selectors", cfg.ResourceSelectorStrings) + } + + if cfg.WebhookURL != "" { + log.Info("webhook-url is set, will only send webhook, no resources will be reloaded", "url", cfg.WebhookURL) + } + + if cfg.EnableHA { + log.Info( + "high-availability mode enabled", + "leaderElectionID", cfg.LeaderElection.LockName, + "leaderElectionNamespace", cfg.LeaderElection.Namespace, + ) + } + + collectors := metrics.SetupPrometheusEndpoint() + + if config.ShouldAutoDetectOpenShift() { + restConfig := controllerruntime.GetConfigOrDie() + discoveryClient, 
err := discovery.NewDiscoveryClientForConfig(restConfig) + if err != nil { + log.V(1).Info("Failed to create discovery client for DeploymentConfig detection", "error", err) + } else if openshift.HasDeploymentConfigSupport(discoveryClient, log) { + cfg.DeploymentConfigEnabled = true + } + } + + controller.AddOptionalSchemes(cfg.ArgoRolloutsEnabled, cfg.DeploymentConfigEnabled) + + mgr, err := controller.NewManager( + controller.ManagerOptions{ + Config: cfg, + Log: log, + Collectors: &collectors, + }, + ) + if err != nil { + return fmt.Errorf("creating manager: %w", err) + } + + if err := controller.SetupReconcilers(mgr, cfg, log, &collectors); err != nil { + return fmt.Errorf("setting up reconcilers: %w", err) + } + + // Skip metadata publisher when ConfigMaps are ignored (no RBAC permissions) + if !cfg.IsResourceIgnored("configmaps") { + if err := mgr.Add(metadata.Runnable(mgr.GetClient(), cfg, log)); err != nil { + log.Error(err, "Failed to add metadata publisher") + // Non-fatal, continue starting + } + } else { + log.Info("skipping metadata publisher (configmaps ignored)") + } + + if cfg.EnablePProf { + go startPProfServer(log) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + sig := <-sigCh + log.Info("Received signal, shutting down", "signal", sig) + cancel() + }() + + log.Info("Starting controller manager") + if err := controller.RunManager(ctx, mgr, log); err != nil { + return fmt.Errorf("manager exited with error: %w", err) + } + + log.Info("Reloader shutdown complete") + return nil +} + +func configureLogging(logFormat, logLevel string) (logr.Logger, error) { + // Parse log level + var level zerolog.Level + switch logLevel { + case "trace": + level = zerolog.TraceLevel + case "debug": + level = zerolog.DebugLevel + case "info", "": + level = zerolog.InfoLevel + case "warn", "warning": + level = zerolog.WarnLevel + 
case "error": + level = zerolog.ErrorLevel + default: + return logr.Logger{}, fmt.Errorf("unsupported log level: %q", logLevel) + } + + var zl zerolog.Logger + switch logFormat { + case "json": + zl = zerolog.New(os.Stdout).Level(level).With().Timestamp().Logger() + case "": + // Human-readable console output + zl = zerolog.New( + zerolog.ConsoleWriter{ + Out: os.Stdout, + TimeFormat: time.RFC3339, + }, + ).Level(level).With().Timestamp().Logger() + default: + return logr.Logger{}, fmt.Errorf("unsupported log format: %q", logFormat) + } + + return zerologr.New(&zl), nil +} + +func validateHAEnvs() error { + podName := os.Getenv(podNameEnv) + podNamespace := os.Getenv(podNamespaceEnv) + + if podName == "" { + return fmt.Errorf("%s not set, cannot run in HA mode", podNameEnv) + } + if podNamespace == "" { + return fmt.Errorf("%s not set, cannot run in HA mode", podNamespaceEnv) + } + return nil +} + +func startPProfServer(log logr.Logger) { + log.Info("Starting pprof server", "addr", cfg.PProfAddr) + if err := (&http.Server{Addr: cfg.PProfAddr, ReadHeaderTimeout: 10 * time.Second}).ListenAndServe(); err != nil { + log.Error(err, "Failed to start pprof server") + } +} diff --git a/deployments/kubernetes/chart/reloader/Chart.yaml b/deployments/kubernetes/chart/reloader/Chart.yaml index 536fd6be5..8c4c4508f 100644 --- a/deployments/kubernetes/chart/reloader/Chart.yaml +++ b/deployments/kubernetes/chart/reloader/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: reloader description: Reloader chart that runs on kubernetes -version: 2.2.7 +version: 2.3.0 appVersion: v1.4.12 keywords: - Reloader diff --git a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml index 9f655aa91..11e8a5d43 100644 --- a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml +++ b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml @@ -56,12 +56,12 @@ rules: {{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") 
(.Values.reloader.isArgoRollouts) }} - apiGroups: - "argoproj.io" - - "" resources: - rollouts verbs: - list - get + - watch - update - patch {{- end }} @@ -76,6 +76,7 @@ rules: - get - update - patch + - watch {{- if .Values.reloader.ignoreCronJobs }}{{- else }} - apiGroups: - "batch" @@ -84,6 +85,9 @@ rules: verbs: - list - get + - watch + - update + - patch {{- end }} {{- if .Values.reloader.ignoreJobs }}{{- else }} - apiGroups: @@ -95,6 +99,7 @@ rules: - delete - list - get + - watch {{- end}} {{- if .Values.reloader.enableHA }} - apiGroups: diff --git a/deployments/kubernetes/chart/reloader/templates/deployment.yaml b/deployments/kubernetes/chart/reloader/templates/deployment.yaml index 16564b209..048526e83 100644 --- a/deployments/kubernetes/chart/reloader/templates/deployment.yaml +++ b/deployments/kubernetes/chart/reloader/templates/deployment.yaml @@ -173,10 +173,12 @@ spec: ports: - name: http containerPort: 9090 + - name: health + containerPort: 8080 livenessProbe: httpGet: - path: /live - port: http + path: /healthz + port: health timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }} failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }} periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }} @@ -184,8 +186,8 @@ spec: initialDelaySeconds: {{ .Values.reloader.deployment.livenessProbe.initialDelaySeconds | default "10" }} readinessProbe: httpGet: - path: /metrics - port: http + path: /readyz + port: health timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }} failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }} periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }} @@ -235,7 +237,7 @@ spec: - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}" {{- end }} {{- if (include "reloader-namespaceSelector" .) 
}} - - "--namespace-selector=\"{{ include "reloader-namespaceSelector" . }}\"" + - "--namespace-selector={{ include "reloader-namespaceSelector" . }}" {{- end }} {{- if .Values.reloader.resourceLabelSelector }} - "--resource-label-selector={{ .Values.reloader.resourceLabelSelector }}" diff --git a/deployments/kubernetes/chart/reloader/templates/role.yaml b/deployments/kubernetes/chart/reloader/templates/role.yaml index 70a681571..c6cfed646 100644 --- a/deployments/kubernetes/chart/reloader/templates/role.yaml +++ b/deployments/kubernetes/chart/reloader/templates/role.yaml @@ -47,12 +47,12 @@ rules: {{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }} - apiGroups: - "argoproj.io" - - "" resources: - rollouts verbs: - list - get + - watch - update - patch {{- end }} @@ -67,6 +67,7 @@ rules: - get - update - patch + - watch - apiGroups: - "batch" resources: @@ -74,6 +75,9 @@ rules: verbs: - list - get + - watch + - update + - patch - apiGroups: - "batch" resources: @@ -83,6 +87,7 @@ rules: - delete - list - get + - watch {{- if .Values.reloader.enableHA }} - apiGroups: - "coordination.k8s.io" diff --git a/go.mod b/go.mod index 05edeccd1..ed633b39d 100644 --- a/go.mod +++ b/go.mod @@ -4,70 +4,271 @@ go 1.25.5 require ( github.com/argoproj/argo-rollouts v1.8.3 - github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 - github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 - github.com/parnurzeal/gorequest v0.3.0 - github.com/prometheus/client_golang v1.22.0 - github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.10.1 - github.com/stretchr/testify v1.10.0 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 - k8s.io/kubectl v0.32.3 - k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + github.com/go-logr/logr v1.4.3 + github.com/go-logr/zerologr v1.2.3 + github.com/openshift/api v0.0.0-20260107103503-6d35063ca179 + github.com/openshift/client-go 
v0.0.0-20260105124352-f93a4291f9ae + github.com/prometheus/client_golang v1.23.2 + github.com/prometheus/client_model v0.6.2 + github.com/rs/zerolog v1.34.0 + github.com/spf13/cobra v1.10.2 + github.com/spf13/pflag v1.0.10 + github.com/spf13/viper v1.21.0 + k8s.io/api v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/client-go v0.35.0 + sigs.k8s.io/controller-runtime v0.22.4 ) require ( + 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect + 4d63.com/gochecknoglobals v0.2.2 // indirect + codeberg.org/chavacava/garif v0.2.0 // indirect + codeberg.org/polyfloyd/go-errorlint v1.9.0 // indirect + dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect + dev.gaijin.team/go/golib v0.6.0 // indirect + github.com/4meepo/tagalign v1.4.3 // indirect + github.com/Abirdcfly/dupword v0.1.7 // indirect + github.com/AdminBenni/iota-mixing v1.0.0 // indirect + github.com/AlwxSin/noinlineerr v1.0.5 // indirect + github.com/Antonboom/errname v1.1.1 // indirect + github.com/Antonboom/nilnil v1.1.1 // indirect + github.com/Antonboom/testifylint v1.6.4 // indirect + github.com/BurntSushi/toml v1.6.0 // indirect + github.com/Djarvur/go-err113 v0.1.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/MirrexOne/unqueryvet v1.4.0 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect + github.com/alecthomas/chroma/v2 v2.21.1 // indirect + github.com/alecthomas/go-check-sumtype v0.3.1 // indirect + github.com/alexkohler/nakedret/v2 v2.0.6 // indirect + github.com/alexkohler/prealloc v1.0.1 // indirect + github.com/alfatraining/structtag v1.0.0 // indirect + github.com/alingse/asasalint v0.0.11 // indirect + github.com/alingse/nilnesserr v0.2.0 // indirect + github.com/ashanbrown/forbidigo/v2 v2.3.0 // indirect + github.com/ashanbrown/makezero/v2 v2.1.0 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.3 // indirect + github.com/blang/semver/v4 v4.0.0 // 
indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bombsimon/wsl/v4 v4.7.0 // indirect + github.com/bombsimon/wsl/v5 v5.3.0 // indirect + github.com/breml/bidichk v0.3.3 // indirect + github.com/breml/errchkjson v0.4.1 // indirect + github.com/butuzov/ireturn v0.4.0 // indirect + github.com/butuzov/mirror v1.3.0 // indirect + github.com/catenacyber/perfsprint v0.10.1 // indirect + github.com/ccojocar/zxcvbn-go v1.0.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/charithe/durationcheck v0.0.11 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect + github.com/charmbracelet/lipgloss v1.1.0 // indirect + github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/ckaznocha/intrange v0.3.1 // indirect + github.com/curioswitch/go-reassign v0.3.0 // indirect + github.com/daixiang0/gci v0.13.7 // indirect + github.com/dave/dst v0.27.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect - github.com/fxamacker/cbor/v2 v2.8.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-openapi/jsonpointer v0.21.1 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.1 // indirect + github.com/denis-tingaikin/go-header v0.5.0 // indirect + github.com/dlclark/regexp2 v1.11.5 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect + github.com/ettle/strcase v0.2.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/firefart/nonamedreturns v1.0.6 // indirect + 
github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/ghostiam/protogetter v0.3.18 // indirect + github.com/go-critic/go-critic v0.14.3 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.22.4 // indirect + github.com/go-openapi/jsonreference v0.21.4 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.2.0 // indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/godoc-lint/godoc-lint v0.11.1 // indirect + github.com/gofrs/flock v0.13.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/golangci/asciicheck v0.5.0 // indirect + github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect + 
github.com/golangci/go-printf-func-name v0.1.1 // indirect + github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect + github.com/golangci/golangci-lint/v2 v2.8.0 // indirect + github.com/golangci/golines v0.14.0 // indirect + github.com/golangci/misspell v0.7.0 // indirect + github.com/golangci/plugin-module-register v0.1.2 // indirect + github.com/golangci/revgrep v0.8.0 // indirect + github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e // indirect + github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.1 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/gordonklaus/ineffassign v0.2.0 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.5.0 // indirect + github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.2 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect + github.com/hashicorp/go-version v1.8.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/josharian/intern v1.0.0 // indirect + github.com/jgautheron/goconst v1.8.2 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jjti/go-spancheck v0.6.5 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mailru/easyjson v0.9.0 // indirect + github.com/julz/importas v0.2.0 // indirect + github.com/karamaru-alpha/copyloopvar v1.2.2 // indirect + github.com/kisielk/errcheck v1.9.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.6 // indirect + github.com/kulti/thelper v0.7.1 // indirect + 
github.com/kunwardeep/paralleltest v1.0.15 // indirect + github.com/lasiar/canonicalheader v1.1.2 // indirect + github.com/ldez/exptostd v0.4.5 // indirect + github.com/ldez/gomoddirectives v0.8.0 // indirect + github.com/ldez/grignotin v0.10.1 // indirect + github.com/ldez/structtags v0.6.1 // indirect + github.com/ldez/tagliatelle v0.7.2 // indirect + github.com/ldez/usetesting v0.5.0 // indirect + github.com/leonklingele/grouper v1.1.2 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/macabu/inamedparam v0.2.0 // indirect + github.com/manuelarte/embeddedstructfieldcheck v0.4.0 // indirect + github.com/manuelarte/funcorder v0.5.0 // indirect + github.com/maratori/testableexamples v1.0.1 // indirect + github.com/maratori/testpackage v1.1.2 // indirect + github.com/matoous/godox v1.1.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mgechev/revive v1.13.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/moul/http2curl v1.0.0 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/moricho/tparallel v0.3.2 // indirect + github.com/muesli/termenv v0.16.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.63.0 // indirect - github.com/prometheus/procfs v0.16.0 // indirect - github.com/smartystreets/goconvey v1.7.2 // indirect - github.com/spf13/pflag v1.0.9 // indirect + 
github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nishanths/exhaustive v0.12.0 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect + github.com/nunnatsa/ginkgolinter v0.21.2 // indirect + github.com/onsi/ginkgo/v2 v2.27.3 // indirect + github.com/onsi/gomega v1.38.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.19.2 // indirect + github.com/quasilyte/go-ruleguard v0.4.5 // indirect + github.com/quasilyte/go-ruleguard/dsl v0.3.23 // indirect + github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/raeperd/recvcheck v0.2.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/ryancurrah/gomodguard v1.4.1 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect + github.com/sagikazarmark/locafero v0.12.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect + github.com/securego/gosec/v2 v2.22.11 // indirect + github.com/sergi/go-diff v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sivchari/containedctx v1.0.3 // indirect + github.com/sonatard/noctx v0.4.0 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.3.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/stretchr/testify v1.11.1 // indirect + 
github.com/subosito/gotenv v1.6.0 // indirect + github.com/tetafro/godot v1.5.4 // indirect + github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect + github.com/timonwong/loggercheck v0.11.0 // indirect + github.com/tomarrell/wrapcheck/v2 v2.12.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ultraware/funlen v0.2.0 // indirect + github.com/ultraware/whitespace v0.2.0 // indirect + github.com/uudashr/gocognit v1.2.0 // indirect + github.com/uudashr/iface v1.4.1 // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.29.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect - golang.org/x/time v0.11.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + github.com/xen0n/gosmopolitan v1.3.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.3.0 // indirect + github.com/ykadowak/zerologlint v0.1.5 // indirect + gitlab.com/bosi/decorder v0.4.2 // indirect + go-simpler.org/musttag v0.14.0 // indirect + go-simpler.org/sloglint v0.11.1 // indirect + go.augendre.info/arangolint v0.3.1 // indirect + go.augendre.info/fatcontext v0.9.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + 
golang.org/x/text v0.32.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.40.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.6.1 // indirect + k8s.io/apiextensions-apiserver v0.35.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect + k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8 // indirect + mvdan.cc/gofumpt v0.9.2 // indirect + mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/kustomize/api v0.21.0 // indirect + sigs.k8s.io/kustomize/cmd/config v0.21.0 // indirect + sigs.k8s.io/kustomize/kustomize/v5 v5.8.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.21.0 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) // Replacements for argo-rollouts @@ -92,3 +293,8 @@ replace ( k8s.io/sample-cli-plugin v0.0.0 => k8s.io/sample-cli-plugin v0.24.2 k8s.io/sample-controller v0.0.0 => k8s.io/sample-controller v0.24.2 ) + +tool ( + github.com/golangci/golangci-lint/v2/cmd/golangci-lint + sigs.k8s.io/kustomize/kustomize/v5 +) diff --git a/go.sum b/go.sum index 59339eaf7..945867094 100644 --- a/go.sum +++ b/go.sum @@ -1,198 +1,735 @@ +4d63.com/gocheckcompilerdirectives v1.3.0 h1:Ew5y5CtcAAQeTVKUVFrE7EwHMrTO6BggtEj8BZSjZ3A= +4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= +4d63.com/gochecknoglobals v0.2.2 
h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= +4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= +codeberg.org/chavacava/garif v0.2.0 h1:F0tVjhYbuOCnvNcU3YSpO6b3Waw6Bimy4K0mM8y6MfY= +codeberg.org/chavacava/garif v0.2.0/go.mod h1:P2BPbVbT4QcvLZrORc2T29szK3xEOlnl0GiPTJmEqBQ= +codeberg.org/polyfloyd/go-errorlint v1.9.0 h1:VkdEEmA1VBpH6ecQoMR4LdphVI3fA4RrCh2an7YmodI= +codeberg.org/polyfloyd/go-errorlint v1.9.0/go.mod h1:GPRRu2LzVijNn4YkrZYJfatQIdS+TrcK8rL5Xs24qw8= +dev.gaijin.team/go/exhaustruct/v4 v4.0.0 h1:873r7aNneqoBB3IaFIzhvt2RFYTuHgmMjoKfwODoI1Y= +dev.gaijin.team/go/exhaustruct/v4 v4.0.0/go.mod h1:aZ/k2o4Y05aMJtiux15x8iXaumE88YdiB0Ai4fXOzPI= +dev.gaijin.team/go/golib v0.6.0 h1:v6nnznFTs4bppib/NyU1PQxobwDHwCXXl15P7DV5Zgo= +dev.gaijin.team/go/golib v0.6.0/go.mod h1:uY1mShx8Z/aNHWDyAkZTkX+uCi5PdX7KsG1eDQa2AVE= +github.com/4meepo/tagalign v1.4.3 h1:Bnu7jGWwbfpAie2vyl63Zup5KuRv21olsPIha53BJr8= +github.com/4meepo/tagalign v1.4.3/go.mod h1:00WwRjiuSbrRJnSVeGWPLp2epS5Q/l4UEy0apLLS37c= +github.com/Abirdcfly/dupword v0.1.7 h1:2j8sInznrje4I0CMisSL6ipEBkeJUJAmK1/lfoNGWrQ= +github.com/Abirdcfly/dupword v0.1.7/go.mod h1:K0DkBeOebJ4VyOICFdppB23Q0YMOgVafM0zYW0n9lF4= +github.com/AdminBenni/iota-mixing v1.0.0 h1:Os6lpjG2dp/AE5fYBPAA1zfa2qMdCAWwPMCgpwKq7wo= +github.com/AdminBenni/iota-mixing v1.0.0/go.mod h1:i4+tpAaB+qMVIV9OK3m4/DAynOd5bQFaOu+2AhtBCNY= +github.com/AlwxSin/noinlineerr v1.0.5 h1:RUjt63wk1AYWTXtVXbSqemlbVTb23JOSRiNsshj7TbY= +github.com/AlwxSin/noinlineerr v1.0.5/go.mod h1:+QgkkoYrMH7RHvcdxdlI7vYYEdgeoFOVjU9sUhw/rQc= +github.com/Antonboom/errname v1.1.1 h1:bllB7mlIbTVzO9jmSWVWLjxTEbGBVQ1Ff/ClQgtPw9Q= +github.com/Antonboom/errname v1.1.1/go.mod h1:gjhe24xoxXp0ScLtHzjiXp0Exi1RFLKJb0bVBtWKCWQ= +github.com/Antonboom/nilnil v1.1.1 h1:9Mdr6BYd8WHCDngQnNVV0b554xyisFioEKi30sksufQ= +github.com/Antonboom/nilnil v1.1.1/go.mod h1:yCyAmSw3doopbOWhJlVci+HuyNRuHJKIv6V2oYQa8II= +github.com/Antonboom/testifylint v1.6.4 
h1:gs9fUEy+egzxkEbq9P4cpcMB6/G0DYdMeiFS87UiqmQ= +github.com/Antonboom/testifylint v1.6.4/go.mod h1:YO33FROXX2OoUfwjz8g+gUxQXio5i9qpVy7nXGbxDD4= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/Djarvur/go-err113 v0.1.1 h1:eHfopDqXRwAi+YmCUas75ZE0+hoBHJ2GQNLYRSxao4g= +github.com/Djarvur/go-err113 v0.1.1/go.mod h1:IaWJdYFLg76t2ihfflPZnM1LIQszWOsFDh2hhhAVF6k= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/MirrexOne/unqueryvet v1.4.0 h1:6KAkqqW2KUnkl9Z0VuTphC3IXRPoFqEkJEtyxxHj5eQ= +github.com/MirrexOne/unqueryvet v1.4.0/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/chroma/v2 v2.21.1 h1:FaSDrp6N+3pphkNKU6HPCiYLgm8dbe5UXIXcoBhZSWA= +github.com/alecthomas/chroma/v2 v2.21.1/go.mod h1:NqVhfBR0lte5Ouh3DcthuUCTUpDC9cxBOfyMbMQPs3o= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs= +github.com/alecthomas/repr v0.5.2/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alexkohler/nakedret/v2 v2.0.6 h1:ME3Qef1/KIKr3kWX3nti3hhgNxw6aqN5pZmQiFSsuzQ= +github.com/alexkohler/nakedret/v2 v2.0.6/go.mod h1:l3RKju/IzOMQHmsEvXwkqMDzHHvurNQfAgE1eVmT40Q= 
+github.com/alexkohler/prealloc v1.0.1 h1:A9P1haqowqUxWvU9nk6tQ7YktXIHf+LQM9wPRhuteEE= +github.com/alexkohler/prealloc v1.0.1/go.mod h1:fT39Jge3bQrfA7nPMDngUfvUbQGQeJyGQnR+913SCig= +github.com/alfatraining/structtag v1.0.0 h1:2qmcUqNcCoyVJ0up879K614L9PazjBSFruTB0GOFjCc= +github.com/alfatraining/structtag v1.0.0/go.mod h1:p3Xi5SwzTi+Ryj64DqjLWz7XurHxbGsq6y3ubePJPus= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.2.0 h1:raLem5KG7EFVb4UIDAXgrv3N2JIaffeKNtcEXkEWd/w= +github.com/alingse/nilnesserr v0.2.0/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= github.com/argoproj/argo-rollouts v1.8.3 h1:blbtQva4IK9r6gFh+dWkCrLnFdPOWiv9ubQYu36qeaA= github.com/argoproj/argo-rollouts v1.8.3/go.mod h1:kCAUvIfMGfOyVf3lvQbBt0nqQn4Pd+zB5/YwKv+UBa8= +github.com/ashanbrown/forbidigo/v2 v2.3.0 h1:OZZDOchCgsX5gvToVtEBoV2UWbFfI6RKQTir2UZzSxo= +github.com/ashanbrown/forbidigo/v2 v2.3.0/go.mod h1:5p6VmsG5/1xx3E785W9fouMxIOkvY2rRV9nMdWadd6c= +github.com/ashanbrown/makezero/v2 v2.1.0 h1:snuKYMbqosNokUKm+R6/+vOPs8yVAi46La7Ck6QYSaE= +github.com/ashanbrown/makezero/v2 v2.1.0/go.mod h1:aEGT/9q3S8DHeE57C88z2a6xydvgx8J5hgXIGWgo0MY= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
+github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= +github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= +github.com/bombsimon/wsl/v5 v5.3.0 h1:nZWREJFL6U3vgW/B1lfDOigl+tEF6qgs6dGGbFeR0UM= +github.com/bombsimon/wsl/v5 v5.3.0/go.mod h1:Gp8lD04z27wm3FANIUPZycXp+8huVsn0oxc+n4qfV9I= +github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE= +github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE= +github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg= +github.com/breml/errchkjson v0.4.1/go.mod h1:a23OvR6Qvcl7DG/Z4o0el6BRAjKnaReoPQFciAl9U3s= +github.com/butuzov/ireturn v0.4.0 h1:+s76bF/PfeKEdbG8b54aCocxXmi0wvYdOVsWxVO7n8E= +github.com/butuzov/ireturn v0.4.0/go.mod h1:ghI0FrCmap8pDWZwfPisFD1vEc56VKH4NpQUxDHta70= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= +github.com/catenacyber/perfsprint v0.10.1 h1:u7Riei30bk46XsG8nknMhKLXG9BcXz3+3tl/WpKm0PQ= +github.com/catenacyber/perfsprint v0.10.1/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc= +github.com/ccojocar/zxcvbn-go v1.0.4 h1:FWnCIRMXPj43ukfX000kvBZvV6raSxakYr1nzyNrUcc= +github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxYFOSJB21Das= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.11 h1:g1/EX1eIiKS57NTWsYtHDZ/APfeXKhye1DidBcABctk= +github.com/charithe/durationcheck v0.0.11/go.mod h1:x5iZaixRNl8ctbM+3B2RrPG5t856TxRyVQEnbIEM2X4= +github.com/charmbracelet/colorprofile 
v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/ckaznocha/intrange v0.3.1 h1:j1onQyXvHUsPWujDH6WIjhyH26gkRt/txNlV7LspvJs= +github.com/ckaznocha/intrange v0.3.1/go.mod h1:QVepyz1AkUoFQkpEqksSYpNpUo3c5W7nWh/s6SHIJJk= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/daixiang0/gci v0.13.7 h1:+0bG5eK9vlI08J+J/NWGbWPTNiXPG4WhNLJOkSxWITQ= +github.com/daixiang0/gci v0.13.7/go.mod h1:812WVN6JLFY9S6Tv76twqmNqevN0pa3SX3nih0brVzQ= +github.com/dave/dst v0.27.3 h1:P1HPoMza3cMEquVf9kKy8yXsFirry4zEnWOdYPOoIzY= +github.com/dave/dst v0.27.3/go.mod h1:jHh6EOibnHgcUW3WjKHisiooEkYwqpHLBSX1iOBhEyc= +github.com/dave/jennifer v1.7.1 h1:B4jJJDHelWcDhlRQxWeo0Npa/pYKBLrirAQoTN45txo= 
+github.com/dave/jennifer v1.7.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg0Kys0ZbaNmDDzZ2R/C7DTi+bbsJ0= -github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= -github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= -github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= -github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= +github.com/denis-tingaikin/go-header v0.5.0/go.mod 
h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= +github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.6 h1:vmiBcKV/3EqKY3ZiPxCINmpS431OcE1S47AQUwhrg8E= +github.com/firefart/nonamedreturns v1.0.6/go.mod h1:R8NisJnSIpvPWheCq0mNRXJok6D8h7fagJTF8EMEwCo= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod 
h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/ghostiam/protogetter v0.3.18 h1:yEpghRGtP9PjKvVXtEzGpYfQj1Wl/ZehAfU6fr62Lfo= +github.com/ghostiam/protogetter v0.3.18/go.mod h1:FjIu5Yfs6FT391m+Fjp3fbAYJ6rkL/J6ySpZBfnODuI= +github.com/go-critic/go-critic v0.14.3 h1:5R1qH2iFeo4I/RJU8vTezdqs08Egi4u5p6vOESA0pog= +github.com/go-critic/go-critic v0.14.3/go.mod h1:xwntfW6SYAd7h1OqDzmN6hBX/JxsEKl5up/Y2bsxgVQ= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-logr/zerologr v1.2.3 h1:up5N9vcH9Xck3jJkXzgyOxozT14R47IyDODz8LM1KSs= +github.com/go-logr/zerologr v1.2.3/go.mod h1:BxwGo7y5zgSHYR1BjbnHPyF/5ZjVKfKxAZANVu6E8Ho= +github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= +github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= +github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= +github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod 
h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod 
h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 
h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= +github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godoc-lint/godoc-lint v0.11.1 h1:z9as8Qjiy6miRIa3VRymTa+Gt2RLnGICVikcvlUVOaA= +github.com/godoc-lint/godoc-lint v0.11.1/go.mod h1:BAqayheFSuZrEAqCRxgw9MyvsM+S/hZwJbU1s/ejRj8= +github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= +github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/golangci/asciicheck v0.5.0 h1:jczN/BorERZwK8oiFBOGvlGPknhvq0bjnysTj4nUfo0= +github.com/golangci/asciicheck v0.5.0/go.mod h1:5RMNAInbNFw2krqN6ibBxN/zfRFa9S6tA1nPdM0l8qQ= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= +github.com/golangci/go-printf-func-name v0.1.1 h1:hIYTFJqAGp1iwoIfsNTpoq1xZAarogrvjO9AfiW3B4U= +github.com/golangci/go-printf-func-name v0.1.1/go.mod h1:Es64MpWEZbh0UBtTAICOZiB+miW53w/K9Or/4QogJss= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= +github.com/golangci/golangci-lint/v2 v2.8.0 h1:wJnr3hJWY3eVzOUcfwbDc2qbi2RDEpvLmQeNFaPSNYA= +github.com/golangci/golangci-lint/v2 v2.8.0/go.mod h1:xl+HafQ9xoP8rzw0z5AwnO5kynxtb80e8u02Ej/47RI= +github.com/golangci/golines v0.14.0 h1:xt9d3RKBjhasA3qpoXs99J2xN2t6eBlpLHt0TrgyyXc= +github.com/golangci/golines v0.14.0/go.mod h1:gf555vPG2Ia7mmy2mzmhVQbVjuK8Orw0maR1G4vVAAQ= +github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= +github.com/golangci/misspell v0.7.0/go.mod h1:WZyyI2P3hxPY2UVHs3cS8YcllAeyfquQcKfdeE9AFVg= +github.com/golangci/plugin-module-register v0.1.2 h1:e5WM6PO6NIAEcij3B053CohVp3HIYbzSuP53UAYgOpg= +github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= 
+github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= +github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e h1:ai0EfmVYE2bRA5htgAG9r7s3tHsfjIhN98WshBTJ9jM= +github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e/go.mod h1:Vrn4B5oR9qRwM+f54koyeH3yzphlecwERs0el27Fr/s= +github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e h1:gD6P7NEo7Eqtt0ssnqSJNNndxe69DOQ24A5h7+i3KpM= +github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e/go.mod h1:h+wZwLjUTJnm/P2rwlbJdRPZXOzaT36/FwnPnY2inzc= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= +github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 
h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.2.0 h1:Uths4KnmwxNJNzq87fwQQDDnbNb7De00VOk9Nu0TySs= +github.com/gordonklaus/ineffassign v0.2.0/go.mod h1:TIpymnagPSexySzs7F9FnO1XFTy8IT3a59vmZp5Y9Lw= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8= +github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc= +github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= +github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= +github.com/gostaticanalysis/nilerr v0.1.2 h1:S6nk8a9N8g062nsx63kUkF6AzbHGw7zzyHMcpu52xQU= +github.com/gostaticanalysis/nilerr v0.1.2/go.mod h1:A19UHhoY3y8ahoL7YKz6sdjDtduwTSI4CsymaC2htPA= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= +github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= 
+github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jgautheron/goconst v1.8.2 h1:y0XF7X8CikZ93fSNT6WBTb/NElBu9IjaY7CCYQrCMX4= +github.com/jgautheron/goconst v1.8.2/go.mod h1:A0oxgBCHy55NQn6sYpO7UdnA9p+h7cPtoOZUmvNIako= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jjti/go-spancheck v0.6.5 h1:lmi7pKxa37oKYIMScialXUK6hP3iY5F1gu+mLBPgYB8= +github.com/jjti/go-spancheck v0.6.5/go.mod h1:aEogkeatBrbYsyW6y5TgDfihCulDYciL1B7rG2vSsrU= github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= +github.com/karamaru-alpha/copyloopvar v1.2.2 h1:yfNQvP9YaGQR7VaWLYcfZUlRP2eo2vhExWKxD/fP6q0= +github.com/karamaru-alpha/copyloopvar v1.2.2/go.mod h1:oY4rGZqZ879JkJMtX3RRkcXRkmUvH0x35ykgaKgsgJY= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M= +github.com/kisielk/errcheck v1.9.0/go.mod h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.6 h1:7HIyRcnyzxL9Lz06NGhiKvenXq7Zw6Q0UQu/ttjfJCE= +github.com/kkHAIKE/contextcheck v1.1.6/go.mod h1:3dDbMRNBFaq8HFXWC1JyvDSPm43CmE6IuHam8Wr0rkg= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper 
v0.7.1 h1:fI8QITAoFVLx+y+vSyuLBP+rcVIB8jKooNSCT2EiI98= +github.com/kulti/thelper v0.7.1/go.mod h1:NsMjfQEy6sd+9Kfw8kCP61W1I0nerGSYSFnGaxQkcbs= +github.com/kunwardeep/paralleltest v1.0.15 h1:ZMk4Qt306tHIgKISHWFJAO1IDQJLc6uDyJMLyncOb6w= +github.com/kunwardeep/paralleltest v1.0.15/go.mod h1:di4moFqtfz3ToSKxhNjhOZL+696QtJGCFe132CbBLGk= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.4.5 h1:kv2ZGUVI6VwRfp/+bcQ6Nbx0ghFWcGIKInkG/oFn1aQ= +github.com/ldez/exptostd v0.4.5/go.mod h1:QRjHRMXJrCTIm9WxVNH6VW7oN7KrGSht69bIRwvdFsM= +github.com/ldez/gomoddirectives v0.8.0 h1:JqIuTtgvFC2RdH1s357vrE23WJF2cpDCPFgA/TWDGpk= +github.com/ldez/gomoddirectives v0.8.0/go.mod h1:jutzamvZR4XYJLr0d5Honycp4Gy6GEg2mS9+2YX3F1Q= +github.com/ldez/grignotin v0.10.1 h1:keYi9rYsgbvqAZGI1liek5c+jv9UUjbvdj3Tbn5fn4o= +github.com/ldez/grignotin v0.10.1/go.mod h1:UlDbXFCARrXbWGNGP3S5vsysNXAPhnSuBufpTEbwOas= +github.com/ldez/structtags v0.6.1 h1:bUooFLbXx41tW8SvkfwfFkkjPYvFFs59AAMgVg6DUBk= +github.com/ldez/structtags v0.6.1/go.mod h1:YDxVSgDy/MON6ariaxLF2X09bh19qL7MtGBN5MrvbdY= +github.com/ldez/tagliatelle v0.7.2 h1:KuOlL70/fu9paxuxbeqlicJnCspCRjH0x8FW+NfgYUk= +github.com/ldez/tagliatelle v0.7.2/go.mod h1:PtGgm163ZplJfZMZ2sf5nhUT170rSuPgBimoyYtdaSI= +github.com/ldez/usetesting v0.5.0 h1:3/QtzZObBKLy1F4F8jLuKJiKBjjVFi1IavpoWbmqLwc= +github.com/ldez/usetesting v0.5.0/go.mod h1:Spnb4Qppf8JTuRgblLrEWb7IE6rDmUpGvxY3iRrzvDQ= +github.com/leonklingele/grouper v1.1.2 
h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= +github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddBCpE= +github.com/macabu/inamedparam v0.2.0/go.mod h1:+Pee9/YfGe5LJ62pYXqB89lJ+0k5bsR8Wgz/C0Zlq3U= +github.com/manuelarte/embeddedstructfieldcheck v0.4.0 h1:3mAIyaGRtjK6EO9E73JlXLtiy7ha80b2ZVGyacxgfww= +github.com/manuelarte/embeddedstructfieldcheck v0.4.0/go.mod h1:z8dFSyXqp+fC6NLDSljRJeNQJJDWnY7RoWFzV3PC6UM= +github.com/manuelarte/funcorder v0.5.0 h1:llMuHXXbg7tD0i/LNw8vGnkDTHFpTnWqKPI85Rknc+8= +github.com/manuelarte/funcorder v0.5.0/go.mod h1:Yt3CiUQthSBMBxjShjdXMexmzpP8YGvGLjrxJNkO2hA= +github.com/maratori/testableexamples v1.0.1 h1:HfOQXs+XgfeRBJ+Wz0XfH+FHnoY9TVqL6Fcevpzy4q8= +github.com/maratori/testableexamples v1.0.1/go.mod h1:XE2F/nQs7B9N08JgyRmdGjYVGqxWwClLPCGSQhXQSrQ= +github.com/maratori/testpackage v1.1.2 h1:ffDSh+AgqluCLMXhM19f/cpvQAKygKAJXFl9aUjmbqs= +github.com/maratori/testpackage v1.1.2/go.mod h1:8F24GdVDFW5Ew43Et02jamrVMNXLUNaOynhDssITGfc= +github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4= +github.com/matoous/godox v1.1.0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM= +github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= -github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod 
h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= +github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 h1:8x3G8QOZqo2bRAL8JFlPz/odqQECI/XmlZeRwnFxJ8I= -github.com/openshift/api v0.0.0-20250411135543-10a8fa583797/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= -github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ= -github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo= -github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI= -github.com/parnurzeal/gorequest v0.3.0/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= 
+github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.21.2 h1:khzWfm2/Br8ZemX8QM1pl72LwM+rMeW6VUbQ4rzh0Po= +github.com/nunnatsa/ginkgolinter v0.21.2/go.mod h1:GItSI5fw7mCGLPmkvGYrr1kEetZe7B593jcyOpyabsY= +github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8= +github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= +github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= +github.com/openshift/api v0.0.0-20260107103503-6d35063ca179 h1:5gMFMmuVLAcEnBAjNFql/8L2ZRPBDOxl7nmbjO5klvk= +github.com/openshift/api v0.0.0-20260107103503-6d35063ca179/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/client-go v0.0.0-20260105124352-f93a4291f9ae h1:veyDeAOBVJun1KoOsTIRlD7Q5LwRR32kfS2IPjPXJKE= +github.com/openshift/client-go v0.0.0-20260105124352-f93a4291f9ae/go.mod h1:leoeMrUnO40DwByGl7we2l+h6HQq3Y6bHUa+DnmRl+8= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= 
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= -github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= -github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= -github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod 
h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/quasilyte/go-ruleguard v0.4.5 h1:AGY0tiOT5hJX9BTdx/xBdoCubQUAE2grkqY2lSwvZcA= +github.com/quasilyte/go-ruleguard v0.4.5/go.mod h1:Vl05zJ538vcEEwu16V/Hdu7IYZWyKSwIy4c88Ro1kRE= +github.com/quasilyte/go-ruleguard/dsl v0.3.23 h1:lxjt5B6ZCiBeeNO8/oQsegE6fLeCzuMRoVWSkXC4uvY= +github.com/quasilyte/go-ruleguard/dsl v0.3.23/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 
h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.4.1 h1:eWC8eUMNZ/wM/PWuZBv7JxxqT5fiIKSIyTvjb7Elr+g= +github.com/ryancurrah/gomodguard v1.4.1/go.mod h1:qnMJwV1hX9m+YJseXEBhd2s90+1Xn6x9dLz11ualI1I= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= +github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.29.0 h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iMf7Knkq057v4XOQ= +github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8= +github.com/securego/gosec/v2 v2.22.11 h1:tW+weM/hCM/GX3iaCV91d5I6hqaRT2TPsFM1+USPXwg= +github.com/securego/gosec/v2 v2.22.11/go.mod h1:KE4MW/eH0GLWztkbt4/7XpyH0zJBBnu7sYB4l6Wn7Mw= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shurcooL/go 
v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= -github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= -github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= -github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sonatard/noctx v0.4.0 h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o= +github.com/sonatard/noctx v0.4.0/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod 
h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.3.1 h1:AyX7+dxI4IdLBPtDbsGAyqiTSLpCP9hWRrXQDU4Cm/g= +github.com/stbenjam/no-sprintf-host-port v0.3.1/go.mod h1:ODbZesTCHMVKthBHskvUUexdcNHAQRXk9NpSsL8p/HQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 
+github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.5.4 h1:u1ww+gqpRLiIA16yF2PV1CV1n/X3zhyezbNXC3E14Sg= +github.com/tetafro/godot v1.5.4/go.mod h1:eOkMrVQurDui411nBY2FA05EYH01r14LuWY/NrVDVcU= +github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 h1:9LPGD+jzxMlnk5r6+hJnar67cgpDIz/iyD+rfl5r2Vk= +github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwTnnNKn4M= +github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= +github.com/tomarrell/wrapcheck/v2 v2.12.0 h1:H/qQ1aNWz/eeIhxKAFvkfIA+N7YDvq6TWVFL27Of9is= +github.com/tomarrell/wrapcheck/v2 v2.12.0/go.mod h1:AQhQuZd0p7b6rfW+vUwHm5OMCGgp63moQ9Qr/0BpIWo= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.4.1 h1:J16Xl1wyNX9ofhpHmQ9h9gk5rnv2A6lX/2+APLTo0zU= 
+github.com/uudashr/iface v1.4.1/go.mod h1:pbeBPlbuU2qkNDn0mmfrxP2X+wjPMIQAy+r1MBXSXtg= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xen0n/gosmopolitan v1.3.0 h1:zAZI1zefvo7gcpbCOrPSHJZJYA9ZgLfJqtKzZ5pHqQM= +github.com/xen0n/gosmopolitan v1.3.0/go.mod h1:rckfr5T6o4lBtM1ga7mLGKZmLxswUoH1zxHgNXOsEt4= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs= +github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= +github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
+gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= +gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= +go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= +go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/musttag v0.14.0 h1:XGySZATqQYSEV3/YTy+iX+aofbZZllJaqwFWs+RTtSo= +go-simpler.org/musttag v0.14.0/go.mod h1:uP8EymctQjJ4Z1kUnjX0u2l60WfUdQxCwSNKzE1JEOE= +go-simpler.org/sloglint v0.11.1 h1:xRbPepLT/MHPTCA6TS/wNfZrDzkGvCCqUv4Bdwc3H7s= +go-simpler.org/sloglint v0.11.1/go.mod h1:2PowwiCOK8mjiF+0KGifVOT8ZsCNiFzvfyJeJOIt8MQ= +go.augendre.info/arangolint v0.3.1 h1:n2E6p8f+zfXSFLa2e2WqFPp4bfvcuRdd50y6cT65pSo= +go.augendre.info/arangolint v0.3.1/go.mod h1:6ZKzEzIZuBQwoSvlKT+qpUfIbBfFCE5gbAoTg0/117g= +go.augendre.info/fatcontext v0.9.0 h1:Gt5jGD4Zcj8CDMVzjOJITlSb9cEch54hjRRlN3qDojE= +go.augendre.info/fatcontext v0.9.0/go.mod h1:L94brOAT1OOUNue6ph/2HnwxoNlds9aXDF2FcUntbNw= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 h1:HDjDiATsGqvuqvkDvgJjD1IgPrVekcSXVVE21JwvzGE= +golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= -golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= -golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.48.0 
h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= 
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= 
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi 
v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= -k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8 h1:oV4uULAC2QPIdMQwjMaNIwykyhWhnhBwX40yd5h9u3U= +k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +mvdan.cc/gofumpt v0.9.2 h1:zsEMWL8SVKGHNztrx6uZrXdp7AX8r421Vvp23sz7ik4= +mvdan.cc/gofumpt v0.9.2/go.mod h1:iB7Hn+ai8lPvofHd9ZFGVg2GOr8sBUw1QUWjNbmIL/s= +mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 h1:ssMzja7PDPJV8FStj7hq9IKiuiKhgz9ErWw+m68e7DI= +mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15/go.mod h1:4M5MMXl2kW6fivUT6yRGpLLPNfuGtU2Z0cPvFquGDYU= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod 
h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.21.0 h1:I7nry5p8iDJbuRdYS7ez8MUvw7XVNPcIP5GkzzuXIIQ= +sigs.k8s.io/kustomize/api v0.21.0/go.mod h1:XGVQuR5n2pXKWbzXHweZU683pALGw/AMVO4zU4iS8SE= +sigs.k8s.io/kustomize/cmd/config v0.21.0 h1:ikLtzcNK9isBqSaXXhAg7LRCTNKdp70z5v/c4Y55DOw= +sigs.k8s.io/kustomize/cmd/config v0.21.0/go.mod h1:oxa6eRzeLWUcE7M3Rmio29Sfc4KpqGspHur3GjOYqNA= +sigs.k8s.io/kustomize/kustomize/v5 v5.8.0 h1:CCIJK7z/xJOlkXOaDOcL2jprV53a/eloiL02wg7oJJs= +sigs.k8s.io/kustomize/kustomize/v5 v5.8.0/go.mod h1:qewGAExYZK9LbPPbnJMPK5HQ8nsdxRzpclIg0qslzDo= +sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6HQ= +sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/pkg/alerting/alerter.go b/internal/pkg/alerting/alerter.go new file mode 100644 index 000000000..edbc22812 --- /dev/null +++ b/internal/pkg/alerting/alerter.go @@ -0,0 +1,51 @@ +package alerting + +import ( + "context" + "time" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +// AlertMessage contains the details of a reload event to be 
sent as an alert. +type AlertMessage struct { + WorkloadKind string + WorkloadName string + WorkloadNamespace string + ResourceKind string + ResourceName string + ResourceNamespace string + Timestamp time.Time +} + +// Alerter is the interface for sending reload notifications. +type Alerter interface { + Send(ctx context.Context, message AlertMessage) error +} + +// NewAlerter creates an Alerter based on the configuration. +// Returns a NoOpAlerter if alerting is disabled. +func NewAlerter(cfg *config.Config) Alerter { + alertCfg := cfg.Alerting + if !alertCfg.Enabled || alertCfg.WebhookURL == "" { + return &NoOpAlerter{} + } + + switch alertCfg.Sink { + case "slack": + return NewSlackAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) + case "teams": + return NewTeamsAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) + case "gchat": + return NewGChatAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) + default: + return NewRawAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional, alertCfg.Structured) + } +} + +// NoOpAlerter is an Alerter that does nothing. +type NoOpAlerter struct{} + +func (a *NoOpAlerter) Send(ctx context.Context, message AlertMessage) error { + return nil +} diff --git a/internal/pkg/alerting/alerter_test.go b/internal/pkg/alerting/alerter_test.go new file mode 100644 index 000000000..d6ae4ad40 --- /dev/null +++ b/internal/pkg/alerting/alerter_test.go @@ -0,0 +1,273 @@ +package alerting + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +// testServer creates a test HTTP server that captures the request body. +// Returns the server and a function to retrieve the captured body. 
+func testServer(t *testing.T, expectedContentType string) (*httptest.Server, func() []byte) { + t.Helper() + var body []byte + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Errorf("Expected POST request, got %s", r.Method) + } + if r.Header.Get("Content-Type") != expectedContentType { + t.Errorf("Expected Content-Type %s, got %s", expectedContentType, r.Header.Get("Content-Type")) + } + body, _ = io.ReadAll(r.Body) + w.WriteHeader(http.StatusOK) + })) + return server, func() []byte { return body } +} + +// testAlertMessage returns a standard AlertMessage for testing. +func testAlertMessage() AlertMessage { + return AlertMessage{ + WorkloadKind: "Deployment", + WorkloadName: "nginx", + WorkloadNamespace: "default", + ResourceKind: "ConfigMap", + ResourceName: "nginx-config", + ResourceNamespace: "default", + Timestamp: time.Now(), + } +} + +func TestNewAlerter(t *testing.T) { + tests := []struct { + name string + setup func(*config.Config) + wantType string + }{ + { + name: "disabled", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = false + }, + wantType: "*alerting.NoOpAlerter", + }, + { + name: "no webhook URL", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "" + }, + wantType: "*alerting.NoOpAlerter", + }, + { + name: "slack", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "slack" + }, + wantType: "*alerting.SlackAlerter", + }, + { + name: "teams", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "teams" + }, + wantType: "*alerting.TeamsAlerter", + }, + { + name: "gchat", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "gchat" + }, + 
wantType: "*alerting.GChatAlerter", + }, + { + name: "raw", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "raw" + }, + wantType: "*alerting.RawAlerter", + }, + { + name: "empty sink defaults to raw", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "" + }, + wantType: "*alerting.RawAlerter", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := config.NewDefault() + tt.setup(cfg) + alerter := NewAlerter(cfg) + gotType := getTypeName(alerter) + if gotType != tt.wantType { + t.Errorf("NewAlerter() type = %s, want %s", gotType, tt.wantType) + } + }) + } +} + +func getTypeName(a Alerter) string { + switch a.(type) { + case *NoOpAlerter: + return "*alerting.NoOpAlerter" + case *SlackAlerter: + return "*alerting.SlackAlerter" + case *TeamsAlerter: + return "*alerting.TeamsAlerter" + case *GChatAlerter: + return "*alerting.GChatAlerter" + case *RawAlerter: + return "*alerting.RawAlerter" + default: + return "unknown" + } +} + +func TestNoOpAlerter_Send(t *testing.T) { + alerter := &NoOpAlerter{} + if err := alerter.Send(context.Background(), AlertMessage{}); err != nil { + t.Errorf("NoOpAlerter.Send() error = %v, want nil", err) + } +} + +func TestAlerter_Send(t *testing.T) { + tests := []struct { + name string + contentType string + newAlert func(url string) Alerter + validate func(t *testing.T, body []byte) + }{ + { + name: "slack", + contentType: "application/json", + newAlert: func(url string) Alerter { return NewSlackAlerter(url, "", "Test Cluster") }, + validate: func(t *testing.T, body []byte) { + var msg slackMessage + if err := json.Unmarshal(body, &msg); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + if len(msg.Attachments) != 1 { + t.Fatalf("Expected 1 attachment, got %d", len(msg.Attachments)) + } + if 
msg.Attachments[0].Text == "" { + t.Error("Expected non-empty attachment text") + } + if msg.Attachments[0].Color != "good" { + t.Errorf("Expected color 'good', got %s", msg.Attachments[0].Color) + } + if msg.Attachments[0].AuthorName != "Reloader" { + t.Errorf("Expected author_name 'Reloader', got %s", msg.Attachments[0].AuthorName) + } + }, + }, + { + name: "teams", + contentType: "application/json", + newAlert: func(url string) Alerter { return NewTeamsAlerter(url, "", "") }, + validate: func(t *testing.T, body []byte) { + var msg teamsMessage + if err := json.Unmarshal(body, &msg); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + if msg.Type != "MessageCard" { + t.Errorf("@type = %s, want MessageCard", msg.Type) + } + }, + }, + { + name: "gchat", + contentType: "application/json", + newAlert: func(url string) Alerter { return NewGChatAlerter(url, "", "") }, + validate: func(t *testing.T, body []byte) { + var msg gchatMessage + if err := json.Unmarshal(body, &msg); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + if len(msg.Cards) != 1 { + t.Errorf("cards = %d, want 1", len(msg.Cards)) + } + }, + }, + { + name: "raw plain text (default)", + contentType: "text/plain", + newAlert: func(url string) Alerter { return NewRawAlerter(url, "", "custom-info", false) }, + validate: func(t *testing.T, body []byte) { + text := string(body) + if text == "" { + t.Error("Expected non-empty text") + } + if !strings.Contains(text, "custom-info") { + t.Error("Expected text to contain 'custom-info'") + } + if !strings.Contains(text, "nginx") { + t.Error("Expected text to contain workload name 'nginx'") + } + }, + }, + { + name: "raw structured JSON", + contentType: "application/json", + newAlert: func(url string) Alerter { return NewRawAlerter(url, "", "custom-info", true) }, + validate: func(t *testing.T, body []byte) { + var msg rawMessage + if err := json.Unmarshal(body, &msg); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + if msg.Event 
!= "reload" { + t.Errorf("event = %s, want reload", msg.Event) + } + if msg.WorkloadName != "nginx" { + t.Errorf("workloadName = %s, want nginx", msg.WorkloadName) + } + if msg.Additional != "custom-info" { + t.Errorf("additional = %s, want custom-info", msg.Additional) + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server, getBody := testServer(t, tt.contentType) + defer server.Close() + + alerter := tt.newAlert(server.URL) + if err := alerter.Send(context.Background(), testAlertMessage()); err != nil { + t.Fatalf("Send() error = %v", err) + } + tt.validate(t, getBody()) + }) + } +} + +func TestAlerter_WebhookError(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer server.Close() + + alerter := NewRawAlerter(server.URL, "", "", false) + if err := alerter.Send(context.Background(), AlertMessage{}); err == nil { + t.Error("Expected error for non-2xx response") + } +} diff --git a/internal/pkg/alerting/gchat.go b/internal/pkg/alerting/gchat.go new file mode 100644 index 000000000..8ad0c2f7f --- /dev/null +++ b/internal/pkg/alerting/gchat.go @@ -0,0 +1,90 @@ +package alerting + +import ( + "context" + "encoding/json" + "fmt" +) + +// GChatAlerter sends alerts to Google Chat webhooks. +type GChatAlerter struct { + webhookURL string + additional string + client *httpClient +} + +// NewGChatAlerter creates a new GChatAlerter. +func NewGChatAlerter(webhookURL, proxyURL, additional string) *GChatAlerter { + return &GChatAlerter{ + webhookURL: webhookURL, + additional: additional, + client: newHTTPClient(proxyURL), + } +} + +// gchatMessage represents a Google Chat message. 
+type gchatMessage struct { + Text string `json:"text,omitempty"` + Cards []gchatCard `json:"cards,omitempty"` +} + +type gchatCard struct { + Header gchatHeader `json:"header"` + Sections []gchatSection `json:"sections"` +} + +type gchatHeader struct { + Title string `json:"title"` + Subtitle string `json:"subtitle,omitempty"` +} + +type gchatSection struct { + Widgets []gchatWidget `json:"widgets"` +} + +type gchatWidget struct { + KeyValue *gchatKeyValue `json:"keyValue,omitempty"` +} + +type gchatKeyValue struct { + TopLabel string `json:"topLabel"` + Content string `json:"content"` +} + +func (a *GChatAlerter) Send(ctx context.Context, message AlertMessage) error { + msg := a.buildMessage(message) + + body, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("marshaling gchat message: %w", err) + } + + return a.client.post(ctx, a.webhookURL, body) +} + +func (a *GChatAlerter) buildMessage(msg AlertMessage) gchatMessage { + widgets := []gchatWidget{ + {KeyValue: &gchatKeyValue{TopLabel: "Workload", Content: fmt.Sprintf("%s/%s (%s)", msg.WorkloadNamespace, msg.WorkloadName, msg.WorkloadKind)}}, + {KeyValue: &gchatKeyValue{TopLabel: "Resource", Content: fmt.Sprintf("%s/%s (%s)", msg.ResourceNamespace, msg.ResourceName, msg.ResourceKind)}}, + {KeyValue: &gchatKeyValue{TopLabel: "Time", Content: msg.Timestamp.Format("2006-01-02 15:04:05 UTC")}}, + } + + subtitle := "" + if a.additional != "" { + subtitle = a.additional + } + + return gchatMessage{ + Cards: []gchatCard{ + { + Header: gchatHeader{ + Title: "Reloader triggered reload", + Subtitle: subtitle, + }, + Sections: []gchatSection{ + {Widgets: widgets}, + }, + }, + }, + } +} diff --git a/internal/pkg/alerting/http.go b/internal/pkg/alerting/http.go new file mode 100644 index 000000000..2501e695f --- /dev/null +++ b/internal/pkg/alerting/http.go @@ -0,0 +1,59 @@ +package alerting + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + + httputil 
"github.com/stakater/Reloader/internal/pkg/http" +) + +// httpClient wraps http.Client with common configuration. +type httpClient struct { + client *http.Client +} + +// newHTTPClient creates a new httpClient with optional proxy support. +func newHTTPClient(proxyURL string) *httpClient { + cfg := httputil.DefaultConfig() + cfg.Timeout = httputil.AlertingTimeout + cfg.ProxyURL = proxyURL + + return &httpClient{ + client: httputil.NewClient(cfg), + } +} + +// post sends a POST request with JSON body. +func (c *httpClient) post(ctx context.Context, url string, body []byte) error { + return c.doPost(ctx, url, body, "application/json") +} + +// postText sends a POST request with plain text body. +func (c *httpClient) postText(ctx context.Context, url string, text string) error { + return c.doPost(ctx, url, []byte(text), "text/plain") +} + +// doPost sends a POST request with the specified content type. +func (c *httpClient) doPost(ctx context.Context, url string, body []byte, contentType string) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) + if err != nil { + return fmt.Errorf("creating request: %w", err) + } + req.Header.Set("Content-Type", contentType) + + resp, err := c.client.Do(req) + if err != nil { + return fmt.Errorf("sending request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status %d: %s", resp.StatusCode, string(body)) + } + + return nil +} diff --git a/internal/pkg/alerting/raw.go b/internal/pkg/alerting/raw.go new file mode 100644 index 000000000..d8ea3046a --- /dev/null +++ b/internal/pkg/alerting/raw.go @@ -0,0 +1,90 @@ +package alerting + +import ( + "context" + "encoding/json" + "fmt" + "strings" +) + +// RawAlerter sends alerts to a webhook as plain text (default) or structured JSON. 
+type RawAlerter struct { + webhookURL string + additional string + structured bool + client *httpClient +} + +// NewRawAlerter creates a new RawAlerter. +// If structured is true, sends JSON; otherwise sends plain text. +func NewRawAlerter(webhookURL, proxyURL, additional string, structured bool) *RawAlerter { + return &RawAlerter{ + webhookURL: webhookURL, + additional: additional, + structured: structured, + client: newHTTPClient(proxyURL), + } +} + +// rawMessage is the JSON payload for structured raw webhook alerts. +type rawMessage struct { + Event string `json:"event"` + WorkloadKind string `json:"workloadKind"` + WorkloadName string `json:"workloadName"` + WorkloadNamespace string `json:"workloadNamespace"` + ResourceKind string `json:"resourceKind"` + ResourceName string `json:"resourceName"` + ResourceNamespace string `json:"resourceNamespace"` + Timestamp string `json:"timestamp"` + Additional string `json:"additional,omitempty"` +} + +func (a *RawAlerter) Send(ctx context.Context, message AlertMessage) error { + if a.structured { + return a.sendStructured(ctx, message) + } + return a.sendPlainText(ctx, message) +} + +func (a *RawAlerter) sendStructured(ctx context.Context, message AlertMessage) error { + msg := rawMessage{ + Event: "reload", + WorkloadKind: message.WorkloadKind, + WorkloadName: message.WorkloadName, + WorkloadNamespace: message.WorkloadNamespace, + ResourceKind: message.ResourceKind, + ResourceName: message.ResourceName, + ResourceNamespace: message.ResourceNamespace, + Timestamp: message.Timestamp.Format("2006-01-02T15:04:05Z07:00"), + Additional: a.additional, + } + + body, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("marshaling raw message: %w", err) + } + + return a.client.post(ctx, a.webhookURL, body) +} + +func (a *RawAlerter) sendPlainText(ctx context.Context, message AlertMessage) error { + text := a.formatMessage(message) + // Strip markdown formatting for plain text + text = strings.ReplaceAll(text, "*", "") 
+ return a.client.postText(ctx, a.webhookURL, text) +} + +func (a *RawAlerter) formatMessage(msg AlertMessage) string { + text := fmt.Sprintf( + "Reloader triggered reload - Workload: %s/%s (%s), Resource: %s/%s (%s), Time: %s", + msg.WorkloadNamespace, msg.WorkloadName, msg.WorkloadKind, + msg.ResourceNamespace, msg.ResourceName, msg.ResourceKind, + msg.Timestamp.Format("2006-01-02 15:04:05 UTC"), + ) + + if a.additional != "" { + text = a.additional + " : " + text + } + + return text +} diff --git a/internal/pkg/alerting/slack.go b/internal/pkg/alerting/slack.go new file mode 100644 index 000000000..68df2ac00 --- /dev/null +++ b/internal/pkg/alerting/slack.go @@ -0,0 +1,128 @@ +package alerting + +import ( + "context" + "encoding/json" + "fmt" +) + +// SlackAlerter sends alerts to Slack webhooks. +type SlackAlerter struct { + webhookURL string + additional string + client *httpClient +} + +// NewSlackAlerter creates a new SlackAlerter. +func NewSlackAlerter(webhookURL, proxyURL, additional string) *SlackAlerter { + return &SlackAlerter{ + webhookURL: webhookURL, + additional: additional, + client: newHTTPClient(proxyURL), + } +} + +// slackMessage represents a Slack webhook message. +type slackMessage struct { + Username string `json:"username,omitempty"` + IconEmoji string `json:"icon_emoji,omitempty"` + IconURL string `json:"icon_url,omitempty"` + Channel string `json:"channel,omitempty"` + ThreadTimestamp string `json:"thread_ts,omitempty"` + Text string `json:"text,omitempty"` + Attachments []slackAttachment `json:"attachments,omitempty"` + Parse string `json:"parse,omitempty"` + ResponseType string `json:"response_type,omitempty"` + ReplaceOriginal bool `json:"replace_original,omitempty"` + DeleteOriginal bool `json:"delete_original,omitempty"` + ReplyBroadcast bool `json:"reply_broadcast,omitempty"` +} + +// slackAttachment represents a Slack message attachment. 
+type slackAttachment struct { + Color string `json:"color,omitempty"` + Fallback string `json:"fallback,omitempty"` + + CallbackID string `json:"callback_id,omitempty"` + ID int `json:"id,omitempty"` + + AuthorID string `json:"author_id,omitempty"` + AuthorName string `json:"author_name,omitempty"` + AuthorSubname string `json:"author_subname,omitempty"` + AuthorLink string `json:"author_link,omitempty"` + AuthorIcon string `json:"author_icon,omitempty"` + + Title string `json:"title,omitempty"` + TitleLink string `json:"title_link,omitempty"` + Pretext string `json:"pretext,omitempty"` + Text string `json:"text,omitempty"` + + ImageURL string `json:"image_url,omitempty"` + ThumbURL string `json:"thumb_url,omitempty"` + + ServiceName string `json:"service_name,omitempty"` + ServiceIcon string `json:"service_icon,omitempty"` + FromURL string `json:"from_url,omitempty"` + OriginalURL string `json:"original_url,omitempty"` + + Fields []slackField `json:"fields,omitempty"` + MarkdownIn []string `json:"mrkdwn_in,omitempty"` + + Footer string `json:"footer,omitempty"` + FooterIcon string `json:"footer_icon,omitempty"` + + Actions []slackAction `json:"actions,omitempty"` +} + +// slackField represents a field in a Slack attachment. +type slackField struct { + Title string `json:"title"` + Value string `json:"value"` + Short bool `json:"short"` +} + +// slackAction represents an action button in a Slack attachment. 
+type slackAction struct { + Type string `json:"type"` + Text string `json:"text"` + URL string `json:"url"` + Style string `json:"style"` +} + +func (a *SlackAlerter) Send(ctx context.Context, message AlertMessage) error { + text := a.formatMessage(message) + msg := slackMessage{ + Attachments: []slackAttachment{ + { + Text: text, + Color: "good", + AuthorName: "Reloader", + }, + }, + } + + body, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("marshaling slack message: %w", err) + } + + return a.client.post(ctx, a.webhookURL, body) +} + +func (a *SlackAlerter) formatMessage(msg AlertMessage) string { + text := fmt.Sprintf( + "Reloader triggered reload\n"+ + "*Workload:* %s/%s (%s)\n"+ + "*Resource:* %s/%s (%s)\n"+ + "*Time:* %s", + msg.WorkloadNamespace, msg.WorkloadName, msg.WorkloadKind, + msg.ResourceNamespace, msg.ResourceName, msg.ResourceKind, + msg.Timestamp.Format("2006-01-02 15:04:05 UTC"), + ) + + if a.additional != "" { + text = a.additional + "\n" + text + } + + return text +} diff --git a/internal/pkg/alerting/teams.go b/internal/pkg/alerting/teams.go new file mode 100644 index 000000000..99b08d5c8 --- /dev/null +++ b/internal/pkg/alerting/teams.go @@ -0,0 +1,81 @@ +package alerting + +import ( + "context" + "encoding/json" + "fmt" +) + +// TeamsAlerter sends alerts to Microsoft Teams webhooks. +type TeamsAlerter struct { + webhookURL string + additional string + client *httpClient +} + +// NewTeamsAlerter creates a new TeamsAlerter. +func NewTeamsAlerter(webhookURL, proxyURL, additional string) *TeamsAlerter { + return &TeamsAlerter{ + webhookURL: webhookURL, + additional: additional, + client: newHTTPClient(proxyURL), + } +} + +// teamsMessage represents a Microsoft Teams message card. 
+type teamsMessage struct {
+	Type       string         `json:"@type"`
+	Context    string         `json:"@context"`
+	ThemeColor string         `json:"themeColor"`
+	Summary    string         `json:"summary"`
+	Sections   []teamsSection `json:"sections"`
+}
+
+type teamsSection struct {
+	ActivityTitle    string      `json:"activityTitle"`
+	ActivitySubtitle string      `json:"activitySubtitle,omitempty"`
+	Facts            []teamsFact `json:"facts"`
+}
+
+type teamsFact struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
+
+func (a *TeamsAlerter) Send(ctx context.Context, message AlertMessage) error {
+	msg := a.buildMessage(message)
+
+	body, err := json.Marshal(msg)
+	if err != nil {
+		return fmt.Errorf("marshaling teams message: %w", err)
+	}
+
+	return a.client.post(ctx, a.webhookURL, body)
+}
+
+func (a *TeamsAlerter) buildMessage(msg AlertMessage) teamsMessage {
+	facts := []teamsFact{
+		{Name: "Workload", Value: fmt.Sprintf("%s/%s (%s)", msg.WorkloadNamespace, msg.WorkloadName, msg.WorkloadKind)},
+		{Name: "Resource", Value: fmt.Sprintf("%s/%s (%s)", msg.ResourceNamespace, msg.ResourceName, msg.ResourceKind)},
+		{Name: "Time", Value: msg.Timestamp.UTC().Format("2006-01-02 15:04:05 UTC")},
+	}
+
+	subtitle := ""
+	if a.additional != "" {
+		subtitle = a.additional
+	}
+
+	return teamsMessage{
+		Type:       "MessageCard",
+		Context:    "http://schema.org/extensions",
+		ThemeColor: "0076D7",
+		Summary:    "Reloader triggered reload",
+		Sections: []teamsSection{
+			{
+				ActivityTitle:    "Reloader triggered reload",
+				ActivitySubtitle: subtitle,
+				Facts:            facts,
+			},
+		},
+	}
+}
diff --git a/internal/pkg/alerts/alert.go b/internal/pkg/alerts/alert.go
deleted file mode 100644
index 6b9568ff0..000000000
--- a/internal/pkg/alerts/alert.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package alert
-
-import (
-	"fmt"
-	"os"
-	"strings"
-
-	"github.com/parnurzeal/gorequest"
-	"github.com/sirupsen/logrus"
-)
-
-type AlertSink string
-
-const (
-	AlertSinkSlack      AlertSink = "slack"
-	AlertSinkTeams      AlertSink = "teams"
-	AlertSinkGoogleChat AlertSink = 
"gchat" - AlertSinkRaw AlertSink = "raw" -) - -// function to send alert msg to webhook service -func SendWebhookAlert(msg string) { - webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL") - if !ok { - logrus.Error("ALERT_WEBHOOK_URL env variable not provided") - return - } - webhook_url = strings.TrimSpace(webhook_url) - alert_sink := os.Getenv("ALERT_SINK") - alert_sink = strings.ToLower(strings.TrimSpace(alert_sink)) - - // Provision to add Proxy to reach webhook server if required - webhook_proxy := os.Getenv("ALERT_WEBHOOK_PROXY") - webhook_proxy = strings.TrimSpace(webhook_proxy) - - // Provision to add Additional information in the alert. e.g ClusterName - alert_additional_info, ok := os.LookupEnv("ALERT_ADDITIONAL_INFO") - if ok { - alert_additional_info = strings.TrimSpace(alert_additional_info) - msg = fmt.Sprintf("%s : %s", alert_additional_info, msg) - } - - switch AlertSink(alert_sink) { - case AlertSinkSlack: - sendSlackAlert(webhook_url, webhook_proxy, msg) - case AlertSinkTeams: - sendTeamsAlert(webhook_url, webhook_proxy, msg) - case AlertSinkGoogleChat: - sendGoogleChatAlert(webhook_url, webhook_proxy, msg) - default: - msg = strings.ReplaceAll(msg, "*", "") - sendRawWebhookAlert(webhook_url, webhook_proxy, msg) - } -} - -// function to handle server redirection -func redirectPolicy(req gorequest.Request, via []gorequest.Request) error { - return fmt.Errorf("incorrect token (redirection)") -} - -// function to send alert to slack -func sendSlackAlert(webhookUrl string, proxy string, msg string) []error { - attachment := Attachment{ - Text: msg, - Color: "good", - AuthorName: "Reloader", - } - - payload := WebhookMessage{ - Attachments: []Attachment{attachment}, - } - - request := gorequest.New().Proxy(proxy) - resp, _, err := request. - Post(webhookUrl). - RedirectPolicy(redirectPolicy). - Send(payload). - End() - - if err != nil { - return err - } - if resp.StatusCode >= 400 { - return []error{fmt.Errorf("error sending msg. 
status: %v", resp.Status)} - } - - return nil -} - -// function to send alert to Microsoft Teams webhook -func sendTeamsAlert(webhookUrl string, proxy string, msg string) []error { - attachment := Attachment{ - Text: msg, - } - - request := gorequest.New().Proxy(proxy) - resp, _, err := request. - Post(webhookUrl). - RedirectPolicy(redirectPolicy). - Send(attachment). - End() - - if err != nil { - return err - } - if resp.StatusCode != 200 { - return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)} - } - - return nil -} - -// function to send alert to Google Chat webhook -func sendGoogleChatAlert(webhookUrl string, proxy string, msg string) []error { - payload := map[string]interface{}{ - "text": msg, - } - - request := gorequest.New().Proxy(proxy) - resp, _, err := request. - Post(webhookUrl). - RedirectPolicy(redirectPolicy). - Send(payload). - End() - - if err != nil { - return err - } - if resp.StatusCode != 200 { - return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)} - } - - return nil -} - -// function to send alert to webhook service as text -func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error { - request := gorequest.New().Proxy(proxy) - resp, _, err := request. - Post(webhookUrl). - Type("text"). - RedirectPolicy(redirectPolicy). - Send(msg). - End() - - if err != nil { - return err - } - if resp.StatusCode >= 400 { - return []error{fmt.Errorf("error sending msg. 
status: %v", resp.Status)} - } - - return nil -} diff --git a/internal/pkg/alerts/slack_alert.go b/internal/pkg/alerts/slack_alert.go deleted file mode 100644 index a21727a25..000000000 --- a/internal/pkg/alerts/slack_alert.go +++ /dev/null @@ -1,61 +0,0 @@ -package alert - -type WebhookMessage struct { - Username string `json:"username,omitempty"` - IconEmoji string `json:"icon_emoji,omitempty"` - IconURL string `json:"icon_url,omitempty"` - Channel string `json:"channel,omitempty"` - ThreadTimestamp string `json:"thread_ts,omitempty"` - Text string `json:"text,omitempty"` - Attachments []Attachment `json:"attachments,omitempty"` - Parse string `json:"parse,omitempty"` - ResponseType string `json:"response_type,omitempty"` - ReplaceOriginal bool `json:"replace_original,omitempty"` - DeleteOriginal bool `json:"delete_original,omitempty"` - ReplyBroadcast bool `json:"reply_broadcast,omitempty"` -} - -type Attachment struct { - Color string `json:"color,omitempty"` - Fallback string `json:"fallback,omitempty"` - - CallbackID string `json:"callback_id,omitempty"` - ID int `json:"id,omitempty"` - - AuthorID string `json:"author_id,omitempty"` - AuthorName string `json:"author_name,omitempty"` - AuthorSubname string `json:"author_subname,omitempty"` - AuthorLink string `json:"author_link,omitempty"` - AuthorIcon string `json:"author_icon,omitempty"` - - Title string `json:"title,omitempty"` - TitleLink string `json:"title_link,omitempty"` - Pretext string `json:"pretext,omitempty"` - Text string `json:"text,omitempty"` - - ImageURL string `json:"image_url,omitempty"` - ThumbURL string `json:"thumb_url,omitempty"` - - ServiceName string `json:"service_name,omitempty"` - ServiceIcon string `json:"service_icon,omitempty"` - FromURL string `json:"from_url,omitempty"` - OriginalURL string `json:"original_url,omitempty"` - - MarkdownIn []string `json:"mrkdwn_in,omitempty"` - - Footer string `json:"footer,omitempty"` - FooterIcon string `json:"footer_icon,omitempty"` -} - 
-type Field struct { - Title string `json:"title"` - Value string `json:"value"` - Short bool `json:"short"` -} - -type Action struct { - Type string `json:"type"` - Text string `json:"text"` - Url string `json:"url"` - Style string `json:"style"` -} diff --git a/internal/pkg/app/app.go b/internal/pkg/app/app.go deleted file mode 100644 index 8d09188fc..000000000 --- a/internal/pkg/app/app.go +++ /dev/null @@ -1,9 +0,0 @@ -package app - -import "github.com/stakater/Reloader/internal/pkg/cmd" - -// Run runs the command -func Run() error { - cmd := cmd.NewReloaderCommand() - return cmd.Execute() -} diff --git a/internal/pkg/callbacks/rolling_upgrade.go b/internal/pkg/callbacks/rolling_upgrade.go deleted file mode 100644 index 13e5a63cd..000000000 --- a/internal/pkg/callbacks/rolling_upgrade.go +++ /dev/null @@ -1,579 +0,0 @@ -package callbacks - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/kube" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - - "maps" - - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" -) - -// ItemFunc is a generic function to return a specific resource in given namespace -type ItemFunc func(kube.Clients, string, string) (runtime.Object, error) - -// ItemsFunc is a generic function to return a specific resource array in given namespace -type ItemsFunc func(kube.Clients, string) []runtime.Object - -// ContainersFunc is a generic func to return containers -type ContainersFunc func(runtime.Object) []v1.Container - -// InitContainersFunc is a generic func to return containers -type InitContainersFunc func(runtime.Object) []v1.Container - -// VolumesFunc is a generic func to return volumes -type VolumesFunc 
func(runtime.Object) []v1.Volume - -// UpdateFunc performs the resource update -type UpdateFunc func(kube.Clients, string, runtime.Object) error - -// PatchFunc performs the resource patch -type PatchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error - -// PatchTemplateFunc is a generic func to return strategic merge JSON patch template -type PatchTemplatesFunc func() PatchTemplates - -// AnnotationsFunc is a generic func to return annotations -type AnnotationsFunc func(runtime.Object) map[string]string - -// PodAnnotationsFunc is a generic func to return annotations -type PodAnnotationsFunc func(runtime.Object) map[string]string - -// RollingUpgradeFuncs contains generic functions to perform rolling upgrade -type RollingUpgradeFuncs struct { - ItemFunc ItemFunc - ItemsFunc ItemsFunc - AnnotationsFunc AnnotationsFunc - PodAnnotationsFunc PodAnnotationsFunc - ContainersFunc ContainersFunc - ContainerPatchPathFunc ContainersFunc - InitContainersFunc InitContainersFunc - UpdateFunc UpdateFunc - PatchFunc PatchFunc - PatchTemplatesFunc PatchTemplatesFunc - VolumesFunc VolumesFunc - ResourceType string - SupportsPatch bool -} - -// PatchTemplates contains merge JSON patch templates -type PatchTemplates struct { - AnnotationTemplate string - EnvVarTemplate string - DeleteEnvVarTemplate string -} - -// GetDeploymentItem returns the deployment in given namespace -func GetDeploymentItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - deployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get deployment %v", err) - return nil, err - } - - if deployment.Spec.Template.Annotations == nil { - annotations := make(map[string]string) - deployment.Spec.Template.Annotations = annotations - } - - return deployment, nil -} - -// GetDeploymentItems returns the deployments in given namespace -func 
GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object { - deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list deployments %v", err) - } - - items := make([]runtime.Object, len(deployments.Items)) - // Ensure we always have pod annotations to add to - for i, v := range deployments.Items { - if v.Spec.Template.Annotations == nil { - annotations := make(map[string]string) - deployments.Items[i].Spec.Template.Annotations = annotations - } - items[i] = &deployments.Items[i] - } - - return items -} - -// GetCronJobItem returns the job in given namespace -func GetCronJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - cronjob, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get cronjob %v", err) - return nil, err - } - - return cronjob, nil -} - -// GetCronJobItems returns the jobs in given namespace -func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object { - cronjobs, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list cronjobs %v", err) - } - - items := make([]runtime.Object, len(cronjobs.Items)) - // Ensure we always have pod annotations to add to - for i, v := range cronjobs.Items { - if v.Spec.JobTemplate.Spec.Template.Annotations == nil { - annotations := make(map[string]string) - cronjobs.Items[i].Spec.JobTemplate.Spec.Template.Annotations = annotations - } - items[i] = &cronjobs.Items[i] - } - - return items -} - -// GetJobItem returns the job in given namespace -func GetJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - job, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Get(context.TODO(), name, 
meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get job %v", err) - return nil, err - } - - return job, nil -} - -// GetJobItems returns the jobs in given namespace -func GetJobItems(clients kube.Clients, namespace string) []runtime.Object { - jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list jobs %v", err) - } - - items := make([]runtime.Object, len(jobs.Items)) - // Ensure we always have pod annotations to add to - for i, v := range jobs.Items { - if v.Spec.Template.Annotations == nil { - annotations := make(map[string]string) - jobs.Items[i].Spec.Template.Annotations = annotations - } - items[i] = &jobs.Items[i] - } - - return items -} - -// GetDaemonSetItem returns the daemonSet in given namespace -func GetDaemonSetItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - daemonSet, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get daemonSet %v", err) - return nil, err - } - - return daemonSet, nil -} - -// GetDaemonSetItems returns the daemonSets in given namespace -func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object { - daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list daemonSets %v", err) - } - - items := make([]runtime.Object, len(daemonSets.Items)) - // Ensure we always have pod annotations to add to - for i, v := range daemonSets.Items { - if v.Spec.Template.Annotations == nil { - daemonSets.Items[i].Spec.Template.Annotations = make(map[string]string) - } - items[i] = &daemonSets.Items[i] - } - - return items -} - -// GetStatefulSetItem returns the statefulSet in given namespace -func GetStatefulSetItem(clients kube.Clients, name string, namespace 
string) (runtime.Object, error) { - statefulSet, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get statefulSet %v", err) - return nil, err - } - - return statefulSet, nil -} - -// GetStatefulSetItems returns the statefulSets in given namespace -func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object { - statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list statefulSets %v", err) - } - - items := make([]runtime.Object, len(statefulSets.Items)) - // Ensure we always have pod annotations to add to - for i, v := range statefulSets.Items { - if v.Spec.Template.Annotations == nil { - statefulSets.Items[i].Spec.Template.Annotations = make(map[string]string) - } - items[i] = &statefulSets.Items[i] - } - - return items -} - -// GetRolloutItem returns the rollout in given namespace -func GetRolloutItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - rollout, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get Rollout %v", err) - return nil, err - } - - return rollout, nil -} - -// GetRolloutItems returns the rollouts in given namespace -func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object { - rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list Rollouts %v", err) - } - - items := make([]runtime.Object, len(rollouts.Items)) - // Ensure we always have pod annotations to add to - for i, v := range rollouts.Items { - if v.Spec.Template.Annotations == nil { - rollouts.Items[i].Spec.Template.Annotations = make(map[string]string) - } - 
items[i] = &rollouts.Items[i] - } - - return items -} - -// GetDeploymentAnnotations returns the annotations of given deployment -func GetDeploymentAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.Deployment).Annotations == nil { - item.(*appsv1.Deployment).Annotations = make(map[string]string) - } - return item.(*appsv1.Deployment).Annotations -} - -// GetCronJobAnnotations returns the annotations of given cronjob -func GetCronJobAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.CronJob).Annotations == nil { - item.(*batchv1.CronJob).Annotations = make(map[string]string) - } - return item.(*batchv1.CronJob).Annotations -} - -// GetJobAnnotations returns the annotations of given job -func GetJobAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.Job).Annotations == nil { - item.(*batchv1.Job).Annotations = make(map[string]string) - } - return item.(*batchv1.Job).Annotations -} - -// GetDaemonSetAnnotations returns the annotations of given daemonSet -func GetDaemonSetAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.DaemonSet).Annotations == nil { - item.(*appsv1.DaemonSet).Annotations = make(map[string]string) - } - return item.(*appsv1.DaemonSet).Annotations -} - -// GetStatefulSetAnnotations returns the annotations of given statefulSet -func GetStatefulSetAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.StatefulSet).Annotations == nil { - item.(*appsv1.StatefulSet).Annotations = make(map[string]string) - } - return item.(*appsv1.StatefulSet).Annotations -} - -// GetRolloutAnnotations returns the annotations of given rollout -func GetRolloutAnnotations(item runtime.Object) map[string]string { - if item.(*argorolloutv1alpha1.Rollout).Annotations == nil { - item.(*argorolloutv1alpha1.Rollout).Annotations = make(map[string]string) - } - return item.(*argorolloutv1alpha1.Rollout).Annotations -} - -// GetDeploymentPodAnnotations returns the pod's annotations 
of given deployment -func GetDeploymentPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.Deployment).Spec.Template.Annotations == nil { - item.(*appsv1.Deployment).Spec.Template.Annotations = make(map[string]string) - } - return item.(*appsv1.Deployment).Spec.Template.Annotations -} - -// GetCronJobPodAnnotations returns the pod's annotations of given cronjob -func GetCronJobPodAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations == nil { - item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) - } - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations -} - -// GetJobPodAnnotations returns the pod's annotations of given job -func GetJobPodAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.Job).Spec.Template.Annotations == nil { - item.(*batchv1.Job).Spec.Template.Annotations = make(map[string]string) - } - return item.(*batchv1.Job).Spec.Template.Annotations -} - -// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet -func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.DaemonSet).Spec.Template.Annotations == nil { - item.(*appsv1.DaemonSet).Spec.Template.Annotations = make(map[string]string) - } - return item.(*appsv1.DaemonSet).Spec.Template.Annotations -} - -// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet -func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.StatefulSet).Spec.Template.Annotations == nil { - item.(*appsv1.StatefulSet).Spec.Template.Annotations = make(map[string]string) - } - return item.(*appsv1.StatefulSet).Spec.Template.Annotations -} - -// GetRolloutPodAnnotations returns the pod's annotations of given rollout -func GetRolloutPodAnnotations(item runtime.Object) map[string]string { - if 
item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations == nil { - item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations = make(map[string]string) - } - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations -} - -// GetDeploymentContainers returns the containers of given deployment -func GetDeploymentContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.Deployment).Spec.Template.Spec.Containers -} - -// GetCronJobContainers returns the containers of given cronjob -func GetCronJobContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Containers -} - -// GetJobContainers returns the containers of given job -func GetJobContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.Job).Spec.Template.Spec.Containers -} - -// GetDaemonSetContainers returns the containers of given daemonSet -func GetDaemonSetContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers -} - -// GetStatefulSetContainers returns the containers of given statefulSet -func GetStatefulSetContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers -} - -// GetRolloutContainers returns the containers of given rollout -func GetRolloutContainers(item runtime.Object) []v1.Container { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers -} - -// GetDeploymentInitContainers returns the containers of given deployment -func GetDeploymentInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.Deployment).Spec.Template.Spec.InitContainers -} - -// GetCronJobInitContainers returns the containers of given cronjob -func GetCronJobInitContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.InitContainers -} - -// GetJobInitContainers returns the containers of given job -func 
GetJobInitContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.Job).Spec.Template.Spec.InitContainers -} - -// GetDaemonSetInitContainers returns the containers of given daemonSet -func GetDaemonSetInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers -} - -// GetStatefulSetInitContainers returns the containers of given statefulSet -func GetStatefulSetInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers -} - -// GetRolloutInitContainers returns the containers of given rollout -func GetRolloutInitContainers(item runtime.Object) []v1.Container { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers -} - -// GetPatchTemplates returns patch templates -func GetPatchTemplates() PatchTemplates { - return PatchTemplates{ - AnnotationTemplate: `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, // strategic merge patch - EnvVarTemplate: `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`, // strategic merge patch - DeleteEnvVarTemplate: `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`, // JSON patch - } -} - -// UpdateDeployment performs rolling upgrade on deployment -func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error { - deployment := resource.(*appsv1.Deployment) - _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"}) - return err -} - -// PatchDeployment performs rolling upgrade on deployment -func PatchDeployment(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - deployment := resource.(*appsv1.Deployment) - _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Patch(context.TODO(), 
deployment.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) - return err -} - -// CreateJobFromCronjob performs rolling upgrade on cronjob -func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runtime.Object) error { - cronJob := resource.(*batchv1.CronJob) - - annotations := make(map[string]string) - annotations["cronjob.kubernetes.io/instantiate"] = "manual" - maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations) - - job := &batchv1.Job{ - ObjectMeta: meta_v1.ObjectMeta{ - GenerateName: cronJob.Name + "-", - Namespace: cronJob.Namespace, - Annotations: annotations, - Labels: cronJob.Spec.JobTemplate.Labels, - OwnerReferences: []meta_v1.OwnerReference{*meta_v1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob"))}, - }, - Spec: cronJob.Spec.JobTemplate.Spec, - } - _, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"}) - return err -} - -func PatchCronJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - return errors.New("not supported patching: CronJob") -} - -// ReCreateJobFromjob performs rolling upgrade on job -func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error { - oldJob := resource.(*batchv1.Job) - job := oldJob.DeepCopy() - - // Delete the old job - policy := meta_v1.DeletePropagationBackground - err := clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), job.Name, meta_v1.DeleteOptions{PropagationPolicy: &policy}) - if err != nil { - return err - } - - // Remove fields that should not be specified when creating a new Job - job.ResourceVersion = "" - job.UID = "" - job.CreationTimestamp = meta_v1.Time{} - job.Status = batchv1.JobStatus{} - - // Remove problematic labels - delete(job.Spec.Template.Labels, "controller-uid") - delete(job.Spec.Template.Labels, 
batchv1.ControllerUidLabel) - delete(job.Spec.Template.Labels, batchv1.JobNameLabel) - delete(job.Spec.Template.Labels, "job-name") - - // Remove the selector to allow it to be auto-generated - job.Spec.Selector = nil - - // Create the new job with same spec - _, err = clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"}) - return err -} - -func PatchJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - return errors.New("not supported patching: Job") -} - -// UpdateDaemonSet performs rolling upgrade on daemonSet -func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error { - daemonSet := resource.(*appsv1.DaemonSet) - _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"}) - return err -} - -func PatchDaemonSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - daemonSet := resource.(*appsv1.DaemonSet) - _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Patch(context.TODO(), daemonSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) - return err -} - -// UpdateStatefulSet performs rolling upgrade on statefulSet -func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error { - statefulSet := resource.(*appsv1.StatefulSet) - _, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"}) - return err -} - -func PatchStatefulSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - statefulSet := resource.(*appsv1.StatefulSet) - _, err := 
clients.KubernetesClient.AppsV1().StatefulSets(namespace).Patch(context.TODO(), statefulSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) - return err -} - -// UpdateRollout performs rolling upgrade on rollout -func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error { - rollout := resource.(*argorolloutv1alpha1.Rollout) - strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation] - var err error - switch options.ToArgoRolloutStrategy(strategy) { - case options.RestartStrategy: - _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"}) - case options.RolloutStrategy: - _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"}) - } - return err -} - -func PatchRollout(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - return errors.New("not supported patching: Rollout") -} - -// GetDeploymentVolumes returns the Volumes of given deployment -func GetDeploymentVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes -} - -// GetCronJobVolumes returns the Volumes of given cronjob -func GetCronJobVolumes(item runtime.Object) []v1.Volume { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Volumes -} - -// GetJobVolumes returns the Volumes of given job -func GetJobVolumes(item runtime.Object) []v1.Volume { - return item.(*batchv1.Job).Spec.Template.Spec.Volumes -} - -// GetDaemonSetVolumes returns the Volumes of given daemonSet -func GetDaemonSetVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes -} - -// 
GetStatefulSetVolumes returns the Volumes of given statefulSet -func GetStatefulSetVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes -} - -// GetRolloutVolumes returns the Volumes of given rollout -func GetRolloutVolumes(item runtime.Object) []v1.Volume { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes -} diff --git a/internal/pkg/callbacks/rolling_upgrade_test.go b/internal/pkg/callbacks/rolling_upgrade_test.go deleted file mode 100644 index 452867f47..000000000 --- a/internal/pkg/callbacks/rolling_upgrade_test.go +++ /dev/null @@ -1,773 +0,0 @@ -package callbacks_test - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes/fake" - - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" - patchtypes "k8s.io/apimachinery/pkg/types" - - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/pkg/kube" -) - -var ( - clients = setupTestClients() -) - -type testFixtures struct { - defaultContainers []v1.Container - defaultInitContainers []v1.Container - defaultVolumes []v1.Volume - namespace string -} - -func newTestFixtures() testFixtures { - return testFixtures{ - defaultContainers: []v1.Container{{Name: "container1"}, {Name: "container2"}}, - defaultInitContainers: []v1.Container{{Name: "init-container1"}, {Name: "init-container2"}}, - defaultVolumes: []v1.Volume{{Name: "volume1"}, {Name: "volume2"}}, - 
namespace: "default", - } -} - -func setupTestClients() kube.Clients { - return kube.Clients{ - KubernetesClient: fake.NewSimpleClientset(), - ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(), - } -} - -// TestUpdateRollout test update rollout strategy annotation -func TestUpdateRollout(t *testing.T) { - namespace := "test-ns" - - cases := map[string]struct { - name string - strategy string - isRestart bool - }{ - "test-without-strategy": { - name: "defaults to rollout strategy", - strategy: "", - isRestart: false, - }, - "test-with-restart-strategy": { - name: "triggers a restart strategy", - strategy: "restart", - isRestart: true, - }, - "test-with-rollout-strategy": { - name: "triggers a rollout strategy", - strategy: "rollout", - isRestart: false, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - rollout, err := testutil.CreateRollout( - clients.ArgoRolloutClient, name, namespace, - map[string]string{options.RolloutStrategyAnnotation: tc.strategy}, - ) - if err != nil { - t.Errorf("creating rollout: %v", err) - } - modifiedChan := watchRollout(rollout.Name, namespace) - - err = callbacks.UpdateRollout(clients, namespace, rollout) - if err != nil { - t.Errorf("updating rollout: %v", err) - } - rollout, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts( - namespace).Get(context.TODO(), rollout.Name, metav1.GetOptions{}) - - if err != nil { - t.Errorf("getting rollout: %v", err) - } - if isRestartStrategy(rollout) == tc.isRestart { - t.Errorf("Should not be a restart strategy") - } - select { - case <-modifiedChan: - // object has been modified - case <-time.After(1 * time.Second): - t.Errorf("Rollout has not been updated") - } - }) - } -} - -func TestPatchRollout(t *testing.T) { - namespace := "test-ns" - rollout := testutil.GetRollout(namespace, "test", map[string]string{options.RolloutStrategyAnnotation: ""}) - err := callbacks.PatchRollout(clients, namespace, rollout, patchtypes.StrategicMergePatchType, 
[]byte(`{"spec": {}}`)) - assert.EqualError(t, err, "not supported patching: Rollout") -} - -func TestResourceItem(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - createFunc func(kube.Clients, string, string) (runtime.Object, error) - getItemFunc func(kube.Clients, string, string) (runtime.Object, error) - deleteFunc func(kube.Clients, string, string) error - }{ - { - name: "Deployment", - createFunc: createTestDeploymentWithAnnotations, - getItemFunc: callbacks.GetDeploymentItem, - deleteFunc: deleteTestDeployment, - }, - { - name: "CronJob", - createFunc: createTestCronJobWithAnnotations, - getItemFunc: callbacks.GetCronJobItem, - deleteFunc: deleteTestCronJob, - }, - { - name: "Job", - createFunc: createTestJobWithAnnotations, - getItemFunc: callbacks.GetJobItem, - deleteFunc: deleteTestJob, - }, - { - name: "DaemonSet", - createFunc: createTestDaemonSetWithAnnotations, - getItemFunc: callbacks.GetDaemonSetItem, - deleteFunc: deleteTestDaemonSet, - }, - { - name: "StatefulSet", - createFunc: createTestStatefulSetWithAnnotations, - getItemFunc: callbacks.GetStatefulSetItem, - deleteFunc: deleteTestStatefulSet, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resource, err := tt.createFunc(clients, fixtures.namespace, "1") - assert.NoError(t, err) - - accessor, err := meta.Accessor(resource) - assert.NoError(t, err) - - _, err = tt.getItemFunc(clients, accessor.GetName(), fixtures.namespace) - assert.NoError(t, err) - - err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName()) - assert.NoError(t, err) - }) - } -} - -func TestResourceItems(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - createFunc func(kube.Clients, string) error - getItemsFunc func(kube.Clients, string) []runtime.Object - deleteFunc func(kube.Clients, string) error - expectedCount int - }{ - { - name: "Deployments", - createFunc: createTestDeployments, - getItemsFunc: 
callbacks.GetDeploymentItems, - deleteFunc: deleteTestDeployments, - expectedCount: 2, - }, - { - name: "CronJobs", - createFunc: createTestCronJobs, - getItemsFunc: callbacks.GetCronJobItems, - deleteFunc: deleteTestCronJobs, - expectedCount: 2, - }, - { - name: "Jobs", - createFunc: createTestJobs, - getItemsFunc: callbacks.GetJobItems, - deleteFunc: deleteTestJobs, - expectedCount: 2, - }, - { - name: "DaemonSets", - createFunc: createTestDaemonSets, - getItemsFunc: callbacks.GetDaemonSetItems, - deleteFunc: deleteTestDaemonSets, - expectedCount: 2, - }, - { - name: "StatefulSets", - createFunc: createTestStatefulSets, - getItemsFunc: callbacks.GetStatefulSetItems, - deleteFunc: deleteTestStatefulSets, - expectedCount: 2, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.createFunc(clients, fixtures.namespace) - assert.NoError(t, err) - - items := tt.getItemsFunc(clients, fixtures.namespace) - assert.Equal(t, tt.expectedCount, len(items)) - }) - } -} - -func TestGetAnnotations(t *testing.T) { - testAnnotations := map[string]string{"version": "1"} - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) map[string]string - }{ - {"Deployment", &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDeploymentAnnotations}, - {"CronJob", &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetCronJobAnnotations}, - {"Job", &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetJobAnnotations}, - {"DaemonSet", &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDaemonSetAnnotations}, - {"StatefulSet", &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetStatefulSetAnnotations}, - {"Rollout", &argorolloutv1alpha1.Rollout{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, 
callbacks.GetRolloutAnnotations}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, testAnnotations, tt.getFunc(tt.resource)) - }) - } -} - -func TestGetPodAnnotations(t *testing.T) { - testAnnotations := map[string]string{"version": "1"} - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) map[string]string - }{ - {"Deployment", createResourceWithPodAnnotations(&appsv1.Deployment{}, testAnnotations), callbacks.GetDeploymentPodAnnotations}, - {"CronJob", createResourceWithPodAnnotations(&batchv1.CronJob{}, testAnnotations), callbacks.GetCronJobPodAnnotations}, - {"Job", createResourceWithPodAnnotations(&batchv1.Job{}, testAnnotations), callbacks.GetJobPodAnnotations}, - {"DaemonSet", createResourceWithPodAnnotations(&appsv1.DaemonSet{}, testAnnotations), callbacks.GetDaemonSetPodAnnotations}, - {"StatefulSet", createResourceWithPodAnnotations(&appsv1.StatefulSet{}, testAnnotations), callbacks.GetStatefulSetPodAnnotations}, - {"Rollout", createResourceWithPodAnnotations(&argorolloutv1alpha1.Rollout{}, testAnnotations), callbacks.GetRolloutPodAnnotations}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, testAnnotations, tt.getFunc(tt.resource)) - }) - } -} - -func TestGetContainers(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) []v1.Container - }{ - {"Deployment", createResourceWithContainers(&appsv1.Deployment{}, fixtures.defaultContainers), callbacks.GetDeploymentContainers}, - {"DaemonSet", createResourceWithContainers(&appsv1.DaemonSet{}, fixtures.defaultContainers), callbacks.GetDaemonSetContainers}, - {"StatefulSet", createResourceWithContainers(&appsv1.StatefulSet{}, fixtures.defaultContainers), callbacks.GetStatefulSetContainers}, - {"CronJob", createResourceWithContainers(&batchv1.CronJob{}, fixtures.defaultContainers), 
callbacks.GetCronJobContainers}, - {"Job", createResourceWithContainers(&batchv1.Job{}, fixtures.defaultContainers), callbacks.GetJobContainers}, - {"Rollout", createResourceWithContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultContainers), callbacks.GetRolloutContainers}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, fixtures.defaultContainers, tt.getFunc(tt.resource)) - }) - } -} - -func TestGetInitContainers(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) []v1.Container - }{ - {"Deployment", createResourceWithInitContainers(&appsv1.Deployment{}, fixtures.defaultInitContainers), callbacks.GetDeploymentInitContainers}, - {"DaemonSet", createResourceWithInitContainers(&appsv1.DaemonSet{}, fixtures.defaultInitContainers), callbacks.GetDaemonSetInitContainers}, - {"StatefulSet", createResourceWithInitContainers(&appsv1.StatefulSet{}, fixtures.defaultInitContainers), callbacks.GetStatefulSetInitContainers}, - {"CronJob", createResourceWithInitContainers(&batchv1.CronJob{}, fixtures.defaultInitContainers), callbacks.GetCronJobInitContainers}, - {"Job", createResourceWithInitContainers(&batchv1.Job{}, fixtures.defaultInitContainers), callbacks.GetJobInitContainers}, - {"Rollout", createResourceWithInitContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultInitContainers), callbacks.GetRolloutInitContainers}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, fixtures.defaultInitContainers, tt.getFunc(tt.resource)) - }) - } -} - -func TestUpdateResources(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - createFunc func(kube.Clients, string, string) (runtime.Object, error) - updateFunc func(kube.Clients, string, runtime.Object) error - deleteFunc func(kube.Clients, string, string) error - }{ - {"Deployment", createTestDeploymentWithAnnotations, 
callbacks.UpdateDeployment, deleteTestDeployment}, - {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet, deleteTestDaemonSet}, - {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet, deleteTestStatefulSet}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resource, err := tt.createFunc(clients, fixtures.namespace, "1") - assert.NoError(t, err) - - err = tt.updateFunc(clients, fixtures.namespace, resource) - assert.NoError(t, err) - - accessor, err := meta.Accessor(resource) - assert.NoError(t, err) - - err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName()) - assert.NoError(t, err) - }) - } -} - -func TestPatchResources(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - createFunc func(kube.Clients, string, string) (runtime.Object, error) - patchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error - deleteFunc func(kube.Clients, string, string) error - assertFunc func(err error) - }{ - {"Deployment", createTestDeploymentWithAnnotations, callbacks.PatchDeployment, deleteTestDeployment, func(err error) { - assert.NoError(t, err) - patchedResource, err := callbacks.GetDeploymentItem(clients, "test-deployment", fixtures.namespace) - assert.NoError(t, err) - assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).Annotations["test"]) - }}, - {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.PatchDaemonSet, deleteTestDaemonSet, func(err error) { - assert.NoError(t, err) - patchedResource, err := callbacks.GetDaemonSetItem(clients, "test-daemonset", fixtures.namespace) - assert.NoError(t, err) - assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).Annotations["test"]) - }}, - {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.PatchStatefulSet, deleteTestStatefulSet, func(err error) { - assert.NoError(t, err) - patchedResource, err := callbacks.GetStatefulSetItem(clients, 
"test-statefulset", fixtures.namespace) - assert.NoError(t, err) - assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).Annotations["test"]) - }}, - {"CronJob", createTestCronJobWithAnnotations, callbacks.PatchCronJob, deleteTestCronJob, func(err error) { - assert.EqualError(t, err, "not supported patching: CronJob") - }}, - {"Job", createTestJobWithAnnotations, callbacks.PatchJob, deleteTestJob, func(err error) { - assert.EqualError(t, err, "not supported patching: Job") - }}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resource, err := tt.createFunc(clients, fixtures.namespace, "1") - assert.NoError(t, err) - - err = tt.patchFunc(clients, fixtures.namespace, resource, patchtypes.StrategicMergePatchType, []byte(`{"metadata":{"annotations":{"test":"test"}}}`)) - tt.assertFunc(err) - - accessor, err := meta.Accessor(resource) - assert.NoError(t, err) - - err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName()) - assert.NoError(t, err) - }) - } -} - -func TestCreateJobFromCronjob(t *testing.T) { - fixtures := newTestFixtures() - - runtimeObj, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1") - assert.NoError(t, err) - - cronJob := runtimeObj.(*batchv1.CronJob) - err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob) - assert.NoError(t, err) - - jobList, err := clients.KubernetesClient.BatchV1().Jobs(fixtures.namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - - ownerFound := false - for _, job := range jobList.Items { - if isControllerOwner("CronJob", cronJob.Name, job.OwnerReferences) { - ownerFound = true - break - } - } - assert.Truef(t, ownerFound, "Missing CronJob owner reference") - - err = deleteTestCronJob(clients, fixtures.namespace, cronJob.Name) - assert.NoError(t, err) -} - -func TestReCreateJobFromJob(t *testing.T) { - fixtures := newTestFixtures() - - job, err := createTestJobWithAnnotations(clients, fixtures.namespace, "1") - 
assert.NoError(t, err) - - err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job)) - assert.NoError(t, err) - - err = deleteTestJob(clients, fixtures.namespace, "test-job") - assert.NoError(t, err) -} - -func TestGetVolumes(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) []v1.Volume - }{ - {"Deployment", createResourceWithVolumes(&appsv1.Deployment{}, fixtures.defaultVolumes), callbacks.GetDeploymentVolumes}, - {"CronJob", createResourceWithVolumes(&batchv1.CronJob{}, fixtures.defaultVolumes), callbacks.GetCronJobVolumes}, - {"Job", createResourceWithVolumes(&batchv1.Job{}, fixtures.defaultVolumes), callbacks.GetJobVolumes}, - {"DaemonSet", createResourceWithVolumes(&appsv1.DaemonSet{}, fixtures.defaultVolumes), callbacks.GetDaemonSetVolumes}, - {"StatefulSet", createResourceWithVolumes(&appsv1.StatefulSet{}, fixtures.defaultVolumes), callbacks.GetStatefulSetVolumes}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, fixtures.defaultVolumes, tt.getFunc(tt.resource)) - }) - } -} - -func TesGetPatchTemplateAnnotation(t *testing.T) { - templates := callbacks.GetPatchTemplates() - assert.NotEmpty(t, templates.AnnotationTemplate) - assert.Equal(t, 2, strings.Count(templates.AnnotationTemplate, "%s")) -} - -func TestGetPatchTemplateEnvVar(t *testing.T) { - templates := callbacks.GetPatchTemplates() - assert.NotEmpty(t, templates.EnvVarTemplate) - assert.Equal(t, 3, strings.Count(templates.EnvVarTemplate, "%s")) -} - -func TestGetPatchDeleteTemplateEnvVar(t *testing.T) { - templates := callbacks.GetPatchTemplates() - assert.NotEmpty(t, templates.DeleteEnvVarTemplate) - assert.Equal(t, 2, strings.Count(templates.DeleteEnvVarTemplate, "%d")) -} - -// Helper functions - -func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool { - return rollout.Spec.RestartAt == nil -} - -func watchRollout(name, 
namespace string) chan interface{} { - timeOut := int64(1) - modifiedChan := make(chan interface{}) - watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), metav1.ListOptions{TimeoutSeconds: &timeOut}) - go watchModified(watcher, name, modifiedChan) - return modifiedChan -} - -func watchModified(watcher watch.Interface, name string, modifiedChan chan interface{}) { - for event := range watcher.ResultChan() { - item := event.Object.(*argorolloutv1alpha1.Rollout) - if item.Name == name { - switch event.Type { - case watch.Modified: - modifiedChan <- nil - } - return - } - } -} - -func createTestDeployments(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateDeployment(clients.KubernetesClient, fmt.Sprintf("test-deployment-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func deleteTestDeployments(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteDeployment(clients.KubernetesClient, namespace, fmt.Sprintf("test-deployment-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createTestCronJobs(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func deleteTestCronJobs(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteCronJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-cron-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createTestJobs(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func 
deleteTestJobs(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-job-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createTestDaemonSets(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func deleteTestDaemonSets(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-daemonset-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createTestStatefulSets(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func deleteTestStatefulSets(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-statefulset-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object { - switch v := obj.(type) { - case *appsv1.Deployment: - v.Spec.Template.Annotations = annotations - case *appsv1.DaemonSet: - v.Spec.Template.Annotations = annotations - case *appsv1.StatefulSet: - v.Spec.Template.Annotations = annotations - case *batchv1.CronJob: - v.Spec.JobTemplate.Spec.Template.Annotations = annotations - case *batchv1.Job: - v.Spec.Template.Annotations = annotations - case *argorolloutv1alpha1.Rollout: - v.Spec.Template.Annotations = annotations - } - return obj -} - -func createResourceWithContainers(obj 
runtime.Object, containers []v1.Container) runtime.Object { - switch v := obj.(type) { - case *appsv1.Deployment: - v.Spec.Template.Spec.Containers = containers - case *appsv1.DaemonSet: - v.Spec.Template.Spec.Containers = containers - case *appsv1.StatefulSet: - v.Spec.Template.Spec.Containers = containers - case *batchv1.CronJob: - v.Spec.JobTemplate.Spec.Template.Spec.Containers = containers - case *batchv1.Job: - v.Spec.Template.Spec.Containers = containers - case *argorolloutv1alpha1.Rollout: - v.Spec.Template.Spec.Containers = containers - } - return obj -} - -func createResourceWithInitContainers(obj runtime.Object, initContainers []v1.Container) runtime.Object { - switch v := obj.(type) { - case *appsv1.Deployment: - v.Spec.Template.Spec.InitContainers = initContainers - case *appsv1.DaemonSet: - v.Spec.Template.Spec.InitContainers = initContainers - case *appsv1.StatefulSet: - v.Spec.Template.Spec.InitContainers = initContainers - case *batchv1.CronJob: - v.Spec.JobTemplate.Spec.Template.Spec.InitContainers = initContainers - case *batchv1.Job: - v.Spec.Template.Spec.InitContainers = initContainers - case *argorolloutv1alpha1.Rollout: - v.Spec.Template.Spec.InitContainers = initContainers - } - return obj -} - -func createResourceWithVolumes(obj runtime.Object, volumes []v1.Volume) runtime.Object { - switch v := obj.(type) { - case *appsv1.Deployment: - v.Spec.Template.Spec.Volumes = volumes - case *batchv1.CronJob: - v.Spec.JobTemplate.Spec.Template.Spec.Volumes = volumes - case *batchv1.Job: - v.Spec.Template.Spec.Volumes = volumes - case *appsv1.DaemonSet: - v.Spec.Template.Spec.Volumes = volumes - case *appsv1.StatefulSet: - v.Spec.Template.Spec.Volumes = volumes - } - return obj -} - -func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: namespace, - Annotations: 
map[string]string{"version": version}, - }, - } - return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) -} - -func deleteTestDeployment(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - daemonSet := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-daemonset", - Namespace: namespace, - Annotations: map[string]string{"version": version}, - }, - } - return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{}) -} - -func deleteTestDaemonSet(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - statefulSet := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-statefulset", - Namespace: namespace, - Annotations: map[string]string{"version": version}, - }, - } - return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{}) -} - -func deleteTestStatefulSet(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - cronJob := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cronjob", - Namespace: namespace, - Annotations: map[string]string{"version": version}, - }, - } - return 
clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{}) -} - -func deleteTestCronJob(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.BatchV1().CronJobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-job", - Namespace: namespace, - Annotations: map[string]string{"version": version}, - }, - } - return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{}) -} - -func deleteTestJob(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func isControllerOwner(kind, name string, ownerRefs []metav1.OwnerReference) bool { - for _, ownerRef := range ownerRefs { - if *ownerRef.Controller && ownerRef.Kind == kind && ownerRef.Name == name { - return true - } - } - return false -} diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go deleted file mode 100644 index f54d75717..000000000 --- a/internal/pkg/cmd/reloader.go +++ /dev/null @@ -1,209 +0,0 @@ -package cmd - -import ( - "context" - "errors" - "fmt" - "net/http" - _ "net/http/pprof" - "os" - "strings" - - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/leadership" - - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" -) - -// NewReloaderCommand starts 
the reloader controller -func NewReloaderCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "reloader", - Short: "A watcher for your Kubernetes cluster", - PreRunE: validateFlags, - Run: startReloader, - } - - // options - util.ConfigureReloaderFlags(cmd) - - return cmd -} - -func validateFlags(*cobra.Command, []string) error { - // Ensure the reload strategy is one of the following... - var validReloadStrategy bool - valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy} - for _, s := range valid { - if s == options.ReloadStrategy { - validReloadStrategy = true - } - } - - if !validReloadStrategy { - err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", ")) - return errors.New(err) - } - - // Validate that HA options are correct - if options.EnableHA { - if err := validateHAEnvs(); err != nil { - return err - } - } - - return nil -} - -func configureLogging(logFormat, logLevel string) error { - switch logFormat { - case "json": - logrus.SetFormatter(&logrus.JSONFormatter{}) - default: - // just let the library use default on empty string. 
- if logFormat != "" { - return fmt.Errorf("unsupported logging formatter: %q", logFormat) - } - } - // set log level - level, err := logrus.ParseLevel(logLevel) - if err != nil { - return err - } - logrus.SetLevel(level) - return nil -} - -func validateHAEnvs() error { - podName, podNamespace := getHAEnvs() - - if podName == "" { - return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv) - } - if podNamespace == "" { - return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv) - } - return nil -} - -func getHAEnvs() (string, string) { - podName := os.Getenv(constants.PodNameEnv) - podNamespace := os.Getenv(constants.PodNamespaceEnv) - - return podName, podNamespace -} - -func startReloader(cmd *cobra.Command, args []string) { - common.GetCommandLineOptions() - err := configureLogging(options.LogFormat, options.LogLevel) - if err != nil { - logrus.Warn(err) - } - - logrus.Info("Starting Reloader") - isGlobal := false - currentNamespace := os.Getenv("KUBERNETES_NAMESPACE") - if len(currentNamespace) == 0 { - currentNamespace = v1.NamespaceAll - isGlobal = true - logrus.Warnf("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces.") - } - - // create the clientset - clientset, err := kube.GetKubernetesClient() - if err != nil { - logrus.Fatal(err) - } - - ignoredResourcesList, err := util.GetIgnoredResourcesList() - if err != nil { - logrus.Fatal(err) - } - - ignoredNamespacesList := options.NamespacesToIgnore - namespaceLabelSelector := "" - - if isGlobal { - namespaceLabelSelector, err = common.GetNamespaceLabelSelector(options.NamespaceSelectors) - if err != nil { - logrus.Fatal(err) - } - } - - resourceLabelSelector, err := common.GetResourceLabelSelector(options.ResourceSelectors) - if err != nil { - logrus.Fatal(err) - } - - if len(namespaceLabelSelector) > 0 { - logrus.Warnf("namespace-selector is set, will only detect 
changes in namespaces with these labels: %s.", namespaceLabelSelector) - } - - if len(resourceLabelSelector) > 0 { - logrus.Warnf("resource-label-selector is set, will only detect changes on resources with these labels: %s.", resourceLabelSelector) - } - - if options.WebhookUrl != "" { - logrus.Warnf("webhook-url is set, will only send webhook, no resources will be reloaded") - } - - collectors := metrics.SetupPrometheusEndpoint() - - var controllers []*controller.Controller - for k := range kube.ResourceMap { - if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") { - continue - } - - c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, namespaceLabelSelector, resourceLabelSelector, collectors) - if err != nil { - logrus.Fatalf("%s", err) - } - - controllers = append(controllers, c) - - // If HA is enabled we only run the controller when - if options.EnableHA { - continue - } - // Now let's start the controller - stop := make(chan struct{}) - defer close(stop) - logrus.Infof("Starting Controller to watch resource type: %s", k) - go c.Run(1, stop) - } - - // Run leadership election - if options.EnableHA { - podName, podNamespace := getHAEnvs() - lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers) - } - - common.PublishMetaInfoConfigmap(clientset) - - if options.EnablePProf { - go startPProfServer() - } - - leadership.SetupLivenessEndpoint() - logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil)) -} - -func startPProfServer() { - logrus.Infof("Starting pprof server on %s", options.PProfAddr) - if err := http.ListenAndServe(options.PProfAddr, nil); err != nil { - logrus.Errorf("Failed to start pprof server: %v", err) - } -} diff --git a/internal/pkg/config/config.go 
b/internal/pkg/config/config.go
new file mode 100644
index 000000000..c33b78adc
--- /dev/null
+++ b/internal/pkg/config/config.go
@@ -0,0 +1,189 @@
// Package config provides configuration management for Reloader.
package config

import (
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/labels"
)

// ReloadStrategy defines how Reloader triggers workload restarts.
type ReloadStrategy string

const (
	// ReloadStrategyEnvVars restarts workloads by mutating an env var in the
	// pod template (the default strategy).
	ReloadStrategyEnvVars ReloadStrategy = "env-vars"
	// ReloadStrategyAnnotations restarts workloads via pod-template
	// annotations instead of env vars.
	ReloadStrategyAnnotations ReloadStrategy = "annotations"
)

// ArgoRolloutStrategy defines the strategy for Argo Rollout updates.
type ArgoRolloutStrategy string

const (
	// ArgoRolloutStrategyRestart uses Argo's restart mechanism.
	ArgoRolloutStrategyRestart ArgoRolloutStrategy = "restart"
	// ArgoRolloutStrategyRollout performs a full rollout (the default).
	ArgoRolloutStrategyRollout ArgoRolloutStrategy = "rollout"
)

// Config holds all configuration for Reloader.
type Config struct {
	Annotations             AnnotationConfig    `json:"annotations"`
	AutoReloadAll           bool                `json:"autoReloadAll"`
	ReloadStrategy          ReloadStrategy      `json:"reloadStrategy"`
	ArgoRolloutsEnabled     bool                `json:"argoRolloutsEnabled"`
	ArgoRolloutStrategy     ArgoRolloutStrategy `json:"argoRolloutStrategy"`
	DeploymentConfigEnabled bool                `json:"deploymentConfigEnabled"`
	ReloadOnCreate          bool                `json:"reloadOnCreate"`
	ReloadOnDelete          bool                `json:"reloadOnDelete"`
	SyncAfterRestart        bool                `json:"syncAfterRestart"`
	EnableHA                bool                `json:"enableHA"`
	WebhookURL              string              `json:"webhookUrl,omitempty"`

	// Ignore lists and selectors. The parsed selector slices are built from
	// the corresponding *Strings fields (see flags.go) and are excluded from
	// JSON because labels.Selector is not marshalable.
	IgnoredResources         []string          `json:"ignoredResources,omitempty"`
	IgnoredWorkloads         []string          `json:"ignoredWorkloads,omitempty"`
	IgnoredNamespaces        []string          `json:"ignoredNamespaces,omitempty"`
	NamespaceSelectors       []labels.Selector `json:"-"`
	ResourceSelectors        []labels.Selector `json:"-"`
	NamespaceSelectorStrings []string          `json:"namespaceSelectors,omitempty"`
	ResourceSelectorStrings  []string          `json:"resourceSelectors,omitempty"`

	// Observability endpoints.
	LogFormat   string `json:"logFormat,omitempty"`
	LogLevel    string `json:"logLevel"`
	MetricsAddr string `json:"metricsAddr"`
	HealthAddr  string `json:"healthAddr"`
	EnablePProf bool   `json:"enablePProf"`
	PProfAddr   string `json:"pprofAddr,omitempty"`

	Alerting         AlertingConfig       `json:"alerting"`
	LeaderElection   LeaderElectionConfig `json:"leaderElection"`
	WatchedNamespace string               `json:"watchedNamespace,omitempty"` // empty means watch all namespaces
	SyncPeriod       time.Duration        `json:"syncPeriod"`
}

// AnnotationConfig holds customizable annotation keys.
// Default key values are defined in DefaultAnnotations.
type AnnotationConfig struct {
	Prefix           string `json:"prefix"`
	Auto             string `json:"auto"`
	ConfigmapAuto    string `json:"configmapAuto"`
	SecretAuto       string `json:"secretAuto"`
	ConfigmapReload  string `json:"configmapReload"`
	SecretReload     string `json:"secretReload"`
	ConfigmapExclude string `json:"configmapExclude"`
	SecretExclude    string `json:"secretExclude"`
	Ignore           string `json:"ignore"`
	Search           string `json:"search"`
	Match            string `json:"match"`
	RolloutStrategy  string `json:"rolloutStrategy"`
	PausePeriod      string `json:"pausePeriod"`
	PausedAt         string `json:"pausedAt"`
	LastReloadedFrom string `json:"lastReloadedFrom"`
}

// AlertingConfig holds configuration for alerting integrations.
type AlertingConfig struct {
	Enabled    bool   `json:"enabled"`
	WebhookURL string `json:"webhookUrl,omitempty"`
	Sink       string `json:"sink,omitempty"`
	Proxy      string `json:"proxy,omitempty"`
	Additional string `json:"additional,omitempty"`
	Structured bool   `json:"structured,omitempty"` // For raw sink: send structured JSON instead of plain text
}

// LeaderElectionConfig holds configuration for leader election.
type LeaderElectionConfig struct {
	LockName        string        `json:"lockName"`
	Namespace       string        `json:"namespace,omitempty"` // defaults to the pod namespace when empty
	Identity        string        `json:"identity,omitempty"`
	LeaseDuration   time.Duration `json:"leaseDuration"`
	RenewDeadline   time.Duration `json:"renewDeadline"`
	RetryPeriod     time.Duration `json:"retryPeriod"`
	ReleaseOnCancel bool          `json:"releaseOnCancel"`
}

// NewDefault creates a Config with default values.
+func NewDefault() *Config { + return &Config{ + Annotations: DefaultAnnotations(), + AutoReloadAll: false, + ReloadStrategy: ReloadStrategyEnvVars, + ArgoRolloutsEnabled: false, + ArgoRolloutStrategy: ArgoRolloutStrategyRollout, + DeploymentConfigEnabled: false, + ReloadOnCreate: false, + ReloadOnDelete: false, + SyncAfterRestart: false, + EnableHA: false, + WebhookURL: "", + IgnoredResources: []string{}, + IgnoredWorkloads: []string{}, + IgnoredNamespaces: []string{}, + NamespaceSelectors: []labels.Selector{}, + ResourceSelectors: []labels.Selector{}, + LogFormat: "", + LogLevel: "info", + MetricsAddr: ":9090", + HealthAddr: ":8080", + EnablePProf: false, + PProfAddr: ":6060", + Alerting: AlertingConfig{}, + LeaderElection: LeaderElectionConfig{ + LockName: "reloader-leader-election", + LeaseDuration: 15 * time.Second, + RenewDeadline: 10 * time.Second, + RetryPeriod: 2 * time.Second, + ReleaseOnCancel: true, + }, + WatchedNamespace: "", + SyncPeriod: 0, + } +} + +// DefaultAnnotations returns the default annotation configuration. 
+func DefaultAnnotations() AnnotationConfig { + return AnnotationConfig{ + Prefix: "reloader.stakater.com", + Auto: "reloader.stakater.com/auto", + ConfigmapAuto: "configmap.reloader.stakater.com/auto", + SecretAuto: "secret.reloader.stakater.com/auto", + ConfigmapReload: "configmap.reloader.stakater.com/reload", + SecretReload: "secret.reloader.stakater.com/reload", + ConfigmapExclude: "configmaps.exclude.reloader.stakater.com/reload", + SecretExclude: "secrets.exclude.reloader.stakater.com/reload", + Ignore: "reloader.stakater.com/ignore", + Search: "reloader.stakater.com/search", + Match: "reloader.stakater.com/match", + RolloutStrategy: "reloader.stakater.com/rollout-strategy", + PausePeriod: "deployment.reloader.stakater.com/pause-period", + PausedAt: "deployment.reloader.stakater.com/paused-at", + LastReloadedFrom: "reloader.stakater.com/last-reloaded-from", + } +} + +// IsResourceIgnored checks if a resource name should be ignored (case-insensitive). +func (c *Config) IsResourceIgnored(name string) bool { + for _, ignored := range c.IgnoredResources { + if strings.EqualFold(ignored, name) { + return true + } + } + return false +} + +// IsWorkloadIgnored checks if a workload type should be ignored (case-insensitive). +func (c *Config) IsWorkloadIgnored(workloadType string) bool { + for _, ignored := range c.IgnoredWorkloads { + if strings.EqualFold(ignored, workloadType) { + return true + } + } + return false +} + +// IsNamespaceIgnored checks if a namespace should be ignored. 
+func (c *Config) IsNamespaceIgnored(namespace string) bool { + for _, ignored := range c.IgnoredNamespaces { + if ignored == namespace { + return true + } + } + return false +} diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go new file mode 100644 index 000000000..f117ad609 --- /dev/null +++ b/internal/pkg/config/config_test.go @@ -0,0 +1,201 @@ +package config + +import ( + "testing" + "time" +) + +func TestNewDefault(t *testing.T) { + cfg := NewDefault() + + if cfg == nil { + t.Fatal("NewDefault() returned nil") + } + + if cfg.ReloadStrategy != ReloadStrategyEnvVars { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyEnvVars) + } + + if cfg.ArgoRolloutStrategy != ArgoRolloutStrategyRollout { + t.Errorf("ArgoRolloutStrategy = %v, want %v", cfg.ArgoRolloutStrategy, ArgoRolloutStrategyRollout) + } + + if cfg.AutoReloadAll { + t.Error("AutoReloadAll should be false by default") + } + + if cfg.ArgoRolloutsEnabled { + t.Error("ArgoRolloutsEnabled should be false by default") + } + + if cfg.ReloadOnCreate { + t.Error("ReloadOnCreate should be false by default") + } + + if cfg.ReloadOnDelete { + t.Error("ReloadOnDelete should be false by default") + } + + if cfg.EnableHA { + t.Error("EnableHA should be false by default") + } + + if cfg.LogLevel != "info" { + t.Errorf("LogLevel = %q, want %q", cfg.LogLevel, "info") + } + + if cfg.MetricsAddr != ":9090" { + t.Errorf("MetricsAddr = %q, want %q", cfg.MetricsAddr, ":9090") + } + + if cfg.HealthAddr != ":8080" { + t.Errorf("HealthAddr = %q, want %q", cfg.HealthAddr, ":8080") + } + + if cfg.PProfAddr != ":6060" { + t.Errorf("PProfAddr = %q, want %q", cfg.PProfAddr, ":6060") + } +} + +func TestDefaultAnnotations(t *testing.T) { + ann := DefaultAnnotations() + + tests := []struct { + name string + got string + want string + }{ + {"Prefix", ann.Prefix, "reloader.stakater.com"}, + {"Auto", ann.Auto, "reloader.stakater.com/auto"}, + {"ConfigmapAuto", ann.ConfigmapAuto, 
"configmap.reloader.stakater.com/auto"}, + {"SecretAuto", ann.SecretAuto, "secret.reloader.stakater.com/auto"}, + {"ConfigmapReload", ann.ConfigmapReload, "configmap.reloader.stakater.com/reload"}, + {"SecretReload", ann.SecretReload, "secret.reloader.stakater.com/reload"}, + {"ConfigmapExclude", ann.ConfigmapExclude, "configmaps.exclude.reloader.stakater.com/reload"}, + {"SecretExclude", ann.SecretExclude, "secrets.exclude.reloader.stakater.com/reload"}, + {"Ignore", ann.Ignore, "reloader.stakater.com/ignore"}, + {"Search", ann.Search, "reloader.stakater.com/search"}, + {"Match", ann.Match, "reloader.stakater.com/match"}, + {"RolloutStrategy", ann.RolloutStrategy, "reloader.stakater.com/rollout-strategy"}, + {"PausePeriod", ann.PausePeriod, "deployment.reloader.stakater.com/pause-period"}, + {"PausedAt", ann.PausedAt, "deployment.reloader.stakater.com/paused-at"}, + {"LastReloadedFrom", ann.LastReloadedFrom, "reloader.stakater.com/last-reloaded-from"}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + if tt.got != tt.want { + t.Errorf("%s = %q, want %q", tt.name, tt.got, tt.want) + } + }, + ) + } +} + +func TestDefaultLeaderElection(t *testing.T) { + cfg := NewDefault() + + if cfg.LeaderElection.LockName != "reloader-leader-election" { + t.Errorf("LockName = %q, want %q", cfg.LeaderElection.LockName, "reloader-leader-election") + } + + if cfg.LeaderElection.LeaseDuration != 15*time.Second { + t.Errorf("LeaseDuration = %v, want %v", cfg.LeaderElection.LeaseDuration, 15*time.Second) + } + + if cfg.LeaderElection.RenewDeadline != 10*time.Second { + t.Errorf("RenewDeadline = %v, want %v", cfg.LeaderElection.RenewDeadline, 10*time.Second) + } + + if cfg.LeaderElection.RetryPeriod != 2*time.Second { + t.Errorf("RetryPeriod = %v, want %v", cfg.LeaderElection.RetryPeriod, 2*time.Second) + } + + if !cfg.LeaderElection.ReleaseOnCancel { + t.Error("ReleaseOnCancel should be true by default") + } +} + +func TestConfig_IsResourceIgnored(t 
*testing.T) { + cfg := NewDefault() + cfg.IgnoredResources = []string{"configmaps", "secrets"} + + tests := []struct { + name string + resource string + want bool + }{ + {"exact match lowercase", "configmaps", true}, + {"exact match uppercase", "CONFIGMAPS", true}, + {"exact match mixed case", "ConfigMaps", true}, + {"not ignored", "deployments", false}, + {"partial match (not ignored)", "config", false}, + {"empty string", "", false}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := cfg.IsResourceIgnored(tt.resource) + if got != tt.want { + t.Errorf("IsResourceIgnored(%q) = %v, want %v", tt.resource, got, tt.want) + } + }, + ) + } +} + +func TestConfig_IsWorkloadIgnored(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredWorkloads = []string{"jobs", "cronjobs"} + + tests := []struct { + name string + workload string + want bool + }{ + {"exact match", "jobs", true}, + {"case insensitive", "JOBS", true}, + {"not ignored", "deployments", false}, + {"empty string", "", false}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := cfg.IsWorkloadIgnored(tt.workload) + if got != tt.want { + t.Errorf("IsWorkloadIgnored(%q) = %v, want %v", tt.workload, got, tt.want) + } + }, + ) + } +} + +func TestConfig_IsNamespaceIgnored(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system", "kube-public"} + + tests := []struct { + name string + namespace string + want bool + }{ + {"exact match", "kube-system", true}, + {"case sensitive no match", "Kube-System", false}, + {"not ignored", "default", false}, + {"empty string", "", false}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := cfg.IsNamespaceIgnored(tt.namespace) + if got != tt.want { + t.Errorf("IsNamespaceIgnored(%q) = %v, want %v", tt.namespace, got, tt.want) + } + }, + ) + } +} diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go new file mode 100644 index 
000000000..195b84efa --- /dev/null +++ b/internal/pkg/config/flags.go @@ -0,0 +1,377 @@ +package config + +import ( + "fmt" + "strings" + "time" + + "github.com/spf13/pflag" + "github.com/spf13/viper" + "k8s.io/apimachinery/pkg/labels" +) + +// v is the viper instance for configuration. +var v *viper.Viper + +func init() { + v = viper.New() + // Convert flag names like "alert-webhook-url" to env vars like "ALERT_WEBHOOK_URL" + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() +} + +// BindFlags binds configuration flags to the provided flag set. +// Call this before parsing flags, then call ApplyFlags after parsing. +func BindFlags(fs *pflag.FlagSet, cfg *Config) { + // Auto reload + fs.Bool( + "auto-reload-all", cfg.AutoReloadAll, + "Automatically reload all resources when their configmaps/secrets are updated, without requiring annotations", + ) + + // Reload strategy + fs.String( + "reload-strategy", string(cfg.ReloadStrategy), + "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'", + ) + + // Argo Rollouts + fs.String( + "is-Argo-Rollouts", "false", + "Enable Argo Rollouts support (true/false)", + ) + + // OpenShift DeploymentConfig + fs.String( + "is-openshift", "", + "Enable OpenShift DeploymentConfig support (true/false/auto). 
Empty or 'auto' enables auto-detection", + ) + + // Event watching + fs.String( + "reload-on-create", "false", + "Reload when configmaps/secrets are created (true/false)", + ) + fs.String( + "reload-on-delete", "false", + "Reload when configmaps/secrets are deleted (true/false)", + ) + + // Sync after restart + fs.Bool( + "sync-after-restart", cfg.SyncAfterRestart, + "Trigger sync operation after restart", + ) + + // High availability / Leader election + fs.Bool( + "enable-ha", cfg.EnableHA, + "Enable high-availability mode with leader election", + ) + fs.String( + "leader-election-id", cfg.LeaderElection.LockName, + "Name of the lease resource for leader election", + ) + fs.String( + "leader-election-namespace", cfg.LeaderElection.Namespace, + "Namespace for the leader election lease (defaults to pod namespace)", + ) + fs.Duration( + "leader-election-lease-duration", cfg.LeaderElection.LeaseDuration, + "Duration that non-leader candidates will wait before attempting to acquire leadership", + ) + fs.Duration( + "leader-election-renew-deadline", cfg.LeaderElection.RenewDeadline, + "Duration that the acting leader will retry refreshing leadership before giving up", + ) + fs.Duration( + "leader-election-retry-period", cfg.LeaderElection.RetryPeriod, + "Duration between leader election retries", + ) + fs.Bool( + "leader-election-release-on-cancel", cfg.LeaderElection.ReleaseOnCancel, + "Release the leader lock when the manager is stopped", + ) + + // Webhook + fs.String( + "webhook-url", cfg.WebhookURL, + "URL to send notification instead of triggering reload", + ) + + // Filtering - resources + fs.String( + "resources-to-ignore", "", + "Comma-separated list of resources to ignore (valid options: 'configMaps' or 'secrets')", + ) + fs.String( + "ignored-workload-types", "", + "Comma-separated list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)", + ) + fs.String( + "namespaces-to-ignore", "", + "Comma-separated list of namespaces to ignore", + ) 
+ + // Filtering - selectors + fs.StringSlice( + "namespace-selector", nil, + "Namespace label selectors (can be specified multiple times)", + ) + fs.StringSlice( + "resource-label-selector", nil, + "Resource label selectors (can be specified multiple times)", + ) + + // Logging + fs.String( + "log-format", cfg.LogFormat, + "Log format: 'json' or empty for default", + ) + fs.String( + "log-level", cfg.LogLevel, + "Log level: trace, debug, info, warning, error, fatal, panic", + ) + + // Metrics + fs.String( + "metrics-addr", cfg.MetricsAddr, + "Address to serve metrics on", + ) + + // Health probes + fs.String( + "health-addr", cfg.HealthAddr, + "Address to serve health probes on", + ) + + // Profiling + fs.Bool( + "enable-pprof", cfg.EnablePProf, + "Enable pprof profiling server", + ) + fs.String( + "pprof-addr", cfg.PProfAddr, + "Address for pprof server", + ) + + // Annotation customization (flag names match v1 for backward compatibility) + fs.String( + "auto-annotation", cfg.Annotations.Auto, + "Annotation to detect changes in secrets/configmaps", + ) + fs.String( + "configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, + "Annotation to detect changes in configmaps", + ) + fs.String( + "secret-auto-annotation", cfg.Annotations.SecretAuto, + "Annotation to detect changes in secrets", + ) + fs.String( + "configmap-annotation", cfg.Annotations.ConfigmapReload, + "Annotation to detect changes in configmaps, specified by name", + ) + fs.String( + "secret-annotation", cfg.Annotations.SecretReload, + "Annotation to detect changes in secrets, specified by name", + ) + fs.String( + "auto-search-annotation", cfg.Annotations.Search, + "Annotation to detect changes in configmaps or secrets tagged with special match annotation", + ) + fs.String( + "search-match-annotation", cfg.Annotations.Match, + "Annotation to mark secrets or configmaps to match the search", + ) + fs.String( + "pause-deployment-annotation", cfg.Annotations.PausePeriod, + "Annotation to define the 
time period to pause a deployment after a configmap/secret change", + ) + fs.String( + "pause-deployment-time-annotation", cfg.Annotations.PausedAt, + "Annotation to indicate when a deployment was paused by Reloader", + ) + + // Watched namespace (for single-namespace mode) + fs.String( + "watch-namespace", cfg.WatchedNamespace, + "Namespace to watch (empty for all namespaces)", + ) + + // Alerting + fs.Bool( + "alert-on-reload", cfg.Alerting.Enabled, + "Enable sending alerts when resources are reloaded", + ) + fs.String( + "alert-webhook-url", cfg.Alerting.WebhookURL, + "Webhook URL to send alerts to", + ) + fs.String( + "alert-sink", cfg.Alerting.Sink, + "Alert sink type: 'slack', 'teams', 'gchat', or 'raw' (default)", + ) + fs.String( + "alert-proxy", cfg.Alerting.Proxy, + "Proxy URL for alert webhook requests", + ) + fs.String( + "alert-additional-info", cfg.Alerting.Additional, + "Additional info to include in alerts (e.g., cluster name)", + ) + fs.Bool( + "alert-structured", cfg.Alerting.Structured, + "For raw sink: send structured JSON instead of plain text", + ) + + // Bind pflags to viper + _ = v.BindPFlags(fs) + + // Bind legacy env var names that don't match the automatic conversion + // (flag "alert-proxy" -> env "ALERT_PROXY", but legacy is "ALERT_WEBHOOK_PROXY") + _ = v.BindEnv("alert-proxy", "ALERT_PROXY", "ALERT_WEBHOOK_PROXY") +} + +// ApplyFlags applies flag values from viper to the config struct. +// Call this after parsing flags. 
+func ApplyFlags(cfg *Config) error { + // Boolean flags + cfg.AutoReloadAll = v.GetBool("auto-reload-all") + cfg.SyncAfterRestart = v.GetBool("sync-after-restart") + cfg.EnableHA = v.GetBool("enable-ha") + cfg.EnablePProf = v.GetBool("enable-pprof") + + // Boolean string flags (legacy format: "true"/"false" strings) + cfg.ArgoRolloutsEnabled = parseBoolString(v.GetString("is-Argo-Rollouts")) + cfg.ReloadOnCreate = parseBoolString(v.GetString("reload-on-create")) + cfg.ReloadOnDelete = parseBoolString(v.GetString("reload-on-delete")) + + switch strings.ToLower(strings.TrimSpace(v.GetString("is-openshift"))) { + case "true": + cfg.DeploymentConfigEnabled = true + case "false": + cfg.DeploymentConfigEnabled = false + default: + } + + // String flags + cfg.ReloadStrategy = ReloadStrategy(v.GetString("reload-strategy")) + cfg.WebhookURL = v.GetString("webhook-url") + cfg.LogFormat = v.GetString("log-format") + cfg.LogLevel = v.GetString("log-level") + cfg.MetricsAddr = v.GetString("metrics-addr") + cfg.HealthAddr = v.GetString("health-addr") + cfg.PProfAddr = v.GetString("pprof-addr") + cfg.WatchedNamespace = v.GetString("watch-namespace") + if cfg.WatchedNamespace == "" { + cfg.WatchedNamespace = v.GetString("KUBERNETES_NAMESPACE") + } + + // Leader election + cfg.LeaderElection.LockName = v.GetString("leader-election-id") + cfg.LeaderElection.Namespace = v.GetString("leader-election-namespace") + cfg.LeaderElection.LeaseDuration = v.GetDuration("leader-election-lease-duration") + cfg.LeaderElection.RenewDeadline = v.GetDuration("leader-election-renew-deadline") + cfg.LeaderElection.RetryPeriod = v.GetDuration("leader-election-retry-period") + cfg.LeaderElection.ReleaseOnCancel = v.GetBool("leader-election-release-on-cancel") + + // Annotations + cfg.Annotations.Auto = v.GetString("auto-annotation") + cfg.Annotations.ConfigmapAuto = v.GetString("configmap-auto-annotation") + cfg.Annotations.SecretAuto = v.GetString("secret-auto-annotation") + 
cfg.Annotations.ConfigmapReload = v.GetString("configmap-annotation") + cfg.Annotations.SecretReload = v.GetString("secret-annotation") + cfg.Annotations.Search = v.GetString("auto-search-annotation") + cfg.Annotations.Match = v.GetString("search-match-annotation") + cfg.Annotations.PausePeriod = v.GetString("pause-deployment-annotation") + cfg.Annotations.PausedAt = v.GetString("pause-deployment-time-annotation") + + // Alerting + cfg.Alerting.Enabled = v.GetBool("alert-on-reload") + cfg.Alerting.WebhookURL = v.GetString("alert-webhook-url") + cfg.Alerting.Sink = strings.ToLower(v.GetString("alert-sink")) + cfg.Alerting.Proxy = v.GetString("alert-proxy") + cfg.Alerting.Additional = v.GetString("alert-additional-info") + cfg.Alerting.Structured = v.GetBool("alert-structured") + + // Special case: if webhook URL is set, auto-enable alerting + if cfg.Alerting.WebhookURL != "" { + cfg.Alerting.Enabled = true + } + + // Parse comma-separated lists + cfg.IgnoredResources = splitAndTrim(v.GetString("resources-to-ignore")) + cfg.IgnoredWorkloads = splitAndTrim(v.GetString("ignored-workload-types")) + cfg.IgnoredNamespaces = splitAndTrim(v.GetString("namespaces-to-ignore")) + + // Get selector slices and join with comma + nsSelectors := v.GetStringSlice("namespace-selector") + resSelectors := v.GetStringSlice("resource-label-selector") + + if len(nsSelectors) > 0 { + cfg.NamespaceSelectorStrings = nsSelectors + } + if len(resSelectors) > 0 { + cfg.ResourceSelectorStrings = resSelectors + } + + if len(nsSelectors) > 0 { + joinedNS := strings.Join(nsSelectors, ",") + selector, err := labels.Parse(joinedNS) + if err != nil { + return fmt.Errorf("invalid selector %q: %w", joinedNS, err) + } + cfg.NamespaceSelectors = []labels.Selector{selector} + } + if len(resSelectors) > 0 { + joinedRes := strings.Join(resSelectors, ",") + selector, err := labels.Parse(joinedRes) + if err != nil { + return fmt.Errorf("invalid selector %q: %w", joinedRes, err) + } + cfg.ResourceSelectors = 
[]labels.Selector{selector} + } + + // Ensure duration defaults are preserved if not set + if cfg.LeaderElection.LeaseDuration == 0 { + cfg.LeaderElection.LeaseDuration = 15 * time.Second + } + if cfg.LeaderElection.RenewDeadline == 0 { + cfg.LeaderElection.RenewDeadline = 10 * time.Second + } + if cfg.LeaderElection.RetryPeriod == 0 { + cfg.LeaderElection.RetryPeriod = 2 * time.Second + } + + return nil +} + +// parseBoolString parses a string as a boolean, defaulting to false. +func parseBoolString(s string) bool { + s = strings.ToLower(strings.TrimSpace(s)) + return s == "true" || s == "1" || s == "yes" +} + +// ShouldAutoDetectOpenShift returns true if OpenShift DeploymentConfig support +// should be auto-detected (i.e., the --is-openshift flag was not explicitly set). +func ShouldAutoDetectOpenShift() bool { + val := strings.ToLower(strings.TrimSpace(v.GetString("is-openshift"))) + return val == "" || val == "auto" +} + +// splitAndTrim splits a comma-separated string and trims whitespace. +func splitAndTrim(s string) []string { + if s == "" { + return nil + } + parts := strings.Split(s, ",") + result := make([]string, 0, len(parts)) + for _, p := range parts { + p = strings.TrimSpace(p) + if p != "" { + result = append(result, p) + } + } + return result +} diff --git a/internal/pkg/config/flags_test.go b/internal/pkg/config/flags_test.go new file mode 100644 index 000000000..6a5b3fb26 --- /dev/null +++ b/internal/pkg/config/flags_test.go @@ -0,0 +1,435 @@ +package config + +import ( + "strings" + "testing" + + "github.com/spf13/pflag" + "github.com/spf13/viper" +) + +// resetViper resets the viper instance for testing. 
+func resetViper() { + v = viper.New() + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() +} + +func TestBindFlags(t *testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + BindFlags(fs, cfg) + + expectedFlags := []string{ + "auto-reload-all", + "reload-strategy", + "is-Argo-Rollouts", + "reload-on-create", + "reload-on-delete", + "sync-after-restart", + "enable-ha", + "leader-election-id", + "leader-election-namespace", + "leader-election-lease-duration", + "leader-election-renew-deadline", + "leader-election-retry-period", + "leader-election-release-on-cancel", + "webhook-url", + "resources-to-ignore", + "ignored-workload-types", + "namespaces-to-ignore", + "namespace-selector", + "resource-label-selector", + "log-format", + "log-level", + "metrics-addr", + "health-addr", + "enable-pprof", + "pprof-addr", + "auto-annotation", + "configmap-auto-annotation", + "secret-auto-annotation", + "configmap-annotation", + "secret-annotation", + "auto-search-annotation", + "search-match-annotation", + "pause-deployment-annotation", + "pause-deployment-time-annotation", + "watch-namespace", + "alert-on-reload", + "alert-webhook-url", + "alert-sink", + "alert-proxy", + "alert-additional-info", + "alert-structured", + } + + for _, flagName := range expectedFlags { + if fs.Lookup(flagName) == nil { + t.Errorf("Expected flag %q to be registered", flagName) + } + } +} + +func TestBindFlags_DefaultValues(t *testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + BindFlags(fs, cfg) + + if err := fs.Parse([]string{}); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if cfg.ReloadStrategy != ReloadStrategyEnvVars { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyEnvVars) + } + + if cfg.LogLevel != "info" { + t.Errorf("LogLevel = 
%q, want %q", cfg.LogLevel, "info") + } +} + +func TestBindFlags_CustomValues(t *testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + BindFlags(fs, cfg) + + args := []string{ + "--auto-reload-all=true", + "--reload-strategy=annotations", + "--log-level=debug", + "--log-format=json", + "--webhook-url=https://example.com/hook", + "--enable-ha=true", + "--enable-pprof=true", + } + + if err := fs.Parse(args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if !cfg.AutoReloadAll { + t.Error("AutoReloadAll should be true") + } + + if cfg.ReloadStrategy != ReloadStrategyAnnotations { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyAnnotations) + } + + if cfg.LogLevel != "debug" { + t.Errorf("LogLevel = %q, want %q", cfg.LogLevel, "debug") + } + + if cfg.LogFormat != "json" { + t.Errorf("LogFormat = %q, want %q", cfg.LogFormat, "json") + } + + if cfg.WebhookURL != "https://example.com/hook" { + t.Errorf("WebhookURL = %q, want %q", cfg.WebhookURL, "https://example.com/hook") + } + + if !cfg.EnableHA { + t.Error("EnableHA should be true") + } + + if !cfg.EnablePProf { + t.Error("EnablePProf should be true") + } +} + +func TestApplyFlags_BooleanStrings(t *testing.T) { + tests := []struct { + name string + args []string + want bool + wantErr bool + }{ + {"true lowercase", []string{"--is-Argo-Rollouts=true"}, true, false}, + {"TRUE uppercase", []string{"--is-Argo-Rollouts=TRUE"}, true, false}, + {"1", []string{"--is-Argo-Rollouts=1"}, true, false}, + {"yes", []string{"--is-Argo-Rollouts=yes"}, true, false}, + {"false", []string{"--is-Argo-Rollouts=false"}, false, false}, + {"no", []string{"--is-Argo-Rollouts=no"}, false, false}, + {"0", []string{"--is-Argo-Rollouts=0"}, false, false}, + {"empty", []string{}, false, false}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t 
*testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse(tt.args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + err := ApplyFlags(cfg) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyFlags() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if cfg.ArgoRolloutsEnabled != tt.want { + t.Errorf("ArgoRolloutsEnabled = %v, want %v", cfg.ArgoRolloutsEnabled, tt.want) + } + }, + ) + } +} + +func TestApplyFlags_CommaSeparatedLists(t *testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + args := []string{ + "--resources-to-ignore=configMaps,secrets", + "--ignored-workload-types=jobs,cronjobs", + "--namespaces-to-ignore=kube-system,kube-public", + } + + if err := fs.Parse(args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if len(cfg.IgnoredResources) != 2 { + t.Errorf("IgnoredResources length = %d, want 2", len(cfg.IgnoredResources)) + } + if cfg.IgnoredResources[0] != "configMaps" || cfg.IgnoredResources[1] != "secrets" { + t.Errorf("IgnoredResources = %v", cfg.IgnoredResources) + } + + if len(cfg.IgnoredWorkloads) != 2 { + t.Errorf("IgnoredWorkloads length = %d, want 2", len(cfg.IgnoredWorkloads)) + } + + if len(cfg.IgnoredNamespaces) != 2 { + t.Errorf("IgnoredNamespaces length = %d, want 2", len(cfg.IgnoredNamespaces)) + } +} + +func TestApplyFlags_Selectors(t *testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + args := []string{ + "--namespace-selector=env=production,team=platform", + "--resource-label-selector=app=myapp", + } + + if err := fs.Parse(args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + 
} + + if len(cfg.NamespaceSelectors) != 1 { + t.Errorf("NamespaceSelectors length = %d, want 1", len(cfg.NamespaceSelectors)) + } + + if len(cfg.ResourceSelectors) != 1 { + t.Errorf("ResourceSelectors length = %d, want 1", len(cfg.ResourceSelectors)) + } + + if len(cfg.NamespaceSelectorStrings) != 2 { + t.Errorf("NamespaceSelectorStrings length = %d, want 2", len(cfg.NamespaceSelectorStrings)) + } +} + +func TestApplyFlags_InvalidSelector(t *testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + args := []string{ + "--namespace-selector=env in (prod,staging", // missing closing paren + } + + if err := fs.Parse(args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + err := ApplyFlags(cfg) + if err == nil { + t.Error("ApplyFlags() should return error for invalid selector") + } +} + +func TestApplyFlags_AlertingEnvVars(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + wantURL string + wantSink string + wantEnable bool + }{ + { + name: "ALERT_WEBHOOK_URL enables alerting", + envVars: map[string]string{ + "ALERT_WEBHOOK_URL": "https://hooks.example.com", + }, + wantURL: "https://hooks.example.com", + wantEnable: true, + }, + { + name: "all alert env vars", + envVars: map[string]string{ + "ALERT_WEBHOOK_URL": "https://hooks.example.com", + "ALERT_SINK": "slack", + "ALERT_WEBHOOK_PROXY": "http://proxy:8080", + }, + wantURL: "https://hooks.example.com", + wantSink: "slack", + wantEnable: true, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + resetViper() + + for k, val := range tt.envVars { + t.Setenv(k, val) + } + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse([]string{}); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if cfg.Alerting.WebhookURL != 
tt.wantURL { + t.Errorf("Alerting.WebhookURL = %q, want %q", cfg.Alerting.WebhookURL, tt.wantURL) + } + + if tt.wantSink != "" && cfg.Alerting.Sink != tt.wantSink { + t.Errorf("Alerting.Sink = %q, want %q", cfg.Alerting.Sink, tt.wantSink) + } + + if cfg.Alerting.Enabled != tt.wantEnable { + t.Errorf("Alerting.Enabled = %v, want %v", cfg.Alerting.Enabled, tt.wantEnable) + } + }, + ) + } +} + +func TestApplyFlags_LegacyProxyEnvVar(t *testing.T) { + resetViper() + + t.Setenv("ALERT_WEBHOOK_PROXY", "http://legacy-proxy:8080") + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse([]string{}); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if cfg.Alerting.Proxy != "http://legacy-proxy:8080" { + t.Errorf("Alerting.Proxy = %q, want %q", cfg.Alerting.Proxy, "http://legacy-proxy:8080") + } +} + +func TestParseBoolString(t *testing.T) { + tests := []struct { + input string + want bool + }{ + {"true", true}, + {"TRUE", true}, + {"True", true}, + {" true ", true}, + {"1", true}, + {"yes", true}, + {"YES", true}, + {"false", false}, + {"FALSE", false}, + {"0", false}, + {"no", false}, + {"", false}, + {"invalid", false}, + } + + for _, tt := range tests { + t.Run( + tt.input, func(t *testing.T) { + got := parseBoolString(tt.input) + if got != tt.want { + t.Errorf("parseBoolString(%q) = %v, want %v", tt.input, got, tt.want) + } + }, + ) + } +} + +func TestSplitAndTrim(t *testing.T) { + tests := []struct { + name string + input string + want []string + }{ + {"empty string", "", nil}, + {"single value", "abc", []string{"abc"}}, + {"multiple values", "a,b,c", []string{"a", "b", "c"}}, + {"with spaces", " a , b , c ", []string{"a", "b", "c"}}, + {"empty elements", "a,,b", []string{"a", "b"}}, + {"only commas", ",,,", []string{}}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := 
splitAndTrim(tt.input) + if len(got) != len(tt.want) { + t.Errorf("splitAndTrim(%q) length = %d, want %d", tt.input, len(got), len(tt.want)) + return + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("splitAndTrim(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i]) + } + } + }, + ) + } +} diff --git a/internal/pkg/config/validation.go b/internal/pkg/config/validation.go new file mode 100644 index 000000000..b3d2695b7 --- /dev/null +++ b/internal/pkg/config/validation.go @@ -0,0 +1,160 @@ +package config + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/labels" + + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// ValidationError represents a configuration validation error. +type ValidationError struct { + Field string + Message string +} + +func (e ValidationError) Error() string { + return fmt.Sprintf("config.%s: %s", e.Field, e.Message) +} + +// ValidationErrors is a collection of validation errors. +type ValidationErrors []ValidationError + +func (e ValidationErrors) Error() string { + if len(e) == 0 { + return "" + } + if len(e) == 1 { + return e[0].Error() + } + var b strings.Builder + b.WriteString("multiple configuration errors:\n") + for _, err := range e { + b.WriteString(" - ") + b.WriteString(err.Error()) + b.WriteString("\n") + } + return b.String() +} + +// Validate checks the configuration for errors and normalizes values. 
+func (c *Config) Validate() error { + var errs ValidationErrors + + switch c.ReloadStrategy { + case ReloadStrategyEnvVars, ReloadStrategyAnnotations: + // valid + case "": + c.ReloadStrategy = ReloadStrategyEnvVars + default: + errs = append( + errs, ValidationError{ + Field: "ReloadStrategy", + Message: fmt.Sprintf("invalid value %q, must be %q or %q", c.ReloadStrategy, ReloadStrategyEnvVars, ReloadStrategyAnnotations), + }, + ) + } + + switch c.ArgoRolloutStrategy { + case ArgoRolloutStrategyRestart, ArgoRolloutStrategyRollout: + // valid + case "": + c.ArgoRolloutStrategy = ArgoRolloutStrategyRollout + default: + errs = append( + errs, ValidationError{ + Field: "ArgoRolloutStrategy", + Message: fmt.Sprintf( + "invalid value %q, must be %q or %q", c.ArgoRolloutStrategy, ArgoRolloutStrategyRestart, ArgoRolloutStrategyRollout, + ), + }, + ) + } + + switch strings.ToLower(c.LogLevel) { + case "trace", "debug", "info", "warn", "warning", "error", "fatal", "panic", "": + // valid + default: + errs = append( + errs, ValidationError{ + Field: "LogLevel", + Message: fmt.Sprintf("invalid log level %q", c.LogLevel), + }, + ) + } + + switch strings.ToLower(c.LogFormat) { + case "json", "": + // valid + default: + errs = append( + errs, ValidationError{ + Field: "LogFormat", + Message: fmt.Sprintf("invalid log format %q, must be \"json\" or empty", c.LogFormat), + }, + ) + } + + c.IgnoredResources = normalizeToLower(c.IgnoredResources) + + // Normalize ignored workloads to canonical Kind values (e.g., "cronjobs" -> "CronJob") + c.IgnoredWorkloads = normalizeToLower(c.IgnoredWorkloads) + normalizedWorkloads := make([]string, 0, len(c.IgnoredWorkloads)) + for _, w := range c.IgnoredWorkloads { + kind, err := workload.KindFromString(w) + if err != nil { + errs = append( + errs, ValidationError{ + Field: "IgnoredWorkloads", + Message: fmt.Sprintf("unknown workload type %q", w), + }, + ) + } else { + normalizedWorkloads = append(normalizedWorkloads, string(kind)) + } + } + 
c.IgnoredWorkloads = normalizedWorkloads + + if len(errs) > 0 { + return errs + } + return nil +} + +// normalizeToLower converts all strings in the slice to lowercase and removes empty strings. +func normalizeToLower(items []string) []string { + if len(items) == 0 { + return items + } + result := make([]string, 0, len(items)) + for _, item := range items { + item = strings.TrimSpace(strings.ToLower(item)) + if item != "" { + result = append(result, item) + } + } + return result +} + +// ParseSelectors parses a slice of selector strings into label selectors. +func ParseSelectors(selectorStrings []string) ([]labels.Selector, error) { + if len(selectorStrings) == 0 { + return nil, nil + } + + selectors := make([]labels.Selector, 0, len(selectorStrings)) + for _, s := range selectorStrings { + s = strings.TrimSpace(s) + if s == "" { + continue + } + selector, err := labels.Parse(s) + if err != nil { + return nil, fmt.Errorf("invalid selector %q: %w", s, err) + } + selectors = append(selectors, selector) + } + return selectors, nil +} diff --git a/internal/pkg/config/validation_test.go b/internal/pkg/config/validation_test.go new file mode 100644 index 000000000..52dc6f000 --- /dev/null +++ b/internal/pkg/config/validation_test.go @@ -0,0 +1,339 @@ +package config + +import ( + "errors" + "strings" + "testing" +) + +func TestConfig_Validate_ReloadStrategy(t *testing.T) { + tests := []struct { + name string + strategy ReloadStrategy + wantErr bool + wantVal ReloadStrategy + }{ + {"valid env-vars", ReloadStrategyEnvVars, false, ReloadStrategyEnvVars}, + {"valid annotations", ReloadStrategyAnnotations, false, ReloadStrategyAnnotations}, + {"empty defaults to env-vars", "", false, ReloadStrategyEnvVars}, + {"invalid strategy", "invalid", true, ""}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.ReloadStrategy = tt.strategy + + err := cfg.Validate() + + if tt.wantErr { + if err == nil { + t.Error("Validate() should 
return error for invalid strategy") + } + return + } + + if err != nil { + t.Errorf("Validate() error = %v", err) + return + } + + if cfg.ReloadStrategy != tt.wantVal { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, tt.wantVal) + } + }, + ) + } +} + +func TestConfig_Validate_ArgoRolloutStrategy(t *testing.T) { + tests := []struct { + name string + strategy ArgoRolloutStrategy + wantErr bool + wantVal ArgoRolloutStrategy + }{ + {"valid restart", ArgoRolloutStrategyRestart, false, ArgoRolloutStrategyRestart}, + {"valid rollout", ArgoRolloutStrategyRollout, false, ArgoRolloutStrategyRollout}, + {"empty defaults to rollout", "", false, ArgoRolloutStrategyRollout}, + {"invalid strategy", "invalid", true, ""}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.ArgoRolloutStrategy = tt.strategy + + err := cfg.Validate() + + if tt.wantErr { + if err == nil { + t.Error("Validate() should return error for invalid strategy") + } + return + } + + if err != nil { + t.Errorf("Validate() error = %v", err) + return + } + + if cfg.ArgoRolloutStrategy != tt.wantVal { + t.Errorf("ArgoRolloutStrategy = %v, want %v", cfg.ArgoRolloutStrategy, tt.wantVal) + } + }, + ) + } +} + +func TestConfig_Validate_LogLevel(t *testing.T) { + validLevels := []string{"trace", "debug", "info", "warn", "warning", "error", "fatal", "panic", ""} + for _, level := range validLevels { + t.Run( + "valid_"+level, func(t *testing.T) { + cfg := NewDefault() + cfg.LogLevel = level + if err := cfg.Validate(); err != nil { + t.Errorf("Validate() error for level %q: %v", level, err) + } + }, + ) + } + + t.Run( + "invalid level", func(t *testing.T) { + cfg := NewDefault() + cfg.LogLevel = "invalid" + err := cfg.Validate() + if err == nil { + t.Error("Validate() should return error for invalid log level") + } + }, + ) +} + +func TestConfig_Validate_LogFormat(t *testing.T) { + tests := []struct { + name string + format string + wantErr bool + }{ + 
{"json format", "json", false}, + {"empty format", "", false}, + {"invalid format", "xml", true}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.LogFormat = tt.format + err := cfg.Validate() + if (err != nil) != tt.wantErr { + t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }, + ) + } +} + +func TestConfig_Validate_NormalizesIgnoredResources(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredResources = []string{"ConfigMaps", "SECRETS", " spaces "} + + if err := cfg.Validate(); err != nil { + t.Fatalf("Validate() error = %v", err) + } + + expected := []string{"configmaps", "secrets", "spaces"} + if len(cfg.IgnoredResources) != len(expected) { + t.Fatalf("IgnoredResources length = %d, want %d", len(cfg.IgnoredResources), len(expected)) + } + + for i, got := range cfg.IgnoredResources { + if got != expected[i] { + t.Errorf("IgnoredResources[%d] = %q, want %q", i, got, expected[i]) + } + } +} + +func TestConfig_Validate_NormalizesIgnoredWorkloads(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredWorkloads = []string{"Jobs", "CRONJOBS", ""} + + if err := cfg.Validate(); err != nil { + t.Fatalf("Validate() error = %v", err) + } + + // Should be normalized to canonical Kind values (e.g., "CronJob" not "cronjobs") + expected := []string{"Job", "CronJob"} + if len(cfg.IgnoredWorkloads) != len(expected) { + t.Fatalf("IgnoredWorkloads length = %d, want %d", len(cfg.IgnoredWorkloads), len(expected)) + } + + for i, got := range cfg.IgnoredWorkloads { + if got != expected[i] { + t.Errorf("IgnoredWorkloads[%d] = %q, want %q", i, got, expected[i]) + } + } +} + +func TestConfig_Validate_InvalidIgnoredWorkload(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredWorkloads = []string{"deployment", "invalidtype"} + + err := cfg.Validate() + if err == nil { + t.Fatal("Validate() should return error for invalid workload type") + } + + if !strings.Contains(err.Error(), "invalidtype") { + t.Errorf("Error should 
mention invalid workload type, got: %v", err) + } +} + +func TestConfig_Validate_MultipleErrors(t *testing.T) { + cfg := NewDefault() + cfg.ReloadStrategy = "invalid" + cfg.ArgoRolloutStrategy = "invalid" + cfg.LogLevel = "invalid" + cfg.LogFormat = "invalid" + + err := cfg.Validate() + if err == nil { + t.Fatal("Validate() should return error for multiple invalid values") + } + + var errs ValidationErrors + ok := errors.As(err, &errs) + if !ok { + t.Fatalf("Expected ValidationErrors, got %T", err) + } + + if len(errs) != 4 { + t.Errorf("Expected 4 errors, got %d: %v", len(errs), errs) + } +} + +func TestValidationError_Error(t *testing.T) { + err := ValidationError{ + Field: "TestField", + Message: "test message", + } + + expected := "config.TestField: test message" + if err.Error() != expected { + t.Errorf("Error() = %q, want %q", err.Error(), expected) + } +} + +func TestValidationErrors_Error(t *testing.T) { + t.Run( + "empty", func(t *testing.T) { + var errs ValidationErrors + if errs.Error() != "" { + t.Errorf("Empty errors should return empty string, got %q", errs.Error()) + } + }, + ) + + t.Run( + "single error", func(t *testing.T) { + errs := ValidationErrors{ + {Field: "Field1", Message: "error1"}, + } + if !strings.Contains(errs.Error(), "Field1") { + t.Errorf("Error() should contain field name, got %q", errs.Error()) + } + }, + ) + + t.Run( + "multiple errors", func(t *testing.T) { + errs := ValidationErrors{ + {Field: "Field1", Message: "error1"}, + {Field: "Field2", Message: "error2"}, + } + errStr := errs.Error() + if !strings.Contains(errStr, "multiple configuration errors") { + t.Errorf("Error() should mention multiple errors, got %q", errStr) + } + if !strings.Contains(errStr, "Field1") || !strings.Contains(errStr, "Field2") { + t.Errorf("Error() should contain all field names, got %q", errStr) + } + }, + ) +} + +func TestParseSelectors(t *testing.T) { + tests := []struct { + name string + selectors []string + wantLen int + wantErr bool + }{ + 
{"nil input", nil, 0, false}, + {"empty input", []string{}, 0, false}, + {"single valid selector", []string{"env=production"}, 1, false}, + {"multiple valid selectors", []string{"env=production", "team=platform"}, 2, false}, + {"selector with whitespace", []string{" env=production "}, 1, false}, + {"empty string in list", []string{"env=production", "", "team=platform"}, 2, false}, + {"invalid selector syntax", []string{"env in (prod,staging"}, 0, true}, // missing closing paren + {"set-based selector", []string{"env in (prod,staging)"}, 1, false}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + selectors, err := ParseSelectors(tt.selectors) + if (err != nil) != tt.wantErr { + t.Errorf("ParseSelectors() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && len(selectors) != tt.wantLen { + t.Errorf("ParseSelectors() returned %d selectors, want %d", len(selectors), tt.wantLen) + } + }, + ) + } +} + +func TestNormalizeToLower(t *testing.T) { + tests := []struct { + name string + input []string + want []string + }{ + {"nil input", nil, nil}, + {"empty input", []string{}, []string{}}, + {"lowercase", []string{"abc"}, []string{"abc"}}, + {"uppercase", []string{"ABC"}, []string{"abc"}}, + {"mixed case", []string{"AbC"}, []string{"abc"}}, + {"with whitespace", []string{" abc "}, []string{"abc"}}, + {"removes empty", []string{"abc", "", "def"}, []string{"abc", "def"}}, + {"only whitespace", []string{" "}, []string{}}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := normalizeToLower(tt.input) + if tt.want == nil && got != nil { + t.Errorf("normalizeToLower() = %v, want nil", got) + return + } + if len(got) != len(tt.want) { + t.Errorf("normalizeToLower() length = %d, want %d", len(got), len(tt.want)) + return + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("normalizeToLower()[%d] = %q, want %q", i, got[i], tt.want[i]) + } + } + }, + ) + } +} diff --git 
a/internal/pkg/constants/constants.go b/internal/pkg/constants/constants.go deleted file mode 100644 index 18d1cc759..000000000 --- a/internal/pkg/constants/constants.go +++ /dev/null @@ -1,32 +0,0 @@ -package constants - -const ( - // DefaultHttpListenAddr is the default listening address for global http server - DefaultHttpListenAddr = ":9090" - - // ConfigmapEnvVarPostfix is a postfix for configmap envVar - ConfigmapEnvVarPostfix = "CONFIGMAP" - // SecretEnvVarPostfix is a postfix for secret envVar - SecretEnvVarPostfix = "SECRET" - // EnvVarPrefix is a Prefix for environment variable - EnvVarPrefix = "STAKATER_" - - // ReloaderAnnotationPrefix is a Prefix for all reloader annotations - ReloaderAnnotationPrefix = "reloader.stakater.com" - // LastReloadedFromAnnotation is an annotation used to describe the last resource that triggered a reload - LastReloadedFromAnnotation = "last-reloaded-from" - - // ReloadStrategyFlag The reload strategy flag name - ReloadStrategyFlag = "reload-strategy" - // EnvVarsReloadStrategy instructs Reloader to add container environment variables to facilitate a restart - EnvVarsReloadStrategy = "env-vars" - // AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart - AnnotationsReloadStrategy = "annotations" -) - -// Leadership election related consts -const ( - LockName string = "stakater-reloader-lock" - PodNameEnv string = "POD_NAME" - PodNamespaceEnv string = "POD_NAMESPACE" -) diff --git a/internal/pkg/constants/enums.go b/internal/pkg/constants/enums.go deleted file mode 100644 index 43fc60352..000000000 --- a/internal/pkg/constants/enums.go +++ /dev/null @@ -1,15 +0,0 @@ -package constants - -// Result is a status for deployment update -type Result int - -const ( - // Updated is returned when environment variable is created/updated - Updated Result = 1 + iota - // NotUpdated is returned when environment variable is found but had value equals to the new value - NotUpdated - // 
NoEnvVarFound is returned when no environment variable is found - NoEnvVarFound - // NoContainerFound is returned when no environment variable is found - NoContainerFound -) diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go new file mode 100644 index 000000000..04bd3bb3f --- /dev/null +++ b/internal/pkg/controller/configmap_reconciler.go @@ -0,0 +1,69 @@ +package controller + +import ( + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// ConfigMapReconciler watches ConfigMaps and triggers workload reloads. +type ConfigMapReconciler = ResourceReconciler[*corev1.ConfigMap] + +// NewConfigMapReconciler creates a new ConfigMapReconciler with the given dependencies. 
+func NewConfigMapReconciler( + c client.Client, + log logr.Logger, + cfg *config.Config, + reloadService *reload.Service, + registry *workload.Registry, + collectors *metrics.Collectors, + eventRecorder *events.Recorder, + webhookClient *webhook.Client, + alerter alerting.Alerter, + pauseHandler *reload.PauseHandler, + nsCache *NamespaceCache, +) *ConfigMapReconciler { + return NewResourceReconciler( + ResourceReconcilerDeps{ + Client: c, + Log: log, + Config: cfg, + ReloadService: reloadService, + Registry: registry, + Collectors: collectors, + EventRecorder: eventRecorder, + WebhookClient: webhookClient, + Alerter: alerter, + PauseHandler: pauseHandler, + NamespaceCache: nsCache, + }, + ResourceConfig[*corev1.ConfigMap]{ + ResourceType: reload.ResourceTypeConfigMap, + NewResource: func() *corev1.ConfigMap { return &corev1.ConfigMap{} }, + CreateChange: func(cm *corev1.ConfigMap, eventType reload.EventType) reload.ResourceChange { + return reload.ConfigMapChange{ConfigMap: cm, EventType: eventType} + }, + CreatePredicates: func(cfg *config.Config, hasher *reload.Hasher) predicate.Predicate { + return reload.ConfigMapPredicates(cfg, hasher) + }, + }, + ) +} + +// SetupConfigMapReconciler sets up a ConfigMap reconciler with the manager. 
+func SetupConfigMapReconciler(mgr ctrl.Manager, r *ConfigMapReconciler) error { + return r.SetupWithManager(mgr, &corev1.ConfigMap{}) +} + +var _ reconcile.Reconciler = &ConfigMapReconciler{} diff --git a/internal/pkg/controller/configmap_reconciler_test.go b/internal/pkg/controller/configmap_reconciler_test.go new file mode 100644 index 000000000..1b1140577 --- /dev/null +++ b/internal/pkg/controller/configmap_reconciler_test.go @@ -0,0 +1,160 @@ +package controller_test + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/testutil" +) + +func TestConfigMapReconciler_NotFound(t *testing.T) { + cfg := config.NewDefault() + reconciler := newConfigMapReconciler(t, cfg) + assertReconcileSuccess(t, reconciler, reconcileRequest("nonexistent-cm", "default")) +} + +func TestConfigMapReconciler_NotFound_ReloadOnDelete(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnDelete = true + + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "deleted-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("deleted-cm", "default")) +} + +func TestConfigMapReconciler_IgnoredNamespace(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + + cm := testutil.NewConfigMap("test-cm", "kube-system") + reconciler := newConfigMapReconciler(t, cfg, cm) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "kube-system")) +} + +func TestConfigMapReconciler_NoMatchingWorkloads(t *testing.T) { + cfg := config.NewDefault() + + cm := testutil.NewConfigMap("test-cm", "default") + deployment := testutil.NewDeployment("test-deployment", "default", nil) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) +} + +func 
TestConfigMapReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + cm := testutil.NewConfigMap("test-cm", "default") + deployment := testutil.NewDeploymentWithEnvFrom("test-deployment", "default", "test-cm", "") + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) +} + +func TestConfigMapReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { + cfg := config.NewDefault() + + cm := testutil.NewConfigMap("test-cm", "default") + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) +} + +func TestConfigMapReconciler_WorkloadInDifferentNamespace(t *testing.T) { + cfg := config.NewDefault() + + cm := testutil.NewConfigMap("test-cm", "namespace-a") + deployment := testutil.NewDeployment("test-deployment", "namespace-b", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "namespace-a")) +} + +func TestConfigMapReconciler_IgnoredWorkloadType(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredWorkloads = []string{"deployment"} + + cm := testutil.NewConfigMap("test-cm", "default") + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) +} + +func TestConfigMapReconciler_DaemonSet(t *testing.T) { + cfg := config.NewDefault() + + cm := testutil.NewConfigMap("test-cm", "default") + daemonset := 
testutil.NewDaemonSet("test-daemonset", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, daemonset) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) +} + +func TestConfigMapReconciler_StatefulSet(t *testing.T) { + cfg := config.NewDefault() + + cm := testutil.NewConfigMap("test-cm", "default") + statefulset := testutil.NewStatefulSet("test-statefulset", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, statefulset) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) +} + +func TestConfigMapReconciler_MultipleWorkloads(t *testing.T) { + cfg := config.NewDefault() + + cm := testutil.NewConfigMap("shared-cm", "default") + deployment1 := testutil.NewDeployment("deployment-1", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }) + deployment2 := testutil.NewDeployment("deployment-2", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }) + daemonset := testutil.NewDaemonSet("daemonset-1", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }) + + reconciler := newConfigMapReconciler(t, cfg, cm, deployment1, deployment2, daemonset) + assertReconcileSuccess(t, reconciler, reconcileRequest("shared-cm", "default")) +} + +func TestConfigMapReconciler_VolumeMount(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + cm := testutil.NewConfigMap("volume-cm", "default") + deployment := testutil.NewDeploymentWithVolume("test-deployment", "default", "volume-cm", "") + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("volume-cm", "default")) +} + +func TestConfigMapReconciler_ProjectedVolume(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + cm := 
testutil.NewConfigMap("projected-cm", "default") + deployment := testutil.NewDeploymentWithProjectedVolume("test-deployment", "default", "projected-cm", "") + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("projected-cm", "default")) +} + +func TestConfigMapReconciler_SearchAnnotation(t *testing.T) { + cfg := config.NewDefault() + + cm := testutil.NewConfigMapWithAnnotations("test-cm", "default", map[string]string{ + cfg.Annotations.Match: "true", + }) + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.Search: "true", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) +} diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go deleted file mode 100644 index 15b2e0f17..000000000 --- a/internal/pkg/controller/controller.go +++ /dev/null @@ -1,282 +0,0 @@ -package controller - -import ( - "fmt" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/handler" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/kube" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - "k8s.io/kubectl/pkg/scheme" - "k8s.io/utils/strings/slices" -) - -// Controller for checking events -type Controller struct { - client kubernetes.Interface - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - 
namespace string - resource string - ignoredNamespaces util.List - collectors metrics.Collectors - recorder record.EventRecorder - namespaceSelector string - resourceSelector string -} - -// controllerInitialized flag determines whether controlled is being initialized -var secretControllerInitialized bool = false -var configmapControllerInitialized bool = false -var selectedNamespacesCache []string - -// NewController for initializing a Controller -func NewController( - client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector string, resourceLabelSelector string, collectors metrics.Collectors) (*Controller, error) { - - if options.SyncAfterRestart { - secretControllerInitialized = true - configmapControllerInitialized = true - } - - c := Controller{ - client: client, - namespace: namespace, - ignoredNamespaces: ignoredNamespaces, - namespaceSelector: namespaceLabelSelector, - resourceSelector: resourceLabelSelector, - resource: resource, - } - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{ - Interface: client.CoreV1().Events(""), - }) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)}) - - queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any]()) - - optionsModifier := func(options *metav1.ListOptions) { - if resource == "namespaces" { - options.LabelSelector = c.namespaceSelector - } else if len(c.resourceSelector) > 0 { - options.LabelSelector = c.resourceSelector - } else { - options.FieldSelector = fields.Everything().String() - } - } - - listWatcher := cache.NewFilteredListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, optionsModifier) - - _, informer := cache.NewInformerWithOptions(cache.InformerOptions{ - ListerWatcher: listWatcher, - ObjectType: kube.ResourceMap[resource], - ResyncPeriod: 0, - Handler: 
cache.ResourceEventHandlerFuncs{ - AddFunc: c.Add, - UpdateFunc: c.Update, - DeleteFunc: c.Delete, - }, - Indexers: cache.Indexers{}, - }) - c.informer = informer - c.queue = queue - c.collectors = collectors - c.recorder = recorder - - logrus.Infof("created controller for: %s", resource) - return &c, nil -} - -// Add function to add a new object to the queue in case of creating a resource -func (c *Controller) Add(obj interface{}) { - - switch object := obj.(type) { - case *v1.Namespace: - c.addSelectedNamespaceToCache(*object) - return - } - - if options.ReloadOnCreate == "true" { - if !c.resourceInIgnoredNamespace(obj) && c.resourceInSelectedNamespaces(obj) && secretControllerInitialized && configmapControllerInitialized { - c.queue.Add(handler.ResourceCreatedHandler{ - Resource: obj, - Collectors: c.collectors, - Recorder: c.recorder, - }) - } - } -} - -func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool { - switch object := raw.(type) { - case *v1.ConfigMap: - return c.ignoredNamespaces.Contains(object.Namespace) - case *v1.Secret: - return c.ignoredNamespaces.Contains(object.Namespace) - } - return false -} - -func (c *Controller) resourceInSelectedNamespaces(raw interface{}) bool { - if len(c.namespaceSelector) == 0 { - return true - } - - switch object := raw.(type) { - case *v1.ConfigMap: - if slices.Contains(selectedNamespacesCache, object.GetNamespace()) { - return true - } - case *v1.Secret: - if slices.Contains(selectedNamespacesCache, object.GetNamespace()) { - return true - } - } - return false -} - -func (c *Controller) addSelectedNamespaceToCache(namespace v1.Namespace) { - selectedNamespacesCache = append(selectedNamespacesCache, namespace.GetName()) - logrus.Infof("added namespace to be watched: %s", namespace.GetName()) -} - -func (c *Controller) removeSelectedNamespaceFromCache(namespace v1.Namespace) { - for i, v := range selectedNamespacesCache { - if v == namespace.GetName() { - selectedNamespacesCache = 
append(selectedNamespacesCache[:i], selectedNamespacesCache[i+1:]...) - logrus.Infof("removed namespace from watch: %s", namespace.GetName()) - return - } - } -} - -// Update function to add an old object and a new object to the queue in case of updating a resource -func (c *Controller) Update(old interface{}, new interface{}) { - switch new.(type) { - case *v1.Namespace: - return - } - - if !c.resourceInIgnoredNamespace(new) && c.resourceInSelectedNamespaces(new) { - c.queue.Add(handler.ResourceUpdatedHandler{ - Resource: new, - OldResource: old, - Collectors: c.collectors, - Recorder: c.recorder, - }) - } -} - -// Delete function to add an object to the queue in case of deleting a resource -func (c *Controller) Delete(old interface{}) { - - if options.ReloadOnDelete == "true" { - if !c.resourceInIgnoredNamespace(old) && c.resourceInSelectedNamespaces(old) && secretControllerInitialized && configmapControllerInitialized { - c.queue.Add(handler.ResourceDeleteHandler{ - Resource: old, - Collectors: c.collectors, - Recorder: c.recorder, - }) - } - } - - switch object := old.(type) { - case *v1.Namespace: - c.removeSelectedNamespaceFromCache(*object) - return - } -} - -// Run function for controller which handles the queue -func (c *Controller) Run(threadiness int, stopCh chan struct{}) { - defer runtime.HandleCrash() - - // Let the workers stop when we are done - defer c.queue.ShutDown() - - go c.informer.Run(stopCh) - - // Wait for all involved caches to be synced, before processing items from the queue is started - if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) { - runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) - return - } - - for i := 0; i < threadiness; i++ { - go wait.Until(c.runWorker, time.Second, stopCh) - } - - <-stopCh - logrus.Infof("Stopping Controller") -} - -func (c *Controller) runWorker() { - // At this point the controller is fully initialized and we can start processing the resources - if c.resource == 
string(v1.ResourceSecrets) { - secretControllerInitialized = true - } else if c.resource == string(v1.ResourceConfigMaps) { - configmapControllerInitialized = true - } - - for c.processNextItem() { - } -} - -func (c *Controller) processNextItem() bool { - // Wait until there is a new item in the working queue - resourceHandler, quit := c.queue.Get() - if quit { - return false - } - // Tell the queue that we are done with processing this key. This unblocks the key for other workers - // This allows safe parallel processing because two events with the same key are never processed in - // parallel. - defer c.queue.Done(resourceHandler) - - // Invoke the method containing the business logic - err := resourceHandler.(handler.ResourceHandler).Handle() - // Handle the error if something went wrong during the execution of the business logic - c.handleErr(err, resourceHandler) - return true -} - -// handleErr checks if an error happened and makes sure we will retry later. -func (c *Controller) handleErr(err error, key interface{}) { - if err == nil { - // Forget about the #AddRateLimited history of the key on every successful synchronization. - // This ensures that future processing of updates for this key is not delayed because of - // an outdated error history. - c.queue.Forget(key) - return - } - - // This controller retries 5 times if something goes wrong. After that, it stops trying. - if c.queue.NumRequeues(key) < 5 { - logrus.Errorf("Error syncing events: %v", err) - - // Re-enqueue the key rate limited. Based on the rate limiter on the - // queue and the re-enqueue history, the key will be processed later again. 
- c.queue.AddRateLimited(key) - return - } - - c.queue.Forget(key) - // Report to an external entity that, even after several retries, we could not successfully process this key - runtime.HandleError(err) - logrus.Errorf("Dropping key out of the queue: %v", err) - logrus.Debugf("Dropping the key %q out of the queue: %v", key, err) -} diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go deleted file mode 100644 index 63e6be3e2..000000000 --- a/internal/pkg/controller/controller_test.go +++ /dev/null @@ -1,2365 +0,0 @@ -package controller - -import ( - "context" - "os" - "testing" - "time" - - "github.com/stakater/Reloader/internal/pkg/constants" - - "github.com/stakater/Reloader/internal/pkg/metrics" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/handler" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" -) - -var ( - clients = kube.GetClients() - namespace = "test-reloader-" + testutil.RandSeq(5) - configmapNamePrefix = "testconfigmap-reloader" - secretNamePrefix = "testsecret-reloader" - data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - newData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" - collectors = metrics.NewCollectors() -) - -const ( - sleepDuration = 3 * time.Second -) - -func TestMain(m *testing.M) { - - testutil.CreateNamespace(namespace, clients.KubernetesClient) - - logrus.Infof("Creating controller") - for k := range kube.ResourceMap { - if k == "namespaces" { - continue - } - c, err := 
NewController(clients.KubernetesClient, k, namespace, []string{}, "", "", collectors) - if err != nil { - logrus.Fatalf("%s", err) - } - - // Now let's start the controller - stop := make(chan struct{}) - defer close(stop) - go c.Run(1, stop) - } - time.Sleep(sleepDuration) - - logrus.Infof("Running Testcases") - retCode := m.Run() - - testutil.DeleteNamespace(namespace, clients.KubernetesClient) - - os.Exit(retCode) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - 
time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, false) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was 
not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon creating the configmap -func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment test case") - - // Creating configmap - configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Deleting configmap for first time - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - time.Sleep(sleepDuration) - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com") - if err != nil { - t.Errorf("Error while creating the configmap second time %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, 
configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - 
t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on deployment and create pod annotation var upon updating the labels configmap -func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, 
"test", "www.google.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon creating the secret -func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment test case") - - // Creating secret - secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - //Deleting Secret 
- err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) - - _, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - time.Sleep(sleepDuration) - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) 
- if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, 
secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a pod annotation upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = 
testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secret") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, 
configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying DaemonSet update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, 
configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Verifying DaemonSet update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = 
testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // 
Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - time.Sleep(sleepDuration) - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a pod annotation upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) 
- if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if updated { - t.Errorf("DaemonSet should not be updated by changing label in secret") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating 
configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - 
} - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - 
// Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if 
err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, 
namespace, false) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon creating the configmap -func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case") - - // Creating configmap - configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap 
%v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Deleting configmap for first time - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - time.Sleep(sleepDuration) - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com") - if err != nil { - t.Errorf("Error while creating the configmap second time %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // 
Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// 
Do not Perform rolling upgrade on deployment and create env var upon updating the labels configmap -func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "test", "www.google.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting 
the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon creating the secret -func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case") - - // Creating secret - secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) - - _, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - time.Sleep(sleepDuration) - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - 
logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, 
secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = 
testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secret") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - 
logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying DaemonSet update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - 
logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Verifying DaemonSet update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = 
testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } 
- - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - time.Sleep(sleepDuration) - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error 
while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if updated { - t.Errorf("DaemonSet should not be updated by changing label in secret") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, 
namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = 
testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - 
time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, 
secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = 
testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not 
updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -func TestController_resourceInIgnoredNamespace(t *testing.T) { - type fields struct { - client kubernetes.Interface - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace string - ignoredNamespaces util.List - } - type args struct { - raw interface{} - } - tests := []struct { - name string - fields fields - args args - want bool - }{ - { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnTrue", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetConfigmap("system", "testcm", "test"), - }, - want: true, - }, - { - name: "TestSecretResourceInIgnoredNamespaceShouldReturnTrue", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetSecret("system", "testsecret", "test"), - }, - want: true, - }, - { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetConfigmap("some-other-namespace", "testcm", "test"), - }, - want: false, - }, - { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetSecret("some-other-namespace", "testsecret", "test"), - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Controller{ - client: tt.fields.client, - indexer: tt.fields.indexer, - queue: tt.fields.queue, - informer: 
tt.fields.informer, - namespace: tt.fields.namespace, - ignoredNamespaces: tt.fields.ignoredNamespaces, - } - if got := c.resourceInIgnoredNamespace(tt.args.raw); got != tt.want { - t.Errorf("Controller.resourceInIgnoredNamespace() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestController_resourceInNamespaceSelector(t *testing.T) { - type fields struct { - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace v1.Namespace - namespaceSelector string - } - type args struct { - raw interface{} - } - tests := []struct { - name string - fields fields - args args - want bool - }{ - { - name: "TestConfigMapResourceInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - }, - }, - }, - }, - args: args{ - raw: testutil.GetConfigmap("selected-namespace", "testcm", "test"), - }, - want: true, - }, { - name: "TestConfigMapResourceNotInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-selected-namespace", - Labels: map[string]string{}, - }, - }, - }, - args: args{ - raw: testutil.GetConfigmap("not-selected-namespace", "testcm", "test"), - }, - want: false, - }, - { - name: "TestSecretResourceInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "testsecret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceNotInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: 
v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-selected-namespace", - Labels: map[string]string{}, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("not-selected-namespace", "secret", "test"), - }, - want: false, - }, { - name: "TestSecretResourceInNamespaceSelectorKeyExists", - fields: fields{ - namespaceSelector: "select", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorValueIn", - fields: fields{ - namespaceSelector: "select in (select1, select2, select3)", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "select2", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorKeyDoesNotExist", - fields: fields{ - namespaceSelector: "!select2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorMultipleConditions", - fields: fields{ - namespaceSelector: "select,select2=this2,select3!=this4", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - "select3": "this3", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fakeClient := fake.NewSimpleClientset() - namespace, _ := 
fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{}) - logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name) - - c := &Controller{ - client: fakeClient, - indexer: tt.fields.indexer, - queue: tt.fields.queue, - informer: tt.fields.informer, - namespace: tt.fields.namespace.Name, - namespaceSelector: tt.fields.namespaceSelector, - } - - listOptions := metav1.ListOptions{} - listOptions.LabelSelector = tt.fields.namespaceSelector - namespaces, _ := fakeClient.CoreV1().Namespaces().List(context.Background(), listOptions) - - for _, ns := range namespaces.Items { - c.addSelectedNamespaceToCache(ns) - } - - if got := c.resourceInSelectedNamespaces(tt.args.raw); got != tt.want { - t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want) - } - - for _, ns := range namespaces.Items { - c.removeSelectedNamespaceFromCache(ns) - } - }) - } -} diff --git a/internal/pkg/controller/deployment_reconciler.go b/internal/pkg/controller/deployment_reconciler.go new file mode 100644 index 000000000..ebc1b759a --- /dev/null +++ b/internal/pkg/controller/deployment_reconciler.go @@ -0,0 +1,100 @@ +package controller + +import ( + "context" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" +) + +// DeploymentReconciler reconciles Deployment objects to handle pause expiration. +// This reconciler watches for deployments that were paused by Reloader and +// unpauses them when the pause period expires. +type DeploymentReconciler struct { + client.Client + Log logr.Logger + Config *config.Config + PauseHandler *reload.PauseHandler +} + +// Reconcile handles Deployment pause expiration. 
+func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("deployment", req.NamespacedName) + log.V(1).Info("reconciling deployment", "namespace", req.Namespace, "name", req.Name) + + var deploy appsv1.Deployment + if err := r.Get(ctx, req.NamespacedName, &deploy); err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Check if this deployment was paused by Reloader + if !r.PauseHandler.IsPausedByReloader(&deploy) { + return ctrl.Result{}, nil + } + + // Check if pause period has expired + expired, remainingTime, err := r.PauseHandler.CheckPauseExpired(&deploy) + if err != nil { + log.Error(err, "Failed to check pause expiration") + return ctrl.Result{}, err + } + + if !expired { + // Still within pause period - requeue to check again + log.V(1).Info("Deployment pause not yet expired", "remaining", remainingTime) + return ctrl.Result{RequeueAfter: remainingTime}, nil + } + + log.Info("Unpausing deployment after pause period expired") + err = UpdateObjectWithRetry( + ctx, r.Client, &deploy, func() (bool, error) { + if !r.PauseHandler.IsPausedByReloader(&deploy) { + return false, nil + } + r.PauseHandler.ClearPause(&deploy) + return true, nil + }, + ) + + if err != nil { + log.Error(err, "Failed to unpause deployment") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the DeploymentReconciler with the manager. +func (r *DeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&appsv1.Deployment{}). + WithEventFilter(r.pausedByReloaderPredicate()). + Complete(r) +} + +// pausedByReloaderPredicate returns a predicate that only selects deployments +// that have been paused by Reloader (have the paused-at annotation). 
+func (r *DeploymentReconciler) pausedByReloaderPredicate() predicate.Predicate { + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + annotations := obj.GetAnnotations() + if annotations == nil { + return false + } + + // Only process if deployment has our paused-at annotation + _, hasPausedAt := annotations[r.Config.Annotations.PausedAt] + return hasPausedAt + }, + ) +} diff --git a/internal/pkg/controller/filter.go b/internal/pkg/controller/filter.go new file mode 100644 index 000000000..a66279ff3 --- /dev/null +++ b/internal/pkg/controller/filter.go @@ -0,0 +1,40 @@ +package controller + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" +) + +// BuildEventFilter combines a resource-specific predicate with common filters. +func BuildEventFilter(resourcePredicate predicate.Predicate, cfg *config.Config, initialized *bool) predicate.Predicate { + return predicate.And( + resourcePredicate, + reload.NamespaceFilterPredicate(cfg), + reload.LabelSelectorPredicate(cfg), + reload.IgnoreAnnotationPredicate(cfg), + createEventPredicate(cfg, initialized), + ) +} + +func createEventPredicate(cfg *config.Config, initialized *bool) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if !*initialized && !cfg.SyncAfterRestart { + return false + } + return cfg.ReloadOnCreate + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return cfg.ReloadOnDelete + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } +} diff --git a/internal/pkg/controller/filter_test.go b/internal/pkg/controller/filter_test.go new file mode 100644 index 000000000..be6eec361 --- /dev/null +++ b/internal/pkg/controller/filter_test.go @@ -0,0 +1,196 @@ +package controller + +import ( + "testing" + 
+ corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +func TestCreateEventPredicate_CreateEvent(t *testing.T) { + tests := []struct { + name string + reloadOnCreate bool + syncAfterRestart bool + initialized bool + expectedResult bool + }{ + { + name: "reload on create enabled, initialized", + reloadOnCreate: true, + syncAfterRestart: false, + initialized: true, + expectedResult: true, + }, + { + name: "reload on create disabled, initialized", + reloadOnCreate: false, + syncAfterRestart: false, + initialized: true, + expectedResult: false, + }, + { + name: "not initialized, sync after restart enabled", + reloadOnCreate: true, + syncAfterRestart: true, + initialized: false, + expectedResult: true, + }, + { + name: "not initialized, sync after restart disabled", + reloadOnCreate: true, + syncAfterRestart: false, + initialized: false, + expectedResult: false, + }, + { + name: "not initialized, sync after restart disabled, reload on create disabled", + reloadOnCreate: false, + syncAfterRestart: false, + initialized: false, + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := &config.Config{ + ReloadOnCreate: tt.reloadOnCreate, + SyncAfterRestart: tt.syncAfterRestart, + } + initialized := tt.initialized + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.CreateEvent{Object: cm} + result := pred.Create(e) + + if result != tt.expectedResult { + t.Errorf("CreateFunc() = %v, want %v", result, tt.expectedResult) + } + }, + ) + } +} + +func TestCreateEventPredicate_UpdateEvent(t *testing.T) { + cfg := &config.Config{} + initialized := true + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", 
Namespace: "default"}, + } + + e := event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} + result := pred.Update(e) + + if !result { + t.Error("UpdateFunc() should always return true") + } +} + +func TestCreateEventPredicate_DeleteEvent(t *testing.T) { + tests := []struct { + name string + reloadOnDelete bool + expectedResult bool + }{ + { + name: "reload on delete enabled", + reloadOnDelete: true, + expectedResult: true, + }, + { + name: "reload on delete disabled", + reloadOnDelete: false, + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := &config.Config{ + ReloadOnDelete: tt.reloadOnDelete, + } + initialized := true + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.DeleteEvent{Object: cm} + result := pred.Delete(e) + + if result != tt.expectedResult { + t.Errorf("DeleteFunc() = %v, want %v", result, tt.expectedResult) + } + }, + ) + } +} + +func TestCreateEventPredicate_GenericEvent(t *testing.T) { + cfg := &config.Config{} + initialized := true + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.GenericEvent{Object: cm} + result := pred.Generic(e) + + if result { + t.Error("GenericFunc() should always return false") + } +} + +func TestBuildEventFilter(t *testing.T) { + cfg := &config.Config{ + ReloadOnCreate: true, + ReloadOnDelete: true, + } + initialized := true + + resourcePred := &alwaysTruePredicate{} + + filter := BuildEventFilter(resourcePred, cfg, &initialized) + + if filter == nil { + t.Fatal("BuildEventFilter() should return a non-nil predicate") + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} + result := filter.Update(e) + + if !result { + 
t.Error("UpdateFunc() should return true when all predicates pass") + } +} + +// alwaysTruePredicate is a helper predicate for testing +type alwaysTruePredicate struct{} + +func (p *alwaysTruePredicate) Create(_ event.CreateEvent) bool { return true } +func (p *alwaysTruePredicate) Delete(_ event.DeleteEvent) bool { return true } +func (p *alwaysTruePredicate) Update(_ event.UpdateEvent) bool { return true } +func (p *alwaysTruePredicate) Generic(_ event.GenericEvent) bool { return true } diff --git a/internal/pkg/controller/handler.go b/internal/pkg/controller/handler.go new file mode 100644 index 000000000..062001846 --- /dev/null +++ b/internal/pkg/controller/handler.go @@ -0,0 +1,197 @@ +package controller + +import ( + "context" + "time" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// ReloadHandler handles the common reload workflow. +type ReloadHandler struct { + Client client.Client + Lister *workload.Lister + ReloadService *reload.Service + WebhookClient *webhook.Client + Collectors *metrics.Collectors + EventRecorder *events.Recorder + Alerter alerting.Alerter + PauseHandler *reload.PauseHandler +} + +// Process handles the reload workflow: list workloads, get decisions, webhook or apply. 
+func (h *ReloadHandler) Process( + ctx context.Context, + namespace, resourceName string, + resourceType reload.ResourceType, + getDecisions func([]workload.Workload) []reload.ReloadDecision, + log logr.Logger, +) (ctrl.Result, error) { + workloads, err := h.Lister.List(ctx, namespace) + if err != nil { + log.Error(err, "failed to list workloads") + h.Collectors.RecordError("list_workloads") + return ctrl.Result{}, err + } + + workloadsByKind := make(map[string]int) + for _, w := range workloads { + workloadsByKind[string(w.Kind())]++ + } + for kind, count := range workloadsByKind { + h.Collectors.RecordWorkloadsScanned(kind, count) + } + + decisions := reload.FilterDecisions(getDecisions(workloads)) + + matchedByKind := make(map[string]int) + for _, d := range decisions { + matchedByKind[string(d.Workload.Kind())]++ + } + for kind, count := range matchedByKind { + h.Collectors.RecordWorkloadsMatched(kind, count) + } + + if len(decisions) == 0 { + h.Collectors.RecordSkipped("no_match") + } + + if h.WebhookClient.IsConfigured() && len(decisions) > 0 { + return h.sendWebhook(ctx, resourceName, namespace, resourceType, decisions, log) + } + + h.applyReloads(ctx, resourceName, namespace, resourceType, decisions, log) + return ctrl.Result{}, nil +} + +func (h *ReloadHandler) sendWebhook( + ctx context.Context, + resourceName, namespace string, + resourceType reload.ResourceType, + decisions []reload.ReloadDecision, + log logr.Logger, +) (ctrl.Result, error) { + var workloads []webhook.WorkloadInfo + var hash string + for _, d := range decisions { + workloads = append( + workloads, webhook.WorkloadInfo{ + Kind: string(d.Workload.Kind()), + Name: d.Workload.GetName(), + Namespace: d.Workload.GetNamespace(), + }, + ) + if hash == "" { + hash = d.Hash + } + } + + payload := webhook.Payload{ + Kind: string(resourceType), + Namespace: namespace, + ResourceName: resourceName, + ResourceType: string(resourceType), + Hash: hash, + Timestamp: time.Now().UTC(), + Workloads: 
workloads, + } + + actionStartTime := time.Now() + if err := h.WebhookClient.Send(ctx, payload); err != nil { + log.Error(err, "failed to send webhook notification") + h.Collectors.RecordReload(false, namespace) + h.Collectors.RecordAction("webhook", "error", time.Since(actionStartTime)) + h.Collectors.RecordError("webhook_send") + return ctrl.Result{}, err + } + + log.Info( + "webhook notification sent", + "resource", resourceName, + "workloadCount", len(workloads), + ) + h.Collectors.RecordReload(true, namespace) + h.Collectors.RecordAction("webhook", "success", time.Since(actionStartTime)) + return ctrl.Result{}, nil +} + +func (h *ReloadHandler) applyReloads( + ctx context.Context, + resourceName, resourceNamespace string, + resourceType reload.ResourceType, + decisions []reload.ReloadDecision, + log logr.Logger, +) { + for _, decision := range decisions { + log.Info( + "reloading workload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + "reason", decision.Reason, + ) + + actionStartTime := time.Now() + updated, err := UpdateWorkloadWithRetry( + ctx, + h.Client, + h.ReloadService, + h.PauseHandler, + decision.Workload, + resourceName, + resourceType, + resourceNamespace, + decision.Hash, + decision.AutoReload, + ) + actionLatency := time.Since(actionStartTime) + + if err != nil { + log.Error( + err, "failed to update workload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + h.EventRecorder.ReloadFailed(decision.Workload.GetObject(), resourceType.Kind(), resourceName, err) + h.Collectors.RecordReload(false, resourceNamespace) + h.Collectors.RecordAction(string(decision.Workload.Kind()), "error", actionLatency) + h.Collectors.RecordError("update_workload") + continue + } + + if updated { + h.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), resourceType.Kind(), resourceName) + h.Collectors.RecordReload(true, resourceNamespace) + 
h.Collectors.RecordAction(string(decision.Workload.Kind()), "success", actionLatency) + log.Info( + "workload reloaded successfully", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + + if err := h.Alerter.Send( + ctx, alerting.AlertMessage{ + WorkloadKind: string(decision.Workload.Kind()), + WorkloadName: decision.Workload.GetName(), + WorkloadNamespace: decision.Workload.GetNamespace(), + ResourceKind: resourceType.Kind(), + ResourceName: resourceName, + ResourceNamespace: resourceNamespace, + Timestamp: time.Now(), + }, + ); err != nil { + log.Error(err, "failed to send alert") + } + } else { + h.Collectors.RecordAction(string(decision.Workload.Kind()), "no_change", actionLatency) + } + } +} diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go new file mode 100644 index 000000000..b33b86a35 --- /dev/null +++ b/internal/pkg/controller/manager.go @@ -0,0 +1,244 @@ +package controller + +import ( + "context" + "fmt" + + argorolloutsv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/go-logr/logr" + openshiftv1 "github.com/openshift/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/healthz" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +var runtimeScheme = runtime.NewScheme() + +func init() { + 
utilruntime.Must(clientgoscheme.AddToScheme(runtimeScheme)) +} + +// AddOptionalSchemes adds optional workload type schemes if enabled. +func AddOptionalSchemes(argoRolloutsEnabled, deploymentConfigEnabled bool) { + if argoRolloutsEnabled { + utilruntime.Must(argorolloutsv1alpha1.AddToScheme(runtimeScheme)) + } + if deploymentConfigEnabled { + utilruntime.Must(openshiftv1.AddToScheme(runtimeScheme)) + } +} + +// ManagerOptions contains options for creating a new Manager. +type ManagerOptions struct { + Config *config.Config + Log logr.Logger + Collectors *metrics.Collectors +} + +// NewManager creates a new controller-runtime manager with the given options. +// This follows controller-runtime and operator-sdk conventions for leader election. +func NewManager(opts ManagerOptions) (ctrl.Manager, error) { + cfg := opts.Config + le := cfg.LeaderElection + + mgrOpts := ctrl.Options{ + Scheme: runtimeScheme, + Metrics: ctrlmetrics.Options{ + BindAddress: cfg.MetricsAddr, + }, + HealthProbeBindAddress: cfg.HealthAddr, + + // Leader election configuration following operator-sdk best practices: + // - LeaderElection enables/disables leader election + // - LeaderElectionID is the name of the lease resource + // - LeaderElectionNamespace where the lease is created (defaults to pod namespace) + // - LeaderElectionReleaseOnCancel allows faster failover by releasing the lock on shutdown + LeaderElection: cfg.EnableHA, + LeaderElectionID: le.LockName, + LeaderElectionNamespace: le.Namespace, + LeaderElectionReleaseOnCancel: le.ReleaseOnCancel, + LeaseDuration: &le.LeaseDuration, + RenewDeadline: &le.RenewDeadline, + RetryPeriod: &le.RetryPeriod, + } + + if cfg.WatchedNamespace != "" { + mgrOpts.Cache = cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + cfg.WatchedNamespace: {}, + }, + } + opts.Log.Info("namespace filtering enabled", "namespace", cfg.WatchedNamespace) + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), mgrOpts) + if err != nil { + return nil, 
fmt.Errorf("creating manager: %w", err) + } + + // Add health and readiness probes. + // The healthz probe reports whether the manager is running. + // The readyz probe reports whether the manager is ready to serve requests. + // When leader election is enabled, readyz will fail until this instance becomes leader. + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + return nil, fmt.Errorf("setting up health check: %w", err) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + return nil, fmt.Errorf("setting up ready check: %w", err) + } + + return mgr, nil +} + +// NewManagerWithRestConfig creates a new controller-runtime manager with the given rest.Config. +// This is useful for testing where you have a pre-existing cluster configuration. +func NewManagerWithRestConfig(opts ManagerOptions, restConfig *rest.Config) (ctrl.Manager, error) { + cfg := opts.Config + le := cfg.LeaderElection + + mgrOpts := ctrl.Options{ + Scheme: runtimeScheme, + Metrics: ctrlmetrics.Options{ + BindAddress: "0", // Disable metrics server in tests + }, + HealthProbeBindAddress: "0", // Disable health probes in tests + + // Leader election configuration + LeaderElection: cfg.EnableHA, + LeaderElectionID: le.LockName, + LeaderElectionNamespace: le.Namespace, + LeaderElectionReleaseOnCancel: le.ReleaseOnCancel, + LeaseDuration: &le.LeaseDuration, + RenewDeadline: &le.RenewDeadline, + RetryPeriod: &le.RetryPeriod, + } + + if cfg.WatchedNamespace != "" { + mgrOpts.Cache = cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + cfg.WatchedNamespace: {}, + }, + } + } + + mgr, err := ctrl.NewManager(restConfig, mgrOpts) + if err != nil { + return nil, fmt.Errorf("creating manager: %w", err) + } + + return mgr, nil +} + +// SetupReconcilers sets up all reconcilers with the manager. 
+func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, collectors *metrics.Collectors) error { + registry := workload.NewRegistry( + workload.RegistryOptions{ + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + RolloutStrategyAnnotation: cfg.Annotations.RolloutStrategy, + }, + ) + reloadService := reload.NewService(cfg, log.WithName("reload")) + eventRecorder := events.NewRecorder(mgr.GetEventRecorderFor("reloader")) + pauseHandler := reload.NewPauseHandler(cfg) + + // Create alerter based on configuration + alerter := alerting.NewAlerter(cfg) + if cfg.Alerting.Enabled { + log.Info("alerting enabled", "sink", cfg.Alerting.Sink) + } + + // Create webhook client if URL is configured + var webhookClient *webhook.Client + if cfg.WebhookURL != "" { + webhookClient = webhook.NewClient(cfg.WebhookURL, log.WithName("webhook")) + log.Info("webhook mode enabled", "url", cfg.WebhookURL) + } + + // Create namespace cache if namespace selectors are configured. + // This cache is shared between the namespace reconciler and resource reconcilers. 
+ var nsCache *NamespaceCache + if len(cfg.NamespaceSelectors) > 0 { + nsCache = NewNamespaceCache(true) + if err := (&NamespaceReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("namespace-reconciler"), + Config: cfg, + Cache: nsCache, + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("setting up namespace reconciler: %w", err) + } + log.Info("namespace reconciler enabled for label selector filtering") + } + + // Setup ConfigMap reconciler + if !cfg.IsResourceIgnored("configmaps") { + cmReconciler := NewConfigMapReconciler( + mgr.GetClient(), + log.WithName("configmap-reconciler"), + cfg, + reloadService, + registry, + collectors, + eventRecorder, + webhookClient, + alerter, + pauseHandler, + nsCache, + ) + if err := SetupConfigMapReconciler(mgr, cmReconciler); err != nil { + return fmt.Errorf("setting up configmap reconciler: %w", err) + } + } + + // Setup Secret reconciler + if !cfg.IsResourceIgnored("secrets") { + secretReconciler := NewSecretReconciler( + mgr.GetClient(), + log.WithName("secret-reconciler"), + cfg, + reloadService, + registry, + collectors, + eventRecorder, + webhookClient, + alerter, + pauseHandler, + nsCache, + ) + if err := SetupSecretReconciler(mgr, secretReconciler); err != nil { + return fmt.Errorf("setting up secret reconciler: %w", err) + } + } + + // Setup Deployment reconciler for pause handling + if err := (&DeploymentReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("deployment-reconciler"), + Config: cfg, + PauseHandler: pauseHandler, + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("setting up deployment reconciler: %w", err) + } + + return nil +} + +// RunManager starts the manager and blocks until it stops. 
+func RunManager(ctx context.Context, mgr ctrl.Manager, log logr.Logger) error { + log.Info("starting manager") + return mgr.Start(ctx) +} diff --git a/internal/pkg/controller/namespace_reconciler.go b/internal/pkg/controller/namespace_reconciler.go new file mode 100644 index 000000000..4e220fd5e --- /dev/null +++ b/internal/pkg/controller/namespace_reconciler.go @@ -0,0 +1,145 @@ +package controller + +import ( + "context" + "sync" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" +) + +// NamespaceCache provides thread-safe access to the set of namespaces +// that match the configured namespace label selector. +type NamespaceCache struct { + mu sync.RWMutex + namespaces map[string]struct{} + enabled bool +} + +// NewNamespaceCache creates a new NamespaceCache. +// If enabled is false, all namespace checks return true (allow all). +func NewNamespaceCache(enabled bool) *NamespaceCache { + return &NamespaceCache{ + namespaces: make(map[string]struct{}), + enabled: enabled, + } +} + +// Add adds a namespace to the cache. +func (c *NamespaceCache) Add(name string) { + c.mu.Lock() + defer c.mu.Unlock() + c.namespaces[name] = struct{}{} +} + +// Remove removes a namespace from the cache. +func (c *NamespaceCache) Remove(name string) { + c.mu.Lock() + defer c.mu.Unlock() + delete(c.namespaces, name) +} + +// Contains checks if a namespace is in the cache. +// If namespace selectors are not enabled, always returns true. +func (c *NamespaceCache) Contains(name string) bool { + if !c.enabled { + return true + } + c.mu.RLock() + defer c.mu.RUnlock() + _, ok := c.namespaces[name] + return ok +} + +// List returns a copy of all namespace names in the cache. 
+func (c *NamespaceCache) List() []string { + c.mu.RLock() + defer c.mu.RUnlock() + result := make([]string, 0, len(c.namespaces)) + for name := range c.namespaces { + result = append(result, name) + } + return result +} + +// IsEnabled returns whether namespace selector filtering is enabled. +func (c *NamespaceCache) IsEnabled() bool { + return c.enabled +} + +// NamespaceReconciler watches Namespace objects and maintains a cache +// of namespaces that match the configured label selector. +type NamespaceReconciler struct { + client.Client + Log logr.Logger + Config *config.Config + Cache *NamespaceCache +} + +// Reconcile handles Namespace events and updates the namespace cache. +func (r *NamespaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("namespace", req.Name) + + var ns corev1.Namespace + if err := r.Get(ctx, req.NamespacedName, &ns); err != nil { + if errors.IsNotFound(err) { + // Namespace was deleted - remove from cache + r.Cache.Remove(req.Name) + log.V(1).Info("removed namespace from cache (deleted)") + return ctrl.Result{}, nil + } + log.Error(err, "failed to get Namespace") + return ctrl.Result{}, err + } + + // Check if namespace matches any of the configured selectors + if r.matchesSelectors(&ns) { + r.Cache.Add(ns.Name) + log.V(1).Info("added namespace to cache") + } else { + // Labels might have changed, remove from cache if no longer matches + r.Cache.Remove(ns.Name) + log.V(1).Info("removed namespace from cache (labels no longer match)") + } + + return ctrl.Result{}, nil +} + +// matchesSelectors checks if the namespace matches any configured label selector. 
+func (r *NamespaceReconciler) matchesSelectors(ns *corev1.Namespace) bool { + if len(r.Config.NamespaceSelectors) == 0 { + // No selectors configured - should not happen since reconciler is only + // set up when selectors are configured, but handle gracefully + return true + } + + nsLabels := ns.GetLabels() + if nsLabels == nil { + nsLabels = make(map[string]string) + } + + for _, selector := range r.Config.NamespaceSelectors { + if selector.Matches(reload.LabelsSet(nsLabels)) { + return true + } + } + + return false +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Namespace{}). + Complete(r) +} + +// Ensure NamespaceReconciler implements reconcile.Reconciler +var _ reconcile.Reconciler = &NamespaceReconciler{} diff --git a/internal/pkg/controller/namespace_reconciler_test.go b/internal/pkg/controller/namespace_reconciler_test.go new file mode 100644 index 000000000..604dca92a --- /dev/null +++ b/internal/pkg/controller/namespace_reconciler_test.go @@ -0,0 +1,151 @@ +package controller_test + +import ( + "testing" + + "k8s.io/apimachinery/pkg/labels" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/testutil" +) + +func TestNamespaceCache_Basic(t *testing.T) { + cache := controller.NewNamespaceCache(true) + + cache.Add("namespace-1") + if !cache.Contains("namespace-1") { + t.Error("Cache should contain namespace-1") + } + if cache.Contains("namespace-2") { + t.Error("Cache should not contain namespace-2") + } + + cache.Remove("namespace-1") + if cache.Contains("namespace-1") { + t.Error("Cache should not contain namespace-1 after removal") + } +} + +func TestNamespaceCache_Disabled(t *testing.T) { + cache := controller.NewNamespaceCache(false) + + if !cache.Contains("any-namespace") { + t.Error("Disabled cache 
should return true for any namespace") + } +} + +func TestNamespaceCache_List(t *testing.T) { + cache := controller.NewNamespaceCache(true) + cache.Add("ns-1") + cache.Add("ns-2") + cache.Add("ns-3") + + list := cache.List() + if len(list) != 3 { + t.Errorf("Expected 3 namespaces, got %d", len(list)) + } + + found := make(map[string]bool) + for _, ns := range list { + found[ns] = true + } + for _, expected := range []string{"ns-1", "ns-2", "ns-3"} { + if !found[expected] { + t.Errorf("Expected %s in list", expected) + } + } +} + +func TestNamespaceCache_IsEnabled(t *testing.T) { + if !controller.NewNamespaceCache(true).IsEnabled() { + t.Error("EnabledCache.IsEnabled() should return true") + } + if controller.NewNamespaceCache(false).IsEnabled() { + t.Error("DisabledCache.IsEnabled() should return false") + } +} + +func TestNamespaceReconciler_Add(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("env=production") + cfg.NamespaceSelectors = []labels.Selector{selector} + + cache := controller.NewNamespaceCache(true) + ns := testutil.NewNamespace("test-ns", map[string]string{"env": "production"}) + reconciler := newNamespaceReconciler(t, cfg, cache, ns) + + assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) + + if !cache.Contains("test-ns") { + t.Error("Cache should contain test-ns after reconcile") + } +} + +func TestNamespaceReconciler_Remove_LabelChange(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("env=production") + cfg.NamespaceSelectors = []labels.Selector{selector} + + cache := controller.NewNamespaceCache(true) + cache.Add("test-ns") // Pre-populate + + ns := testutil.NewNamespace("test-ns", map[string]string{"env": "staging"}) // Non-matching + reconciler := newNamespaceReconciler(t, cfg, cache, ns) + + assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) + + if cache.Contains("test-ns") { + t.Error("Cache should not contain test-ns after reconcile (labels no longer match)") 
+ } +} + +func TestNamespaceReconciler_Remove_Delete(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("env=production") + cfg.NamespaceSelectors = []labels.Selector{selector} + + cache := controller.NewNamespaceCache(true) + cache.Add("deleted-ns") // Pre-populate + + reconciler := newNamespaceReconciler(t, cfg, cache) // No namespace in cluster + + assertReconcileSuccess(t, reconciler, namespaceRequest("deleted-ns")) + + if cache.Contains("deleted-ns") { + t.Error("Cache should not contain deleted-ns after reconcile") + } +} + +func TestNamespaceReconciler_MultipleSelectors(t *testing.T) { + cfg := config.NewDefault() + selector1, _ := labels.Parse("env=production") + selector2, _ := labels.Parse("team=platform") + cfg.NamespaceSelectors = []labels.Selector{selector1, selector2} + + cache := controller.NewNamespaceCache(true) + ns := testutil.NewNamespace("test-ns", map[string]string{"team": "platform"}) + reconciler := newNamespaceReconciler(t, cfg, cache, ns) + + assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) + + if !cache.Contains("test-ns") { + t.Error("Cache should contain test-ns (matches second selector)") + } +} + +func TestNamespaceReconciler_NoLabels(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("env=production") + cfg.NamespaceSelectors = []labels.Selector{selector} + + cache := controller.NewNamespaceCache(true) + ns := testutil.NewNamespace("test-ns", nil) // No labels + reconciler := newNamespaceReconciler(t, cfg, cache, ns) + + assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) + + if cache.Contains("test-ns") { + t.Error("Cache should not contain test-ns (no labels)") + } +} diff --git a/internal/pkg/controller/resource_reconciler.go b/internal/pkg/controller/resource_reconciler.go new file mode 100644 index 000000000..7bfdd8057 --- /dev/null +++ b/internal/pkg/controller/resource_reconciler.go @@ -0,0 +1,203 @@ +package controller + +import ( + "context" + 
"sync" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// ResourceReconcilerDeps holds shared dependencies for resource reconcilers. +type ResourceReconcilerDeps struct { + Client client.Client + Log logr.Logger + Config *config.Config + ReloadService *reload.Service + Registry *workload.Registry + Collectors *metrics.Collectors + EventRecorder *events.Recorder + WebhookClient *webhook.Client + Alerter alerting.Alerter + PauseHandler *reload.PauseHandler + NamespaceCache *NamespaceCache +} + +// ResourceConfig provides type-specific configuration for a resource reconciler. +type ResourceConfig[T client.Object] struct { + // ResourceType identifies the type of resource (configmap or secret). + ResourceType reload.ResourceType + + // NewResource creates a new instance of the resource type. + NewResource func() T + + // CreateChange creates a change event for the resource. + CreateChange func(resource T, eventType reload.EventType) reload.ResourceChange + + // CreatePredicates creates the predicates for this resource type. + CreatePredicates func(cfg *config.Config, hasher *reload.Hasher) predicate.Predicate +} + +// ResourceReconciler is a generic reconciler for ConfigMaps and Secrets. +type ResourceReconciler[T client.Object] struct { + ResourceReconcilerDeps + ResourceConfig[T] + + handler *ReloadHandler + initialized bool + initOnce sync.Once +} + +// NewResourceReconciler creates a new generic resource reconciler. 
+func NewResourceReconciler[T client.Object]( + deps ResourceReconcilerDeps, + cfg ResourceConfig[T], +) *ResourceReconciler[T] { + return &ResourceReconciler[T]{ + ResourceReconcilerDeps: deps, + ResourceConfig: cfg, + } +} + +// Reconcile handles resource events and triggers workload reloads as needed. +func (r *ResourceReconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + startTime := time.Now() + resourceType := string(r.ResourceType) + log := r.Log.WithValues(resourceType, req.NamespacedName) + + r.initOnce.Do( + func() { + r.initialized = true + log.Info(resourceType + " controller initialized") + }, + ) + + r.Collectors.RecordEventReceived("reconcile", resourceType) + + resource := r.NewResource() + if err := r.Client.Get(ctx, req.NamespacedName, resource); err != nil { + if errors.IsNotFound(err) { + return r.handleNotFound(ctx, req, log, startTime) + } + log.Error(err, "failed to get "+resourceType) + r.Collectors.RecordError("get_" + resourceType) + r.Collectors.RecordReconcile("error", time.Since(startTime)) + return ctrl.Result{}, err + } + + namespace := resource.GetNamespace() + if r.Config.IsNamespaceIgnored(namespace) { + log.V(1).Info("skipping " + resourceType + " in ignored namespace") + r.Collectors.RecordSkipped("ignored_namespace") + r.Collectors.RecordReconcile("success", time.Since(startTime)) + return ctrl.Result{}, nil + } + + if r.NamespaceCache != nil && r.NamespaceCache.IsEnabled() && !r.NamespaceCache.Contains(namespace) { + log.V(1).Info("skipping "+resourceType+" in namespace not matching selector", "namespace", namespace) + r.Collectors.RecordSkipped("namespace_selector") + r.Collectors.RecordReconcile("success", time.Since(startTime)) + return ctrl.Result{}, nil + } + + result, err := r.reloadHandler().Process( + ctx, req.Namespace, req.Name, r.ResourceType, + func(workloads []workload.Workload) []reload.ReloadDecision { + return r.ReloadService.Process(r.CreateChange(resource, 
reload.EventTypeUpdate), workloads) + }, log, + ) + + r.recordReconcile(startTime, err) + return result, err +} + +func (r *ResourceReconciler[T]) handleNotFound( + ctx context.Context, + req ctrl.Request, + log logr.Logger, + startTime time.Time, +) (ctrl.Result, error) { + if r.Config.ReloadOnDelete { + r.Collectors.RecordEventReceived("delete", string(r.ResourceType)) + result, err := r.handleDelete(ctx, req, log) + r.recordReconcile(startTime, err) + return result, err + } + r.Collectors.RecordSkipped("not_found") + r.Collectors.RecordReconcile("success", time.Since(startTime)) + return ctrl.Result{}, nil +} + +func (r *ResourceReconciler[T]) handleDelete( + ctx context.Context, + req ctrl.Request, + log logr.Logger, +) (ctrl.Result, error) { + log.Info("handling " + string(r.ResourceType) + " deletion") + + // Create a minimal resource with just name/namespace for the delete event + resource := r.NewResource() + resource.SetName(req.Name) + resource.SetNamespace(req.Namespace) + + return r.reloadHandler().Process( + ctx, req.Namespace, req.Name, r.ResourceType, + func(workloads []workload.Workload) []reload.ReloadDecision { + return r.ReloadService.Process(r.CreateChange(resource, reload.EventTypeDelete), workloads) + }, log, + ) +} + +func (r *ResourceReconciler[T]) recordReconcile(startTime time.Time, err error) { + if err != nil { + r.Collectors.RecordReconcile("error", time.Since(startTime)) + } else { + r.Collectors.RecordReconcile("success", time.Since(startTime)) + } +} + +func (r *ResourceReconciler[T]) reloadHandler() *ReloadHandler { + if r.handler == nil { + r.handler = &ReloadHandler{ + Client: r.Client, + Lister: workload.NewLister(r.Client, r.Registry, r.Config), + ReloadService: r.ReloadService, + WebhookClient: r.WebhookClient, + Collectors: r.Collectors, + EventRecorder: r.EventRecorder, + Alerter: r.Alerter, + PauseHandler: r.PauseHandler, + } + } + return r.handler +} + +// Initialized returns whether the reconciler has been initialized. 
+func (r *ResourceReconciler[T]) Initialized() *bool { + return &r.initialized +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ResourceReconciler[T]) SetupWithManager(mgr ctrl.Manager, forObject T) error { + return ctrl.NewControllerManagedBy(mgr). + For(forObject). + WithEventFilter( + BuildEventFilter( + r.CreatePredicates(r.Config, r.ReloadService.Hasher()), + r.Config, r.Initialized(), + ), + ). + Complete(r) +} diff --git a/internal/pkg/controller/retry.go b/internal/pkg/controller/retry.go new file mode 100644 index 000000000..ffb30615e --- /dev/null +++ b/internal/pkg/controller/retry.go @@ -0,0 +1,206 @@ +package controller + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// UpdateObjectWithRetry updates a Kubernetes object with retry on conflict. +// It re-fetches the object on each retry attempt and calls modifyFn to apply changes. +// The modifyFn receives the latest version of the object and should modify it in place. +// If modifyFn returns false, the update is skipped (e.g., if the condition no longer applies). +func UpdateObjectWithRetry( + ctx context.Context, + c client.Client, + obj client.Object, + modifyFn func() (shouldUpdate bool, err error), +) error { + return retry.RetryOnConflict( + retry.DefaultBackoff, func() error { + if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + + shouldUpdate, err := modifyFn() + if err != nil { + return err + } + + if !shouldUpdate { + return nil + } + + return c.Update(ctx, obj, client.FieldOwner(workload.FieldManager)) + }, + ) +} + +// UpdateWorkloadWithRetry updates a workload with exponential backoff on conflict. 
+// On conflict, it re-fetches the object, re-applies the reload changes, and retries. +// Workloads use their UpdateStrategy to determine how they're updated: +// - UpdateStrategyPatch: uses strategic merge patch with retry (most workloads) +// - UpdateStrategyRecreate: deletes and recreates (Jobs) +// - UpdateStrategyCreateNew: creates a new resource from template (CronJobs) +// Deployments have additional pause handling for paused rollouts. +func UpdateWorkloadWithRetry( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + pauseHandler *reload.PauseHandler, + wl workload.Workload, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + switch wl.UpdateStrategy() { + case workload.UpdateStrategyRecreate, workload.UpdateStrategyCreateNew: + return updateWithSpecialStrategy(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) + default: + // UpdateStrategyPatch: use standard retry logic with special handling for Deployments + if wl.Kind() == workload.KindDeployment { + return updateDeploymentWithPause(ctx, c, reloadService, pauseHandler, wl, resourceName, resourceType, namespace, hash, autoReload) + } + return updateStandardWorkload(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) + } +} + +// retryWithReload wraps the common retry logic for workload updates. +// It handles re-fetching on conflict, applying reload changes, and calling the update function. 
+func retryWithReload( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.Workload, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, + updateFn func() error, +) (bool, error) { + var updated bool + isFirstAttempt := true + + err := retry.RetryOnConflict( + retry.DefaultBackoff, func() error { + if !isFirstAttempt { + obj := wl.GetObject() + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + wl.ResetOriginal() + } + isFirstAttempt = false + + var applyErr error + updated, applyErr = reloadService.ApplyReload(ctx, wl, resourceName, resourceType, namespace, hash, autoReload) + if applyErr != nil { + return applyErr + } + + if !updated { + return nil + } + + return updateFn() + }, + ) + + return updated, err +} + +// updateStandardWorkload updates DaemonSets, StatefulSets, etc. +func updateStandardWorkload( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.Workload, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + return retryWithReload( + ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, + func() error { + return wl.Update(ctx, c) + }, + ) +} + +// updateDeploymentWithPause updates a Deployment and applies pause if configured. 
+func updateDeploymentWithPause( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + pauseHandler *reload.PauseHandler, + wl workload.Workload, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + shouldPause := pauseHandler != nil && pauseHandler.ShouldPause(wl) + + return retryWithReload( + ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, + func() error { + if shouldPause { + if err := pauseHandler.ApplyPause(wl); err != nil { + return err + } + } + return wl.Update(ctx, c) + }, + ) +} + +// updateWithSpecialStrategy handles workloads that don't use standard patch. +// It applies reload changes, then delegates to the workload's PerformSpecialUpdate. +func updateWithSpecialStrategy( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.Workload, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + updated, err := reloadService.ApplyReload( + ctx, + wl, + resourceName, + resourceType, + namespace, + hash, + autoReload, + ) + if err != nil { + return false, err + } + + if !updated { + return false, nil + } + + return wl.PerformSpecialUpdate(ctx, c) +} diff --git a/internal/pkg/controller/retry_test.go b/internal/pkg/controller/retry_test.go new file mode 100644 index 000000000..ff33c0b55 --- /dev/null +++ b/internal/pkg/controller/retry_test.go @@ -0,0 +1,587 @@ +package controller_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr/testr" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/stakater/Reloader/internal/pkg/config" + 
"github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/testutil" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { + tests := []struct { + name string + object runtime.Object + workload func(runtime.Object) workload.Workload + resourceType reload.ResourceType + verify func(t *testing.T, c client.Client) + }{ + { + name: "Deployment", + object: testutil.NewDeployment("test-deployment", "default", nil), + workload: func(o runtime.Object) workload.Workload { + return workload.NewDeploymentWorkload(o.(*appsv1.Deployment)) + }, + resourceType: reload.ResourceTypeConfigMap, + verify: func(t *testing.T, c client.Client) { + var result appsv1.Deployment + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + }, + }, + { + name: "DaemonSet", + object: testutil.NewDaemonSet("test-daemonset", "default", nil), + workload: func(o runtime.Object) workload.Workload { + return workload.NewDaemonSetWorkload(o.(*appsv1.DaemonSet)) + }, + resourceType: reload.ResourceTypeSecret, + verify: func(t *testing.T, c client.Client) { + var result appsv1.DaemonSet + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-daemonset", Namespace: "default"}, &result); err != nil { + t.Fatalf("Failed to get daemonset: %v", err) + } + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + }, + }, + { + name: "StatefulSet", + object: testutil.NewStatefulSet("test-statefulset", "default", nil), + workload: func(o runtime.Object) workload.Workload { + return workload.NewStatefulSetWorkload(o.(*appsv1.StatefulSet)) + }, + 
resourceType: reload.ResourceTypeConfigMap, + verify: func(t *testing.T, c client.Client) { + var result appsv1.StatefulSet + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-statefulset", Namespace: "default"}, &result); err != nil { + t.Fatalf("Failed to get statefulset: %v", err) + } + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + }, + }, + { + name: "Job", + object: testutil.NewJob("test-job", "default"), + workload: func(o runtime.Object) workload.Workload { + return workload.NewJobWorkload(o.(*batchv1.Job)) + }, + resourceType: reload.ResourceTypeConfigMap, + verify: func(t *testing.T, c client.Client) { + var jobs batchv1.JobList + if err := c.List(context.Background(), &jobs, client.InNamespace("default")); err != nil { + t.Fatalf("Failed to list jobs: %v", err) + } + if len(jobs.Items) != 1 { + t.Errorf("Expected 1 job (recreated), got %d", len(jobs.Items)) + } + }, + }, + { + name: "CronJob", + object: testutil.NewCronJob("test-cronjob", "default"), + workload: func(o runtime.Object) workload.Workload { + return workload.NewCronJobWorkload(o.(*batchv1.CronJob)) + }, + resourceType: reload.ResourceTypeSecret, + verify: func(t *testing.T, c client.Client) { + var jobs batchv1.JobList + if err := c.List(context.Background(), &jobs, client.InNamespace("default")); err != nil { + t.Fatalf("Failed to list jobs: %v", err) + } + if len(jobs.Items) != 1 { + t.Errorf("Expected 1 job from cronjob, got %d", len(jobs.Items)) + } + if len(jobs.Items) > 0 && jobs.Items[0].Annotations["cronjob.kubernetes.io/instantiate"] != "manual" { + t.Error("Expected job to have manual instantiate annotation") + } + }, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg, testr.New(t)) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithRuntimeObjects(tt.object). 
+ Build() + + wl := tt.workload(tt.object) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + nil, // no pause handler + wl, + "test-resource", + tt.resourceType, + "default", + "abc123", + false, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + tt.verify(t, fakeClient) + }, + ) + } +} + +func TestUpdateWorkloadWithRetry_Strategies(t *testing.T) { + tests := []struct { + name string + strategy config.ReloadStrategy + verify func(t *testing.T, cfg *config.Config, result *appsv1.Deployment) + }{ + { + name: "EnvVarStrategy", + strategy: config.ReloadStrategyEnvVars, + verify: func(t *testing.T, cfg *config.Config, result *appsv1.Deployment) { + found := false + for _, env := range result.Spec.Template.Spec.Containers[0].Env { + if env.Name == "STAKATER_TEST_CM_CONFIGMAP" && env.Value == "abc123" { + found = true + break + } + } + if !found { + t.Error("Expected STAKATER_TEST_CM_CONFIGMAP env var to be set") + } + }, + }, + { + name: "AnnotationStrategy", + strategy: config.ReloadStrategyAnnotations, + verify: func(t *testing.T, cfg *config.Config, result *appsv1.Deployment) { + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + if _, ok := result.Spec.Template.Annotations[cfg.Annotations.LastReloadedFrom]; !ok { + t.Errorf("Expected %s annotation to be set", cfg.Annotations.LastReloadedFrom) + } + for _, env := range result.Spec.Template.Spec.Containers[0].Env { + if env.Name == "STAKATER_TEST_CM_CONFIGMAP" { + t.Error("Annotation strategy should not add env vars") + } + } + }, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = tt.strategy + reloadService := reload.NewService(cfg, testr.New(t)) + + deployment := testutil.NewDeployment("test-deployment", "default", nil) 
+ fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). + Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + nil, // no pause handler for this test + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", + false, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + tt.verify(t, cfg, &result) + }, + ) + } +} + +func TestUpdateWorkloadWithRetry_NoUpdate(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg, testr.New(t)) + + deployment := testutil.NewDeployment("test-deployment", "default", nil) + deployment.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ + { + Name: "STAKATER_TEST_CM_CONFIGMAP", + Value: "abc123", + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + nil, // no pause handler + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", // Same hash as already set + false, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if updated { + t.Error("Expected workload NOT to be updated (same hash)") + } +} + +func TestResourceTypeKind(t *testing.T) { + tests := []struct { + resourceType reload.ResourceType + expectedKind string + }{ + {reload.ResourceTypeConfigMap, "ConfigMap"}, + {reload.ResourceTypeSecret, "Secret"}, + } + + for _, tt := range tests { + t.Run( + string(tt.resourceType), func(t *testing.T) { + if got := tt.resourceType.Kind(); got != tt.expectedKind { + t.Errorf("ResourceType.Kind() = %v, want %v", got, tt.expectedKind) + } + }, + ) + } +} + +func TestUpdateWorkloadWithRetry_PauseDeployment(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg, testr.New(t)) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + "deployment.reloader.stakater.com/pause-period": "5m", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", + true, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + + if !result.Spec.Paused { + t.Error("Expected deployment to be paused (spec.Paused=true)") + } + + pausedAt := result.Annotations[cfg.Annotations.PausedAt] + if pausedAt == "" { + t.Error("Expected paused-at annotation to be set") + } +} + +// TestUpdateWorkloadWithRetry_PauseWithExplicitAnnotation tests pause with explicit configmap annotation (no auto). +func TestUpdateWorkloadWithRetry_PauseWithExplicitAnnotation(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg, testr.New(t)) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", // explicit, not auto + cfg.Annotations.PausePeriod: "5m", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", + false, // NOT auto reload + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + + if !result.Spec.Paused { + t.Error("Expected deployment to be paused (spec.Paused=true)") + } + + pausedAt := result.Annotations[cfg.Annotations.PausedAt] + if pausedAt == "" { + t.Error("Expected paused-at annotation to be set") + } +} + +// TestUpdateWorkloadWithRetry_PauseWithSecretReload tests pause with Secret-triggered reload. +func TestUpdateWorkloadWithRetry_PauseWithSecretReload(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg, testr.New(t)) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", // explicit secret, not auto + cfg.Annotations.PausePeriod: "5m", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-secret", + reload.ResourceTypeSecret, + "default", + "abc123", + false, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if !result.Spec.Paused { + t.Error("Expected deployment to be paused (spec.Paused=true)") + } + + pausedAt := result.Annotations[cfg.Annotations.PausedAt] + if pausedAt == "" { + t.Error("Expected paused-at annotation to be set") + } +} + +// TestUpdateWorkloadWithRetry_PauseWithAutoSecret tests pause with auto annotation + Secret change. +func TestUpdateWorkloadWithRetry_PauseWithAutoSecret(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg, testr.New(t)) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.PausePeriod: "5m", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-secret", + reload.ResourceTypeSecret, + "default", + "abc123", + true, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if !result.Spec.Paused { + t.Error("Expected deployment to be paused (spec.Paused=true)") + } +} + +func TestUpdateWorkloadWithRetry_NoPauseWithoutAnnotation(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg, testr.New(t)) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", + true, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if result.Spec.Paused { + t.Error("Expected deployment NOT to be paused (no pause-period annotation)") + } +} diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go new file mode 100644 index 000000000..b50c75476 --- /dev/null +++ b/internal/pkg/controller/secret_reconciler.go @@ -0,0 +1,69 @@ +package controller + +import ( + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// SecretReconciler watches Secrets and triggers workload reloads. +type SecretReconciler = ResourceReconciler[*corev1.Secret] + +// NewSecretReconciler creates a new SecretReconciler with the given dependencies. 
+func NewSecretReconciler( + c client.Client, + log logr.Logger, + cfg *config.Config, + reloadService *reload.Service, + registry *workload.Registry, + collectors *metrics.Collectors, + eventRecorder *events.Recorder, + webhookClient *webhook.Client, + alerter alerting.Alerter, + pauseHandler *reload.PauseHandler, + nsCache *NamespaceCache, +) *SecretReconciler { + return NewResourceReconciler( + ResourceReconcilerDeps{ + Client: c, + Log: log, + Config: cfg, + ReloadService: reloadService, + Registry: registry, + Collectors: collectors, + EventRecorder: eventRecorder, + WebhookClient: webhookClient, + Alerter: alerter, + PauseHandler: pauseHandler, + NamespaceCache: nsCache, + }, + ResourceConfig[*corev1.Secret]{ + ResourceType: reload.ResourceTypeSecret, + NewResource: func() *corev1.Secret { return &corev1.Secret{} }, + CreateChange: func(s *corev1.Secret, eventType reload.EventType) reload.ResourceChange { + return reload.SecretChange{Secret: s, EventType: eventType} + }, + CreatePredicates: func(cfg *config.Config, hasher *reload.Hasher) predicate.Predicate { + return reload.SecretPredicates(cfg, hasher) + }, + }, + ) +} + +// SetupSecretReconciler sets up a Secret reconciler with the manager. 
+func SetupSecretReconciler(mgr ctrl.Manager, r *SecretReconciler) error { + return r.SetupWithManager(mgr, &corev1.Secret{}) +} + +var _ reconcile.Reconciler = &SecretReconciler{} diff --git a/internal/pkg/controller/secret_reconciler_test.go b/internal/pkg/controller/secret_reconciler_test.go new file mode 100644 index 000000000..f55e84a80 --- /dev/null +++ b/internal/pkg/controller/secret_reconciler_test.go @@ -0,0 +1,173 @@ +package controller_test + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/testutil" +) + +func TestSecretReconciler_NotFound(t *testing.T) { + cfg := config.NewDefault() + reconciler := newSecretReconciler(t, cfg) + assertReconcileSuccess(t, reconciler, reconcileRequest("nonexistent-secret", "default")) +} + +func TestSecretReconciler_NotFound_ReloadOnDelete(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnDelete = true + + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.SecretReload: "deleted-secret", + }) + reconciler := newSecretReconciler(t, cfg, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("deleted-secret", "default")) +} + +func TestSecretReconciler_IgnoredNamespace(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + + secret := testutil.NewSecret("test-secret", "kube-system") + reconciler := newSecretReconciler(t, cfg, secret) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "kube-system")) +} + +func TestSecretReconciler_NoMatchingWorkloads(t *testing.T) { + cfg := config.NewDefault() + + secret := testutil.NewSecret("test-secret", "default") + deployment := testutil.NewDeployment("test-deployment", "default", nil) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) +} + +func 
TestSecretReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + secret := testutil.NewSecret("test-secret", "default") + deployment := testutil.NewDeploymentWithEnvFrom("test-deployment", "default", "", "test-secret") + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) +} + +func TestSecretReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { + cfg := config.NewDefault() + + secret := testutil.NewSecret("test-secret", "default") + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) +} + +func TestSecretReconciler_WorkloadInDifferentNamespace(t *testing.T) { + cfg := config.NewDefault() + + secret := testutil.NewSecret("test-secret", "namespace-a") + deployment := testutil.NewDeployment("test-deployment", "namespace-b", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "namespace-a")) +} + +func TestSecretReconciler_IgnoredWorkloadType(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredWorkloads = []string{"deployment"} + + secret := testutil.NewSecret("test-secret", "default") + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) +} + +func TestSecretReconciler_DaemonSet(t *testing.T) { + cfg := config.NewDefault() + + secret := testutil.NewSecret("test-secret", "default") + 
daemonset := testutil.NewDaemonSet("test-daemonset", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, daemonset) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) +} + +func TestSecretReconciler_StatefulSet(t *testing.T) { + cfg := config.NewDefault() + + secret := testutil.NewSecret("test-secret", "default") + statefulset := testutil.NewStatefulSet("test-statefulset", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, statefulset) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) +} + +func TestSecretReconciler_MultipleWorkloads(t *testing.T) { + cfg := config.NewDefault() + + secret := testutil.NewSecret("shared-secret", "default") + deployment1 := testutil.NewDeployment("deployment-1", "default", map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }) + deployment2 := testutil.NewDeployment("deployment-2", "default", map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }) + daemonset := testutil.NewDaemonSet("daemonset-1", "default", map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }) + + reconciler := newSecretReconciler(t, cfg, secret, deployment1, deployment2, daemonset) + assertReconcileSuccess(t, reconciler, reconcileRequest("shared-secret", "default")) +} + +func TestSecretReconciler_VolumeMount(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + secret := testutil.NewSecret("volume-secret", "default") + deployment := testutil.NewDeploymentWithVolume("test-deployment", "default", "", "volume-secret") + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("volume-secret", "default")) +} + +func TestSecretReconciler_ProjectedVolume(t *testing.T) { + cfg := config.NewDefault() + 
cfg.AutoReloadAll = true + + secret := testutil.NewSecret("projected-secret", "default") + deployment := testutil.NewDeploymentWithProjectedVolume("test-deployment", "default", "", "projected-secret") + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("projected-secret", "default")) +} + +func TestSecretReconciler_SearchAnnotation(t *testing.T) { + cfg := config.NewDefault() + + secret := testutil.NewSecretWithAnnotations("test-secret", "default", map[string]string{ + cfg.Annotations.Match: "true", + }) + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.Search: "true", + }) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) +} + +func TestSecretReconciler_ServiceAccountTokenIgnored(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + // Service account tokens should be ignored + secret := testutil.NewSecret("sa-token", "default") + secret.Type = "kubernetes.io/service-account-token" + + deployment := testutil.NewDeploymentWithEnvFrom("test-deployment", "default", "", "sa-token") + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("sa-token", "default")) +} diff --git a/internal/pkg/controller/test_helpers_test.go b/internal/pkg/controller/test_helpers_test.go new file mode 100644 index 000000000..2b0f9e75b --- /dev/null +++ b/internal/pkg/controller/test_helpers_test.go @@ -0,0 +1,151 @@ +package controller_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/testr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/stakater/Reloader/internal/pkg/alerting" + 
"github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/testutil" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// testDeps holds shared test dependencies. +type testDeps struct { + client *fake.ClientBuilder + log logr.Logger + cfg *config.Config + reloadService *reload.Service + registry *workload.Registry + collectors *metrics.Collectors + eventRecorder *events.Recorder + webhookClient *webhook.Client + alerter alerting.Alerter +} + +// newTestDeps creates shared test dependencies for reconciler tests. +func newTestDeps(t *testing.T, cfg *config.Config, objects ...runtime.Object) testDeps { + t.Helper() + log := testr.New(t) + collectors := metrics.NewCollectors() + return testDeps{ + client: fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithRuntimeObjects(objects...), + log: log, + cfg: cfg, + reloadService: reload.NewService(cfg, log), + registry: workload.NewRegistry( + workload.RegistryOptions{ + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + RolloutStrategyAnnotation: cfg.Annotations.RolloutStrategy, + }, + ), + collectors: &collectors, + eventRecorder: events.NewRecorder(nil), + webhookClient: webhook.NewClient("", log), + alerter: &alerting.NoOpAlerter{}, + } +} + +// newConfigMapReconciler creates a ConfigMapReconciler for testing. +func newConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.ConfigMapReconciler { + t.Helper() + deps := newTestDeps(t, cfg, objects...) 
+ return controller.NewConfigMapReconciler( + deps.client.Build(), + deps.log, + deps.cfg, + deps.reloadService, + deps.registry, + deps.collectors, + deps.eventRecorder, + deps.webhookClient, + deps.alerter, + nil, + nil, + ) +} + +// newSecretReconciler creates a SecretReconciler for testing. +func newSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.SecretReconciler { + t.Helper() + deps := newTestDeps(t, cfg, objects...) + return controller.NewSecretReconciler( + deps.client.Build(), + deps.log, + deps.cfg, + deps.reloadService, + deps.registry, + deps.collectors, + deps.eventRecorder, + deps.webhookClient, + deps.alerter, + nil, + nil, + ) +} + +// newNamespaceReconciler creates a NamespaceReconciler for testing. +func newNamespaceReconciler(t *testing.T, cfg *config.Config, cache *controller.NamespaceCache, objects ...runtime.Object) *controller.NamespaceReconciler { + t.Helper() + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(objects...). + Build() + + return &controller.NamespaceReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + Cache: cache, + } +} + +// reconcileRequest creates a ctrl.Request for the given name and namespace. +func reconcileRequest(name, namespace string) ctrl.Request { + return ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + Namespace: namespace, + }, + } +} + +// namespaceRequest creates a ctrl.Request for a namespace (no namespace field needed). +func namespaceRequest(name string) ctrl.Request { + return ctrl.Request{ + NamespacedName: types.NamespacedName{Name: name}, + } +} + +// assertReconcileSuccess runs reconcile and asserts no error and no requeue. 
+func assertReconcileSuccess(t *testing.T, reconciler interface { + Reconcile(context.Context, ctrl.Request) (ctrl.Result, error) +}, req ctrl.Request) { + t.Helper() + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.RequeueAfter > 0 { + t.Error("Should not requeue") + } +} diff --git a/internal/pkg/crypto/sha.go b/internal/pkg/crypto/sha.go deleted file mode 100644 index 043fc2273..000000000 --- a/internal/pkg/crypto/sha.go +++ /dev/null @@ -1,20 +0,0 @@ -package crypto - -import ( - "crypto/sha1" - "fmt" - "io" - - "github.com/sirupsen/logrus" -) - -// GenerateSHA generates SHA from string -func GenerateSHA(data string) string { - hasher := sha1.New() - _, err := io.WriteString(hasher, data) - if err != nil { - logrus.Errorf("Unable to write data in hash writer %v", err) - } - sha := hasher.Sum(nil) - return fmt.Sprintf("%x", sha) -} diff --git a/internal/pkg/crypto/sha_test.go b/internal/pkg/crypto/sha_test.go deleted file mode 100644 index 60d5af635..000000000 --- a/internal/pkg/crypto/sha_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package crypto - -import ( - "testing" -) - -// TestGenerateSHA generates the sha from given data and verifies whether it is correct or not -func TestGenerateSHA(t *testing.T) { - data := "www.stakater.com" - sha := "abd4ed82fb04548388a6cf3c339fd9dc84d275df" - result := GenerateSHA(data) - if result != sha { - t.Errorf("Failed to generate SHA") - } -} diff --git a/internal/pkg/events/recorder.go b/internal/pkg/events/recorder.go new file mode 100644 index 000000000..1f3eef58f --- /dev/null +++ b/internal/pkg/events/recorder.go @@ -0,0 +1,60 @@ +package events + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" +) + +const ( + // EventTypeNormal represents a normal event. + EventTypeNormal = corev1.EventTypeNormal + // EventTypeWarning represents a warning event. 
+ EventTypeWarning = corev1.EventTypeWarning + + // ReasonReloaded indicates a workload was successfully reloaded. + ReasonReloaded = "Reloaded" + // ReasonReloadFailed indicates a workload reload failed. + ReasonReloadFailed = "ReloadFailed" +) + +// Recorder wraps the Kubernetes event recorder. +type Recorder struct { + recorder record.EventRecorder +} + +// NewRecorder creates a new event Recorder. +func NewRecorder(recorder record.EventRecorder) *Recorder { + if recorder == nil { + return nil + } + return &Recorder{recorder: recorder} +} + +// ReloadSuccess records a successful reload event. +func (r *Recorder) ReloadSuccess(object runtime.Object, resourceType, resourceName string) { + if r == nil || r.recorder == nil { + return + } + r.recorder.Event( + object, + EventTypeNormal, + ReasonReloaded, + fmt.Sprintf("Reloaded due to %s %s change", resourceType, resourceName), + ) +} + +// ReloadFailed records a failed reload event. +func (r *Recorder) ReloadFailed(object runtime.Object, resourceType, resourceName string, err error) { + if r == nil || r.recorder == nil { + return + } + r.recorder.Event( + object, + EventTypeWarning, + ReasonReloadFailed, + fmt.Sprintf("Failed to reload due to %s %s change: %v", resourceType, resourceName, err), + ) +} diff --git a/internal/pkg/events/recorder_test.go b/internal/pkg/events/recorder_test.go new file mode 100644 index 000000000..475173ee2 --- /dev/null +++ b/internal/pkg/events/recorder_test.go @@ -0,0 +1,169 @@ +package events + +import ( + "errors" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" +) + +func TestNewRecorder_NilInput(t *testing.T) { + r := NewRecorder(nil) + if r != nil { + t.Error("NewRecorder(nil) should return nil") + } +} + +func TestNewRecorder_ValidInput(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + r := NewRecorder(fakeRecorder) + if r == nil { + t.Error("NewRecorder 
with valid recorder should not return nil") + } +} + +func TestReloadSuccess_RecordsEvent(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + r := NewRecorder(fakeRecorder) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + r.ReloadSuccess(pod, "ConfigMap", "my-config") + + select { + case event := <-fakeRecorder.Events: + if event == "" { + t.Error("Expected event to be recorded") + } + // Event format: "Normal Reloaded Reloaded due to ConfigMap my-config change" + expectedContains := []string{"Normal", "Reloaded", "ConfigMap", "my-config"} + for _, expected := range expectedContains { + if !contains(event, expected) { + t.Errorf("Event %q should contain %q", event, expected) + } + } + default: + t.Error("Expected event to be recorded, but none was") + } +} + +func TestReloadFailed_RecordsWarningEvent(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + r := NewRecorder(fakeRecorder) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + testErr := errors.New("update conflict") + r.ReloadFailed(pod, "Secret", "my-secret", testErr) + + select { + case event := <-fakeRecorder.Events: + if event == "" { + t.Error("Expected event to be recorded") + } + // Event format: "Warning ReloadFailed Failed to reload due to Secret my-secret change: update conflict" + expectedContains := []string{"Warning", "ReloadFailed", "Secret", "my-secret", "update conflict"} + for _, expected := range expectedContains { + if !contains(event, expected) { + t.Errorf("Event %q should contain %q", event, expected) + } + } + default: + t.Error("Expected event to be recorded, but none was") + } +} + +func TestNilRecorder_NoPanic(t *testing.T) { + var r *Recorder = nil + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + // These should not panic + r.ReloadSuccess(pod, "ConfigMap", "my-config") + 
r.ReloadFailed(pod, "Secret", "my-secret", errors.New("test error")) +} + +func TestRecorder_NilInternalRecorder(t *testing.T) { + r := &Recorder{recorder: nil} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + r.ReloadSuccess(pod, "ConfigMap", "my-config") + r.ReloadFailed(pod, "Secret", "my-secret", errors.New("test error")) +} + +func TestReloadSuccess_DifferentObjectTypes(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + r := NewRecorder(fakeRecorder) + + tests := []struct { + name string + object runtime.Object + }{ + { + name: "Pod", + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}, + }, + }, + { + name: "ConfigMap", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"}, + }, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + r.ReloadSuccess(tt.object, "ConfigMap", "my-config") + + select { + case event := <-fakeRecorder.Events: + if event == "" { + t.Error("Expected event to be recorded") + } + default: + t.Error("Expected event to be recorded") + } + }, + ) + } +} + +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsSubstring(s, substr)) +} + +func containsSubstring(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/internal/pkg/handler/create.go b/internal/pkg/handler/create.go deleted file mode 100644 index fab737888..000000000 --- a/internal/pkg/handler/create.go +++ /dev/null @@ -1,47 +0,0 @@ -package handler - -import ( - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/common" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" -) - -// ResourceCreatedHandler 
contains new objects -type ResourceCreatedHandler struct { - Resource interface{} - Collectors metrics.Collectors - Recorder record.EventRecorder -} - -// Handle processes the newly created resource -func (r ResourceCreatedHandler) Handle() error { - if r.Resource == nil { - logrus.Errorf("Resource creation handler received nil resource") - } else { - config, _ := r.GetConfig() - // Send webhook - if options.WebhookUrl != "" { - return sendUpgradeWebhook(config, options.WebhookUrl) - } - // process resource based on its type - return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy) - } - return nil -} - -// GetConfig gets configurations containing SHA, annotations, namespace and resource name -func (r ResourceCreatedHandler) GetConfig() (common.Config, string) { - var oldSHAData string - var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := r.Resource.(*v1.Secret); ok { - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) - } else { - logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) - } - return config, oldSHAData -} diff --git a/internal/pkg/handler/delete.go b/internal/pkg/handler/delete.go deleted file mode 100644 index 65c671e89..000000000 --- a/internal/pkg/handler/delete.go +++ /dev/null @@ -1,100 +0,0 @@ -package handler - -import ( - "fmt" - "slices" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/pkg/common" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" -) - -// ResourceDeleteHandler 
contains new objects -type ResourceDeleteHandler struct { - Resource interface{} - Collectors metrics.Collectors - Recorder record.EventRecorder -} - -// Handle processes resources being deleted -func (r ResourceDeleteHandler) Handle() error { - if r.Resource == nil { - logrus.Errorf("Resource delete handler received nil resource") - } else { - config, _ := r.GetConfig() - // Send webhook - if options.WebhookUrl != "" { - return sendUpgradeWebhook(config, options.WebhookUrl) - } - // process resource based on its type - return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeDeleteStrategy) - } - return nil -} - -// GetConfig gets configurations containing SHA, annotations, namespace and resource name -func (r ResourceDeleteHandler) GetConfig() (common.Config, string) { - var oldSHAData string - var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := r.Resource.(*v1.Secret); ok { - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) - } else { - logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) - } - return config, oldSHAData -} - -func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - if options.ReloadStrategy == constants.AnnotationsReloadStrategy { - return removePodAnnotations(upgradeFuncs, item, config, autoReload) - } - - return removeContainerEnvVars(upgradeFuncs, item, config, autoReload) -} - -func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - config.SHAValue = testutil.GetSHAfromEmptyData() - return updatePodAnnotations(upgradeFuncs, item, config, autoReload) -} - -func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload 
bool) InvokeStrategyResult { - envVar := getEnvVarName(config.ResourceName, config.Type) - container := getContainerUsingResource(upgradeFuncs, item, config, autoReload) - - if container == nil { - return InvokeStrategyResult{constants.NoContainerFound, nil} - } - - //remove if env var exists - if len(container.Env) > 0 { - index := slices.IndexFunc(container.Env, func(envVariable v1.EnvVar) bool { - return envVariable.Name == envVar - }) - if index != -1 { - var patch []byte - if upgradeFuncs.SupportsPatch { - containers := upgradeFuncs.ContainersFunc(item) - containerIndex := slices.IndexFunc(containers, func(c v1.Container) bool { - return c.Name == container.Name - }) - patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate, containerIndex, index) - } - - container.Env = append(container.Env[:index], container.Env[index+1:]...) - return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.JSONPatchType, Bytes: patch}} - } - } - - return InvokeStrategyResult{constants.NotUpdated, nil} -} diff --git a/internal/pkg/handler/handler.go b/internal/pkg/handler/handler.go deleted file mode 100644 index 1f5858e58..000000000 --- a/internal/pkg/handler/handler.go +++ /dev/null @@ -1,9 +0,0 @@ -package handler - -import "github.com/stakater/Reloader/pkg/common" - -// ResourceHandler handles the creation and update of resources -type ResourceHandler interface { - Handle() error - GetConfig() (common.Config, string) -} diff --git a/internal/pkg/handler/pause_deployment.go b/internal/pkg/handler/pause_deployment.go deleted file mode 100644 index 28d1b9efd..000000000 --- a/internal/pkg/handler/pause_deployment.go +++ /dev/null @@ -1,242 +0,0 @@ -package handler - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/kube" - app "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - patchtypes 
"k8s.io/apimachinery/pkg/types" -) - -// Keeps track of currently active timers -var activeTimers = make(map[string]*time.Timer) - -// Returns unique key for the activeTimers map -func getTimerKey(namespace, deploymentName string) string { - return fmt.Sprintf("%s/%s", namespace, deploymentName) -} - -// Checks if a deployment is currently paused -func IsPaused(deployment *app.Deployment) bool { - return deployment.Spec.Paused -} - -// Deployment paused by reloader ? -func IsPausedByReloader(deployment *app.Deployment) bool { - if IsPaused(deployment) { - pausedAtAnnotationValue := deployment.Annotations[options.PauseDeploymentTimeAnnotation] - return pausedAtAnnotationValue != "" - } - return false -} - -// Returns the time, the deployment was paused by reloader, nil otherwise -func GetPauseStartTime(deployment *app.Deployment) (*time.Time, error) { - if !IsPausedByReloader(deployment) { - return nil, nil - } - - pausedAtStr := deployment.Annotations[options.PauseDeploymentTimeAnnotation] - parsedTime, err := time.Parse(time.RFC3339, pausedAtStr) - if err != nil { - return nil, err - } - - return &parsedTime, nil -} - -// ParsePauseDuration parses the pause interval value and returns a time.Duration -func ParsePauseDuration(pauseIntervalValue string) (time.Duration, error) { - pauseDuration, err := time.ParseDuration(pauseIntervalValue) - if err != nil { - logrus.Warnf("Failed to parse pause interval value '%s': %v", pauseIntervalValue, err) - return 0, err - } - return pauseDuration, nil -} - -// Pauses a deployment for a specified duration and creates a timer to resume it -// after the specified duration -func PauseDeployment(deployment *app.Deployment, clients kube.Clients, namespace, pauseIntervalValue string) (*app.Deployment, error) { - deploymentName := deployment.Name - pauseDuration, err := ParsePauseDuration(pauseIntervalValue) - - if err != nil { - return nil, err - } - - if !IsPaused(deployment) { - logrus.Infof("Pausing Deployment '%s' in namespace 
'%s' for %s", deploymentName, namespace, pauseDuration) - - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - pausePatch, err := CreatePausePatch() - if err != nil { - logrus.Errorf("Failed to create pause patch for deployment '%s': %v", deploymentName, err) - return deployment, err - } - - err = deploymentFuncs.PatchFunc(clients, namespace, deployment, patchtypes.StrategicMergePatchType, pausePatch) - - if err != nil { - logrus.Errorf("Failed to patch deployment '%s' in namespace '%s': %v", deploymentName, namespace, err) - return deployment, err - } - - updatedDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - - CreateResumeTimer(deployment, clients, namespace, pauseDuration) - return updatedDeployment, err - } - - if !IsPausedByReloader(deployment) { - logrus.Infof("Deployment '%s' in namespace '%s' already paused", deploymentName, namespace) - return deployment, nil - } - - // Deployment has already been paused by reloader, check for timer - logrus.Debugf("Deployment '%s' in namespace '%s' is already paused by reloader", deploymentName, namespace) - - timerKey := getTimerKey(namespace, deploymentName) - _, timerExists := activeTimers[timerKey] - - if !timerExists { - logrus.Warnf("Timer does not exist for already paused deployment '%s' in namespace '%s', creating new one", - deploymentName, namespace) - HandleMissingTimer(deployment, pauseDuration, clients, namespace) - } - return deployment, nil -} - -// Handles the case where missing timers for deployments that have been paused by reloader. -// Could occur after new leader election or reloader restart -func HandleMissingTimer(deployment *app.Deployment, pauseDuration time.Duration, clients kube.Clients, namespace string) { - deploymentName := deployment.Name - pauseStartTime, err := GetPauseStartTime(deployment) - if err != nil { - logrus.Errorf("Error parsing pause start time for deployment '%s' in namespace '%s': %v. 
Resuming deployment immediately", - deploymentName, namespace, err) - ResumeDeployment(deployment, namespace, clients) - return - } - - if pauseStartTime == nil { - return - } - - elapsedPauseTime := time.Since(*pauseStartTime) - remainingPauseTime := pauseDuration - elapsedPauseTime - - if remainingPauseTime <= 0 { - logrus.Infof("Pause period for deployment '%s' in namespace '%s' has expired. Resuming immediately", - deploymentName, namespace) - ResumeDeployment(deployment, namespace, clients) - return - } - - logrus.Infof("Creating missing timer for already paused deployment '%s' in namespace '%s' with remaining time %s", - deploymentName, namespace, remainingPauseTime) - CreateResumeTimer(deployment, clients, namespace, remainingPauseTime) -} - -// CreateResumeTimer creates a timer to resume the deployment after the specified duration -func CreateResumeTimer(deployment *app.Deployment, clients kube.Clients, namespace string, pauseDuration time.Duration) { - deploymentName := deployment.Name - timerKey := getTimerKey(namespace, deployment.Name) - - // Check if there's an existing timer for this deployment - if _, exists := activeTimers[timerKey]; exists { - logrus.Debugf("Timer already exists for deployment '%s' in namespace '%s', Skipping creation", - deploymentName, namespace) - return - } - - // Create and store the new timer - timer := time.AfterFunc(pauseDuration, func() { - ResumeDeployment(deployment, namespace, clients) - }) - - // Add the new timer to the map - activeTimers[timerKey] = timer - - logrus.Debugf("Created pause timer for deployment '%s' in namespace '%s' with duration %s", - deploymentName, namespace, pauseDuration) -} - -// ResumeDeployment resumes a deployment that has been paused by reloader -func ResumeDeployment(deployment *app.Deployment, namespace string, clients kube.Clients) { - deploymentName := deployment.Name - - currentDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), 
deploymentName, metav1.GetOptions{}) - - if err != nil { - logrus.Errorf("Failed to get deployment '%s' in namespace '%s': %v", deploymentName, namespace, err) - return - } - - if !IsPausedByReloader(currentDeployment) { - logrus.Infof("Deployment '%s' in namespace '%s' not paused by Reloader. Skipping resume", deploymentName, namespace) - return - } - - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - resumePatch, err := CreateResumePatch() - if err != nil { - logrus.Errorf("Failed to create resume patch for deployment '%s': %v", deploymentName, err) - return - } - - // Remove the timer - timerKey := getTimerKey(namespace, deploymentName) - if timer, exists := activeTimers[timerKey]; exists { - timer.Stop() - delete(activeTimers, timerKey) - logrus.Debugf("Removed pause timer for deployment '%s' in namespace '%s'", deploymentName, namespace) - } - - err = deploymentFuncs.PatchFunc(clients, namespace, currentDeployment, patchtypes.StrategicMergePatchType, resumePatch) - - if err != nil { - logrus.Errorf("Failed to resume deployment '%s' in namespace '%s': %v", deploymentName, namespace, err) - return - } - - logrus.Infof("Successfully resumed deployment '%s' in namespace '%s'", deploymentName, namespace) -} - -func CreatePausePatch() ([]byte, error) { - patchData := map[string]interface{}{ - "spec": map[string]interface{}{ - "paused": true, - }, - "metadata": map[string]interface{}{ - "annotations": map[string]string{ - options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339), - }, - }, - } - - return json.Marshal(patchData) -} - -func CreateResumePatch() ([]byte, error) { - patchData := map[string]interface{}{ - "spec": map[string]interface{}{ - "paused": false, - }, - "metadata": map[string]interface{}{ - "annotations": map[string]interface{}{ - options.PauseDeploymentTimeAnnotation: nil, - }, - }, - } - - return json.Marshal(patchData) -} diff --git a/internal/pkg/handler/pause_deployment_test.go 
b/internal/pkg/handler/pause_deployment_test.go deleted file mode 100644 index c14cbfcbe..000000000 --- a/internal/pkg/handler/pause_deployment_test.go +++ /dev/null @@ -1,391 +0,0 @@ -package handler - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/kube" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - testclient "k8s.io/client-go/kubernetes/fake" -) - -func TestIsPaused(t *testing.T) { - tests := []struct { - name string - deployment *appsv1.Deployment - paused bool - }{ - { - name: "paused deployment", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - }, - paused: true, - }, - { - name: "unpaused deployment", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - paused: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - result := IsPaused(test.deployment) - assert.Equal(t, test.paused, result) - }) - } -} - -func TestIsPausedByReloader(t *testing.T) { - tests := []struct { - name string - deployment *appsv1.Deployment - pausedByReloader bool - }{ - { - name: "paused by reloader", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339), - }, - }, - }, - pausedByReloader: true, - }, - { - name: "not paused by reloader", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{}, - }, - }, - pausedByReloader: false, - }, - { - name: "not paused", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - 
pausedByReloader: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pausedByReloader := IsPausedByReloader(test.deployment) - assert.Equal(t, test.pausedByReloader, pausedByReloader) - }) - } -} - -func TestGetPauseStartTime(t *testing.T) { - now := time.Now() - nowStr := now.Format(time.RFC3339) - - tests := []struct { - name string - deployment *appsv1.Deployment - pausedByReloader bool - expectedStartTime time.Time - }{ - { - name: "valid pause time", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - options.PauseDeploymentTimeAnnotation: nowStr, - }, - }, - }, - pausedByReloader: true, - expectedStartTime: now, - }, - { - name: "not paused by reloader", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - pausedByReloader: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actualStartTime, err := GetPauseStartTime(test.deployment) - - assert.NoError(t, err) - - if !test.pausedByReloader { - assert.Nil(t, actualStartTime) - } else { - assert.NotNil(t, actualStartTime) - assert.WithinDuration(t, test.expectedStartTime, *actualStartTime, time.Second) - } - }) - } -} - -func TestParsePauseDuration(t *testing.T) { - tests := []struct { - name string - pauseIntervalValue string - expectedDuration time.Duration - invalidDuration bool - }{ - { - name: "valid duration", - pauseIntervalValue: "10s", - expectedDuration: 10 * time.Second, - invalidDuration: false, - }, - { - name: "valid minute duration", - pauseIntervalValue: "2m", - expectedDuration: 2 * time.Minute, - invalidDuration: false, - }, - { - name: "invalid duration", - pauseIntervalValue: "invalid", - expectedDuration: 0, - invalidDuration: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actualDuration, err := 
ParsePauseDuration(test.pauseIntervalValue) - - if test.invalidDuration { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, test.expectedDuration, actualDuration) - } - }) - } -} - -func TestHandleMissingTimerSimple(t *testing.T) { - tests := []struct { - name string - deployment *appsv1.Deployment - shouldBePaused bool // Should be unpaused after HandleMissingTimer ? - }{ - { - name: "deployment paused by reloader, pause period has expired and no timer", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment-1", - Annotations: map[string]string{ - options.PauseDeploymentTimeAnnotation: time.Now().Add(-6 * time.Minute).Format(time.RFC3339), - options.PauseDeploymentAnnotation: "5m", - }, - }, - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - }, - shouldBePaused: false, - }, - { - name: "deployment paused by reloader, pause period expires in the future and no timer", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment-2", - Annotations: map[string]string{ - options.PauseDeploymentTimeAnnotation: time.Now().Add(1 * time.Minute).Format(time.RFC3339), - options.PauseDeploymentAnnotation: "5m", - }, - }, - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - }, - shouldBePaused: true, - }, - } - - for _, test := range tests { - // Clean up any timers at the end of the test - defer func() { - for key, timer := range activeTimers { - timer.Stop() - delete(activeTimers, key) - } - }() - - t.Run(test.name, func(t *testing.T) { - fakeClient := testclient.NewSimpleClientset() - clients := kube.Clients{ - KubernetesClient: fakeClient, - } - - _, err := fakeClient.AppsV1().Deployments("default").Create( - context.TODO(), - test.deployment, - metav1.CreateOptions{}) - assert.NoError(t, err, "Expected no error when creating deployment") - - pauseDuration, _ := ParsePauseDuration(test.deployment.Annotations[options.PauseDeploymentAnnotation]) - 
HandleMissingTimer(test.deployment, pauseDuration, clients, "default") - - updatedDeployment, _ := fakeClient.AppsV1().Deployments("default").Get(context.TODO(), test.deployment.Name, metav1.GetOptions{}) - - assert.Equal(t, test.shouldBePaused, updatedDeployment.Spec.Paused, - "Deployment should have correct paused state after timer expiration") - - if test.shouldBePaused { - pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation] - assert.NotEmpty(t, pausedAtAnnotationValue, - "Pause annotation should be present and contain a value when deployment is paused") - } - }) - } -} - -func TestPauseDeployment(t *testing.T) { - tests := []struct { - name string - deployment *appsv1.Deployment - expectedError bool - expectedPaused bool - expectedAnnotation bool // Should have pause time annotation - pauseInterval string - }{ - { - name: "deployment without pause annotation", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Annotations: map[string]string{}, - }, - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - expectedError: true, - expectedPaused: false, - expectedAnnotation: false, - pauseInterval: "", - }, - { - name: "deployment already paused but not by reloader", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Annotations: map[string]string{ - options.PauseDeploymentAnnotation: "5m", - }, - }, - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - }, - expectedError: false, - expectedPaused: true, - expectedAnnotation: false, - pauseInterval: "5m", - }, - { - name: "deployment unpaused that needs to be paused by reloader", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment-3", - Annotations: map[string]string{ - options.PauseDeploymentAnnotation: "5m", - }, - }, - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - expectedError: false, - expectedPaused: true, - 
expectedAnnotation: true, - pauseInterval: "5m", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - fakeClient := testclient.NewSimpleClientset() - clients := kube.Clients{ - KubernetesClient: fakeClient, - } - - _, err := fakeClient.AppsV1().Deployments("default").Create( - context.TODO(), - test.deployment, - metav1.CreateOptions{}) - assert.NoError(t, err, "Expected no error when creating deployment") - - updatedDeployment, err := PauseDeployment(test.deployment, clients, "default", test.pauseInterval) - if test.expectedError { - assert.Error(t, err, "Expected an error pausing the deployment") - return - } else { - assert.NoError(t, err, "Expected no error pausing the deployment") - } - - assert.Equal(t, test.expectedPaused, updatedDeployment.Spec.Paused, - "Deployment should have correct paused state after pause") - - if test.expectedAnnotation { - pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation] - assert.NotEmpty(t, pausedAtAnnotationValue, - "Pause annotation should be present and contain a value when deployment is paused") - } else { - pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation] - assert.Empty(t, pausedAtAnnotationValue, - "Pause annotation should not be present when deployment has not been paused by reloader") - } - }) - } -} - -// Simple helper function for test cases -func FindDeploymentByName(deployments []runtime.Object, deploymentName string) (*appsv1.Deployment, error) { - for _, deployment := range deployments { - accessor, err := meta.Accessor(deployment) - if err != nil { - return nil, fmt.Errorf("error getting accessor for item: %v", err) - } - if accessor.GetName() == deploymentName { - deploymentObj, ok := deployment.(*appsv1.Deployment) - if !ok { - return nil, fmt.Errorf("failed to cast to Deployment") - } - return deploymentObj, nil - } - } - return nil, fmt.Errorf("deployment '%s' not found", deploymentName) -} 
diff --git a/internal/pkg/handler/update.go b/internal/pkg/handler/update.go deleted file mode 100644 index ae0bb1e21..000000000 --- a/internal/pkg/handler/update.go +++ /dev/null @@ -1,53 +0,0 @@ -package handler - -import ( - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" -) - -// ResourceUpdatedHandler contains updated objects -type ResourceUpdatedHandler struct { - Resource interface{} - OldResource interface{} - Collectors metrics.Collectors - Recorder record.EventRecorder -} - -// Handle processes the updated resource -func (r ResourceUpdatedHandler) Handle() error { - if r.Resource == nil || r.OldResource == nil { - logrus.Errorf("Resource update handler received nil resource") - } else { - config, oldSHAData := r.GetConfig() - if config.SHAValue != oldSHAData { - // Send a webhook if update - if options.WebhookUrl != "" { - return sendUpgradeWebhook(config, options.WebhookUrl) - } - // process resource based on its type - return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy) - } - } - return nil -} - -// GetConfig gets configurations containing SHA, annotations, namespace and resource name -func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) { - var oldSHAData string - var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { - oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap)) - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := r.Resource.(*v1.Secret); ok { - oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data) - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) - } else { - logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) - } - return 
config, oldSHAData -} diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go deleted file mode 100644 index 6f185f1e8..000000000 --- a/internal/pkg/handler/upgrade.go +++ /dev/null @@ -1,619 +0,0 @@ -package handler - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "os" - - "github.com/parnurzeal/gorequest" - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" - alert "github.com/stakater/Reloader/internal/pkg/alerts" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - app "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" -) - -// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment -func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetDeploymentItem, - ItemsFunc: callbacks.GetDeploymentItems, - AnnotationsFunc: callbacks.GetDeploymentAnnotations, - PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations, - ContainersFunc: callbacks.GetDeploymentContainers, - InitContainersFunc: callbacks.GetDeploymentInitContainers, - UpdateFunc: callbacks.UpdateDeployment, - PatchFunc: callbacks.PatchDeployment, - PatchTemplatesFunc: callbacks.GetPatchTemplates, - VolumesFunc: callbacks.GetDeploymentVolumes, - ResourceType: "Deployment", - SupportsPatch: true, - } -} - -// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a 
cronjob -func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetCronJobItem, - ItemsFunc: callbacks.GetCronJobItems, - AnnotationsFunc: callbacks.GetCronJobAnnotations, - PodAnnotationsFunc: callbacks.GetCronJobPodAnnotations, - ContainersFunc: callbacks.GetCronJobContainers, - InitContainersFunc: callbacks.GetCronJobInitContainers, - UpdateFunc: callbacks.CreateJobFromCronjob, - PatchFunc: callbacks.PatchCronJob, - PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} }, - VolumesFunc: callbacks.GetCronJobVolumes, - ResourceType: "CronJob", - SupportsPatch: false, - } -} - -// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a cronjob -func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetJobItem, - ItemsFunc: callbacks.GetJobItems, - AnnotationsFunc: callbacks.GetJobAnnotations, - PodAnnotationsFunc: callbacks.GetJobPodAnnotations, - ContainersFunc: callbacks.GetJobContainers, - InitContainersFunc: callbacks.GetJobInitContainers, - UpdateFunc: callbacks.ReCreateJobFromjob, - PatchFunc: callbacks.PatchJob, - PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} }, - VolumesFunc: callbacks.GetJobVolumes, - ResourceType: "Job", - SupportsPatch: false, - } -} - -// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset -func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetDaemonSetItem, - ItemsFunc: callbacks.GetDaemonSetItems, - AnnotationsFunc: callbacks.GetDaemonSetAnnotations, - PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations, - ContainersFunc: callbacks.GetDaemonSetContainers, - InitContainersFunc: callbacks.GetDaemonSetInitContainers, - UpdateFunc: callbacks.UpdateDaemonSet, - PatchFunc: callbacks.PatchDaemonSet, - 
PatchTemplatesFunc: callbacks.GetPatchTemplates, - VolumesFunc: callbacks.GetDaemonSetVolumes, - ResourceType: "DaemonSet", - SupportsPatch: true, - } -} - -// GetStatefulSetRollingUpgradeFuncs returns all callback funcs for a statefulSet -func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetStatefulSetItem, - ItemsFunc: callbacks.GetStatefulSetItems, - AnnotationsFunc: callbacks.GetStatefulSetAnnotations, - PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations, - ContainersFunc: callbacks.GetStatefulSetContainers, - InitContainersFunc: callbacks.GetStatefulSetInitContainers, - UpdateFunc: callbacks.UpdateStatefulSet, - PatchFunc: callbacks.PatchStatefulSet, - PatchTemplatesFunc: callbacks.GetPatchTemplates, - VolumesFunc: callbacks.GetStatefulSetVolumes, - ResourceType: "StatefulSet", - SupportsPatch: true, - } -} - -// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout -func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetRolloutItem, - ItemsFunc: callbacks.GetRolloutItems, - AnnotationsFunc: callbacks.GetRolloutAnnotations, - PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations, - ContainersFunc: callbacks.GetRolloutContainers, - InitContainersFunc: callbacks.GetRolloutInitContainers, - UpdateFunc: callbacks.UpdateRollout, - PatchFunc: callbacks.PatchRollout, - PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} }, - VolumesFunc: callbacks.GetRolloutVolumes, - ResourceType: "Rollout", - SupportsPatch: false, - } -} - -func sendUpgradeWebhook(config common.Config, webhookUrl string) error { - logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s', Sending webhook to '%s'", - config.ResourceName, config.Type, config.Namespace, webhookUrl) - - body, errs := sendWebhook(webhookUrl) - if errs != nil { - // return the 
first error - return errs[0] - } else { - logrus.Info(body) - } - - return nil -} - -func sendWebhook(url string) (string, []error) { - request := gorequest.New() - resp, _, err := request.Post(url).Send(`{"webhook":"update successful"}`).End() - if err != nil { - // the reloader seems to retry automatically so no retry logic added - return "", err - } - defer func() { - closeErr := resp.Body.Close() - if closeErr != nil { - logrus.Error(closeErr) - } - }() - var buffer bytes.Buffer - _, bufferErr := io.Copy(&buffer, resp.Body) - if bufferErr != nil { - logrus.Error(bufferErr) - } - return buffer.String(), nil -} - -func doRollingUpgrade(config common.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error { - clients := kube.GetClients() - - // Get ignored workload types to avoid listing resources without RBAC permissions - ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList() - if err != nil { - logrus.Errorf("Failed to parse ignored workload types: %v", err) - ignoredWorkloadTypes = util.List{} // Continue with empty list if parsing fails - } - - err = rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - - // Only process CronJobs if they are not ignored - if !ignoredWorkloadTypes.Contains("cronjobs") { - err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - } - - // Only process Jobs if they are not ignored - if !ignoredWorkloadTypes.Contains("jobs") { - err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - } - - err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder, invoke) - if err != nil { - 
return err - } - - if options.IsArgoRollouts == "true" { - err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - } - - return nil -} - -func rollingUpgrade(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error { - err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy) - if err != nil { - logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err) - } - return err -} - -// PerformAction invokes the deployment if there is any change in configmap or secret data -func PerformAction(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - - for _, item := range items { - err := retryOnConflict(retry.DefaultRetry, func(fetchResource bool) error { - return upgradeResource(clients, config, upgradeFuncs, collectors, recorder, strategy, item, fetchResource) - }) - if err != nil { - return err - } - } - - return nil -} - -func retryOnConflict(backoff wait.Backoff, fn func(_ bool) error) error { - var lastError error - fetchResource := false // do not fetch resource on first attempt, already done by ItemsFunc - err := wait.ExponentialBackoff(backoff, func() (bool, error) { - err := fn(fetchResource) - fetchResource = true - switch { - case err == nil: - return true, nil - case apierrors.IsConflict(err): - lastError = err - return false, nil - default: - return false, err - } - }) - if wait.Interrupted(err) { - err = lastError - } - return err -} - -func upgradeResource(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, 
strategy invokeStrategy, resource runtime.Object, fetchResource bool) error { - accessor, err := meta.Accessor(resource) - if err != nil { - return err - } - - resourceName := accessor.GetName() - if fetchResource { - resource, err = upgradeFuncs.ItemFunc(clients, resourceName, config.Namespace) - if err != nil { - return err - } - } - annotations := upgradeFuncs.AnnotationsFunc(resource) - podAnnotations := upgradeFuncs.PodAnnotationsFunc(resource) - result := common.ShouldReload(config, upgradeFuncs.ResourceType, annotations, podAnnotations, common.GetCommandLineOptions()) - - if !result.ShouldReload { - logrus.Debugf("No changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace) - return nil - } - - strategyResult := strategy(upgradeFuncs, resource, config, result.AutoReload) - - if strategyResult.Result != constants.Updated { - return nil - } - - // find correct annotation and update the resource - pauseInterval, foundPauseInterval := annotations[options.PauseDeploymentAnnotation] - - if foundPauseInterval { - deployment, ok := resource.(*app.Deployment) - if !ok { - logrus.Warnf("Annotation '%s' only applicable for deployments", options.PauseDeploymentAnnotation) - } else { - _, err = PauseDeployment(deployment, clients, config.Namespace, pauseInterval) - if err != nil { - logrus.Errorf("Failed to pause deployment '%s' in namespace '%s': %v", resourceName, config.Namespace, err) - return err - } - } - } - - if upgradeFuncs.SupportsPatch && strategyResult.Patch != nil { - err = upgradeFuncs.PatchFunc(clients, config.Namespace, resource, strategyResult.Patch.Type, strategyResult.Patch.Bytes) - } else { - err = upgradeFuncs.UpdateFunc(clients, config.Namespace, resource) - } - - if err != nil { - message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err) - logrus.Errorf("Update for '%s' of type '%s' in namespace 
'%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err) - - collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc() - collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc() - if recorder != nil { - recorder.Event(resource, v1.EventTypeWarning, "ReloadFail", message) - } - return err - } else { - message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace) - message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace) - - logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace) - - collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc() - collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc() - alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD") - if recorder != nil { - recorder.Event(resource, v1.EventTypeNormal, "Reloaded", message) - } - if ok && alert_on_reload == "true" { - msg := fmt.Sprintf( - "Reloader detected changes in *%s* of type *%s* in namespace *%s*. 
Hence reloaded *%s* of type *%s* in namespace *%s*", - config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace) - alert.SendWebhookAlert(msg) - } - } - - return nil -} - -func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string { - for i := range volumes { - switch mountType { - case constants.ConfigmapEnvVarPostfix: - if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName { - return volumes[i].Name - } - - if volumes[i].Projected != nil { - for j := range volumes[i].Projected.Sources { - if volumes[i].Projected.Sources[j].ConfigMap != nil && volumes[i].Projected.Sources[j].ConfigMap.Name == volumeName { - return volumes[i].Name - } - } - } - case constants.SecretEnvVarPostfix: - if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName { - return volumes[i].Name - } - - if volumes[i].Projected != nil { - for j := range volumes[i].Projected.Sources { - if volumes[i].Projected.Sources[j].Secret != nil && volumes[i].Projected.Sources[j].Secret.Name == volumeName { - return volumes[i].Name - } - } - } - } - } - - return "" -} - -func getContainerWithVolumeMount(containers []v1.Container, volumeMountName string) *v1.Container { - for i := range containers { - volumeMounts := containers[i].VolumeMounts - for j := range volumeMounts { - if volumeMounts[j].Name == volumeMountName { - return &containers[i] - } - } - } - - return nil -} - -func getContainerWithEnvReference(containers []v1.Container, resourceName string, resourceType string) *v1.Container { - for i := range containers { - envs := containers[i].Env - for j := range envs { - envVarSource := envs[j].ValueFrom - if envVarSource != nil { - if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.Name == resourceName { - return &containers[i] - } else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != 
nil && envVarSource.ConfigMapKeyRef.Name == resourceName { - return &containers[i] - } - } - } - - envsFrom := containers[i].EnvFrom - for j := range envsFrom { - if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.Name == resourceName { - return &containers[i] - } else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.Name == resourceName { - return &containers[i] - } - } - } - return nil -} - -func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) *v1.Container { - volumes := upgradeFuncs.VolumesFunc(item) - containers := upgradeFuncs.ContainersFunc(item) - initContainers := upgradeFuncs.InitContainersFunc(item) - var container *v1.Container - // Get the volumeMountName to find volumeMount in container - volumeMountName := getVolumeMountName(volumes, config.Type, config.ResourceName) - // Get the container with mounted configmap/secret - if volumeMountName != "" { - container = getContainerWithVolumeMount(containers, volumeMountName) - if container == nil && len(initContainers) > 0 { - container = getContainerWithVolumeMount(initContainers, volumeMountName) - if container != nil { - // if configmap/secret is being used in init container then return the first Pod container to save reloader env - if len(containers) > 0 { - return &containers[0] - } - // No containers available, return nil to avoid crash - return nil - } - } else if container != nil { - return container - } - } - - // Get the container with referenced secret or configmap as env var - container = getContainerWithEnvReference(containers, config.ResourceName, config.Type) - if container == nil && len(initContainers) > 0 { - container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type) - if container != nil { - // if configmap/secret is being used in init container then return 
the first Pod container to save reloader env - if len(containers) > 0 { - return &containers[0] - } - // No containers available, return nil to avoid crash - return nil - } - } - - // Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload - if container == nil && !autoReload { - if len(containers) > 0 { - return &containers[0] - } - // No containers available, return nil to avoid crash - return nil - } - - return container -} - -type Patch struct { - Type patchtypes.PatchType - Bytes []byte -} - -type InvokeStrategyResult struct { - Result constants.Result - Patch *Patch -} - -type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult - -func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - if options.ReloadStrategy == constants.AnnotationsReloadStrategy { - return updatePodAnnotations(upgradeFuncs, item, config, autoReload) - } - return updateContainerEnvVars(upgradeFuncs, item, config, autoReload) -} - -func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - container := getContainerUsingResource(upgradeFuncs, item, config, autoReload) - if container == nil { - return InvokeStrategyResult{constants.NoContainerFound, nil} - } - - // Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout - // Note: the data on this struct is purely informational and is not used for future updates - reloadSource := common.NewReloadSourceFromConfig(config, []string{container.Name}) - annotations, patch, err := createReloadedAnnotations(&reloadSource, upgradeFuncs) - if err != nil { - logrus.Errorf("Failed to create reloaded annotations for %s! 
error = %v", config.ResourceName, err) - return InvokeStrategyResult{constants.NotUpdated, nil} - } - - // Copy the all annotations to the item's annotations - pa := upgradeFuncs.PodAnnotationsFunc(item) - if pa == nil { - return InvokeStrategyResult{constants.NotUpdated, nil} - } - - for k, v := range annotations { - pa[k] = v - } - - return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}} -} - -func getReloaderAnnotationKey() string { - return fmt.Sprintf("%s/%s", - constants.ReloaderAnnotationPrefix, - constants.LastReloadedFromAnnotation, - ) -} - -func createReloadedAnnotations(target *common.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) { - if target == nil { - return nil, nil, errors.New("target is required") - } - - // Create a single "last-invokeReloadStrategy-from" annotation that stores metadata about the - // resource that caused the last invokeReloadStrategy. - // Intentionally only storing the last item in order to keep - // the generated annotations as small as possible. 
- annotations := make(map[string]string) - lastReloadedResourceName := getReloaderAnnotationKey() - - lastReloadedResource, err := json.Marshal(target) - if err != nil { - return nil, nil, err - } - - annotations[lastReloadedResourceName] = string(lastReloadedResource) - - var patch []byte - if upgradeFuncs.SupportsPatch { - escapedValue, err := jsonEscape(annotations[lastReloadedResourceName]) - if err != nil { - return nil, nil, err - } - patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().AnnotationTemplate, lastReloadedResourceName, escapedValue) - } - - return annotations, patch, nil -} - -func getEnvVarName(resourceName string, typeName string) string { - return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName -} - -func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - envVar := getEnvVarName(config.ResourceName, config.Type) - container := getContainerUsingResource(upgradeFuncs, item, config, autoReload) - - if container == nil { - return InvokeStrategyResult{constants.NoContainerFound, nil} - } - - //update if env var exists - updateResult := updateEnvVar(container, envVar, config.SHAValue) - - // if no existing env var exists lets create one - if updateResult == constants.NoEnvVarFound { - e := v1.EnvVar{ - Name: envVar, - Value: config.SHAValue, - } - container.Env = append(container.Env, e) - updateResult = constants.Updated - } - - var patch []byte - if upgradeFuncs.SupportsPatch { - patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().EnvVarTemplate, container.Name, envVar, config.SHAValue) - } - - return InvokeStrategyResult{updateResult, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}} -} - -func updateEnvVar(container *v1.Container, envVar string, shaData string) constants.Result { - envs := container.Env - for j := range envs { - if envs[j].Name == envVar { - if envs[j].Value != shaData { 
- envs[j].Value = shaData - return constants.Updated - } - return constants.NotUpdated - } - } - - return constants.NoEnvVarFound -} - -func jsonEscape(toEscape string) (string, error) { - bytes, err := json.Marshal(toEscape) - if err != nil { - return "", err - } - escaped := string(bytes) - return escaped[1 : len(escaped)-1], nil -} diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go deleted file mode 100644 index 9a0e94587..000000000 --- a/internal/pkg/handler/upgrade_test.go +++ /dev/null @@ -1,4288 +0,0 @@ -package handler - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/prometheus/client_golang/prometheus" - promtestutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - testclient "k8s.io/client-go/kubernetes/fake" -) - -var ( - clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()} - - arsNamespace = "test-handler-" + testutil.RandSeq(5) - arsConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - arsSecretName = "testsecret-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - arsProjectedSecretName = 
"testprojectedsecret-handler-" + testutil.RandSeq(5) - arsConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - arsSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - arsProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - arsConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - arsSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - arsSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - arsSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - arsConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - arsConfigMapWithNonAnnotatedDeployment = "testconfigmapNonAnnotatedDeployment-handler-" + testutil.RandSeq(5) - arsSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - arsSecretWithIgnoreAnnotation = 
"testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - arsConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5) - - ersNamespace = "test-handler-" + testutil.RandSeq(5) - ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - ersSecretName = "testsecret-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - ersProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) - ersConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - ersSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - ersProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - ersConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - ersSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - ersSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - ersSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - ersConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - ersSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersSecretWithSecretExcludeAnnotation = 
"testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - ersSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - ersConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5) -) - -func TestMain(m *testing.M) { - - // Creating namespaces - testutil.CreateNamespace(arsNamespace, clients.KubernetesClient) - testutil.CreateNamespace(ersNamespace, clients.KubernetesClient) - - logrus.Infof("Setting up the annotation reload strategy test resources") - setupArs() - logrus.Infof("Setting up the env-var reload strategy test resources") - setupErs() - - logrus.Infof("Running Testcases") - retCode := m.Run() - - logrus.Infof("tearing down the annotation reload strategy test resources") - teardownArs() - logrus.Infof("tearing down the env-var reload strategy test resources") - teardownErs() - - os.Exit(retCode) -} - -func setupArs() { - // Creating configmap - _, err := testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume - _, err = 
testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume in init containers - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume in init containers - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, 
arsNamespace, arsConfigmapWithInitEnv, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap for testing pausing deployments - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.google.com") - if 
err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap with ignore annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - // Patch with ignore annotation - cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(arsNamespace) - patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`) - _, _ = cmClient.Patch(context.TODO(), arsConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating secret with ignore annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithIgnoreAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - secretClient := clients.KubernetesClient.CoreV1().Secrets(arsNamespace) - _, _ = secretClient.Patch(context.TODO(), arsSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating Deployment referencing configmap with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithIgnoreAnnotation, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err) - } - // Creating Deployment referencing secret with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithIgnoreAnnotation, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err) - } - - // Creating Deployment with configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap mounted in init container - _, err = 
testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsProjectedConfigMapWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsProjectedSecretWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with configmap mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitEnv, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret mounted 
as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretWithInitEnv, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with env var source as configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err) - } - - // Creating Deployment with env var source as secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, arsConfigmapWithEnvFromName, arsNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, arsSecretWithEnvFromName, arsNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - arsConfigmapAnnotated, - arsNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: 
%v", err) - } - - // Creating Deployment with configmap and without annotations - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(clients.KubernetesClient, arsConfigMapWithNonAnnotatedDeployment, arsNamespace, map[string]string{}) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and without annotation creation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretWithSecretAutoAnnotation, arsNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsConfigmapWithConfigMapAutoAnnotation, arsNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap auto annotation: %v", err) - } - - // Creating Deployment with secret and exclude secret annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretWithExcludeSecretAnnotation, arsNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) - } - - // Creating Deployment with secret and exclude configmap annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsConfigmapWithExcludeConfigMapAnnotation, arsNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err) - } - - // Creating DaemonSet with configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with 
configmap creation: %v", err) - } - - // Creating DaemonSet with secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with configmap in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with env var source as configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err) - } - - // Creating DaemonSet with env var source as secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret creation: %v", err) - } - - // Creating StatefulSet with configmap in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - 
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with env var source as configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with env var source as secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with pod annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, arsConfigmapWithPodAnnotations, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with pod annotations: %v", err) - } - - // Creating Deployment with both annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, arsConfigmapWithBothAnnotations, arsNamespace, true) - - if err != nil { - logrus.Errorf("Error in Deployment with both annotations: %v", err) - } - - // Creating Deployment with pause annotation - _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, arsConfigmapWithPausedDeployment, arsNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } -} - -func teardownArs() { - // Deleting Deployment with configmap - deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if deploymentError != 
nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap as env var source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = 
testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError) - } - - // Deleting Deployment with configmap mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) - } - - // Deleting Deployment with configmap mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with configmap as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError) - } - - // Deleting Deployment with secret as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, 
arsNamespace, arsSecretWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError) - } - - // Deleting Deployment with pod annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError) - } - - // Deleting Deployment with both annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithBothAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError) - } - - // Deleting Deployment with search annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapAnnotated) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting Deployment with secret and exclude secret annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto 
annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and exclude configmap annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting DaemonSet with configmap - daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting DaemonSet with configmap in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting Deployment with configmap as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError) - } - - // Deleting Deployment with secret as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting 
daemonSet with secret as env var source %v", daemonSetError) - } - - // Deleting StatefulSet with configmap - statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError) - } - - // Deleting Deployment with secret as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError) - } - - // Deleting Deployment with pause annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment) - if 
deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Configmap - err := testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret used in projected volume - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume in init containers - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting secret used in projected volume in init containers - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting 
Configmap used in init container - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap used in init container %v", err) - } - - // Deleting Secret used in init container - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret used in init container %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err) - } - - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err) - } - - // Deleting Secret used with secret auto annotation - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto 
annotations: %v", err) - } - - // Deleting ConfigMap used with configmap auto annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err) - } - - // Deleting Secret used with exclude secret annotation - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotations: %v", err) - } - - // Deleting ConfigMap used with exclude configmap annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err) - } - - // Deleting configmap for testing pausing deployments - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment) - if err != nil { - logrus.Errorf("Error while deleting the configmap: %v", err) - } - - // Deleting namespace - testutil.DeleteNamespace(arsNamespace, clients.KubernetesClient) - -} - -func setupErs() { - // Creating configmap - _, err := testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } 
- - // Creating secret will be used in projected volume - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume in init containers - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume in init containers - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap for testing pausing deployments - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer, "www.google.com") - if err != nil { 
- logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret exclude annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap exclude annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap with ignore annotation - _, err = 
testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(ersNamespace) - patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`) - _, _ = cmClient.Patch(context.TODO(), ersConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating secret with ignore annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithIgnoreAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - secretClient := clients.KubernetesClient.CoreV1().Secrets(ersNamespace) - _, _ = secretClient.Patch(context.TODO(), ersSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating Deployment referencing configmap with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithIgnoreAnnotation, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err) - } - // Creating Deployment referencing secret with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithIgnoreAnnotation, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err) - } - - // Creating Deployment with configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in 
Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersProjectedConfigMapWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersProjectedSecretWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with configmap mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitEnv, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretWithInitEnv, ersNamespace, false) - if err != nil { 
- logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with env var source as configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err) - } - - // Creating Deployment with env var source as secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, ersConfigmapWithEnvFromName, ersNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, ersSecretWithEnvFromName, ersNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - ersConfigmapAnnotated, - ersNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = 
testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretWithSecretAutoAnnotation, ersNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersConfigmapWithConfigMapAutoAnnotation, ersNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersSecretWithSecretExcludeAnnotation, ersNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) - } - - // Creating Deployment with secret and with secret exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersConfigmapWithConfigMapExcludeAnnotation, ersNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err) - } - - // Creating Deployment with pause annotation - _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, ersConfigmapWithPausedDeployment, ersNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating DaemonSet with configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret - _, err = 
testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with configmap in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with env var source as configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err) - } - - // Creating DaemonSet with env var source as secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret creation: %v", err) - } - - // Creating StatefulSet with configmap in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // 
Creating StatefulSet with secret in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with env var source as configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with env var source as secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with pod annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, ersConfigmapWithPodAnnotations, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with pod annotations: %v", err) - } - - // Creating Deployment with both annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, ersConfigmapWithBothAnnotations, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with both annotations: %v", err) - } -} - -func teardownErs() { - // Deleting Deployment with configmap - deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume - 
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
	}

	// Deleting Deployment with configmap in projected volume mounted in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
	}

	// Deleting Deployment with secret in projected volume
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedSecretName)
	if deploymentError != nil {
		// Fixed copy-paste log message: this deployment is secret-backed, not configmap-backed.
		logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
	}

	// Deleting Deployment with secret in projected volume mounted in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer)
	if deploymentError != nil {
		// Fixed copy-paste log message: this deployment is secret-backed, not configmap-backed.
		logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
	}

	// Deleting Deployment with configmap as env var source
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError)
	}

	// Deleting Deployment with secret as env var source
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError)
	}

	// Deleting Deployment with configmap mounted in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError)
	}

	// Deleting Deployment with secret mounted in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError)
	}

	// Deleting Deployment with configmap mounted as env in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError)
	}

	// Deleting Deployment with secret mounted as env in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError)
	}

	// Deleting Deployment with configmap as envFrom source
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError)
	}

	// Deleting Deployment with secret as envFrom source
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError)
	}

	// Deleting Deployment with pod annotations
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError)
	}

	// Deleting Deployment with both annotations
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithBothAnnotations)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError)
	}

	// Deleting Deployment with search annotation
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapAnnotated)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError)
	}

	// Deleting Deployment with secret and secret auto annotation
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError)
	}

	// Deleting Deployment with configmap and configmap auto annotation
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError)
	}

	// Deleting Deployment with secret and secret exclude annotation
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret exclude annotation %v", deploymentError)
	}

	// Deleting Deployment with configmap and configmap exclude annotation
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap exclude annotation %v", deploymentError)
	}

	// Deleting DaemonSet with configmap
	daemonSetError :=
testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
	}

	// Deleting DaemonSet with secret
	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
	}

	// Deleting DaemonSet with configmap in projected volume
	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
	}

	// Deleting DaemonSet with secret in projected volume
	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedSecretName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
	}

	// Deleting DaemonSet with configmap as env var source
	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError)
	}

	// Deleting DaemonSet with secret as env var source
	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with secret as env var source %v", daemonSetError)
	}

	// Deleting StatefulSet with configmap
	statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersConfigmapName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
	}

	// Deleting StatefulSet with secret
	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
	}

	// Deleting StatefulSet with configmap in projected volume
	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
	}

	// Deleting StatefulSet with secret in projected volume
	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedSecretName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
	}

	// Deleting StatefulSet with configmap as env var source
	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError)
	}

	// Deleting StatefulSet with secret as env var source
	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
	}

	// Deleting Deployment for testing pausing deployments
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
	}

	// Deleting Configmap
	err := testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}

	// Deleting Secret
	err = testutil.DeleteSecret(clients.KubernetesClient,
ersNamespace, ersSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret used in projected volume - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume in init containers - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting secret used in projected volume in init containers - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used in init container - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap used in init container %v", err) - } - - // Deleting Secret used in init container - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer) - if err 
!= nil { - logrus.Errorf("Error while deleting the secret used in init container %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err) - } - - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err) - } - - // Deleting Secret used with secret auto annotation - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotation: %v", err) - } - - // Deleting ConfigMap used with configmap auto annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotation: %v", err) - } - - // Deleting Secret used with secret exclude annotation - err = 
testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret exclude annotation: %v", err) - } - - // Deleting ConfigMap used with configmap exclude annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", err) - } - - // Deleting ConfigMap for testing pausing deployments - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment) - if err != nil { - logrus.Errorf("Error while deleting the configmap: %v", err) - } - - // Deleting namespace - testutil.DeleteNamespace(ersNamespace, clients.KubernetesClient) - -} - -func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) common.Config { - ns := ersNamespace - if options.ReloadStrategy == constants.AnnotationsReloadStrategy { - ns = arsNamespace - } - - return common.Config{ - Namespace: ns, - ResourceName: name, - SHAValue: shaData, - Annotation: annotation, - TypedAutoAnnotation: typedAutoAnnotation, - Type: resourceType, - } -} - -func getCollectors() metrics.Collectors { - return metrics.NewCollectors() -} - -var labelSucceeded = prometheus.Labels{"success": "true"} -var labelFailed = prometheus.Labels{"success": "false"} - -func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } - - config.SHAValue = 
testutil.GetSHAfromEmptyData()
	removed := testutil.VerifyResourceAnnotationUpdate(clients, config, upgradeFuncs)
	if !removed {
		t.Errorf("%s was not updated", upgradeFuncs.ResourceType)
	}

	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 {
		t.Errorf("Counter was not increased")
	}
}

// testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs runs the delete strategy
// with mocked Patch/Update funcs and verifies the patch path is taken.
//
// FIX: the mock PatchFunc/UpdateFunc were previously assigned AFTER
// PerformAction had already run, so their assertions (strategic-merge patch
// used, Update never called) could never execute. The mocks must be installed
// before the action is performed.
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
	upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		return nil
	}
	upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}
	err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix)
	}
}

func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	// Count how often the list/get callbacks are exercised by the reload path.
	itemCalled := 0
	itemsCalled := 0

	deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
		itemCalled++
		return callbacks.GetDeploymentItem(client, namespace, name)
	}
	deploymentFuncs.ItemsFunc = func(client
kube.Clients, namespace string) []runtime.Object {
		itemsCalled++
		return callbacks.GetDeploymentItems(client, namespace)
	}

	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}

	if got := promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)); got != 1 {
		t.Errorf("Counter was not increased")
	}

	nsLabels := prometheus.Labels{"success": "true", "namespace": arsNamespace}
	if got := promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(nsLabels)); got != 1 {
		t.Errorf("Counter by namespace was not increased")
	}

	assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
	assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")

	testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}

// Verifies that a conflicting patch is retried and that the retry succeeds
// without ever falling back to a full update.
func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	assert.True(t, deploymentFuncs.SupportsPatch)
	assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().AnnotationTemplate)

	itemCalled := 0
	itemsCalled := 0

	deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
		itemCalled++
		return callbacks.GetDeploymentItem(client,
namespace, name)
	}
	deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
		itemsCalled++
		return callbacks.GetDeploymentItems(client, namespace)
	}

	patchCalled := 0
	deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		patchCalled++
		if patchCalled < 2 {
			return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
		}
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
		assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`)
		return nil
	}

	deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}

	collectors := getCollectors()
	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
	assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
	assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")

	deploymentFuncs = GetDeploymentRollingUpgradeFuncs()
	testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}

// Without a reload annotation and with auto-reload-all disabled, nothing may
// be reloaded.
func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationAndWithoutAutoReloadAllNoTriggersUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigMapWithNonAnnotatedDeployment,
"www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigMapWithNonAnnotatedDeployment, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationButWithAutoReloadAllUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - options.AutoReloadAll = true - defer func() { options.AutoReloadAll = false }() - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigMapWithNonAnnotatedDeployment, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment 
update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, 
deploymentFuncs, collectors, envVarPostfix)
}

// A deployment carrying the search annotation must reload when a configmap
// annotated with "reloader.stakater.com/match: true" changes.
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation)
	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"}
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}

	if got := promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)); got != 1 {
		t.Errorf("Counter was not increased")
	}

	nsLabels := prometheus.Labels{"success": "true", "namespace": arsNamespace}
	if got := promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(nsLabels)); got != 1 {
		t.Errorf("Counter by namespace was not increased")
	}

	testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}

// With "reloader.stakater.com/match: false" on the configmap, the search
// annotation must NOT trigger a reload.
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation)
	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if updated {
		t.Errorf("Deployment was updated unexpectedly")
	}

	if got := promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)); got > 0 {
		t.Errorf("Counter was increased unexpectedly")
	}

	nsLabels := prometheus.Labels{"success": "true", "namespace": arsNamespace}
	if got := promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(nsLabels)); got > 0 {
		t.Errorf("Counter by namespace was increased unexpectedly")
	}
}

// A deployment referencing a DIFFERENT configmap must not reload even though it
// carries the search annotation.
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
		clients.KubernetesClient,
		arsConfigmapAnnotated+"-different",
		arsNamespace,
		map[string]string{"reloader.stakater.com/search": "true"},
	)
	if err != nil {
		t.Errorf("Failed to create deployment with search annotation.")
	}
	defer func() {
		_ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
	}()

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation)
	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if updated {
		t.Errorf("Deployment was updated unexpectedly")
	}

	if got := promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)); got > 0 {
		t.Errorf("Counter was increased unexpectedly")
	}

	nsLabels := prometheus.Labels{"success": "true", "namespace": arsNamespace}
	if got := promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(nsLabels)); got > 0 {
		t.Errorf("Counter by namespace was increased unexpectedly")
	}
}

func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithInitContainer, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}

	if
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithInitEnv, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := 
GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvFromName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := 
PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { 
- t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithEnvName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - 
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != 
nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithExcludeSecretAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithExcludeSecretAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, 
arsNamespace, arsSecretWithSecretAutoAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithSecretAutoAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithExcludeConfigMapAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithExcludeConfigMapAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude ConfigMap") - } - - logrus.Infof("Verifying 
deployment did update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithConfigMapAutoAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with ConfigMap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com") - config := 
getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDaemonSetItem(client, namespace, name) - } - daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDaemonSetItems(client, namespace) - } - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, 
arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - - itemCalled := 0 - itemsCalled := 0 - - daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDaemonSetItem(client, namespace, name) - } - daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDaemonSetItems(client, namespace) - } - - assert.True(t, daemonSetFuncs.SupportsPatch) - assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().AnnotationTemplate) - - patchCalled := 0 - daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`) - return nil - } - - daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - daemonSetFuncs = GetDeploymentRollingUpgradeFuncs() - 
testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvName, 
shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetStatefulSetItem(client, namespace, name) - } - statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetStatefulSetItems(client, namespace) - } - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - - itemCalled := 0 - itemsCalled := 0 - - statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetStatefulSetItem(client, namespace, name) - } - statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetStatefulSetItems(client, namespace) - } - - assert.True(t, statefulSetFuncs.SupportsPatch) - assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().AnnotationTemplate) - - patchCalled := 0 - statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`) - return nil - } - - statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with 
configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - statefulSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - 
time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithPodAnnotations, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with pod annotations") - } - - logrus.Infof("Verifying deployment update") - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - var foundPod, foundBoth bool - for _, i := range items { - accessor, err := meta.Accessor(i) - if err != nil { - t.Errorf("Error getting accessor for item: %v", err) - } - name := accessor.GetName() - if name == arsConfigmapWithPodAnnotations { - annotations := 
deploymentFuncs.PodAnnotationsFunc(i) - updated := testutil.GetResourceSHAFromAnnotation(annotations) - if updated != config.SHAValue { - t.Errorf("Deployment was not updated") - } - foundPod = true - } - if name == arsConfigmapWithBothAnnotations { - annotations := deploymentFuncs.PodAnnotationsFunc(i) - updated := testutil.GetResourceSHAFromAnnotation(annotations) - if updated == config.SHAValue { - t.Errorf("Deployment was updated") - } - foundBoth = true - } - } - if !foundPod { - t.Errorf("Deployment with pod annotations was not found") - } - if !foundBoth { - t.Errorf("Deployment with both annotations was not found") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestFailedRollingUpgradeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "fail.stakater.com") - config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error { - return fmt.Errorf("error") - } - deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error { - return fmt.Errorf("error") - } - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 { - t.Errorf("Counter was not increased") - } - - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestIgnoreAnnotationNoReloadUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ARS") - } - - // Ensure deployment is NOT updated - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated but should not have been") - } - - // Ensure counters remain zero - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 { - t.Errorf("Reload counter should not have increased") - } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 0 { - t.Errorf("Reload counter by namespace should not have increased") - } -} -func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.stakater.com") - 
config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ERS") - } - - // Ensure deployment is NOT updated - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated but should not have been (ERS)") - } - - // Ensure counters remain zero - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 { - t.Errorf("Reload counter should not have increased (ERS)") - } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 0 { - t.Errorf("Reload counter by namespace should not have increased (ERS)") - } -} - -func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } - - removed := testutil.VerifyResourceEnvVarRemoved(clients, config, envVarPostfix, upgradeFuncs) - if !removed { - t.Errorf("%s was not updated", upgradeFuncs.ResourceType) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } -} - -func 
testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate) - - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - assert.Equal(t, patchtypes.JSONPatchType, patchType) - assert.NotEmpty(t, bytes) - return nil - } - upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - 
t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - assert.True(t, deploymentFuncs.SupportsPatch) - assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`) - return nil - } - - deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", 
deploymentFuncs.ResourceType, envVarPostfix) - } - - assert.Equal(t, 2, patchCalled) - - deploymentFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, 
ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - 
t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - time.Sleep(5 * time.Second) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - ersConfigmapAnnotated+"-different", - ersNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - t.Errorf("Failed to create deployment with search annotation.") - } - defer func() { - _ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) - }() - // defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{}) - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, 
invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": 
ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, 
ersConfigmapWithEnvName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithInitEnv, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - 
logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvFromName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - 
testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, 
options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, 
envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, 
config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithEnvName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithEnvFromName, shaData, options.ReloaderAutoAnnotation, 
options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not 
updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithSecretExcludeAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithSecretExcludeAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude Secret") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment that had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithSecretAutoAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithSecretAutoAnnotation, shaData, "", 
options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigMapExcludeAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithConfigMapExcludeAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude ConfigMap") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment which 
had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithConfigMapAutoAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with ConfigMap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := 
GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - - assert.True(t, daemonSetFuncs.SupportsPatch) - assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - 
assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`) - return nil - } - - daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - assert.Equal(t, 2, patchCalled) - - daemonSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, 
ersNamespace, ersSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume") - } - - logrus.Infof("Verifying 
daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, 
statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - - assert.True(t, statefulSetFuncs.SupportsPatch) - assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`) - return nil - } - - statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - assert.Equal(t, 2, patchCalled) - - statefulSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := 
GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithPodAnnotations, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with pod annotations") - } - - logrus.Infof("Verifying deployment update") - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - var foundPod, foundBoth bool - for _, i := range items { - accessor, err := meta.Accessor(i) - if err != nil { - t.Errorf("Error getting accessor for item: %v", err) - } - name := accessor.GetName() - if name == ersConfigmapWithPodAnnotations { - containers := deploymentFuncs.ContainersFunc(i) - updated := testutil.GetResourceSHAFromEnvVar(containers, envName) - if updated != config.SHAValue { - t.Errorf("Deployment was not updated") - } - foundPod = true - } - if name == ersConfigmapWithBothAnnotations { - containers := 
deploymentFuncs.ContainersFunc(i) - updated := testutil.GetResourceSHAFromEnvVar(containers, envName) - if updated == config.SHAValue { - t.Errorf("Deployment was updated") - } - foundBoth = true - } - } - if !foundPod { - t.Errorf("Deployment with pod annotations was not found") - } - if !foundBoth { - t.Errorf("Deployment with both annotations was not found") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestFailedRollingUpgradeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "fail.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error { - return fmt.Errorf("error") - } - deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error { - return fmt.Errorf("error") - } - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestPausingDeploymentUsingErs(t *testing.T) { - options.ReloadStrategy = 
constants.EnvVarsReloadStrategy - testPausingDeployment(t, options.ReloadStrategy, ersConfigmapWithPausedDeployment, ersNamespace) -} - -func TestPausingDeploymentUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - testPausingDeployment(t, options.ReloadStrategy, arsConfigmapWithPausedDeployment, arsNamespace) -} - -func testPausingDeployment(t *testing.T, reloadStrategy string, testName string, namespace string) { - options.ReloadStrategy = reloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - // Wait for deployment to have paused-at annotation - logrus.Infof("Waiting for deployment %s to have paused-at annotation", testName) - err := waitForDeploymentPausedAtAnnotation(clients, deploymentFuncs, config.Namespace, testName, 30*time.Second) - if err != nil { - t.Errorf("Failed to wait for deployment paused-at annotation: %v", err) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - logrus.Infof("Verifying deployment has been paused") - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err := isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if !deploymentPaused { - t.Errorf("Deployment has not been paused") - } - - shaData = 
testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause-changed.stakater.com") - config = getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 2 { - t.Errorf("Counter by namespace was not increased") - } - - logrus.Infof("Verifying deployment is still paused") - items = deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err = isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if !deploymentPaused { - t.Errorf("Deployment should still be paused") - } - - logrus.Infof("Verifying deployment has been resumed after pause interval") - time.Sleep(11 * time.Second) - items = deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err = isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if deploymentPaused { - t.Errorf("Deployment should have been resumed after pause interval") - } -} - -func isDeploymentPaused(deployments []runtime.Object, deploymentName string) (bool, error) { - deployment, err := FindDeploymentByName(deployments, deploymentName) - if err != nil { - return false, err - } - return IsPaused(deployment), nil -} - -// waitForDeploymentPausedAtAnnotation waits for a deployment to have the pause-period annotation -func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs callbacks.RollingUpgradeFuncs, namespace, deploymentName string, timeout time.Duration) error { - start := time.Now() - - for time.Since(start) < timeout { - items := 
deploymentFuncs.ItemsFunc(clients, namespace) - deployment, err := FindDeploymentByName(items, deploymentName) - if err == nil { - annotations := deployment.GetAnnotations() - if annotations != nil { - if _, exists := annotations[options.PauseDeploymentTimeAnnotation]; exists { - return nil - } - } - } - - time.Sleep(100 * time.Millisecond) - } - - return fmt.Errorf("timeout waiting for deployment %s to have pause-period annotation", deploymentName) -} - -// MockArgoRolloutWithEmptyContainers creates a mock Argo Rollout with no containers -// This simulates the scenario where Argo Rollouts with workloadRef return empty containers -func MockArgoRolloutWithEmptyContainers(namespace, name string) *runtime.Object { - rollout := &argorolloutv1alpha1.Rollout{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: argorolloutv1alpha1.RolloutSpec{ - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{}, // Empty containers slice - InitContainers: []v1.Container{}, // Empty init containers slice - Volumes: []v1.Volume{}, // Empty volumes slice - }, - }, - }, - } - var obj runtime.Object = rollout - return &obj -} - -// TestGetContainerUsingResourceWithArgoRolloutEmptyContainers tests with real Argo Rollout functions -func TestGetContainerUsingResourceWithArgoRolloutEmptyContainers(t *testing.T) { - namespace := "test-namespace" - resourceName := "test-configmap" - - // Use real Argo Rollout functions but mock the containers function - rolloutFuncs := GetArgoRolloutRollingUpgradeFuncs() - originalContainersFunc := rolloutFuncs.ContainersFunc - originalInitContainersFunc := rolloutFuncs.InitContainersFunc - - // Override to return empty containers (simulating workloadRef scenario) - rolloutFuncs.ContainersFunc = func(item runtime.Object) []v1.Container { - return []v1.Container{} // Empty like workloadRef rollouts - } - rolloutFuncs.InitContainersFunc = func(item runtime.Object) []v1.Container { - return []v1.Container{} 
// Empty like workloadRef rollouts - } - - // Restore original functions after test - defer func() { - rolloutFuncs.ContainersFunc = originalContainersFunc - rolloutFuncs.InitContainersFunc = originalInitContainersFunc - }() - - // Use proper Argo Rollout object instead of Pod - mockRollout := MockArgoRolloutWithEmptyContainers(namespace, "test-rollout") - - config := common.Config{ - Namespace: namespace, - ResourceName: resourceName, - Type: constants.ConfigmapEnvVarPostfix, - SHAValue: "test-sha", - } - - // Test both autoReload scenarios using subtests as suggested by Felix - for _, autoReload := range []bool{true, false} { - t.Run(fmt.Sprintf("autoReload_%t", autoReload), func(t *testing.T) { - // This tests the actual fix in the context of Argo Rollouts - result := getContainerUsingResource(rolloutFuncs, *mockRollout, config, autoReload) - - if result != nil { - t.Errorf("Expected nil when using real Argo Rollout functions with empty containers (workloadRef scenario), got %v", result) - } - }) - } -} diff --git a/internal/pkg/http/client.go b/internal/pkg/http/client.go new file mode 100644 index 000000000..c1ca613df --- /dev/null +++ b/internal/pkg/http/client.go @@ -0,0 +1,69 @@ +// Package http provides shared HTTP client functionality. +package http + +import ( + "net/http" + "net/url" + "time" +) + +const ( + // DefaultTimeout is the default HTTP client timeout. + DefaultTimeout = 30 * time.Second + + // AlertingTimeout is the shorter timeout used for alerting. + AlertingTimeout = 10 * time.Second +) + +// ClientConfig configures an HTTP client. +type ClientConfig struct { + // Timeout for HTTP requests. + Timeout time.Duration + + // ProxyURL is an optional proxy URL. + ProxyURL string + + // MaxIdleConns controls the maximum number of idle connections. + MaxIdleConns int + + // MaxIdleConnsPerHost controls the maximum idle connections per host. + MaxIdleConnsPerHost int + + // IdleConnTimeout is the maximum time an idle connection remains open. 
+ IdleConnTimeout time.Duration +} + +// DefaultConfig returns the default HTTP client configuration. +func DefaultConfig() ClientConfig { + return ClientConfig{ + Timeout: DefaultTimeout, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + } +} + +// NewClient creates a new HTTP client with the given configuration. +func NewClient(cfg ClientConfig) *http.Client { + transport := &http.Transport{ + MaxIdleConns: cfg.MaxIdleConns, + MaxIdleConnsPerHost: cfg.MaxIdleConnsPerHost, + IdleConnTimeout: cfg.IdleConnTimeout, + } + + if cfg.ProxyURL != "" { + if proxy, err := url.Parse(cfg.ProxyURL); err == nil { + transport.Proxy = http.ProxyURL(proxy) + } + } + + return &http.Client{ + Transport: transport, + Timeout: cfg.Timeout, + } +} + +// NewDefaultClient creates an HTTP client with default configuration. +func NewDefaultClient() *http.Client { + return NewClient(DefaultConfig()) +} diff --git a/internal/pkg/http/client_test.go b/internal/pkg/http/client_test.go new file mode 100644 index 000000000..2b937b192 --- /dev/null +++ b/internal/pkg/http/client_test.go @@ -0,0 +1,142 @@ +package http + +import ( + "net/http" + "testing" + "time" +) + +func TestDefaultConfig(t *testing.T) { + cfg := DefaultConfig() + + if cfg.Timeout != DefaultTimeout { + t.Errorf("expected timeout %v, got %v", DefaultTimeout, cfg.Timeout) + } + if cfg.MaxIdleConns != 100 { + t.Errorf("expected MaxIdleConns 100, got %d", cfg.MaxIdleConns) + } + if cfg.MaxIdleConnsPerHost != 10 { + t.Errorf("expected MaxIdleConnsPerHost 10, got %d", cfg.MaxIdleConnsPerHost) + } + if cfg.IdleConnTimeout != 90*time.Second { + t.Errorf("expected IdleConnTimeout 90s, got %v", cfg.IdleConnTimeout) + } +} + +func TestNewClient(t *testing.T) { + tests := []struct { + name string + cfg ClientConfig + wantNil bool + }{ + { + name: "default config", + cfg: DefaultConfig(), + wantNil: false, + }, + { + name: "custom timeout", + cfg: ClientConfig{ + Timeout: 5 * time.Second, + 
MaxIdleConns: 50, + MaxIdleConnsPerHost: 5, + IdleConnTimeout: 30 * time.Second, + }, + wantNil: false, + }, + { + name: "with proxy", + cfg: ClientConfig{ + Timeout: DefaultTimeout, + ProxyURL: "http://proxy.example.com:8080", + MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + }, + wantNil: false, + }, + { + name: "with invalid proxy URL", + cfg: ClientConfig{ + Timeout: DefaultTimeout, + ProxyURL: "://invalid", + MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + }, + wantNil: false, + }, + { + name: "zero values", + cfg: ClientConfig{ + Timeout: 0, + }, + wantNil: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + client := NewClient(tt.cfg) + + if tt.wantNil && client != nil { + t.Error("expected nil client") + } + if !tt.wantNil && client == nil { + t.Error("expected non-nil client") + } + + if client != nil { + if client.Timeout != tt.cfg.Timeout { + t.Errorf("expected timeout %v, got %v", tt.cfg.Timeout, client.Timeout) + } + + transport, ok := client.Transport.(*http.Transport) + if !ok { + t.Fatal("expected *http.Transport") + } + if transport.MaxIdleConns != tt.cfg.MaxIdleConns { + t.Errorf("expected MaxIdleConns %d, got %d", tt.cfg.MaxIdleConns, transport.MaxIdleConns) + } + if transport.MaxIdleConnsPerHost != tt.cfg.MaxIdleConnsPerHost { + t.Errorf("expected MaxIdleConnsPerHost %d, got %d", tt.cfg.MaxIdleConnsPerHost, transport.MaxIdleConnsPerHost) + } + } + }, + ) + } +} + +func TestNewDefaultClient(t *testing.T) { + client := NewDefaultClient() + + if client == nil { + t.Fatal("expected non-nil client") + } + + if client.Timeout != DefaultTimeout { + t.Errorf("expected timeout %v, got %v", DefaultTimeout, client.Timeout) + } + + transport, ok := client.Transport.(*http.Transport) + if !ok { + t.Fatal("expected *http.Transport") + } + + if transport.MaxIdleConns != 100 { + t.Errorf("expected MaxIdleConns 100, got %d", transport.MaxIdleConns) + } 
+ if transport.MaxIdleConnsPerHost != 10 { + t.Errorf("expected MaxIdleConnsPerHost 10, got %d", transport.MaxIdleConnsPerHost) + } +} + +func TestConstants(t *testing.T) { + if DefaultTimeout != 30*time.Second { + t.Errorf("expected DefaultTimeout 30s, got %v", DefaultTimeout) + } + if AlertingTimeout != 10*time.Second { + t.Errorf("expected AlertingTimeout 10s, got %v", AlertingTimeout) + } +} diff --git a/internal/pkg/leadership/leadership.go b/internal/pkg/leadership/leadership.go deleted file mode 100644 index f8c85bc15..000000000 --- a/internal/pkg/leadership/leadership.go +++ /dev/null @@ -1,107 +0,0 @@ -package leadership - -import ( - "context" - "net/http" - "sync" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/controller" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/leaderelection" - "k8s.io/client-go/tools/leaderelection/resourcelock" - - coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" -) - -var ( - // Used for liveness probe - m sync.Mutex - healthy bool = true -) - -func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock { - return &resourcelock.LeaseLock{ - LeaseMeta: v1.ObjectMeta{ - Name: lockName, - Namespace: namespace, - }, - Client: client, - LockConfig: resourcelock.ResourceLockConfig{ - Identity: podname, - }, - } -} - -// runLeaderElection runs leadership election. If an instance of the controller is the leader and stops leading it will shutdown. 
-func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) { - // Construct channels for the controllers to use - var stopChannels []chan struct{} - for i := 0; i < len(controllers); i++ { - stop := make(chan struct{}) - stopChannels = append(stopChannels, stop) - } - - leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ - Lock: lock, - ReleaseOnCancel: true, - LeaseDuration: 15 * time.Second, - RenewDeadline: 10 * time.Second, - RetryPeriod: 2 * time.Second, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(c context.Context) { - logrus.Info("became leader, starting controllers") - runControllers(controllers, stopChannels) - }, - OnStoppedLeading: func() { - logrus.Info("no longer leader, shutting down") - stopControllers(stopChannels) - cancel() - m.Lock() - defer m.Unlock() - healthy = false - }, - OnNewLeader: func(current_id string) { - if current_id == id { - logrus.Info("still the leader!") - return - } - logrus.Infof("new leader is %s", current_id) - }, - }, - }) -} - -func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) { - for i, c := range controllers { - c := c - go c.Run(1, stopChannels[i]) - } -} - -func stopControllers(stopChannels []chan struct{}) { - for _, c := range stopChannels { - close(c) - } -} - -// Healthz sets up the liveness probe endpoint. If leadership election is -// enabled and a replica stops leading the liveness probe will fail and the -// kubelet will restart the container. 
-func SetupLivenessEndpoint() { - http.HandleFunc("/live", healthz) -} - -func healthz(w http.ResponseWriter, req *http.Request) { - m.Lock() - defer m.Unlock() - if healthy { - if i, err := w.Write([]byte("alive")); err != nil { - logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err) - } - return - } - - w.WriteHeader(http.StatusInternalServerError) -} diff --git a/internal/pkg/leadership/leadership_test.go b/internal/pkg/leadership/leadership_test.go deleted file mode 100644 index eed070561..000000000 --- a/internal/pkg/leadership/leadership_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package leadership - -import ( - "context" - "fmt" - "net/http" - "net/http/httptest" - "os" - "testing" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/handler" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" -) - -func TestMain(m *testing.M) { - - testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient) - - logrus.Infof("Running Testcases") - retCode := m.Run() - - testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient) - - os.Exit(retCode) -} - -func TestHealthz(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "/live", nil) - if err != nil { - t.Fatalf(("failed to create request")) - } - - response := httptest.NewRecorder() - - healthz(response, request) - got := response.Code - want := 200 - - if got != want { - t.Fatalf("got: %q, want: %q", got, want) - } - - // Have the liveness probe serve a 500 - healthy = false - - request, err = http.NewRequest(http.MethodGet, "/live", nil) - if err != nil { - t.Fatalf(("failed to create request")) - 
} - - response = httptest.NewRecorder() - - healthz(response, request) - got = response.Code - want = 500 - - if got != want { - t.Fatalf("got: %q, want: %q", got, want) - } -} - -// TestRunLeaderElection validates that the liveness endpoint serves 500 when -// leadership election fails -func TestRunLeaderElection(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - - lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace) - - go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{}) - - // Liveness probe should be serving OK - request, err := http.NewRequest(http.MethodGet, "/live", nil) - if err != nil { - t.Fatalf(("failed to create request")) - } - - response := httptest.NewRecorder() - - healthz(response, request) - got := response.Code - want := 500 - - if got != want { - t.Fatalf("got: %q, want: %q", got, want) - } - - // Cancel the leader election context, so leadership is released and - // live endpoint serves 500 - cancel() - - request, err = http.NewRequest(http.MethodGet, "/live", nil) - if err != nil { - t.Fatalf(("failed to create request")) - } - - response = httptest.NewRecorder() - - healthz(response, request) - got = response.Code - want = 500 - - if got != want { - t.Fatalf("got: %q, want: %q", got, want) - } -} - -// TestRunLeaderElectionWithControllers tests that leadership election works -// with real controllers and that on context cancellation the controllers stop -// running. 
-func TestRunLeaderElectionWithControllers(t *testing.T) { - t.Logf("Creating controller") - var controllers []*controller.Controller - for k := range kube.ResourceMap { - c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, "", "", metrics.NewCollectors()) - if err != nil { - logrus.Fatalf("%s", err) - } - - controllers = append(controllers, c) - } - time.Sleep(3 * time.Second) - - lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace) - - ctx, cancel := context.WithCancel(context.TODO()) - - // Start running leadership election, this also starts the controllers - go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers) - time.Sleep(3 * time.Second) - - // Create some stuff and do a thing - configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com") - if err != nil { - t.Fatalf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true) - if err != nil { - t.Fatalf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Fatalf("Configmap was not updated") - } - time.Sleep(3 * time.Second) - - // Verifying deployment update - logrus.Infof("Verifying pod envvars has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: testutil.Namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: 
options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Fatalf("Deployment was not updated") - } - time.Sleep(testutil.SleepDuration) - - // Cancel the leader election context, so leadership is released - logrus.Info("shutting down controller from test") - cancel() - time.Sleep(5 * time.Second) - - // Updating configmap again - updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new") - if updateErr != nil { - t.Fatalf("Configmap was not updated") - } - - // Verifying that the deployment was not updated as leadership has been lost - logrus.Infof("Verifying pod envvars has not been updated") - shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new") - config = common.Config{ - Namespace: testutil.Namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs() - updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if updated { - t.Fatalf("Deployment was updated") - } - - // Deleting deployment - err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(testutil.SleepDuration) -} diff --git a/internal/pkg/metadata/metadata.go b/internal/pkg/metadata/metadata.go new file mode 100644 index 
000000000..df306af4e --- /dev/null +++ b/internal/pkg/metadata/metadata.go @@ -0,0 +1,125 @@ +// Package metadata provides metadata ConfigMap creation for Reloader. +// The metadata ConfigMap contains build info, configuration options, and deployment info. +package metadata + +import ( + "encoding/json" + "os" + "runtime" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +const ( + // ConfigMapName is the name of the metadata ConfigMap. + ConfigMapName = "reloader-meta-info" + // ConfigMapLabelKey is the label key for the metadata ConfigMap. + ConfigMapLabelKey = "reloader.stakater.com/meta-info" + // ConfigMapLabelValue is the label value for the metadata ConfigMap. + ConfigMapLabelValue = "reloader-oss" + + // Environment variables for deployment info. + EnvReloaderNamespace = "RELOADER_NAMESPACE" + EnvReloaderDeploymentName = "RELOADER_DEPLOYMENT_NAME" +) + +// Version, Commit, and BuildDate are set during the build process +// using the -X linker flag to inject these values into the binary. +var ( + Version = "dev" + Commit = "unknown" + BuildDate = "unknown" +) + +// MetaInfo contains comprehensive metadata about the Reloader instance. +type MetaInfo struct { + // BuildInfo contains information about the build version, commit, and compilation details. + BuildInfo BuildInfo `json:"buildInfo"` + // Config contains all the configuration options used by this Reloader instance. + Config *config.Config `json:"config"` + // DeploymentInfo contains metadata about the Kubernetes deployment of this instance. + DeploymentInfo DeploymentInfo `json:"deploymentInfo"` +} + +// BuildInfo contains information about the build and version of the Reloader binary. +type BuildInfo struct { + // GoVersion is the version of Go used to compile the binary. + GoVersion string `json:"goVersion"` + // ReleaseVersion is the version tag or branch of the Reloader release. 
+ ReleaseVersion string `json:"releaseVersion"` + // CommitHash is the Git commit hash of the source code used to build this binary. + CommitHash string `json:"commitHash"` + // CommitTime is the timestamp of the Git commit used to build this binary. + CommitTime time.Time `json:"commitTime"` +} + +// DeploymentInfo contains metadata about the Reloader deployment. +type DeploymentInfo struct { + // Name is the name of the Reloader deployment. + Name string `json:"name"` + // Namespace is the namespace where Reloader is deployed. + Namespace string `json:"namespace"` +} + +// NewBuildInfo creates a new BuildInfo with current build information. +func NewBuildInfo() BuildInfo { + return BuildInfo{ + GoVersion: runtime.Version(), + ReleaseVersion: Version, + CommitHash: Commit, + CommitTime: parseUTCTime(BuildDate), + } +} + +// NewMetaInfo creates a new MetaInfo from configuration. +func NewMetaInfo(cfg *config.Config) *MetaInfo { + return &MetaInfo{ + BuildInfo: NewBuildInfo(), + Config: cfg, + DeploymentInfo: DeploymentInfo{ + Name: os.Getenv(EnvReloaderDeploymentName), + Namespace: os.Getenv(EnvReloaderNamespace), + }, + } +} + +// ToConfigMap converts MetaInfo to a Kubernetes ConfigMap. 
+func (m *MetaInfo) ToConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName, + Namespace: m.DeploymentInfo.Namespace, + Labels: map[string]string{ + ConfigMapLabelKey: ConfigMapLabelValue, + }, + }, + Data: map[string]string{ + "buildInfo": toJSON(m.BuildInfo), + "config": toJSON(m.Config), + "deploymentInfo": toJSON(m.DeploymentInfo), + }, + } +} + +func toJSON(data interface{}) string { + jsonData, err := json.Marshal(data) + if err != nil { + return "" + } + return string(jsonData) +} + +func parseUTCTime(value string) time.Time { + if value == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC3339, value) + if err != nil { + return time.Time{} + } + return t +} diff --git a/internal/pkg/metadata/metadata_test.go b/internal/pkg/metadata/metadata_test.go new file mode 100644 index 000000000..52c5f1997 --- /dev/null +++ b/internal/pkg/metadata/metadata_test.go @@ -0,0 +1,307 @@ +package metadata + +import ( + "context" + "encoding/json" + "testing" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +// testLogger returns a no-op logger for testing. 
+func testLogger() logr.Logger { + return logr.Discard() +} + +func TestNewBuildInfo(t *testing.T) { + oldVersion := Version + oldCommit := Commit + oldBuildDate := BuildDate + defer func() { + Version = oldVersion + Commit = oldCommit + BuildDate = oldBuildDate + }() + + Version = "1.0.0" + Commit = "abc123" + BuildDate = "2024-01-01T12:00:00Z" + + info := NewBuildInfo() + + if info.ReleaseVersion != "1.0.0" { + t.Errorf("ReleaseVersion = %s, want 1.0.0", info.ReleaseVersion) + } + if info.CommitHash != "abc123" { + t.Errorf("CommitHash = %s, want abc123", info.CommitHash) + } + if info.GoVersion == "" { + t.Error("GoVersion should not be empty") + } + if info.CommitTime.IsZero() { + t.Error("CommitTime should not be zero") + } +} + +func TestNewMetaInfo(t *testing.T) { + t.Setenv(EnvReloaderNamespace, "test-ns") + t.Setenv(EnvReloaderDeploymentName, "test-deploy") + + cfg := config.NewDefault() + cfg.AutoReloadAll = true + cfg.ReloadStrategy = config.ReloadStrategyAnnotations + cfg.ArgoRolloutsEnabled = true + cfg.ReloadOnCreate = true + cfg.ReloadOnDelete = true + cfg.EnableHA = true + cfg.WebhookURL = "https://example.com/webhook" + cfg.LogFormat = "json" + cfg.LogLevel = "debug" + cfg.IgnoredResources = []string{"configmaps"} + cfg.IgnoredWorkloads = []string{"jobs"} + cfg.IgnoredNamespaces = []string{"kube-system"} + + metaInfo := NewMetaInfo(cfg) + + if !metaInfo.Config.AutoReloadAll { + t.Error("AutoReloadAll should be true") + } + if metaInfo.Config.ReloadStrategy != config.ReloadStrategyAnnotations { + t.Errorf("ReloadStrategy = %s, want annotations", metaInfo.Config.ReloadStrategy) + } + if !metaInfo.Config.ArgoRolloutsEnabled { + t.Error("ArgoRolloutsEnabled should be true") + } + if !metaInfo.Config.ReloadOnCreate { + t.Error("ReloadOnCreate should be true") + } + if !metaInfo.Config.ReloadOnDelete { + t.Error("ReloadOnDelete should be true") + } + if !metaInfo.Config.EnableHA { + t.Error("EnableHA should be true") + } + if metaInfo.Config.WebhookURL 
!= "https://example.com/webhook" { + t.Errorf("WebhookURL = %s, want https://example.com/webhook", metaInfo.Config.WebhookURL) + } + + if metaInfo.DeploymentInfo.Namespace != "test-ns" { + t.Errorf("DeploymentInfo.Namespace = %s, want test-ns", metaInfo.DeploymentInfo.Namespace) + } + if metaInfo.DeploymentInfo.Name != "test-deploy" { + t.Errorf("DeploymentInfo.Name = %s, want test-deploy", metaInfo.DeploymentInfo.Name) + } +} + +func TestMetaInfo_ToConfigMap(t *testing.T) { + t.Setenv(EnvReloaderNamespace, "reloader-ns") + t.Setenv(EnvReloaderDeploymentName, "reloader-deploy") + + cfg := config.NewDefault() + metaInfo := NewMetaInfo(cfg) + cm := metaInfo.ToConfigMap() + + if cm.Name != ConfigMapName { + t.Errorf("Name = %s, want %s", cm.Name, ConfigMapName) + } + if cm.Namespace != "reloader-ns" { + t.Errorf("Namespace = %s, want reloader-ns", cm.Namespace) + } + if cm.Labels[ConfigMapLabelKey] != ConfigMapLabelValue { + t.Errorf("Label = %s, want %s", cm.Labels[ConfigMapLabelKey], ConfigMapLabelValue) + } + + if _, ok := cm.Data["buildInfo"]; !ok { + t.Error("buildInfo data key missing") + } + if _, ok := cm.Data["config"]; !ok { + t.Error("config data key missing") + } + if _, ok := cm.Data["deploymentInfo"]; !ok { + t.Error("deploymentInfo data key missing") + } + + // Verify buildInfo is valid JSON + var buildInfo BuildInfo + if err := json.Unmarshal([]byte(cm.Data["buildInfo"]), &buildInfo); err != nil { + t.Errorf("buildInfo is not valid JSON: %v", err) + } + + var parsedConfig config.Config + if err := json.Unmarshal([]byte(cm.Data["config"]), &parsedConfig); err != nil { + t.Errorf("config is not valid JSON: %v", err) + } + + // Verify deploymentInfo contains expected values + var deployInfo DeploymentInfo + if err := json.Unmarshal([]byte(cm.Data["deploymentInfo"]), &deployInfo); err != nil { + t.Errorf("deploymentInfo is not valid JSON: %v", err) + } + if deployInfo.Namespace != "reloader-ns" { + t.Errorf("DeploymentInfo.Namespace = %s, want 
reloader-ns", deployInfo.Namespace) + } + if deployInfo.Name != "reloader-deploy" { + t.Errorf("DeploymentInfo.Name = %s, want reloader-deploy", deployInfo.Name) + } +} + +func TestPublisher_Publish_NoNamespace(t *testing.T) { + t.Setenv(EnvReloaderNamespace, "") + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + + cfg := config.NewDefault() + publisher := NewPublisher(fakeClient, cfg, testLogger()) + + err := publisher.Publish(context.Background()) + if err != nil { + t.Errorf("Publish() with no namespace should not error, got: %v", err) + } +} + +func TestPublisher_Publish_CreateNew(t *testing.T) { + t.Setenv(EnvReloaderNamespace, "test-ns") + t.Setenv(EnvReloaderDeploymentName, "test-deploy") + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + + cfg := config.NewDefault() + publisher := NewPublisher(fakeClient, cfg, testLogger()) + + ctx := context.Background() + err := publisher.Publish(ctx) + if err != nil { + t.Errorf("Publish() error = %v", err) + } + + cm := &corev1.ConfigMap{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) + if err != nil { + t.Errorf("Failed to get created ConfigMap: %v", err) + } + if cm.Name != ConfigMapName { + t.Errorf("ConfigMap.Name = %s, want %s", cm.Name, ConfigMapName) + } +} + +func TestPublisher_Publish_UpdateExisting(t *testing.T) { + t.Setenv(EnvReloaderNamespace, "test-ns") + t.Setenv(EnvReloaderDeploymentName, "test-deploy") + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + existingCM := &corev1.ConfigMap{} + existingCM.Name = ConfigMapName + existingCM.Namespace = "test-ns" + existingCM.Data = map[string]string{ + "buildInfo": `{"goVersion":"old"}`, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(existingCM). 
+ Build() + + cfg := config.NewDefault() + publisher := NewPublisher(fakeClient, cfg, testLogger()) + + ctx := context.Background() + err := publisher.Publish(ctx) + if err != nil { + t.Errorf("Publish() error = %v", err) + } + + cm := &corev1.ConfigMap{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) + if err != nil { + t.Errorf("Failed to get updated ConfigMap: %v", err) + } + + if _, ok := cm.Data["buildInfo"]; !ok { + t.Error("buildInfo data key missing after update") + } + if _, ok := cm.Data["config"]; !ok { + t.Error("config data key missing after update") + } + if _, ok := cm.Data["deploymentInfo"]; !ok { + t.Error("deploymentInfo data key missing after update") + } + + if cm.Labels[ConfigMapLabelKey] != ConfigMapLabelValue { + t.Errorf("Label not updated: %s", cm.Labels[ConfigMapLabelKey]) + } +} + +func TestPublishMetaInfoConfigMap(t *testing.T) { + t.Setenv(EnvReloaderNamespace, "test-ns") + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + + cfg := config.NewDefault() + ctx := context.Background() + + err := PublishMetaInfoConfigMap(ctx, fakeClient, cfg, testLogger()) + if err != nil { + t.Errorf("PublishMetaInfoConfigMap() error = %v", err) + } + + cm := &corev1.ConfigMap{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) + if err != nil { + t.Errorf("Failed to get created ConfigMap: %v", err) + } +} + +func TestParseUTCTime(t *testing.T) { + tests := []struct { + name string + input string + wantErr bool + }{ + { + name: "valid RFC3339 time", + input: "2024-01-01T12:00:00Z", + wantErr: false, + }, + { + name: "empty string", + input: "", + wantErr: true, // returns zero time + }, + { + name: "invalid format", + input: "not-a-time", + wantErr: true, // returns zero time + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + result := 
parseUTCTime(tt.input) + if tt.wantErr { + if !result.IsZero() { + t.Errorf("parseUTCTime(%s) should return zero time", tt.input) + } + } else { + if result.IsZero() { + t.Errorf("parseUTCTime(%s) should not return zero time", tt.input) + } + } + }, + ) + } +} diff --git a/internal/pkg/metadata/publisher.go b/internal/pkg/metadata/publisher.go new file mode 100644 index 000000000..6c6a42221 --- /dev/null +++ b/internal/pkg/metadata/publisher.go @@ -0,0 +1,99 @@ +package metadata + +import ( + "context" + "fmt" + "os" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// Publisher handles creating and updating the metadata ConfigMap. +type Publisher struct { + client client.Client + cfg *config.Config + log logr.Logger +} + +// NewPublisher creates a new Publisher. +func NewPublisher(c client.Client, cfg *config.Config, log logr.Logger) *Publisher { + return &Publisher{ + client: c, + cfg: cfg, + log: log, + } +} + +// Publish creates or updates the metadata ConfigMap. 
+func (p *Publisher) Publish(ctx context.Context) error { + namespace := os.Getenv(EnvReloaderNamespace) + if namespace == "" { + p.log.Info("RELOADER_NAMESPACE is not set, skipping meta info configmap creation") + return nil + } + + metaInfo := NewMetaInfo(p.cfg) + configMap := metaInfo.ToConfigMap() + + existing := &corev1.ConfigMap{} + err := p.client.Get( + ctx, client.ObjectKey{ + Name: ConfigMapName, + Namespace: namespace, + }, existing, + ) + + if err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to get existing meta info configmap: %w", err) + } + p.log.Info("Creating meta info configmap") + if err := p.client.Create(ctx, configMap, client.FieldOwner(workload.FieldManager)); err != nil { + return fmt.Errorf("failed to create meta info configmap: %w", err) + } + p.log.Info("Meta info configmap created successfully") + return nil + } + + p.log.Info("Meta info configmap already exists, updating it") + existing.Data = configMap.Data + existing.Labels = configMap.Labels + if err := p.client.Update(ctx, existing, client.FieldOwner(workload.FieldManager)); err != nil { + return fmt.Errorf("failed to update meta info configmap: %w", err) + } + p.log.Info("Meta info configmap updated successfully") + return nil +} + +// PublishMetaInfoConfigMap is a convenience function that creates a Publisher and calls Publish. +func PublishMetaInfoConfigMap(ctx context.Context, c client.Client, cfg *config.Config, log logr.Logger) error { + publisher := NewPublisher(c, cfg, log) + return publisher.Publish(ctx) +} + +// Runnable returns a controller-runtime Runnable that publishes the metadata ConfigMap +// when the manager starts. This ensures the cache is ready before accessing the API. 
+func Runnable(c client.Client, cfg *config.Config, log logr.Logger) RunnableFunc { + return func(ctx context.Context) error { + if err := PublishMetaInfoConfigMap(ctx, c, cfg, log); err != nil { + log.Error(err, "Failed to create metadata ConfigMap") + // Non-fatal, don't return error to avoid crashing the manager + } + <-ctx.Done() + return nil + } +} + +// RunnableFunc is a function that implements the controller-runtime Runnable interface. +type RunnableFunc func(context.Context) error + +// Start implements the Runnable interface. +func (r RunnableFunc) Start(ctx context.Context) error { + return r(ctx) +} diff --git a/internal/pkg/metrics/prometheus.go b/internal/pkg/metrics/prometheus.go index 94153eace..b95731c88 100644 --- a/internal/pkg/metrics/prometheus.go +++ b/internal/pkg/metrics/prometheus.go @@ -3,14 +3,169 @@ package metrics import ( "net/http" "os" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" ) +// Collectors holds all Prometheus metrics collectors for Reloader. 
type Collectors struct { Reloaded *prometheus.CounterVec ReloadedByNamespace *prometheus.CounterVec + countByNamespace bool + + // === Comprehensive metrics for load testing === + + // Reconcile/Handler metrics + ReconcileTotal *prometheus.CounterVec // Total reconcile calls by result + ReconcileDuration *prometheus.HistogramVec // Time spent in reconcile/handler + + // Action metrics + ActionTotal *prometheus.CounterVec // Total actions by workload kind and result + ActionLatency *prometheus.HistogramVec // Time from event to action applied + + // Skip metrics + SkippedTotal *prometheus.CounterVec // Skipped operations by reason + + // Queue metrics (controller-runtime exposes some automatically, but we add custom ones) + QueueDepth prometheus.Gauge // Current queue depth + QueueAdds prometheus.Counter // Total items added to queue + QueueLatency *prometheus.HistogramVec // Time spent in queue + + // Error and retry metrics + ErrorsTotal *prometheus.CounterVec // Errors by type + RetriesTotal prometheus.Counter // Total retries + + // Event processing metrics + EventsReceived *prometheus.CounterVec // Events received by type (add/update/delete) + EventsProcessed *prometheus.CounterVec // Events processed by type and result + + // Resource discovery metrics + WorkloadsScanned *prometheus.CounterVec // Workloads scanned by kind + WorkloadsMatched *prometheus.CounterVec // Workloads matched for reload by kind +} + +// RecordReload records a reload event with the given success status and namespace. +func (c *Collectors) RecordReload(success bool, namespace string) { + if c == nil { + return + } + + successLabel := "false" + if success { + successLabel = "true" + } + + c.Reloaded.With(prometheus.Labels{"success": successLabel}).Inc() + + if c.countByNamespace { + c.ReloadedByNamespace.With( + prometheus.Labels{ + "success": successLabel, + "namespace": namespace, + }, + ).Inc() + } +} + +// RecordReconcile records a reconcile/handler invocation. 
+func (c *Collectors) RecordReconcile(result string, duration time.Duration) { + if c == nil { + return + } + c.ReconcileTotal.With(prometheus.Labels{"result": result}).Inc() + c.ReconcileDuration.With(prometheus.Labels{"result": result}).Observe(duration.Seconds()) +} + +// RecordAction records a reload action on a workload. +func (c *Collectors) RecordAction(workloadKind string, result string, latency time.Duration) { + if c == nil { + return + } + c.ActionTotal.With(prometheus.Labels{"workload_kind": workloadKind, "result": result}).Inc() + c.ActionLatency.With(prometheus.Labels{"workload_kind": workloadKind}).Observe(latency.Seconds()) +} + +// RecordSkipped records a skipped operation with reason. +func (c *Collectors) RecordSkipped(reason string) { + if c == nil { + return + } + c.SkippedTotal.With(prometheus.Labels{"reason": reason}).Inc() +} + +// RecordQueueAdd records an item being added to the queue. +func (c *Collectors) RecordQueueAdd() { + if c == nil { + return + } + c.QueueAdds.Inc() +} + +// SetQueueDepth sets the current queue depth. +func (c *Collectors) SetQueueDepth(depth int) { + if c == nil { + return + } + c.QueueDepth.Set(float64(depth)) +} + +// RecordQueueLatency records how long an item spent in the queue. +func (c *Collectors) RecordQueueLatency(latency time.Duration) { + if c == nil { + return + } + c.QueueLatency.With(prometheus.Labels{}).Observe(latency.Seconds()) +} + +// RecordError records an error by type. +func (c *Collectors) RecordError(errorType string) { + if c == nil { + return + } + c.ErrorsTotal.With(prometheus.Labels{"type": errorType}).Inc() +} + +// RecordRetry records a retry attempt. +func (c *Collectors) RecordRetry() { + if c == nil { + return + } + c.RetriesTotal.Inc() +} + +// RecordEventReceived records an event being received. 
+func (c *Collectors) RecordEventReceived(eventType string, resourceType string) { + if c == nil { + return + } + c.EventsReceived.With(prometheus.Labels{"event_type": eventType, "resource_type": resourceType}).Inc() +} + +// RecordEventProcessed records an event being processed. +func (c *Collectors) RecordEventProcessed(eventType string, resourceType string, result string) { + if c == nil { + return + } + c.EventsProcessed.With(prometheus.Labels{"event_type": eventType, "resource_type": resourceType, "result": result}).Inc() +} + +// RecordWorkloadsScanned records workloads scanned during a reconcile. +func (c *Collectors) RecordWorkloadsScanned(kind string, count int) { + if c == nil { + return + } + c.WorkloadsScanned.With(prometheus.Labels{"kind": kind}).Add(float64(count)) +} + +// RecordWorkloadsMatched records workloads matched for reload. +func (c *Collectors) RecordWorkloadsMatched(kind string, count int) { + if c == nil { + return + } + c.WorkloadsMatched.With(prometheus.Labels{"kind": kind}).Add(float64(count)) } func NewCollectors() Collectors { @@ -20,40 +175,196 @@ func NewCollectors() Collectors { Name: "reload_executed_total", Help: "Counter of reloads executed by Reloader.", }, - []string{ - "success", - }, + []string{"success"}, ) - - //set 0 as default value reloaded.With(prometheus.Labels{"success": "true"}).Add(0) reloaded.With(prometheus.Labels{"success": "false"}).Add(0) - reloaded_by_namespace := prometheus.NewCounterVec( + reloadedByNamespace := prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "reloader", Name: "reload_executed_total_by_namespace", Help: "Counter of reloads executed by Reloader by namespace.", }, - []string{ - "success", - "namespace", + []string{"success", "namespace"}, + ) + + // === Comprehensive metrics === + + reconcileTotal := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "reconcile_total", + Help: "Total number of reconcile/handler invocations by result.", }, + 
[]string{"result"}, ) + + reconcileDuration := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "reloader", + Name: "reconcile_duration_seconds", + Help: "Time spent in reconcile/handler in seconds.", + Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}, + }, + []string{"result"}, + ) + + actionTotal := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "action_total", + Help: "Total number of reload actions by workload kind and result.", + }, + []string{"workload_kind", "result"}, + ) + + actionLatency := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "reloader", + Name: "action_latency_seconds", + Help: "Time from event received to action applied in seconds.", + Buckets: []float64{0.01, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60}, + }, + []string{"workload_kind"}, + ) + + skippedTotal := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "skipped_total", + Help: "Total number of skipped operations by reason.", + }, + []string{"reason"}, + ) + + queueDepth := prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "reloader", + Name: "workqueue_depth", + Help: "Current depth of the work queue.", + }, + ) + + queueAdds := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "workqueue_adds_total", + Help: "Total number of items added to the work queue.", + }, + ) + + queueLatency := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "reloader", + Name: "workqueue_latency_seconds", + Help: "Time spent in the work queue in seconds.", + Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5}, + }, + []string{}, + ) + + errorsTotal := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "errors_total", + Help: "Total number of errors by type.", + }, + []string{"type"}, + ) + + retriesTotal := prometheus.NewCounter( + 
prometheus.CounterOpts{ + Namespace: "reloader", + Name: "retries_total", + Help: "Total number of retry attempts.", + }, + ) + + eventsReceived := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "events_received_total", + Help: "Total number of events received by type and resource.", + }, + []string{"event_type", "resource_type"}, + ) + + eventsProcessed := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "events_processed_total", + Help: "Total number of events processed by type, resource, and result.", + }, + []string{"event_type", "resource_type", "result"}, + ) + + workloadsScanned := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "workloads_scanned_total", + Help: "Total number of workloads scanned by kind.", + }, + []string{"kind"}, + ) + + workloadsMatched := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "workloads_matched_total", + Help: "Total number of workloads matched for reload by kind.", + }, + []string{"kind"}, + ) + return Collectors{ Reloaded: reloaded, - ReloadedByNamespace: reloaded_by_namespace, + ReloadedByNamespace: reloadedByNamespace, + countByNamespace: os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled", + + ReconcileTotal: reconcileTotal, + ReconcileDuration: reconcileDuration, + ActionTotal: actionTotal, + ActionLatency: actionLatency, + SkippedTotal: skippedTotal, + QueueDepth: queueDepth, + QueueAdds: queueAdds, + QueueLatency: queueLatency, + ErrorsTotal: errorsTotal, + RetriesTotal: retriesTotal, + EventsReceived: eventsReceived, + EventsProcessed: eventsProcessed, + WorkloadsScanned: workloadsScanned, + WorkloadsMatched: workloadsMatched, } } func SetupPrometheusEndpoint() Collectors { collectors := NewCollectors() - prometheus.MustRegister(collectors.Reloaded) + + ctrlmetrics.Registry.MustRegister(collectors.Reloaded) + ctrlmetrics.Registry.MustRegister(collectors.ReconcileTotal) + 
ctrlmetrics.Registry.MustRegister(collectors.ReconcileDuration) + ctrlmetrics.Registry.MustRegister(collectors.ActionTotal) + ctrlmetrics.Registry.MustRegister(collectors.ActionLatency) + ctrlmetrics.Registry.MustRegister(collectors.SkippedTotal) + ctrlmetrics.Registry.MustRegister(collectors.QueueDepth) + ctrlmetrics.Registry.MustRegister(collectors.QueueAdds) + ctrlmetrics.Registry.MustRegister(collectors.QueueLatency) + ctrlmetrics.Registry.MustRegister(collectors.ErrorsTotal) + ctrlmetrics.Registry.MustRegister(collectors.RetriesTotal) + ctrlmetrics.Registry.MustRegister(collectors.EventsReceived) + ctrlmetrics.Registry.MustRegister(collectors.EventsProcessed) + ctrlmetrics.Registry.MustRegister(collectors.WorkloadsScanned) + ctrlmetrics.Registry.MustRegister(collectors.WorkloadsMatched) if os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled" { - prometheus.MustRegister(collectors.ReloadedByNamespace) + ctrlmetrics.Registry.MustRegister(collectors.ReloadedByNamespace) } + // Note: For controller-runtime based Reloader, the metrics are served + // by controller-runtime's metrics server. This http.Handle is kept for + // the legacy informer-based Reloader which uses its own HTTP server. 
http.Handle("/metrics", promhttp.Handler()) return collectors diff --git a/internal/pkg/metrics/prometheus_test.go b/internal/pkg/metrics/prometheus_test.go new file mode 100644 index 000000000..5715c243e --- /dev/null +++ b/internal/pkg/metrics/prometheus_test.go @@ -0,0 +1,187 @@ +package metrics + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +func TestNewCollectors_CreatesCounters(t *testing.T) { + collectors := NewCollectors() + + if collectors.Reloaded == nil { + t.Error("NewCollectors() should create Reloaded counter") + } + if collectors.ReloadedByNamespace == nil { + t.Error("NewCollectors() should create ReloadedByNamespace counter") + } +} + +func TestNewCollectors_InitializesWithZero(t *testing.T) { + collectors := NewCollectors() + + metric := &dto.Metric{} + err := collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 0 { + t.Errorf("Initial success=true counter = %v, want 0", metric.Counter.GetValue()) + } + + err = collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 0 { + t.Errorf("Initial success=false counter = %v, want 0", metric.Counter.GetValue()) + } +} + +func TestRecordReload_Success(t *testing.T) { + collectors := NewCollectors() + collectors.RecordReload(true, "default") + + metric := &dto.Metric{} + err := collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("success=true counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestRecordReload_Failure(t *testing.T) { + collectors := NewCollectors() + collectors.RecordReload(false, "default") + + metric := &dto.Metric{} + 
err := collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("success=false counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestRecordReload_MultipleIncrements(t *testing.T) { + collectors := NewCollectors() + collectors.RecordReload(true, "default") + collectors.RecordReload(true, "default") + collectors.RecordReload(false, "default") + + metric := &dto.Metric{} + + err := collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 2 { + t.Errorf("success=true counter = %v, want 2", metric.Counter.GetValue()) + } + + err = collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("success=false counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestRecordReload_WithNamespaceTracking(t *testing.T) { + t.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") + + collectors := NewCollectors() + collectors.RecordReload(true, "kube-system") + + metric := &dto.Metric{} + err := collectors.ReloadedByNamespace.With( + prometheus.Labels{ + "success": "true", + "namespace": "kube-system", + }, + ).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("namespace counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestRecordReload_WithoutNamespaceTracking(t *testing.T) { + t.Setenv("METRICS_COUNT_BY_NAMESPACE", "") + + collectors := NewCollectors() + collectors.RecordReload(true, "kube-system") + + if collectors.countByNamespace { + t.Error("countByNamespace should be false when env var is not set") + } +} + +func TestNilCollectors_NoPanic(t *testing.T) { + var c *Collectors = nil + 
+ c.RecordReload(true, "default") + c.RecordReload(false, "default") +} + +func TestRecordReload_DifferentNamespaces(t *testing.T) { + t.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") + + collectors := NewCollectors() + collectors.RecordReload(true, "namespace-a") + collectors.RecordReload(true, "namespace-b") + collectors.RecordReload(true, "namespace-a") + + metric := &dto.Metric{} + + err := collectors.ReloadedByNamespace.With( + prometheus.Labels{ + "success": "true", + "namespace": "namespace-a", + }, + ).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 2 { + t.Errorf("namespace-a counter = %v, want 2", metric.Counter.GetValue()) + } + + err = collectors.ReloadedByNamespace.With( + prometheus.Labels{ + "success": "true", + "namespace": "namespace-b", + }, + ).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("namespace-b counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestCollectors_MetricNames(t *testing.T) { + collectors := NewCollectors() + + ch := make(chan *prometheus.Desc, 10) + collectors.Reloaded.Describe(ch) + close(ch) + + found := false + for desc := range ch { + if desc.String() != "" { + found = true + } + } + if !found { + t.Error("Expected Reloaded metric to have a description") + } +} diff --git a/internal/pkg/openshift/detect.go b/internal/pkg/openshift/detect.go new file mode 100644 index 000000000..403c0d27f --- /dev/null +++ b/internal/pkg/openshift/detect.go @@ -0,0 +1,34 @@ +package openshift + +import ( + "github.com/go-logr/logr" + "k8s.io/client-go/discovery" +) + +const ( + // DeploymentConfigAPIGroup is the API group for DeploymentConfig. + DeploymentConfigAPIGroup = "apps.openshift.io" + // DeploymentConfigAPIVersion is the API version for DeploymentConfig. + DeploymentConfigAPIVersion = "v1" + // DeploymentConfigResource is the resource name for DeploymentConfig. 
+ DeploymentConfigResource = "deploymentconfigs" +) + +// HasDeploymentConfigSupport checks if the cluster supports DeploymentConfig +func HasDeploymentConfigSupport(client discovery.DiscoveryInterface, log logr.Logger) bool { + resources, err := client.ServerResourcesForGroupVersion(DeploymentConfigAPIGroup + "/" + DeploymentConfigAPIVersion) + if err != nil { + log.V(1).Info("DeploymentConfig API not available", "error", err) + return false + } + + for _, r := range resources.APIResources { + if r.Name == DeploymentConfigResource { + log.Info("DeploymentConfig API detected, enabling support") + return true + } + } + + log.V(1).Info("DeploymentConfig resource not found in apps.openshift.io/v1") + return false +} diff --git a/internal/pkg/options/flags.go b/internal/pkg/options/flags.go deleted file mode 100644 index 0f99be8af..000000000 --- a/internal/pkg/options/flags.go +++ /dev/null @@ -1,92 +0,0 @@ -package options - -import "github.com/stakater/Reloader/internal/pkg/constants" - -type ArgoRolloutStrategy int - -const ( - // RestartStrategy is the annotation value for restart strategy for rollouts - RestartStrategy ArgoRolloutStrategy = iota - // RolloutStrategy is the annotation value for rollout strategy for rollouts - RolloutStrategy -) - -var ( - // Auto reload all resources when their corresponding configmaps/secrets are updated - AutoReloadAll = false - // ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in - // configmaps specified by name - ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload" - // SecretUpdateOnChangeAnnotation is an annotation to detect changes in - // secrets specified by name - SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload" - // ReloaderAutoAnnotation is an annotation to detect changes in secrets/configmaps - ReloaderAutoAnnotation = "reloader.stakater.com/auto" - // IgnoreResourceAnnotation is an annotation to ignore changes in secrets/configmaps - 
IgnoreResourceAnnotation = "reloader.stakater.com/ignore" - // ConfigmapReloaderAutoAnnotation is an annotation to detect changes in configmaps - ConfigmapReloaderAutoAnnotation = "configmap.reloader.stakater.com/auto" - // SecretReloaderAutoAnnotation is an annotation to detect changes in secrets - SecretReloaderAutoAnnotation = "secret.reloader.stakater.com/auto" - // ConfigmapReloaderAutoAnnotation is a comma separated list of configmaps that excludes detecting changes on cms - ConfigmapExcludeReloaderAnnotation = "configmaps.exclude.reloader.stakater.com/reload" - // SecretExcludeReloaderAnnotation is a comma separated list of secrets that excludes detecting changes on secrets - SecretExcludeReloaderAnnotation = "secrets.exclude.reloader.stakater.com/reload" - // AutoSearchAnnotation is an annotation to detect changes in - // configmaps or triggers with the SearchMatchAnnotation - AutoSearchAnnotation = "reloader.stakater.com/search" - // SearchMatchAnnotation is an annotation to tag secrets to be found with - // AutoSearchAnnotation - SearchMatchAnnotation = "reloader.stakater.com/match" - // RolloutStrategyAnnotation is an annotation to define rollout update strategy - RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy" - // PauseDeploymentAnnotation is an annotation to define the time period to pause a deployment after - // a configmap/secret change has been detected. 
Valid values are described here: https://pkg.go.dev/time#ParseDuration - // only positive values are allowed - PauseDeploymentAnnotation = "deployment.reloader.stakater.com/pause-period" - // Annotation set by reloader to indicate that the deployment has been paused - PauseDeploymentTimeAnnotation = "deployment.reloader.stakater.com/paused-at" - // LogFormat is the log format to use (json, or empty string for default) - LogFormat = "" - // LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic) - LogLevel = "" - // IsArgoRollouts Adds support for argo rollouts - IsArgoRollouts = "false" - // ReloadStrategy Specify the update strategy - ReloadStrategy = constants.EnvVarsReloadStrategy - // ReloadOnCreate Adds support to watch create events - ReloadOnCreate = "false" - // ReloadOnDelete Adds support to watch delete events - ReloadOnDelete = "false" - SyncAfterRestart = false - // EnableHA adds support for running multiple replicas via leadership election - EnableHA = false - // Url to send a request to instead of triggering a reload - WebhookUrl = "" - // ResourcesToIgnore is a list of resources to ignore when watching for changes - ResourcesToIgnore = []string{} - // WorkloadTypesToIgnore is a list of workload types to ignore when watching for changes - WorkloadTypesToIgnore = []string{} - // NamespacesToIgnore is a list of namespace names to ignore when watching for changes - NamespacesToIgnore = []string{} - // NamespaceSelectors is a list of namespace selectors to watch for changes - NamespaceSelectors = []string{} - // ResourceSelectors is a list of resource selectors to watch for changes - ResourceSelectors = []string{} - // EnablePProf enables pprof for profiling - EnablePProf = false - // PProfAddr is the address to start pprof server on - // Default is :6060 - PProfAddr = ":6060" -) - -func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy { - switch s { - case "restart": - return RestartStrategy - case "rollout": - 
fallthrough - default: - return RolloutStrategy - } -} diff --git a/internal/pkg/reload/change.go b/internal/pkg/reload/change.go new file mode 100644 index 000000000..b7fa4443d --- /dev/null +++ b/internal/pkg/reload/change.go @@ -0,0 +1,56 @@ +package reload + +import ( + corev1 "k8s.io/api/core/v1" +) + +// EventType represents the type of change event. +type EventType string + +const ( + // EventTypeCreate indicates a resource was created. + EventTypeCreate EventType = "create" + // EventTypeUpdate indicates a resource was updated. + EventTypeUpdate EventType = "update" + // EventTypeDelete indicates a resource was deleted. + EventTypeDelete EventType = "delete" +) + +// ResourceChange represents a change event for a ConfigMap or Secret. +type ResourceChange interface { + IsNil() bool + GetEventType() EventType + GetName() string + GetNamespace() string + GetAnnotations() map[string]string + GetResourceType() ResourceType + ComputeHash(hasher *Hasher) string +} + +// ConfigMapChange represents a change event for a ConfigMap. +type ConfigMapChange struct { + ConfigMap *corev1.ConfigMap + EventType EventType +} + +func (c ConfigMapChange) IsNil() bool { return c.ConfigMap == nil } +func (c ConfigMapChange) GetEventType() EventType { return c.EventType } +func (c ConfigMapChange) GetName() string { return c.ConfigMap.Name } +func (c ConfigMapChange) GetNamespace() string { return c.ConfigMap.Namespace } +func (c ConfigMapChange) GetAnnotations() map[string]string { return c.ConfigMap.Annotations } +func (c ConfigMapChange) GetResourceType() ResourceType { return ResourceTypeConfigMap } +func (c ConfigMapChange) ComputeHash(h *Hasher) string { return h.HashConfigMap(c.ConfigMap) } + +// SecretChange represents a change event for a Secret. 
+type SecretChange struct { + Secret *corev1.Secret + EventType EventType +} + +func (c SecretChange) IsNil() bool { return c.Secret == nil } +func (c SecretChange) GetEventType() EventType { return c.EventType } +func (c SecretChange) GetName() string { return c.Secret.Name } +func (c SecretChange) GetNamespace() string { return c.Secret.Namespace } +func (c SecretChange) GetAnnotations() map[string]string { return c.Secret.Annotations } +func (c SecretChange) GetResourceType() ResourceType { return ResourceTypeSecret } +func (c SecretChange) ComputeHash(h *Hasher) string { return h.HashSecret(c.Secret) } diff --git a/internal/pkg/reload/decision.go b/internal/pkg/reload/decision.go new file mode 100644 index 000000000..625925828 --- /dev/null +++ b/internal/pkg/reload/decision.go @@ -0,0 +1,30 @@ +package reload + +import ( + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// ReloadDecision contains the result of evaluating whether to reload a workload. +type ReloadDecision struct { + // Workload is the workload accessor. + Workload workload.Workload + // ShouldReload indicates whether the workload should be reloaded. + ShouldReload bool + // AutoReload indicates if this is an auto-reload. + AutoReload bool + // Reason provides a human-readable explanation. + Reason string + // Hash is the computed hash of the resource content. + Hash string +} + +// FilterDecisions returns only decisions where ShouldReload is true. 
+func FilterDecisions(decisions []ReloadDecision) []ReloadDecision { + var result []ReloadDecision + for _, d := range decisions { + if d.ShouldReload { + result = append(result, d) + } + } + return result +} diff --git a/internal/pkg/reload/decision_test.go b/internal/pkg/reload/decision_test.go new file mode 100644 index 000000000..5b7a6135e --- /dev/null +++ b/internal/pkg/reload/decision_test.go @@ -0,0 +1,125 @@ +package reload + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/workload" +) + +func TestFilterDecisions(t *testing.T) { + wl1 := workload.NewDeploymentWorkload( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy1", Namespace: "default"}, + }, + ) + wl2 := workload.NewDeploymentWorkload( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy2", Namespace: "default"}, + }, + ) + wl3 := workload.NewDeploymentWorkload( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy3", Namespace: "default"}, + }, + ) + + tests := []struct { + name string + decisions []ReloadDecision + wantCount int + wantNames []string + }{ + { + name: "empty list", + decisions: []ReloadDecision{}, + wantCount: 0, + wantNames: nil, + }, + { + name: "all should reload", + decisions: []ReloadDecision{ + {Workload: wl1, ShouldReload: true, Reason: "test"}, + {Workload: wl2, ShouldReload: true, Reason: "test"}, + }, + wantCount: 2, + wantNames: []string{"deploy1", "deploy2"}, + }, + { + name: "none should reload", + decisions: []ReloadDecision{ + {Workload: wl1, ShouldReload: false, Reason: "test"}, + {Workload: wl2, ShouldReload: false, Reason: "test"}, + }, + wantCount: 0, + wantNames: nil, + }, + { + name: "mixed - some should reload", + decisions: []ReloadDecision{ + {Workload: wl1, ShouldReload: true, Reason: "test"}, + {Workload: wl2, ShouldReload: false, Reason: "test"}, + {Workload: wl3, ShouldReload: true, Reason: "test"}, + }, + 
wantCount: 2, + wantNames: []string{"deploy1", "deploy3"}, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + result := FilterDecisions(tt.decisions) + + if len(result) != tt.wantCount { + t.Errorf("FilterDecisions() returned %d decisions, want %d", len(result), tt.wantCount) + } + + if tt.wantNames != nil { + for i, d := range result { + if d.Workload.GetName() != tt.wantNames[i] { + t.Errorf( + "FilterDecisions()[%d].Workload.GetName() = %s, want %s", + i, d.Workload.GetName(), tt.wantNames[i], + ) + } + } + } + }, + ) + } +} + +func TestReloadDecision_Fields(t *testing.T) { + wl := workload.NewDeploymentWorkload( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + }, + ) + + decision := ReloadDecision{ + Workload: wl, + ShouldReload: true, + AutoReload: true, + Reason: "test reason", + Hash: "abc123", + } + + if decision.Workload.GetName() != "test" { + t.Errorf("ReloadDecision.Workload.GetName() = %v, want test", decision.Workload.GetName()) + } + if !decision.ShouldReload { + t.Error("ReloadDecision.ShouldReload should be true") + } + if !decision.AutoReload { + t.Error("ReloadDecision.AutoReload should be true") + } + if decision.Reason != "test reason" { + t.Errorf("ReloadDecision.Reason = %v, want 'test reason'", decision.Reason) + } + if decision.Hash != "abc123" { + t.Errorf("ReloadDecision.Hash = %v, want 'abc123'", decision.Hash) + } +} diff --git a/internal/pkg/reload/hasher.go b/internal/pkg/reload/hasher.go new file mode 100644 index 000000000..0d259ac3a --- /dev/null +++ b/internal/pkg/reload/hasher.go @@ -0,0 +1,74 @@ +// Package reload provides core reload logic for ConfigMaps and Secrets. +package reload + +import ( + "crypto/sha1" + "encoding/base64" + "fmt" + "io" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +// Hasher computes content hashes for ConfigMaps and Secrets. +type Hasher struct{} + +// NewHasher creates a new Hasher instance. 
+func NewHasher() *Hasher {
+	return &Hasher{}
+}
+
+// HashConfigMap computes a SHA1 hash of the ConfigMap's data and binaryData.
+// A nil ConfigMap hashes the same as one with no data (the hash of "").
+func (h *Hasher) HashConfigMap(cm *corev1.ConfigMap) string {
+	if cm == nil {
+		return h.computeSHA("")
+	}
+	return h.hashConfigMapData(cm.Data, cm.BinaryData)
+}
+
+// HashSecret computes a SHA1 hash of the Secret's data.
+// A nil Secret hashes the same as one with no data (the hash of "").
+func (h *Hasher) HashSecret(secret *corev1.Secret) string {
+	if secret == nil {
+		return h.computeSHA("")
+	}
+	return h.hashSecretData(secret.Data)
+}
+
+// hashConfigMapData serializes data and binaryData as sorted "k=v" pairs joined
+// with ";" and hashes the result; sorting makes the hash independent of Go's
+// random map iteration order.
+// NOTE(review): keys/values containing "=" or ";" are not escaped, so two
+// different maps can serialize identically (e.g. {"a":"b;x=y"} vs
+// {"a":"b","x":"y"}) — confirm this ambiguity is acceptable for change
+// detection; changing the encoding would alter all existing hashes.
+func (h *Hasher) hashConfigMapData(data map[string]string, binaryData map[string][]byte) string {
+	values := make([]string, 0, len(data)+len(binaryData))
+
+	for k, v := range data {
+		values = append(values, k+"="+v)
+	}
+
+	// binaryData values are base64-encoded so the serialized form stays text.
+	for k, v := range binaryData {
+		values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
+	}
+
+	sort.Strings(values)
+	return h.computeSHA(strings.Join(values, ";"))
+}
+
+// hashSecretData serializes Secret data as sorted "k=v" pairs joined with ";"
+// and hashes the result (same scheme and same caveats as hashConfigMapData).
+func (h *Hasher) hashSecretData(data map[string][]byte) string {
+	values := make([]string, 0, len(data))
+
+	for k, v := range data {
+		values = append(values, k+"="+string(v))
+	}
+
+	sort.Strings(values)
+	return h.computeSHA(strings.Join(values, ";"))
+}
+
+// computeSHA returns the hex-encoded SHA1 digest of data. SHA1 is used only
+// for change detection here, not for any security purpose.
+func (h *Hasher) computeSHA(data string) string {
+	hasher := sha1.New()
+	// io.WriteString on a hash.Hash never returns an error; the result is
+	// deliberately discarded.
+	_, _ = io.WriteString(hasher, data)
+	return fmt.Sprintf("%x", hasher.Sum(nil))
+}
+
+// EmptyHash returns an empty string to signal resource deletion.
+func (h *Hasher) EmptyHash() string { + return "" +} diff --git a/internal/pkg/reload/hasher_test.go b/internal/pkg/reload/hasher_test.go new file mode 100644 index 000000000..ff5693ff7 --- /dev/null +++ b/internal/pkg/reload/hasher_test.go @@ -0,0 +1,236 @@ +package reload + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" +) + +func TestHasher_HashConfigMap(t *testing.T) { + hasher := NewHasher() + + tests := []struct { + name string + cm *corev1.ConfigMap + wantHash string + }{ + { + name: "empty configmap", + cm: &corev1.ConfigMap{ + Data: nil, + BinaryData: nil, + }, + wantHash: hasher.HashConfigMap(&corev1.ConfigMap{}), + }, + { + name: "configmap with data", + cm: &corev1.ConfigMap{ + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + wantHash: hasher.HashConfigMap( + &corev1.ConfigMap{ + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + ), + }, + { + name: "configmap with binary data", + cm: &corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "binary1": []byte("binaryvalue1"), + }, + }, + wantHash: hasher.HashConfigMap( + &corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "binary1": []byte("binaryvalue1"), + }, + }, + ), + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := hasher.HashConfigMap(tt.cm) + if got != tt.wantHash { + t.Errorf("HashConfigMap() = %v, want %v", got, tt.wantHash) + } + }, + ) + } +} + +func TestHasher_HashConfigMap_Deterministic(t *testing.T) { + hasher := NewHasher() + + cm := &corev1.ConfigMap{ + Data: map[string]string{ + "z-key": "value-z", + "a-key": "value-a", + "m-key": "value-m", + }, + } + + hash1 := hasher.HashConfigMap(cm) + hash2 := hasher.HashConfigMap(cm) + hash3 := hasher.HashConfigMap(cm) + + if hash1 != hash2 || hash2 != hash3 { + t.Errorf("Hash is not deterministic: %s, %s, %s", hash1, hash2, hash3) + } +} + +func TestHasher_HashConfigMap_DifferentValues(t *testing.T) { + hasher := NewHasher() + + cm1 := 
&corev1.ConfigMap{ + Data: map[string]string{ + "key": "value1", + }, + } + + cm2 := &corev1.ConfigMap{ + Data: map[string]string{ + "key": "value2", + }, + } + + hash1 := hasher.HashConfigMap(cm1) + hash2 := hasher.HashConfigMap(cm2) + + if hash1 == hash2 { + t.Errorf("Different values should produce different hashes") + } +} + +func TestHasher_HashSecret(t *testing.T) { + hasher := NewHasher() + + tests := []struct { + name string + secret *corev1.Secret + wantHash string + }{ + { + name: "empty secret", + secret: &corev1.Secret{ + Data: nil, + }, + wantHash: hasher.HashSecret(&corev1.Secret{}), + }, + { + name: "secret with data", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + }, + }, + wantHash: hasher.HashSecret( + &corev1.Secret{ + Data: map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + }, + }, + ), + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := hasher.HashSecret(tt.secret) + if got != tt.wantHash { + t.Errorf("HashSecret() = %v, want %v", got, tt.wantHash) + } + }, + ) + } +} + +func TestHasher_HashSecret_Deterministic(t *testing.T) { + hasher := NewHasher() + + secret := &corev1.Secret{ + Data: map[string][]byte{ + "z-key": []byte("value-z"), + "a-key": []byte("value-a"), + "m-key": []byte("value-m"), + }, + } + + hash1 := hasher.HashSecret(secret) + hash2 := hasher.HashSecret(secret) + hash3 := hasher.HashSecret(secret) + + if hash1 != hash2 || hash2 != hash3 { + t.Errorf("Hash is not deterministic: %s, %s, %s", hash1, hash2, hash3) + } +} + +func TestHasher_HashSecret_DifferentValues(t *testing.T) { + hasher := NewHasher() + + secret1 := &corev1.Secret{ + Data: map[string][]byte{ + "key": []byte("value1"), + }, + } + + secret2 := &corev1.Secret{ + Data: map[string][]byte{ + "key": []byte("value2"), + }, + } + + hash1 := hasher.HashSecret(secret1) + hash2 := hasher.HashSecret(secret2) + + if hash1 == hash2 { + 
t.Errorf("Different values should produce different hashes") + } +} + +func TestHasher_EmptyHash(t *testing.T) { + hasher := NewHasher() + + emptyHash := hasher.EmptyHash() + if emptyHash != "" { + t.Errorf("EmptyHash should be empty string, got %s", emptyHash) + } + + cm := &corev1.ConfigMap{} + cmHash := hasher.HashConfigMap(cm) + if cmHash == "" { + t.Error("Empty ConfigMap should have a non-empty hash") + } + + secret := &corev1.Secret{} + secretHash := hasher.HashSecret(secret) + if secretHash == "" { + t.Error("Empty Secret should have a non-empty hash") + } +} + +func TestHasher_NilInput(t *testing.T) { + hasher := NewHasher() + + cmHash := hasher.HashConfigMap(nil) + if cmHash == "" { + t.Error("nil ConfigMap should return a valid hash") + } + + secretHash := hasher.HashSecret(nil) + if secretHash == "" { + t.Error("nil Secret should return a valid hash") + } +} diff --git a/internal/pkg/reload/matcher.go b/internal/pkg/reload/matcher.go new file mode 100644 index 000000000..e817f7f59 --- /dev/null +++ b/internal/pkg/reload/matcher.go @@ -0,0 +1,259 @@ +package reload + +import ( + "regexp" + "strings" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +// MatchResult contains the result of checking if a workload should be reloaded. +type MatchResult struct { + ShouldReload bool + AutoReload bool + Reason string +} + +// Matcher determines whether a workload should be reloaded based on annotations. +type Matcher struct { + cfg *config.Config +} + +// NewMatcher creates a new Matcher with the given configuration. +func NewMatcher(cfg *config.Config) *Matcher { + return &Matcher{cfg: cfg} +} + +// MatchInput contains all the information needed to determine if a reload should occur. 
+type MatchInput struct {
+	// ResourceName/ResourceNamespace/ResourceType identify the changed
+	// ConfigMap or Secret.
+	ResourceName        string
+	ResourceNamespace   string
+	ResourceType        ResourceType
+	// ResourceAnnotations are the annotations on the changed resource itself
+	// (consulted for ignore and search/match semantics).
+	ResourceAnnotations map[string]string
+	// WorkloadAnnotations and PodAnnotations are the candidate annotation
+	// sets on the workload object and its pod template, respectively;
+	// selectAnnotations picks which set governs the decision.
+	WorkloadAnnotations map[string]string
+	PodAnnotations      map[string]string
+}
+
+// ShouldReload determines if a workload should be reloaded based on its annotations.
+//
+// Checks are applied in strict precedence order:
+//  1. resource ignore annotation      -> never reload
+//  2. exclude list on the workload    -> never reload
+//  3. explicit reload annotation      -> reload (AutoReload=false)
+//  4. search/match pairing            -> reload (AutoReload=true)
+//  5. auto annotation (generic/typed) -> reload (AutoReload=true)
+//  6. global auto-reload-all          -> reload (AutoReload=true)
+// Explicit is deliberately checked before auto so an explicitly listed
+// resource still matches when auto is also enabled.
+func (m *Matcher) ShouldReload(input MatchInput) MatchResult {
+	if m.isResourceIgnored(input.ResourceAnnotations) {
+		return MatchResult{
+			ShouldReload: false,
+			Reason:       "resource has ignore annotation",
+		}
+	}
+
+	annotations := m.selectAnnotations(input)
+
+	if m.isResourceExcluded(input.ResourceName, input.ResourceType, annotations) {
+		return MatchResult{
+			ShouldReload: false,
+			Reason:       "resource is in exclude list",
+		}
+	}
+
+	if m.matchesExplicitAnnotation(input.ResourceName, input.ResourceType, annotations) {
+		return MatchResult{
+			ShouldReload: true,
+			AutoReload:   false,
+			Reason:       "matches explicit reload annotation",
+		}
+	}
+
+	if m.matchesSearchPattern(input.ResourceAnnotations, annotations) {
+		return MatchResult{
+			ShouldReload: true,
+			AutoReload:   true,
+			Reason:       "matches search/match pattern",
+		}
+	}
+
+	if m.matchesAutoAnnotation(input.ResourceType, annotations) {
+		return MatchResult{
+			ShouldReload: true,
+			AutoReload:   true,
+			Reason:       "auto annotation enabled",
+		}
+	}
+
+	if m.matchesAutoReloadAll(input.ResourceType, annotations) {
+		return MatchResult{
+			ShouldReload: true,
+			AutoReload:   true,
+			Reason:       "auto-reload-all enabled",
+		}
+	}
+
+	return MatchResult{
+		ShouldReload: false,
+		Reason:       "no matching annotations",
+	}
+}
+
+// isResourceIgnored reports whether the changed resource opted out of
+// reloads via the ignore annotation ("true" is the only opt-out value).
+func (m *Matcher) isResourceIgnored(resourceAnnotations map[string]string) bool {
+	if resourceAnnotations == nil {
+		return false
+	}
+	return resourceAnnotations[m.cfg.Annotations.Ignore] == "true"
+}
+
+// selectAnnotations chooses which annotation set drives the decision:
+// workload-level annotations win if they carry any relevant key, then the
+// pod template's; the workload set is the fallback when neither does.
+func (m *Matcher) selectAnnotations(input MatchInput) map[string]string {
+	if m.hasRelevantAnnotations(input.WorkloadAnnotations, input.ResourceType) {
+		return input.WorkloadAnnotations
+	}
+	if 
m.hasRelevantAnnotations(input.PodAnnotations, input.ResourceType) {
+		return input.PodAnnotations
+	}
+	return input.WorkloadAnnotations
+}
+
+// hasRelevantAnnotations reports whether the annotation set carries any key
+// this matcher acts on: the type-specific explicit reload list, the search
+// flag, the generic auto flag, or the type-specific auto flag. Presence is
+// enough; values are interpreted later by the individual match functions.
+func (m *Matcher) hasRelevantAnnotations(annotations map[string]string, resourceType ResourceType) bool {
+	if annotations == nil {
+		return false
+	}
+
+	explicitAnn := m.getExplicitAnnotation(resourceType)
+	if _, ok := annotations[explicitAnn]; ok {
+		return true
+	}
+
+	if _, ok := annotations[m.cfg.Annotations.Search]; ok {
+		return true
+	}
+
+	if _, ok := annotations[m.cfg.Annotations.Auto]; ok {
+		return true
+	}
+
+	typedAutoAnn := m.getTypedAutoAnnotation(resourceType)
+	if _, ok := annotations[typedAutoAnn]; ok {
+		return true
+	}
+
+	return false
+}
+
+// isResourceExcluded reports whether resourceName appears in the workload's
+// comma-separated exclude annotation for this resource type. Matching is by
+// exact name after trimming whitespace around each list entry.
+func (m *Matcher) isResourceExcluded(resourceName string, resourceType ResourceType, annotations map[string]string) bool {
+	if annotations == nil {
+		return false
+	}
+
+	var excludeAnn string
+	switch resourceType {
+	case ResourceTypeConfigMap:
+		excludeAnn = m.cfg.Annotations.ConfigmapExclude
+	case ResourceTypeSecret:
+		excludeAnn = m.cfg.Annotations.SecretExclude
+	}
+
+	excludeList, ok := annotations[excludeAnn]
+	if !ok || excludeList == "" {
+		return false
+	}
+
+	for _, excluded := range strings.Split(excludeList, ",") {
+		if strings.TrimSpace(excluded) == resourceName {
+			return true
+		}
+	}
+
+	return false
+}
+
+// matchesExplicitAnnotation reports whether resourceName matches any entry of
+// the workload's comma-separated explicit reload annotation. Each entry is
+// treated as a regular expression anchored to the whole name; entries that
+// fail to compile fall back to exact string comparison.
+func (m *Matcher) matchesExplicitAnnotation(resourceName string, resourceType ResourceType, annotations map[string]string) bool {
+	if annotations == nil {
+		return false
+	}
+
+	explicitAnn := m.getExplicitAnnotation(resourceType)
+	annotationValue, ok := annotations[explicitAnn]
+	if !ok || annotationValue == "" {
+		return false
+	}
+
+	for _, value := range strings.Split(annotationValue, ",") {
+		value = strings.TrimSpace(value)
+		if value == "" {
+			continue
+		}
+		// Anchor through a non-capturing group: a bare "^" + value + "$"
+		// mis-anchors alternations — e.g. value "foo|bar" would compile to
+		// "^foo|bar$", matching any name starting with "foo" OR ending with
+		// "bar" instead of exactly "foo" or "bar".
+		re, err := regexp.Compile("^(?:" + value + ")$")
+		if err != nil {
+			// Not a valid regex (e.g. an unescaped metacharacter in a plain
+			// name): fall back to an exact-name comparison.
+			if value == resourceName {
+				return true
+			}
+			continue
+		}
+		if re.MatchString(resourceName) {
+			return true
+		}
+ } + + return false +} + +func (m *Matcher) matchesSearchPattern(resourceAnnotations, workloadAnnotations map[string]string) bool { + if workloadAnnotations == nil || resourceAnnotations == nil { + return false + } + + searchValue, ok := workloadAnnotations[m.cfg.Annotations.Search] + if !ok || searchValue != "true" { + return false + } + + matchValue, ok := resourceAnnotations[m.cfg.Annotations.Match] + return ok && matchValue == "true" +} + +func (m *Matcher) matchesAutoAnnotation(resourceType ResourceType, annotations map[string]string) bool { + if annotations == nil { + return false + } + + if annotations[m.cfg.Annotations.Auto] == "true" { + return true + } + + typedAutoAnn := m.getTypedAutoAnnotation(resourceType) + return annotations[typedAutoAnn] == "true" +} + +func (m *Matcher) matchesAutoReloadAll(resourceType ResourceType, annotations map[string]string) bool { + if !m.cfg.AutoReloadAll { + return false + } + + if annotations != nil { + if annotations[m.cfg.Annotations.Auto] == "false" { + return false + } + typedAutoAnn := m.getTypedAutoAnnotation(resourceType) + if annotations[typedAutoAnn] == "false" { + return false + } + } + + return true +} + +func (m *Matcher) getExplicitAnnotation(resourceType ResourceType) string { + switch resourceType { + case ResourceTypeConfigMap: + return m.cfg.Annotations.ConfigmapReload + case ResourceTypeSecret: + return m.cfg.Annotations.SecretReload + default: + return "" + } +} + +func (m *Matcher) getTypedAutoAnnotation(resourceType ResourceType) string { + switch resourceType { + case ResourceTypeConfigMap: + return m.cfg.Annotations.ConfigmapAuto + case ResourceTypeSecret: + return m.cfg.Annotations.SecretAuto + default: + return "" + } +} diff --git a/internal/pkg/reload/matcher_test.go b/internal/pkg/reload/matcher_test.go new file mode 100644 index 000000000..1c58fd303 --- /dev/null +++ b/internal/pkg/reload/matcher_test.go @@ -0,0 +1,474 @@ +package reload + +import ( + "testing" + + 
"github.com/stakater/Reloader/internal/pkg/config" +) + +func TestMatcher_ShouldReload(t *testing.T) { + defaultCfg := config.NewDefault() + matcher := NewMatcher(defaultCfg) + + tests := []struct { + name string + input MatchInput + wantReload bool + wantAutoReload bool + description string + }{ + { + name: "ignore annotation on resource skips reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "true"}, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Resources with ignore annotation should never trigger reload", + }, + { + name: "ignore annotation false allows reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "false"}, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "Resources with ignore=false should allow reload", + }, + { + name: "exclude annotation skips reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "configmaps.exclude.reloader.stakater.com/reload": "my-config", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Excluded ConfigMaps should not trigger reload", + }, + { + name: "exclude annotation with multiple values", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: 
map[string]string{ + "reloader.stakater.com/auto": "true", + "configmaps.exclude.reloader.stakater.com/reload": "other-config,my-config,another-config", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "ConfigMaps in comma-separated exclude list should not trigger reload", + }, + { + name: "explicit reload annotation with auto enabled - should reload", + input: MatchInput{ + ResourceName: "external-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "configmap.reloader.stakater.com/reload": "external-config", + }, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: false, // Explicit, not auto + description: "BUG FIX: Explicit reload annotation should work even when auto is enabled", + }, + { + name: "explicit reload annotation matches pattern - should reload", + input: MatchInput{ + ResourceName: "app-config-v2", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "app-config-.*", + }, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: false, + description: "Regex pattern in reload annotation should match", + }, + { + name: "explicit reload annotation does not match - should not reload", + input: MatchInput{ + ResourceName: "other-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "app-config", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "ConfigMaps not in reload list should not trigger reload", + }, + { + name: "auto annotation on workload triggers reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: 
"default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "Auto annotation on workload should trigger reload", + }, + { + name: "auto annotation on pod template triggers reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: nil, + PodAnnotations: map[string]string{"reloader.stakater.com/auto": "true"}, + }, + wantReload: true, + wantAutoReload: true, + description: "Auto annotation on pod template should trigger reload", + }, + { + name: "configmap-specific auto annotation", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"configmap.reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "ConfigMap-specific auto annotation should trigger reload", + }, + { + name: "secret-specific auto annotation for secret", + input: MatchInput{ + ResourceName: "my-secret", + ResourceNamespace: "default", + ResourceType: ResourceTypeSecret, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"secret.reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "Secret-specific auto annotation should trigger reload for secrets", + }, + { + name: "configmap-specific auto annotation does not match secret", + input: MatchInput{ + ResourceName: "my-secret", + ResourceNamespace: "default", + ResourceType: ResourceTypeSecret, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"configmap.reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: false, + 
wantAutoReload: false, + description: "ConfigMap-specific auto annotation should not match secrets", + }, + { + name: "search annotation with matching resource", + input: MatchInput{ + ResourceName: "app-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/match": "true"}, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/search": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, // Search mode is an auto-discovery mechanism + description: "Search annotation with matching resource should trigger reload", + }, + { + name: "search annotation without matching resource", + input: MatchInput{ + ResourceName: "app-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/search": "true"}, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Search annotation without matching resource should not trigger reload", + }, + { + name: "no annotations does not trigger reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: nil, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Without any annotations, should not trigger reload", + }, + { + name: "secret reload annotation", + input: MatchInput{ + ResourceName: "my-secret", + ResourceNamespace: "default", + ResourceType: ResourceTypeSecret, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "secret.reloader.stakater.com/reload": "my-secret", + }, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: false, + description: "Secret reload annotation should trigger reload", + }, + { + name: "secret exclude annotation", + input: MatchInput{ + ResourceName: 
"my-secret", + ResourceNamespace: "default", + ResourceType: ResourceTypeSecret, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "secrets.exclude.reloader.stakater.com/reload": "my-secret", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Secret exclude annotation should prevent reload", + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + result := matcher.ShouldReload(tt.input) + + if result.ShouldReload != tt.wantReload { + t.Errorf("ShouldReload = %v, want %v (%s)", result.ShouldReload, tt.wantReload, tt.description) + } + + if result.AutoReload != tt.wantAutoReload { + t.Errorf("AutoReload = %v, want %v (%s)", result.AutoReload, tt.wantAutoReload, tt.description) + } + + t.Logf("✓ %s", tt.description) + }, + ) + } +} + +func TestMatcher_ShouldReload_AutoReloadAll(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + matcher := NewMatcher(cfg) + + tests := []struct { + name string + input MatchInput + wantReload bool + wantAutoReload bool + description string + }{ + { + name: "auto-reload-all triggers reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: nil, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "With auto-reload-all enabled, all ConfigMaps should trigger reload", + }, + { + name: "auto-reload-all respects ignore annotation", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "true"}, + WorkloadAnnotations: nil, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Even with auto-reload-all, ignore annotation should be respected", + }, + { + name: 
"auto-reload-all respects exclude annotation", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "configmaps.exclude.reloader.stakater.com/reload": "my-config", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Even with auto-reload-all, exclude annotation should be respected", + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + result := matcher.ShouldReload(tt.input) + + if result.ShouldReload != tt.wantReload { + t.Errorf("ShouldReload = %v, want %v (%s)", result.ShouldReload, tt.wantReload, tt.description) + } + + if result.AutoReload != tt.wantAutoReload { + t.Errorf("AutoReload = %v, want %v (%s)", result.AutoReload, tt.wantAutoReload, tt.description) + } + + t.Logf("✓ %s", tt.description) + }, + ) + } +} + +// TestMatcher_AutoDoesNotIgnoreExplicit tests the fix for the bug where +// having reloader.stakater.com/auto: "true" would cause explicit reload annotations +// to be ignored due to an early return. 
+func TestMatcher_AutoDoesNotIgnoreExplicit(t *testing.T) { + cfg := config.NewDefault() + matcher := NewMatcher(cfg) + + input := MatchInput{ + ResourceName: "external-config", // Not referenced by workload + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", // Enables auto-reload + "configmap.reloader.stakater.com/reload": "external-config", // Explicit list + }, + PodAnnotations: nil, + } + + result := matcher.ShouldReload(input) + + if !result.ShouldReload { + t.Errorf("BUG: Explicit reload annotation ignored when auto is enabled") + t.Errorf("Expected ShouldReload=true for explicitly listed ConfigMap, got false") + } + + if result.AutoReload { + t.Errorf("Expected AutoReload=false for explicit match, got true") + } + + t.Log("✓ Explicit reload annotation works even when auto is enabled") +} + +// TestMatcher_PrecedenceOrder verifies the correct order of precedence: +// 1. Ignore annotation → skip +// 2. Exclude annotation → skip +// 3. Explicit reload annotation → reload (BUG FIX: before auto!) +// 4. Search/Match → reload +// 5. Auto annotation → reload +// 6. 
Auto-reload-all → reload +func TestMatcher_PrecedenceOrder(t *testing.T) { + cfg := config.NewDefault() + matcher := NewMatcher(cfg) + + t.Run( + "explicit takes precedence over auto", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "configmap.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.AutoReload { + t.Error("Expected explicit match (AutoReload=false), got auto match") + } + if !result.ShouldReload { + t.Error("Expected ShouldReload=true") + } + }, + ) + + t.Run( + "ignore takes precedence over explicit", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "true"}, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.ShouldReload { + t.Error("Expected ignore to take precedence, but got ShouldReload=true") + } + }, + ) + + t.Run( + "exclude takes precedence over explicit", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "my-config", + "configmaps.exclude.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.ShouldReload { + t.Error("Expected exclude to take precedence, but got ShouldReload=true") + } + }, + ) +} diff --git a/internal/pkg/reload/pause.go b/internal/pkg/reload/pause.go new file mode 100644 index 000000000..e995dc33c --- /dev/null +++ b/internal/pkg/reload/pause.go @@ -0,0 +1,128 @@ +package reload + +import ( + "fmt" + 
"time" + + appsv1 "k8s.io/api/apps/v1" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// PauseHandler handles pause deployment logic. +type PauseHandler struct { + cfg *config.Config +} + +// NewPauseHandler creates a new PauseHandler. +func NewPauseHandler(cfg *config.Config) *PauseHandler { + return &PauseHandler{cfg: cfg} +} + +// ShouldPause checks if a deployment should be paused after reload. +func (h *PauseHandler) ShouldPause(wl workload.Workload) bool { + if wl.Kind() != workload.KindDeployment { + return false + } + + annotations := wl.GetAnnotations() + if annotations == nil { + return false + } + + pausePeriod := annotations[h.cfg.Annotations.PausePeriod] + return pausePeriod != "" +} + +// GetPausePeriod returns the configured pause period for a workload. +func (h *PauseHandler) GetPausePeriod(wl workload.Workload) (time.Duration, error) { + annotations := wl.GetAnnotations() + if annotations == nil { + return 0, fmt.Errorf("no annotations on workload") + } + + pausePeriodStr := annotations[h.cfg.Annotations.PausePeriod] + if pausePeriodStr == "" { + return 0, fmt.Errorf("no pause period annotation") + } + + return time.ParseDuration(pausePeriodStr) +} + +// ApplyPause pauses a deployment and sets the paused-at annotation. +func (h *PauseHandler) ApplyPause(wl workload.Workload) error { + deployWl, ok := wl.(*workload.DeploymentWorkload) + if !ok { + return fmt.Errorf("workload is not a deployment") + } + + deploy := deployWl.GetDeployment() + + deploy.Spec.Paused = true + + if deploy.Annotations == nil { + deploy.Annotations = make(map[string]string) + } + deploy.Annotations[h.cfg.Annotations.PausedAt] = time.Now().UTC().Format(time.RFC3339) + + return nil +} + +// CheckPauseExpired checks if the pause period has expired for a deployment. 
+// CheckPauseExpired checks if the pause period has expired for a deployment.
+// It reads the pause-period and paused-at annotations, and returns
+// (true, 0, nil) once the elapsed wall-clock time since paused-at reaches the
+// pause period, or (false, remaining, nil) while the pause is still active.
+// An error is returned when either annotation is missing or unparseable
+// (pause period must be a Go duration, paused-at must be RFC3339).
+func (h *PauseHandler) CheckPauseExpired(deploy *appsv1.Deployment) (expired bool, remainingTime time.Duration, err error) {
+	annotations := deploy.GetAnnotations()
+	if annotations == nil {
+		return false, 0, fmt.Errorf("no annotations on deployment")
+	}
+
+	pausePeriodStr := annotations[h.cfg.Annotations.PausePeriod]
+	if pausePeriodStr == "" {
+		return false, 0, fmt.Errorf("no pause period annotation")
+	}
+
+	pausedAtStr := annotations[h.cfg.Annotations.PausedAt]
+	if pausedAtStr == "" {
+		return false, 0, fmt.Errorf("no paused-at annotation")
+	}
+
+	pausePeriod, err := time.ParseDuration(pausePeriodStr)
+	if err != nil {
+		return false, 0, fmt.Errorf("invalid pause period %q: %w", pausePeriodStr, err)
+	}
+
+	pausedAt, err := time.Parse(time.RFC3339, pausedAtStr)
+	if err != nil {
+		return false, 0, fmt.Errorf("invalid paused-at time %q: %w", pausedAtStr, err)
+	}
+
+	// Wall-clock comparison: depends on the local clock agreeing with
+	// whichever process wrote the paused-at timestamp (ApplyPause uses UTC).
+	elapsed := time.Since(pausedAt)
+	if elapsed >= pausePeriod {
+		return true, 0, nil
+	}
+
+	return false, pausePeriod - elapsed, nil
+}
+
+// ClearPause removes the pause from a deployment.
+// It unpauses the spec and drops the paused-at marker; the pause-period
+// annotation is intentionally left in place.
+func (h *PauseHandler) ClearPause(deploy *appsv1.Deployment) {
+	deploy.Spec.Paused = false
+	// delete on a nil map is a no-op, so missing annotations are safe here.
+	delete(deploy.Annotations, h.cfg.Annotations.PausedAt)
+}
+
+// IsPausedByReloader checks if a deployment was paused by Reloader.
+func (h *PauseHandler) IsPausedByReloader(deploy *appsv1.Deployment) bool { + if !deploy.Spec.Paused { + return false + } + + annotations := deploy.GetAnnotations() + if annotations == nil { + return false + } + + _, hasPausedAt := annotations[h.cfg.Annotations.PausedAt] + _, hasPausePeriod := annotations[h.cfg.Annotations.PausePeriod] + + return hasPausedAt && hasPausePeriod +} diff --git a/internal/pkg/reload/pause_test.go b/internal/pkg/reload/pause_test.go new file mode 100644 index 000000000..1962194d1 --- /dev/null +++ b/internal/pkg/reload/pause_test.go @@ -0,0 +1,328 @@ +package reload + +import ( + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +func TestPauseHandler_ShouldPause(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + tests := []struct { + name string + workload workload.Workload + want bool + }{ + { + name: "deployment with pause period", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + }), + want: true, + }, + { + name: "deployment without pause period", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }), + want: false, + }, + { + name: "daemonset with pause period (ignored)", + workload: workload.NewDaemonSetWorkload(&appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + }), + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := handler.ShouldPause(tt.workload) + if got != tt.want { + t.Errorf("ShouldPause() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPauseHandler_GetPausePeriod(t *testing.T) { + cfg := config.NewDefault() + 
handler := NewPauseHandler(cfg) + + tests := []struct { + name string + workload workload.Workload + wantPeriod time.Duration + wantErr bool + }{ + { + name: "valid pause period", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + }), + wantPeriod: 5 * time.Minute, + wantErr: false, + }, + { + name: "invalid pause period", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "invalid", + }, + }, + }), + wantErr: true, + }, + { + name: "no pause period annotation", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }), + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := handler.GetPausePeriod(tt.workload) + if (err != nil) != tt.wantErr { + t.Errorf("GetPausePeriod() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && got != tt.wantPeriod { + t.Errorf("GetPausePeriod() = %v, want %v", got, tt.wantPeriod) + } + }) + } +} + +func TestPauseHandler_ApplyPause(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deploy", + }, + Spec: appsv1.DeploymentSpec{ + Paused: false, + }, + } + + wl := workload.NewDeploymentWorkload(deploy) + err := handler.ApplyPause(wl) + if err != nil { + t.Fatalf("ApplyPause() error = %v", err) + } + + if !deploy.Spec.Paused { + t.Error("Expected deployment to be paused") + } + + pausedAt := deploy.Annotations[cfg.Annotations.PausedAt] + if pausedAt == "" { + t.Error("Expected paused-at annotation to be set") + } + + // Verify the timestamp is valid + _, err = time.Parse(time.RFC3339, pausedAt) + if err != nil { + t.Errorf("Invalid paused-at timestamp: %v", err) + } +} + +func 
TestPauseHandler_CheckPauseExpired(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + tests := []struct { + name string + deploy *appsv1.Deployment + wantExpired bool + wantErr bool + }{ + { + name: "pause expired", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "1ms", + cfg.Annotations.PausedAt: time.Now().Add(-time.Second).UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + wantExpired: true, + wantErr: false, + }, + { + name: "pause not expired", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "1h", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + wantExpired: false, + wantErr: false, + }, + { + name: "no paused-at annotation", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid pause period", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "invalid", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expired, _, err := handler.CheckPauseExpired(tt.deploy) + if (err != nil) != tt.wantErr { + t.Errorf("CheckPauseExpired() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && expired != tt.wantExpired { + t.Errorf("CheckPauseExpired() expired = %v, want %v", expired, tt.wantExpired) + } + }) + } +} + +func TestPauseHandler_ClearPause(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: 
map[string]string{ + cfg.Annotations.PausePeriod: "5m", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{ + Paused: true, + }, + } + + handler.ClearPause(deploy) + + if deploy.Spec.Paused { + t.Error("Expected deployment to be unpaused") + } + + if _, exists := deploy.Annotations[cfg.Annotations.PausedAt]; exists { + t.Error("Expected paused-at annotation to be removed") + } + + // Pause period should be preserved (user's config) + if deploy.Annotations[cfg.Annotations.PausePeriod] != "5m" { + t.Error("Expected pause-period annotation to be preserved") + } +} + +func TestPauseHandler_IsPausedByReloader(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + tests := []struct { + name string + deploy *appsv1.Deployment + want bool + }{ + { + name: "paused by reloader", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + want: true, + }, + { + name: "paused but not by reloader (no paused-at)", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + want: false, + }, + { + name: "not paused", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{Paused: false}, + }, + want: false, + }, + { + name: "no annotations", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := handler.IsPausedByReloader(tt.deploy) + if got != tt.want { + 
t.Errorf("IsPausedByReloader() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/pkg/reload/predicate.go b/internal/pkg/reload/predicate.go new file mode 100644 index 000000000..c866364aa --- /dev/null +++ b/internal/pkg/reload/predicate.go @@ -0,0 +1,159 @@ +package reload + +import ( + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +// resourcePredicates returns predicates for filtering resource events. +// The hashFn computes a hash from old and new objects to detect content changes. +func resourcePredicates(cfg *config.Config, hashFn func(old, new client.Object) (string, string, bool)) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return cfg.ReloadOnCreate || cfg.SyncAfterRestart + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldHash, newHash, ok := hashFn(e.ObjectOld, e.ObjectNew) + if !ok { + return false + } + return oldHash != newHash + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return cfg.ReloadOnDelete + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } +} + +// ConfigMapPredicates returns predicates for filtering ConfigMap events. +func ConfigMapPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { + return resourcePredicates( + cfg, func(old, new client.Object) (string, string, bool) { + oldCM, okOld := old.(*corev1.ConfigMap) + newCM, okNew := new.(*corev1.ConfigMap) + if !okOld || !okNew { + return "", "", false + } + return hasher.HashConfigMap(oldCM), hasher.HashConfigMap(newCM), true + }, + ) +} + +// SecretPredicates returns predicates for filtering Secret events. 
+func SecretPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { + return resourcePredicates( + cfg, func(old, new client.Object) (string, string, bool) { + oldSecret, okOld := old.(*corev1.Secret) + newSecret, okNew := new.(*corev1.Secret) + if !okOld || !okNew { + return "", "", false + } + return hasher.HashSecret(oldSecret), hasher.HashSecret(newSecret), true + }, + ) +} + +// NamespaceChecker defines the interface for checking if a namespace is allowed. +type NamespaceChecker interface { + Contains(name string) bool +} + +// NamespaceFilterPredicate returns a predicate that filters resources by namespace. +func NamespaceFilterPredicate(cfg *config.Config) predicate.Predicate { + return NamespaceFilterPredicateWithCache(cfg, nil) +} + +// NamespaceFilterPredicateWithCache returns a predicate that filters resources by namespace, +// using the provided NamespaceChecker for namespace selector filtering. +func NamespaceFilterPredicateWithCache(cfg *config.Config, nsCache NamespaceChecker) predicate.Predicate { + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + namespace := obj.GetNamespace() + + if cfg.IsNamespaceIgnored(namespace) { + return false + } + + if nsCache != nil && !nsCache.Contains(namespace) { + return false + } + + return true + }, + ) +} + +// LabelSelectorPredicate returns a predicate that filters resources by labels. 
+func LabelSelectorPredicate(cfg *config.Config) predicate.Predicate { + if len(cfg.ResourceSelectors) == 0 { + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + return true + }, + ) + } + + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + labels := obj.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + for _, selector := range cfg.ResourceSelectors { + if selector.Matches(LabelsSet(labels)) { + return true + } + } + + return false + }, + ) +} + +// LabelsSet implements the k8s.io/apimachinery/pkg/labels.Labels interface +// for a map[string]string. This allows using label maps with label selectors. +type LabelsSet map[string]string + +// Has returns whether the provided label key exists in the set. +func (ls LabelsSet) Has(key string) bool { + _, ok := ls[key] + return ok +} + +// Get returns the value for the provided label key. +func (ls LabelsSet) Get(key string) string { + return ls[key] +} + +// Lookup returns the value for the provided label key and whether it exists. +func (ls LabelsSet) Lookup(key string) (string, bool) { + value, ok := ls[key] + return value, ok +} + +// IgnoreAnnotationPredicate returns a predicate that filters out resources with the ignore annotation. +func IgnoreAnnotationPredicate(cfg *config.Config) predicate.Predicate { + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + annotations := obj.GetAnnotations() + if annotations == nil { + return true + } + + return annotations[cfg.Annotations.Ignore] != "true" + }, + ) +} + +// CombinedPredicates combines multiple predicates with AND logic. +func CombinedPredicates(predicates ...predicate.Predicate) predicate.Predicate { + return predicate.And(predicates...) 
+} diff --git a/internal/pkg/reload/predicate_test.go b/internal/pkg/reload/predicate_test.go new file mode 100644 index 000000000..b6d48340c --- /dev/null +++ b/internal/pkg/reload/predicate_test.go @@ -0,0 +1,936 @@ +package reload + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +func TestNamespaceFilterPredicate_Create(t *testing.T) { + tests := []struct { + name string + ignoredNamespaces []string + eventNamespace string + wantAllow bool + }{ + { + name: "allow non-ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + eventNamespace: "default", + wantAllow: true, + }, + { + name: "block ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + eventNamespace: "kube-system", + wantAllow: false, + }, + { + name: "allow when no namespaces ignored", + ignoredNamespaces: []string{}, + eventNamespace: "kube-system", + wantAllow: true, + }, + { + name: "block multiple ignored namespaces", + ignoredNamespaces: []string{"kube-system", "kube-public", "test-ns"}, + eventNamespace: "test-ns", + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = tt.ignoredNamespaces + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.eventNamespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) + } +} + +func TestNamespaceFilterPredicate_Update(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-cm", + Namespace: "default", + }, + } + + e := event.UpdateEvent{ObjectNew: cm} + if !predicate.Update(e) { + t.Error("Update() should allow non-ignored namespace") + } + + cm.Namespace = "kube-system" + e = event.UpdateEvent{ObjectNew: cm} + if predicate.Update(e) { + t.Error("Update() should block ignored namespace") + } +} + +func TestNamespaceFilterPredicate_Delete(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + e := event.DeleteEvent{Object: cm} + if !predicate.Delete(e) { + t.Error("Delete() should allow non-ignored namespace") + } +} + +func TestNamespaceFilterPredicate_Generic(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + e := event.GenericEvent{Object: cm} + if !predicate.Generic(e) { + t.Error("Generic() should allow non-ignored namespace") + } +} + +func TestLabelSelectorPredicate_Create(t *testing.T) { + tests := []struct { + name string + selector string + objectLabels map[string]string + wantAllow bool + }{ + { + name: "match single label", + selector: "app=reloader", + objectLabels: map[string]string{"app": "reloader"}, + wantAllow: true, + }, + { + name: "no match single label", + selector: "app=reloader", + objectLabels: map[string]string{"app": "other"}, + wantAllow: false, + }, + { + name: "match multiple labels", + selector: "app=reloader,env=prod", + objectLabels: map[string]string{"app": "reloader", "env": "prod", "extra": "value"}, + wantAllow: true, + }, + { + name: "partial match fails", + selector: "app=reloader,env=prod", + objectLabels: map[string]string{"app": "reloader"}, + wantAllow: false, + }, + { + name: "empty 
labels no match", + selector: "app=reloader", + objectLabels: map[string]string{}, + wantAllow: false, + }, + { + name: "nil labels no match", + selector: "app=reloader", + objectLabels: nil, + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + selector, err := labels.Parse(tt.selector) + if err != nil { + t.Fatalf("Failed to parse selector: %v", err) + } + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.objectLabels, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) + } +} + +func TestLabelSelectorPredicate_NoSelectors(t *testing.T) { + cfg := config.NewDefault() + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"any": "label"}, + }, + } + + e := event.CreateEvent{Object: cm} + if !predicate.Create(e) { + t.Error("Create() should allow all when no selectors configured") + } +} + +func TestLabelSelectorPredicate_MultipleSelectors(t *testing.T) { + cfg := config.NewDefault() + selector1, _ := labels.Parse("app=reloader") + selector2, _ := labels.Parse("type=config") + cfg.ResourceSelectors = []labels.Selector{selector1, selector2} + predicate := LabelSelectorPredicate(cfg) + + tests := []struct { + name string + labels map[string]string + wantAllow bool + }{ + { + name: "matches first selector", + labels: map[string]string{"app": "reloader"}, + wantAllow: true, + }, + { + name: "matches second selector", + labels: map[string]string{"type": "config"}, + wantAllow: true, + }, + { + name: "matches both selectors", + labels: map[string]string{"app": "reloader", "type": "config"}, + 
wantAllow: true, + }, + { + name: "matches neither selector", + labels: map[string]string{"other": "value"}, + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) + } +} + +func TestLabelSelectorPredicate_Update(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("app=reloader") + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cmMatching := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"app": "reloader"}, + }, + } + + cmNotMatching := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"app": "other"}, + }, + } + + e := event.UpdateEvent{ObjectNew: cmMatching} + if !predicate.Update(e) { + t.Error("Update() should allow matching labels") + } + + e = event.UpdateEvent{ObjectNew: cmNotMatching} + if predicate.Update(e) { + t.Error("Update() should block non-matching labels") + } +} + +func TestLabelSelectorPredicate_Delete(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("app=reloader") + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"app": "reloader"}, + }, + } + + e := event.DeleteEvent{Object: cm} + if !predicate.Delete(e) { + t.Error("Delete() should allow matching labels") + } +} + +func TestLabelSelectorPredicate_Generic(t *testing.T) { + cfg := config.NewDefault() + selector, _ := 
labels.Parse("app=reloader") + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"app": "reloader"}, + }, + } + + e := event.GenericEvent{Object: cm} + if !predicate.Generic(e) { + t.Error("Generic() should allow matching labels") + } +} + +func TestCombinedFiltering(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + selector, _ := labels.Parse("managed=true") + cfg.ResourceSelectors = []labels.Selector{selector} + + nsPredicate := NamespaceFilterPredicate(cfg) + labelPredicate := LabelSelectorPredicate(cfg) + + tests := []struct { + name string + namespace string + labels map[string]string + wantNSAllow bool + wantLabelAllow bool + }{ + { + name: "allowed namespace and matching labels", + namespace: "default", + labels: map[string]string{"managed": "true"}, + wantNSAllow: true, + wantLabelAllow: true, + }, + { + name: "allowed namespace but non-matching labels", + namespace: "default", + labels: map[string]string{"managed": "false"}, + wantNSAllow: true, + wantLabelAllow: false, + }, + { + name: "ignored namespace with matching labels", + namespace: "kube-system", + labels: map[string]string{"managed": "true"}, + wantNSAllow: false, + wantLabelAllow: true, + }, + { + name: "ignored namespace and non-matching labels", + namespace: "kube-system", + labels: map[string]string{"managed": "false"}, + wantNSAllow: false, + wantLabelAllow: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + + gotNS := nsPredicate.Create(e) + if gotNS != tt.wantNSAllow { + t.Errorf("Namespace predicate Create() = %v, want %v", gotNS, tt.wantNSAllow) + } + + 
gotLabel := labelPredicate.Create(e) + if gotLabel != tt.wantLabelAllow { + t.Errorf("Label predicate Create() = %v, want %v", gotLabel, tt.wantLabelAllow) + } + + combinedAllow := gotNS && gotLabel + expectedCombined := tt.wantNSAllow && tt.wantLabelAllow + if combinedAllow != expectedCombined { + t.Errorf("Combined allow = %v, want %v", combinedAllow, expectedCombined) + } + }, + ) + } +} + +func TestFilteringWithSecrets(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + nsPredicate := NamespaceFilterPredicate(cfg) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + } + + e := event.CreateEvent{Object: secret} + if !nsPredicate.Create(e) { + t.Error("Should allow secret in non-ignored namespace") + } + + secret.Namespace = "kube-system" + e = event.CreateEvent{Object: secret} + if nsPredicate.Create(e) { + t.Error("Should block secret in ignored namespace") + } +} + +func TestExistsLabelSelector(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("managed") + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + tests := []struct { + name string + labels map[string]string + wantAllow bool + }{ + { + name: "label exists with value true", + labels: map[string]string{"managed": "true"}, + wantAllow: true, + }, + { + name: "label exists with value false", + labels: map[string]string{"managed": "false"}, + wantAllow: true, + }, + { + name: "label exists with empty value", + labels: map[string]string{"managed": ""}, + wantAllow: true, + }, + { + name: "label does not exist", + labels: map[string]string{"other": "value"}, + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + 
got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) + } +} + +// mockNamespaceChecker implements NamespaceChecker for testing. +type mockNamespaceChecker struct { + allowed map[string]bool +} + +func (m *mockNamespaceChecker) Contains(name string) bool { + return m.allowed[name] +} + +func TestNamespaceFilterPredicateWithCache(t *testing.T) { + tests := []struct { + name string + ignoredNamespaces []string + cacheAllowed map[string]bool + eventNamespace string + wantAllow bool + }{ + { + name: "allowed by cache and not ignored", + ignoredNamespaces: []string{"kube-system"}, + cacheAllowed: map[string]bool{"production": true}, + eventNamespace: "production", + wantAllow: true, + }, + { + name: "blocked by cache", + ignoredNamespaces: []string{}, + cacheAllowed: map[string]bool{"production": true}, + eventNamespace: "staging", + wantAllow: false, + }, + { + name: "blocked by ignore list even if in cache", + ignoredNamespaces: []string{"kube-system"}, + cacheAllowed: map[string]bool{"kube-system": true}, + eventNamespace: "kube-system", + wantAllow: false, + }, + { + name: "ignore list checked before cache", + ignoredNamespaces: []string{"blocked-ns"}, + cacheAllowed: map[string]bool{"blocked-ns": true}, + eventNamespace: "blocked-ns", + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = tt.ignoredNamespaces + + cache := &mockNamespaceChecker{allowed: tt.cacheAllowed} + predicate := NamespaceFilterPredicateWithCache(cfg, cache) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.eventNamespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) + } +} + +func TestNamespaceFilterPredicateWithCache_NilCache(t *testing.T) { + cfg := 
config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + + predicate := NamespaceFilterPredicateWithCache(cfg, nil) + + tests := []struct { + namespace string + wantAllow bool + }{ + {"default", true}, + {"production", true}, + {"kube-system", false}, // Should still respect ignore list + } + + for _, tt := range tests { + t.Run( + tt.namespace, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v for namespace %s", got, tt.wantAllow, tt.namespace) + } + }, + ) + } +} + +func TestIgnoreAnnotationPredicate_Create(t *testing.T) { + cfg := config.NewDefault() + predicate := IgnoreAnnotationPredicate(cfg) + + tests := []struct { + name string + annotations map[string]string + wantAllow bool + }{ + { + name: "no annotations", + annotations: nil, + wantAllow: true, + }, + { + name: "empty annotations", + annotations: map[string]string{}, + wantAllow: true, + }, + { + name: "other annotations only", + annotations: map[string]string{"other": "value"}, + wantAllow: true, + }, + { + name: "ignore annotation true", + annotations: map[string]string{cfg.Annotations.Ignore: "true"}, + wantAllow: false, + }, + { + name: "ignore annotation false", + annotations: map[string]string{cfg.Annotations.Ignore: "false"}, + wantAllow: true, + }, + { + name: "ignore annotation with other value", + annotations: map[string]string{cfg.Annotations.Ignore: "yes"}, + wantAllow: true, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Annotations: tt.annotations, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) + } 
+} + +func TestIgnoreAnnotationPredicate_AllEventTypes(t *testing.T) { + cfg := config.NewDefault() + predicate := IgnoreAnnotationPredicate(cfg) + + ignoredCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ignored-cm", + Namespace: "default", + Annotations: map[string]string{cfg.Annotations.Ignore: "true"}, + }, + } + + allowedCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "allowed-cm", + Namespace: "default", + }, + } + + if predicate.Update(event.UpdateEvent{ObjectNew: ignoredCM}) { + t.Error("Update() should block ignored resource") + } + if !predicate.Update(event.UpdateEvent{ObjectNew: allowedCM}) { + t.Error("Update() should allow non-ignored resource") + } + + if predicate.Delete(event.DeleteEvent{Object: ignoredCM}) { + t.Error("Delete() should block ignored resource") + } + if !predicate.Delete(event.DeleteEvent{Object: allowedCM}) { + t.Error("Delete() should allow non-ignored resource") + } + + if predicate.Generic(event.GenericEvent{Object: ignoredCM}) { + t.Error("Generic() should block ignored resource") + } + if !predicate.Generic(event.GenericEvent{Object: allowedCM}) { + t.Error("Generic() should allow non-ignored resource") + } +} + +func TestCombinedPredicates(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + + nsPredicate := NamespaceFilterPredicate(cfg) + ignorePredicate := IgnoreAnnotationPredicate(cfg) + + combined := CombinedPredicates(nsPredicate, ignorePredicate) + + tests := []struct { + name string + namespace string + annotations map[string]string + wantAllow bool + }{ + { + name: "both predicates pass", + namespace: "default", + annotations: nil, + wantAllow: true, + }, + { + name: "namespace predicate fails", + namespace: "kube-system", + annotations: nil, + wantAllow: false, + }, + { + name: "ignore predicate fails", + namespace: "default", + annotations: map[string]string{cfg.Annotations.Ignore: "true"}, + wantAllow: false, + }, + { + name: "both 
predicates fail", + namespace: "kube-system", + annotations: map[string]string{cfg.Annotations.Ignore: "true"}, + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + Annotations: tt.annotations, + }, + } + + e := event.CreateEvent{Object: cm} + got := combined.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) + } +} + +func TestConfigMapPredicates_Update(t *testing.T) { + cfg := config.NewDefault() + hasher := NewHasher() + predicate := ConfigMapPredicates(cfg, hasher) + + oldCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string]string{"key": "value1"}, + } + newCMSameContent := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string]string{"key": "value1"}, + } + newCMDifferentContent := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string]string{"key": "value2"}, + } + + e := event.UpdateEvent{ObjectOld: oldCM, ObjectNew: newCMSameContent} + if predicate.Update(e) { + t.Error("Update() should return false when content is the same") + } + + e = event.UpdateEvent{ObjectOld: oldCM, ObjectNew: newCMDifferentContent} + if !predicate.Update(e) { + t.Error("Update() should return true when content changed") + } +} + +func TestConfigMapPredicates_InvalidTypes(t *testing.T) { + cfg := config.NewDefault() + hasher := NewHasher() + predicate := ConfigMapPredicates(cfg, hasher) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.UpdateEvent{ObjectOld: secret, ObjectNew: cm} + if predicate.Update(e) { + t.Error("Update() should return false 
for mismatched types") + } + + e = event.UpdateEvent{ObjectOld: secret, ObjectNew: secret} + if predicate.Update(e) { + t.Error("Update() should return false for non-ConfigMap types") + } +} + +func TestConfigMapPredicates_CreateDeleteGeneric(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnCreate = true + cfg.ReloadOnDelete = true + hasher := NewHasher() + predicate := ConfigMapPredicates(cfg, hasher) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + if !predicate.Create(event.CreateEvent{Object: cm}) { + t.Error("Create() should return true when ReloadOnCreate is true") + } + + if !predicate.Delete(event.DeleteEvent{Object: cm}) { + t.Error("Delete() should return true when ReloadOnDelete is true") + } + + if predicate.Generic(event.GenericEvent{Object: cm}) { + t.Error("Generic() should always return false") + } +} + +func TestSecretPredicates_Update(t *testing.T) { + cfg := config.NewDefault() + hasher := NewHasher() + predicate := SecretPredicates(cfg, hasher) + + oldSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("value1")}, + } + newSecretSameContent := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("value1")}, + } + newSecretDifferentContent := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("value2")}, + } + + e := event.UpdateEvent{ObjectOld: oldSecret, ObjectNew: newSecretSameContent} + if predicate.Update(e) { + t.Error("Update() should return false when content is the same") + } + + e = event.UpdateEvent{ObjectOld: oldSecret, ObjectNew: newSecretDifferentContent} + if !predicate.Update(e) { + t.Error("Update() should return true when content changed") + } +} + +func TestSecretPredicates_InvalidTypes(t *testing.T) { + cfg := config.NewDefault() + 
hasher := NewHasher() + predicate := SecretPredicates(cfg, hasher) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.UpdateEvent{ObjectOld: cm, ObjectNew: secret} + if predicate.Update(e) { + t.Error("Update() should return false for mismatched types") + } + + e = event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} + if predicate.Update(e) { + t.Error("Update() should return false for non-Secret types") + } +} + +func TestLabelsSet(t *testing.T) { + ls := LabelsSet{"app": "test", "env": "prod"} + + if !ls.Has("app") { + t.Error("Has(app) should return true") + } + if ls.Has("nonexistent") { + t.Error("Has(nonexistent) should return false") + } + + if ls.Get("app") != "test" { + t.Errorf("Get(app) = %v, want test", ls.Get("app")) + } + if ls.Get("env") != "prod" { + t.Errorf("Get(env) = %v, want prod", ls.Get("env")) + } + if ls.Get("nonexistent") != "" { + t.Errorf("Get(nonexistent) = %v, want empty string", ls.Get("nonexistent")) + } +} diff --git a/internal/pkg/reload/resource_type.go b/internal/pkg/reload/resource_type.go new file mode 100644 index 000000000..0404e815f --- /dev/null +++ b/internal/pkg/reload/resource_type.go @@ -0,0 +1,23 @@ +package reload + +// ResourceType represents the type of Kubernetes resource. +type ResourceType string + +const ( + // ResourceTypeConfigMap represents a ConfigMap resource. + ResourceTypeConfigMap ResourceType = "configmap" + // ResourceTypeSecret represents a Secret resource. + ResourceTypeSecret ResourceType = "secret" +) + +// Kind returns the capitalized Kubernetes Kind (e.g., "ConfigMap", "Secret"). 
+func (r ResourceType) Kind() string { + switch r { + case ResourceTypeConfigMap: + return "ConfigMap" + case ResourceTypeSecret: + return "Secret" + default: + return string(r) + } +} diff --git a/internal/pkg/reload/resource_type_test.go b/internal/pkg/reload/resource_type_test.go new file mode 100644 index 000000000..e577e82b6 --- /dev/null +++ b/internal/pkg/reload/resource_type_test.go @@ -0,0 +1,28 @@ +package reload + +import ( + "testing" +) + +func TestResourceType_Kind(t *testing.T) { + tests := []struct { + resourceType ResourceType + want string + }{ + {ResourceTypeConfigMap, "ConfigMap"}, + {ResourceTypeSecret, "Secret"}, + {ResourceType("unknown"), "unknown"}, + {ResourceType("custom"), "custom"}, + } + + for _, tt := range tests { + t.Run( + string(tt.resourceType), func(t *testing.T) { + got := tt.resourceType.Kind() + if got != tt.want { + t.Errorf("ResourceType(%q).Kind() = %v, want %v", tt.resourceType, got, tt.want) + } + }, + ) + } +} diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go new file mode 100644 index 000000000..076cf786e --- /dev/null +++ b/internal/pkg/reload/service.go @@ -0,0 +1,320 @@ +package reload + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// Service orchestrates the reload logic for ConfigMaps and Secrets. +type Service struct { + cfg *config.Config + log logr.Logger + hasher *Hasher + matcher *Matcher + strategy Strategy +} + +// NewService creates a new reload Service with the given configuration. +func NewService(cfg *config.Config, log logr.Logger) *Service { + return &Service{ + cfg: cfg, + log: log, + hasher: NewHasher(), + matcher: NewMatcher(cfg), + strategy: NewStrategy(cfg), + } +} + +// Process evaluates all workloads to determine which should be reloaded. 
+func (s *Service) Process(change ResourceChange, workloads []workload.Workload) []ReloadDecision { + if change.IsNil() { + return nil + } + + if !s.shouldProcessEvent(change.GetEventType()) { + return nil + } + + hash := change.ComputeHash(s.hasher) + if change.GetEventType() == EventTypeDelete { + hash = s.hasher.EmptyHash() + } + + return s.processResource( + change.GetName(), + change.GetNamespace(), + change.GetAnnotations(), + change.GetResourceType(), + hash, + workloads, + ) +} + +func (s *Service) processResource( + resourceName string, + resourceNamespace string, + resourceAnnotations map[string]string, + resourceType ResourceType, + hash string, + workloads []workload.Workload, +) []ReloadDecision { + var decisions []ReloadDecision + + for _, wl := range workloads { + if wl.GetNamespace() != resourceNamespace { + continue + } + + if s.cfg.IsWorkloadIgnored(string(wl.Kind())) { + continue + } + + var usesResource bool + switch resourceType { + case ResourceTypeConfigMap: + usesResource = wl.UsesConfigMap(resourceName) + case ResourceTypeSecret: + usesResource = wl.UsesSecret(resourceName) + } + + input := MatchInput{ + ResourceName: resourceName, + ResourceNamespace: resourceNamespace, + ResourceType: resourceType, + ResourceAnnotations: resourceAnnotations, + WorkloadAnnotations: wl.GetAnnotations(), + PodAnnotations: wl.GetPodTemplateAnnotations(), + } + + matchResult := s.matcher.ShouldReload(input) + + shouldReload := matchResult.ShouldReload + if matchResult.AutoReload && !usesResource { + shouldReload = false + } + + decisions = append( + decisions, ReloadDecision{ + Workload: wl, + ShouldReload: shouldReload, + AutoReload: matchResult.AutoReload, + Reason: matchResult.Reason, + Hash: hash, + }, + ) + } + + return decisions +} + +func (s *Service) shouldProcessEvent(eventType EventType) bool { + switch eventType { + case EventTypeCreate: + return s.cfg.ReloadOnCreate + case EventTypeDelete: + return s.cfg.ReloadOnDelete + case EventTypeUpdate: + 
return true + default: + return false + } +} + +// ApplyReload applies the reload strategy to a workload. +func (s *Service) ApplyReload( + ctx context.Context, + wl workload.Workload, + resourceName string, + resourceType ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + container := s.findTargetContainer(wl, resourceName, resourceType, autoReload) + + input := StrategyInput{ + ResourceName: resourceName, + ResourceType: resourceType, + Namespace: namespace, + Hash: hash, + Container: container, + PodAnnotations: wl.GetPodTemplateAnnotations(), + AutoReload: autoReload, + } + + updated, err := s.strategy.Apply(input) + if err != nil { + return false, err + } + + if updated { + // Attribution annotation is informational; log errors but don't fail reloads + if err := s.setAttributionAnnotation(wl, resourceName, resourceType, namespace, hash, container); err != nil { + s.log.V(1).Info("failed to set attribution annotation", "error", err, "workload", wl.GetName()) + } + } + + return updated, nil +} + +func (s *Service) setAttributionAnnotation( + wl workload.Workload, + resourceName string, + resourceType ResourceType, + namespace string, + hash string, + container *corev1.Container, +) error { + containerName := "" + if container != nil { + containerName = container.Name + } + + source := ReloadSource{ + Kind: string(resourceType), + Name: resourceName, + Namespace: namespace, + Hash: hash, + Containers: []string{containerName}, + ReloadedAt: time.Now().UTC(), + } + + sourceJSON, err := json.Marshal(source) + if err != nil { + return fmt.Errorf("failed to marshal reload source: %w", err) + } + + wl.SetPodTemplateAnnotation(s.cfg.Annotations.LastReloadedFrom, string(sourceJSON)) + return nil +} + +func (s *Service) findTargetContainer( + wl workload.Workload, + resourceName string, + resourceType ResourceType, + autoReload bool, +) *corev1.Container { + containers := wl.GetContainers() + if len(containers) == 0 { + return nil + } 
+ + if !autoReload { + return &containers[0] + } + + volumes := wl.GetVolumes() + initContainers := wl.GetInitContainers() + + volumeName := s.findVolumeUsingResource(volumes, resourceName, resourceType) + if volumeName != "" { + container := s.findContainerWithVolumeMount(containers, volumeName) + if container != nil { + return container + } + container = s.findContainerWithVolumeMount(initContainers, volumeName) + if container != nil { + return &containers[0] + } + } + + container := s.findContainerWithEnvRef(containers, resourceName, resourceType) + if container != nil { + return container + } + + container = s.findContainerWithEnvRef(initContainers, resourceName, resourceType) + if container != nil { + return &containers[0] + } + + return &containers[0] +} + +func (s *Service) findVolumeUsingResource(volumes []corev1.Volume, resourceName string, resourceType ResourceType) string { + for _, vol := range volumes { + switch resourceType { + case ResourceTypeConfigMap: + if vol.ConfigMap != nil && vol.ConfigMap.Name == resourceName { + return vol.Name + } + if vol.Projected != nil { + for _, src := range vol.Projected.Sources { + if src.ConfigMap != nil && src.ConfigMap.Name == resourceName { + return vol.Name + } + } + } + case ResourceTypeSecret: + if vol.Secret != nil && vol.Secret.SecretName == resourceName { + return vol.Name + } + if vol.Projected != nil { + for _, src := range vol.Projected.Sources { + if src.Secret != nil && src.Secret.Name == resourceName { + return vol.Name + } + } + } + } + } + return "" +} + +func (s *Service) findContainerWithVolumeMount(containers []corev1.Container, volumeName string) *corev1.Container { + for i := range containers { + for _, mount := range containers[i].VolumeMounts { + if mount.Name == volumeName { + return &containers[i] + } + } + } + return nil +} + +func (s *Service) findContainerWithEnvRef(containers []corev1.Container, resourceName string, resourceType ResourceType) *corev1.Container { + for i := range 
containers { + for _, env := range containers[i].Env { + if env.ValueFrom == nil { + continue + } + switch resourceType { + case ResourceTypeConfigMap: + if env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == resourceName { + return &containers[i] + } + case ResourceTypeSecret: + if env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == resourceName { + return &containers[i] + } + } + } + + for _, envFrom := range containers[i].EnvFrom { + switch resourceType { + case ResourceTypeConfigMap: + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == resourceName { + return &containers[i] + } + case ResourceTypeSecret: + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == resourceName { + return &containers[i] + } + } + } + } + return nil +} + +// Hasher returns the hasher used by this service. +func (s *Service) Hasher() *Hasher { + return s.hasher +} diff --git a/internal/pkg/reload/service_test.go b/internal/pkg/reload/service_test.go new file mode 100644 index 000000000..24356b1e6 --- /dev/null +++ b/internal/pkg/reload/service_test.go @@ -0,0 +1,1361 @@ +package reload + +import ( + "context" + "testing" + + "github.com/go-logr/logr/testr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/testutil" + "github.com/stakater/Reloader/internal/pkg/workload" +) + +func TestService_ProcessConfigMap_AutoReload(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Create a deployment with auto annotation that uses the configmap + deploy := testutil.NewDeployment( + "test-deploy", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: 
corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }, + }, + } + + workloads := []workload.Workload{ + workload.NewDeploymentWorkload(deploy), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{ + "key": "value", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.Process(change, workloads) + + if len(decisions) != 1 { + t.Fatalf("Expected 1 decision, got %d", len(decisions)) + } + + if !decisions[0].ShouldReload { + t.Error("Expected ShouldReload to be true") + } + + if !decisions[0].AutoReload { + t.Error("Expected AutoReload to be true") + } + + if decisions[0].Hash == "" { + t.Error("Expected Hash to be non-empty") + } +} + +func TestService_ProcessConfigMap_ExplicitAnnotation(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment( + "test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }, + ) + + workloads := []workload.Workload{ + workload.NewDeploymentWorkload(deploy), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{ + "key": "value", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.Process(change, workloads) + + if len(decisions) != 1 { + t.Fatalf("Expected 1 decision, got %d", len(decisions)) + } + + if !decisions[0].ShouldReload { + t.Error("Expected ShouldReload to be true for explicit annotation") + } + + if decisions[0].AutoReload { + t.Error("Expected AutoReload to be false for explicit annotation") + } +} + +func TestService_ProcessConfigMap_IgnoredResource(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Create a deployment with auto annotation + deploy := testutil.NewDeployment( + 
"test-deploy", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }, + }, + } + + workloads := []workload.Workload{ + workload.NewDeploymentWorkload(deploy), + } + + // ConfigMap with ignore annotation + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Annotations: map[string]string{ + "reloader.stakater.com/ignore": "true", + }, + }, + Data: map[string]string{ + "key": "value", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.Process(change, workloads) + + // Should still get a decision, but ShouldReload should be false + for _, d := range decisions { + if d.ShouldReload { + t.Error("Expected ShouldReload to be false for ignored resource") + } + } +} + +func TestService_ProcessSecret_AutoReload(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Create a deployment with auto annotation that uses the secret + deploy := testutil.NewDeployment( + "test-deploy", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "test-secret", + }, + }, + }, + } + + workloads := []workload.Workload{ + workload.NewDeploymentWorkload(deploy), + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "key": []byte("value"), + }, + } + + change := SecretChange{ + Secret: secret, + EventType: EventTypeUpdate, + } + + decisions := svc.Process(change, workloads) + + if len(decisions) != 
1 { + t.Fatalf("Expected 1 decision, got %d", len(decisions)) + } + + if !decisions[0].ShouldReload { + t.Error("Expected ShouldReload to be true") + } + + if !decisions[0].AutoReload { + t.Error("Expected AutoReload to be true") + } +} + +func TestService_ProcessConfigMap_DeleteEvent(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnDelete = true + svc := NewService(cfg, testr.New(t)) + + // Create a deployment with explicit configmap annotation + deploy := testutil.NewDeployment( + "test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }, + ) + + workloads := []workload.Workload{ + workload.NewDeploymentWorkload(deploy), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeDelete, + } + + decisions := svc.Process(change, workloads) + + if len(decisions) != 1 { + t.Fatalf("Expected 1 decision, got %d", len(decisions)) + } + + if !decisions[0].ShouldReload { + t.Error("Expected ShouldReload to be true for delete event") + } + + // Hash should be empty for delete events + if decisions[0].Hash != "" { + t.Errorf("Expected empty hash for delete event, got %s", decisions[0].Hash) + } +} + +func TestService_ProcessConfigMap_DeleteEventDisabled(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnDelete = false // Disabled by default + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment( + "test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }, + ) + + workloads := []workload.Workload{ + workload.NewDeploymentWorkload(deploy), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeDelete, + } + + decisions := svc.Process(change, workloads) + + // Should return nil when delete 
events are disabled + if decisions != nil { + t.Error("Expected nil decisions when delete events are disabled") + } +} + +func TestService_ApplyReload_EnvVarStrategy(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyEnvVars + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment("test-deploy", "default", nil) + accessor := workload.NewDeploymentWorkload(deploy) + + ctx := context.Background() + updated, err := svc.ApplyReload(ctx, accessor, "test-cm", ResourceTypeConfigMap, "default", "abc123hash", false) + + if err != nil { + t.Fatalf("ApplyReload failed: %v", err) + } + + if !updated { + t.Error("Expected updated to be true") + } + + // Verify env var was added + containers := accessor.GetContainers() + if len(containers) == 0 { + t.Fatal("No containers found") + } + + found := false + for _, env := range containers[0].Env { + if env.Name == "STAKATER_TEST_CM_CONFIGMAP" && env.Value == "abc123hash" { + found = true + break + } + } + + if !found { + t.Error("Expected env var STAKATER_TEST_CM_CONFIGMAP to be set") + } + + // Verify attribution annotation was set + annotations := accessor.GetPodTemplateAnnotations() + if annotations["reloader.stakater.com/last-reloaded-from"] == "" { + t.Error("Expected last-reloaded-from annotation to be set") + } +} + +func TestService_ApplyReload_AnnotationStrategy(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyAnnotations + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment("test-deploy", "default", nil) + accessor := workload.NewDeploymentWorkload(deploy) + + ctx := context.Background() + updated, err := svc.ApplyReload(ctx, accessor, "test-cm", ResourceTypeConfigMap, "default", "abc123hash", false) + + if err != nil { + t.Fatalf("ApplyReload failed: %v", err) + } + + if !updated { + t.Error("Expected updated to be true") + } + + // Verify annotation was added + annotations := 
accessor.GetPodTemplateAnnotations() + if annotations["reloader.stakater.com/last-reloaded-from"] == "" { + t.Error("Expected last-reloaded-from annotation to be set") + } +} + +func TestService_ApplyReload_EnvVarDeletion(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyEnvVars + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment("test-deploy", "default", nil) + // Pre-add an env var + deploy.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ + {Name: "STAKATER_TEST_CM_CONFIGMAP", Value: "oldhash"}, + {Name: "OTHER_VAR", Value: "keep"}, + } + accessor := workload.NewDeploymentWorkload(deploy) + + ctx := context.Background() + // Empty hash signals deletion + updated, err := svc.ApplyReload(ctx, accessor, "test-cm", ResourceTypeConfigMap, "default", "", false) + + if err != nil { + t.Fatalf("ApplyReload failed: %v", err) + } + + if !updated { + t.Error("Expected updated to be true for env var removal") + } + + // Verify env var was removed + containers := accessor.GetContainers() + for _, env := range containers[0].Env { + if env.Name == "STAKATER_TEST_CM_CONFIGMAP" { + t.Error("Expected env var STAKATER_TEST_CM_CONFIGMAP to be removed") + } + } + + // Verify other env var was kept + found := false + for _, env := range containers[0].Env { + if env.Name == "OTHER_VAR" { + found = true + break + } + } + if !found { + t.Error("Expected OTHER_VAR to be kept") + } +} + +func TestService_ApplyReload_NoChangeIfSameHash(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyEnvVars + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment("test-deploy", "default", nil) + // Pre-add env var with same hash + deploy.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ + {Name: "STAKATER_TEST_CM_CONFIGMAP", Value: "abc123hash"}, + } + accessor := workload.NewDeploymentWorkload(deploy) + + ctx := context.Background() + updated, err := svc.ApplyReload(ctx, 
accessor, "test-cm", ResourceTypeConfigMap, "default", "abc123hash", false) + + if err != nil { + t.Fatalf("ApplyReload failed: %v", err) + } + + if updated { + t.Error("Expected updated to be false when hash is unchanged") + } +} + +func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Create multiple workloads + deploy1 := testutil.NewDeployment( + "deploy1", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + deploy1.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "shared-cm", + }, + }, + }, + }, + } + + deploy2 := testutil.NewDeployment( + "deploy2", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + deploy2.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "shared-cm", + }, + }, + }, + }, + } + + // Deploy3 doesn't use the configmap + deploy3 := testutil.NewDeployment( + "deploy3", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + + workloads := []workload.Workload{ + workload.NewDeploymentWorkload(deploy1), + workload.NewDeploymentWorkload(deploy2), + workload.NewDeploymentWorkload(deploy3), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shared-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.Process(change, workloads) + + if len(decisions) != 3 { + t.Fatalf("Expected 3 decisions, got %d", len(decisions)) + } + + // Count how many should reload + reloadCount := 0 + for _, d := range decisions { + if 
d.ShouldReload { + reloadCount++ + } + } + + // Only deploy1 and deploy2 should reload (they use the configmap) + if reloadCount != 2 { + t.Errorf("Expected 2 workloads to reload, got %d", reloadCount) + } +} + +func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Create deployments in different namespaces + deploy1 := testutil.NewDeployment( + "deploy1", "namespace-a", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + deploy1.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }, + }, + } + + deploy2 := testutil.NewDeployment( + "deploy2", "namespace-b", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + deploy2.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }, + }, + } + + workloads := []workload.Workload{ + workload.NewDeploymentWorkload(deploy1), + workload.NewDeploymentWorkload(deploy2), + } + + // ConfigMap in namespace-a + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "namespace-a", + }, + Data: map[string]string{"key": "value"}, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.Process(change, workloads) + + // Should only affect deploy1 (same namespace) + reloadCount := 0 + for _, d := range decisions { + if d.ShouldReload { + reloadCount++ + } + } + + if reloadCount != 1 { + t.Errorf("Expected 1 workload to reload (same namespace), got %d", reloadCount) + } +} + +func TestService_Hasher(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, 
testr.New(t)) + + hasher := svc.Hasher() + if hasher == nil { + t.Fatal("Expected Hasher to return non-nil hasher") + } + + // Verify it's functional + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Data: map[string]string{"key": "value"}, + } + hash := hasher.HashConfigMap(cm) + if hash == "" { + t.Error("Expected hasher to produce non-empty hash") + } +} + +func TestService_shouldProcessEvent(t *testing.T) { + tests := []struct { + name string + reloadOnCreate bool + reloadOnDelete bool + eventType EventType + expected bool + }{ + {"create enabled", true, false, EventTypeCreate, true}, + {"create disabled", false, false, EventTypeCreate, false}, + {"delete enabled", false, true, EventTypeDelete, true}, + {"delete disabled", false, false, EventTypeDelete, false}, + {"update always true", false, false, EventTypeUpdate, true}, + {"unknown event", false, false, EventType("unknown"), false}, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnCreate = tt.reloadOnCreate + cfg.ReloadOnDelete = tt.reloadOnDelete + svc := NewService(cfg, testr.New(t)) + + result := svc.shouldProcessEvent(tt.eventType) + if result != tt.expected { + t.Errorf("shouldProcessEvent(%s) = %v, want %v", tt.eventType, result, tt.expected) + } + }, + ) + } +} + +func TestService_findVolumeUsingResource_ConfigMap(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + tests := []struct { + name string + volumes []corev1.Volume + resourceName string + resourceType ResourceType + wantVolume string + }{ + { + name: "direct configmap volume", + volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + }, + resourceName: "my-cm", + resourceType: ResourceTypeConfigMap, + wantVolume: "config-vol", + }, + { + name: "projected 
configmap volume", + volumes: []corev1.Volume{ + { + Name: "projected-vol", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: "projected-cm"}, + }, + }, + }, + }, + }, + }, + }, + resourceName: "projected-cm", + resourceType: ResourceTypeConfigMap, + wantVolume: "projected-vol", + }, + { + name: "no match", + volumes: []corev1.Volume{ + { + Name: "other-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "other-cm"}, + }, + }, + }, + }, + resourceName: "my-cm", + resourceType: ResourceTypeConfigMap, + wantVolume: "", + }, + { + name: "empty volumes", + volumes: []corev1.Volume{}, + resourceName: "my-cm", + resourceType: ResourceTypeConfigMap, + wantVolume: "", + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := svc.findVolumeUsingResource(tt.volumes, tt.resourceName, tt.resourceType) + if got != tt.wantVolume { + t.Errorf("findVolumeUsingResource() = %q, want %q", got, tt.wantVolume) + } + }, + ) + } +} + +func TestService_findVolumeUsingResource_Secret(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + tests := []struct { + name string + volumes []corev1.Volume + resourceName string + wantVolume string + }{ + { + name: "direct secret volume", + volumes: []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "my-secret", + }, + }, + }, + }, + resourceName: "my-secret", + wantVolume: "secret-vol", + }, + { + name: "projected secret volume", + volumes: []corev1.Volume{ + { + Name: "projected-vol", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + Secret: &corev1.SecretProjection{ + 
LocalObjectReference: corev1.LocalObjectReference{Name: "projected-secret"}, + }, + }, + }, + }, + }, + }, + }, + resourceName: "projected-secret", + wantVolume: "projected-vol", + }, + { + name: "no match", + volumes: []corev1.Volume{ + { + Name: "other-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "other-secret", + }, + }, + }, + }, + resourceName: "my-secret", + wantVolume: "", + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := svc.findVolumeUsingResource(tt.volumes, tt.resourceName, ResourceTypeSecret) + if got != tt.wantVolume { + t.Errorf("findVolumeUsingResource() = %q, want %q", got, tt.wantVolume) + } + }, + ) + } +} + +func TestService_findContainerWithVolumeMount(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + tests := []struct { + name string + containers []corev1.Container + volumeName string + wantName string + shouldMatch bool + }{ + { + name: "container with matching volume mount", + containers: []corev1.Container{ + { + Name: "container1", + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-vol", MountPath: "/config"}, + }, + }, + }, + volumeName: "config-vol", + wantName: "container1", + shouldMatch: true, + }, + { + name: "second container with matching mount", + containers: []corev1.Container{ + { + Name: "container1", + VolumeMounts: []corev1.VolumeMount{}, + }, + { + Name: "container2", + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-vol", MountPath: "/config"}, + }, + }, + }, + volumeName: "config-vol", + wantName: "container2", + shouldMatch: true, + }, + { + name: "no matching mount", + containers: []corev1.Container{ + { + Name: "container1", + VolumeMounts: []corev1.VolumeMount{ + {Name: "other-vol", MountPath: "/other"}, + }, + }, + }, + volumeName: "config-vol", + shouldMatch: false, + }, + { + name: "empty containers", + containers: []corev1.Container{}, + volumeName: "config-vol", + shouldMatch: 
false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := svc.findContainerWithVolumeMount(tt.containers, tt.volumeName) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithVolumeMount() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } + } + }, + ) + } +} + +func TestService_findContainerWithEnvRef_ConfigMap(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + tests := []struct { + name string + containers []corev1.Container + resourceName string + wantName string + shouldMatch bool + }{ + { + name: "container with ConfigMapKeyRef", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + { + Name: "CONFIG_VALUE", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + Key: "key", + }, + }, + }, + }, + }, + }, + resourceName: "my-cm", + wantName: "app", + shouldMatch: true, + }, + { + name: "container with ConfigMapRef in EnvFrom", + containers: []corev1.Container{ + { + Name: "app", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + }, + }, + resourceName: "my-cm", + wantName: "app", + shouldMatch: true, + }, + { + name: "no matching env ref", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + { + Name: "SIMPLE_VAR", + Value: "value", + }, + }, + }, + }, + resourceName: "my-cm", + shouldMatch: false, + }, + { + name: "env without ValueFrom", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + {Name: "VAR1", Value: "val"}, + }, + }, + }, + resourceName: "my-cm", + shouldMatch: false, + }, + } + + for _, tt 
:= range tests { + t.Run( + tt.name, func(t *testing.T) { + got := svc.findContainerWithEnvRef(tt.containers, tt.resourceName, ResourceTypeConfigMap) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithEnvRef() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } + } + }, + ) + } +} + +func TestService_findContainerWithEnvRef_Secret(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + tests := []struct { + name string + containers []corev1.Container + resourceName string + wantName string + shouldMatch bool + }{ + { + name: "container with SecretKeyRef", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + { + Name: "SECRET_VALUE", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"}, + Key: "password", + }, + }, + }, + }, + }, + }, + resourceName: "my-secret", + wantName: "app", + shouldMatch: true, + }, + { + name: "container with SecretRef in EnvFrom", + containers: []corev1.Container{ + { + Name: "app", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"}, + }, + }, + }, + }, + }, + resourceName: "my-secret", + wantName: "app", + shouldMatch: true, + }, + { + name: "no matching env ref", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + { + Name: "SIMPLE_VAR", + Value: "value", + }, + }, + }, + }, + resourceName: "my-secret", + shouldMatch: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := svc.findContainerWithEnvRef(tt.containers, tt.resourceName, ResourceTypeSecret) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a 
container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithEnvRef() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } + } + }, + ) + } +} + +func TestService_findTargetContainer_AutoReload(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Test with autoReload=true and volume mount + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + } + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "app", + Image: "nginx", + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-vol", MountPath: "/config"}, + }, + }, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + if container.Name != "app" { + t.Errorf("Expected container 'app', got %q", container.Name) + } +} + +func TestService_findTargetContainer_AutoReload_EnvRef(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Test with autoReload=true and env ref (no volume) + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "sidecar", + Image: "busybox", + }, + { + Name: "app", + Image: "nginx", + Env: []corev1.EnvVar{ + { + Name: "CONFIG_VAL", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + Key: "key", + }, + }, + }, + }, + }, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := 
svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + if container.Name != "app" { + t.Errorf("Expected container 'app', got %q", container.Name) + } +} + +func TestService_findTargetContainer_AutoReload_InitContainer(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Test with autoReload=true where init container uses the volume + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + } + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-vol", MountPath: "/config"}, + }, + }, + } + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "app", + Image: "nginx", + }, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + // Should return first main container when init container uses the volume + if container.Name != "app" { + t.Errorf("Expected container 'app', got %q", container.Name) + } +} + +func TestService_findTargetContainer_AutoReload_InitContainerEnvRef(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // Test with autoReload=true where init container has env ref + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + Image: "busybox", + Env: []corev1.EnvVar{ + { + Name: "CONFIG_VAL", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + 
LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + Key: "key", + }, + }, + }, + }, + }, + } + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "app", + Image: "nginx", + }, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + // Should return first main container when init container has the env ref + if container.Name != "app" { + t.Errorf("Expected container 'app', got %q", container.Name) + } +} + +func TestService_findTargetContainer_NoContainers(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{} + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, false) + if container != nil { + t.Error("Expected nil container for empty container list") + } +} + +func TestService_findTargetContainer_NonAutoReload(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + {Name: "first", Image: "nginx"}, + {Name: "second", Image: "busybox"}, + } + accessor := workload.NewDeploymentWorkload(deploy) + + // Without autoReload, should return first container + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, false) + if container == nil { + t.Fatal("Expected to find a container") + } + if container.Name != "first" { + t.Errorf("Expected first container, got %q", container.Name) + } +} + +func TestService_findTargetContainer_AutoReload_FallbackToFirst(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + // autoReload=true but no matching 
volume or env ref - should fallback to first container + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + {Name: "first", Image: "nginx"}, + {Name: "second", Image: "busybox"}, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "non-existent", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + if container.Name != "first" { + t.Errorf("Expected first container as fallback, got %q", container.Name) + } +} + +func TestService_ProcessNilChange(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment("test", "default", nil) + workloads := []workload.Workload{workload.NewDeploymentWorkload(deploy)} + + // Test with nil ConfigMap + change := ConfigMapChange{ + ConfigMap: nil, + EventType: EventTypeUpdate, + } + + decisions := svc.Process(change, workloads) + if decisions != nil { + t.Errorf("Expected nil decisions for nil change, got %v", decisions) + } +} + +func TestService_ProcessCreateEventDisabled(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnCreate = false + svc := NewService(cfg, testr.New(t)) + + deploy := testutil.NewDeployment( + "test", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + workloads := []workload.Workload{workload.NewDeploymentWorkload(deploy)} + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeCreate, + } + + decisions := svc.Process(change, workloads) + if decisions != nil { + t.Errorf("Expected nil decisions when create events disabled, got %v", decisions) + } +} diff --git a/internal/pkg/reload/strategy.go b/internal/pkg/reload/strategy.go new file mode 100644 index 000000000..4386bf4b6 --- 
/dev/null +++ b/internal/pkg/reload/strategy.go @@ -0,0 +1,194 @@ +package reload + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +const ( + // EnvVarPrefix is the prefix for environment variables added by Reloader. + EnvVarPrefix = "STAKATER_" + // ConfigmapEnvVarPostfix is the postfix for ConfigMap environment variables. + ConfigmapEnvVarPostfix = "CONFIGMAP" + // SecretEnvVarPostfix is the postfix for Secret environment variables. + SecretEnvVarPostfix = "SECRET" +) + +// Strategy defines how workload restarts are triggered. +type Strategy interface { + Apply(input StrategyInput) (bool, error) + Name() string +} + +// StrategyInput contains the information needed to apply a reload strategy. +type StrategyInput struct { + ResourceName string + ResourceType ResourceType + Namespace string + Hash string + Container *corev1.Container + PodAnnotations map[string]string + AutoReload bool +} + +// ReloadSource contains metadata about what triggered a reload. +type ReloadSource struct { + Kind string `json:"kind"` + Name string `json:"name"` + Namespace string `json:"namespace"` + Hash string `json:"hash"` + Containers []string `json:"containers"` + ReloadedAt time.Time `json:"reloadedAt"` +} + +// EnvVarStrategy triggers reloads by adding/updating environment variables. +type EnvVarStrategy struct{} + +// NewEnvVarStrategy creates a new EnvVarStrategy. +func NewEnvVarStrategy() *EnvVarStrategy { + return &EnvVarStrategy{} +} + +func (s *EnvVarStrategy) Name() string { + return string(config.ReloadStrategyEnvVars) +} + +// Apply adds, updates, or removes an environment variable to trigger a restart. 
+func (s *EnvVarStrategy) Apply(input StrategyInput) (bool, error) { + if input.Container == nil { + return false, fmt.Errorf("container is required for env-var strategy") + } + + envVarName := s.envVarName(input.ResourceName, input.ResourceType) + + if input.Hash == "" { + return s.removeEnvVar(input.Container, envVarName), nil + } + + for i := range input.Container.Env { + if input.Container.Env[i].Name == envVarName { + if input.Container.Env[i].Value == input.Hash { + return false, nil + } + input.Container.Env[i].Value = input.Hash + return true, nil + } + } + + input.Container.Env = append(input.Container.Env, corev1.EnvVar{ + Name: envVarName, + Value: input.Hash, + }) + + return true, nil +} + +func (s *EnvVarStrategy) removeEnvVar(container *corev1.Container, name string) bool { + for i := range container.Env { + if container.Env[i].Name == name { + container.Env[i] = container.Env[len(container.Env)-1] + container.Env = container.Env[:len(container.Env)-1] + return true + } + } + return false +} + +func (s *EnvVarStrategy) envVarName(resourceName string, resourceType ResourceType) string { + var postfix string + switch resourceType { + case ResourceTypeConfigMap: + postfix = ConfigmapEnvVarPostfix + case ResourceTypeSecret: + postfix = SecretEnvVarPostfix + } + return EnvVarPrefix + convertToEnvVarName(resourceName) + "_" + postfix +} + +func convertToEnvVarName(text string) string { + var buffer bytes.Buffer + upper := strings.ToUpper(text) + lastCharValid := false + + for i := 0; i < len(upper); i++ { + ch := upper[i] + if (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') { + buffer.WriteByte(ch) + lastCharValid = true + } else { + if lastCharValid { + buffer.WriteByte('_') + } + lastCharValid = false + } + } + + return buffer.String() +} + +// AnnotationStrategy triggers reloads by adding/updating pod template annotations. +type AnnotationStrategy struct { + cfg *config.Config +} + +// NewAnnotationStrategy creates a new AnnotationStrategy. 
+func NewAnnotationStrategy(cfg *config.Config) *AnnotationStrategy { + return &AnnotationStrategy{cfg: cfg} +} + +func (s *AnnotationStrategy) Name() string { + return string(config.ReloadStrategyAnnotations) +} + +// Apply adds or updates a pod annotation to trigger a restart. +func (s *AnnotationStrategy) Apply(input StrategyInput) (bool, error) { + if input.PodAnnotations == nil { + return false, fmt.Errorf("pod annotations map is required for annotation strategy") + } + + containerName := "" + if input.Container != nil { + containerName = input.Container.Name + } + + source := ReloadSource{ + Kind: string(input.ResourceType), + Name: input.ResourceName, + Namespace: input.Namespace, + Hash: input.Hash, + Containers: []string{containerName}, + ReloadedAt: time.Now().UTC(), + } + + sourceJSON, err := json.Marshal(source) + if err != nil { + return false, fmt.Errorf("failed to marshal reload source: %w", err) + } + + annotationKey := s.cfg.Annotations.LastReloadedFrom + existingValue := input.PodAnnotations[annotationKey] + + if existingValue == string(sourceJSON) { + return false, nil + } + + input.PodAnnotations[annotationKey] = string(sourceJSON) + return true, nil +} + +// NewStrategy creates a Strategy based on the configuration. 
+func NewStrategy(cfg *config.Config) Strategy { + switch cfg.ReloadStrategy { + case config.ReloadStrategyAnnotations: + return NewAnnotationStrategy(cfg) + default: + return NewEnvVarStrategy() + } +} diff --git a/internal/pkg/reload/strategy_test.go b/internal/pkg/reload/strategy_test.go new file mode 100644 index 000000000..3ea4f2458 --- /dev/null +++ b/internal/pkg/reload/strategy_test.go @@ -0,0 +1,293 @@ +package reload + +import ( + "encoding/json" + "testing" + + corev1 "k8s.io/api/core/v1" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +func TestEnvVarStrategy_Apply(t *testing.T) { + strategy := NewEnvVarStrategy() + + t.Run("adds new env var", func(t *testing.T) { + container := &corev1.Container{ + Name: "test-container", + Env: []corev1.EnvVar{}, + } + + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "abc123", + Container: container, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !changed { + t.Error("expected changed=true for new env var") + } + + // Verify env var was added + found := false + for _, env := range container.Env { + if env.Name == "STAKATER_MY_CONFIG_CONFIGMAP" && env.Value == "abc123" { + found = true + break + } + } + if !found { + t.Errorf("expected env var STAKATER_MY_CONFIG_CONFIGMAP=abc123, got %+v", container.Env) + } + }) + + t.Run("updates existing env var", func(t *testing.T) { + container := &corev1.Container{ + Name: "test-container", + Env: []corev1.EnvVar{ + {Name: "STAKATER_MY_CONFIG_CONFIGMAP", Value: "old-hash"}, + }, + } + + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "new-hash", + Container: container, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !changed { + t.Error("expected changed=true for updated env var") + } + + 
// Verify env var was updated + if container.Env[0].Value != "new-hash" { + t.Errorf("expected env var value=new-hash, got %s", container.Env[0].Value) + } + }) + + t.Run("no change when hash is same", func(t *testing.T) { + container := &corev1.Container{ + Name: "test-container", + Env: []corev1.EnvVar{ + {Name: "STAKATER_MY_CONFIG_CONFIGMAP", Value: "same-hash"}, + }, + } + + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "same-hash", + Container: container, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if changed { + t.Error("expected changed=false when hash is unchanged") + } + }) + + t.Run("error when container is nil", func(t *testing.T) { + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "abc123", + Container: nil, + } + + _, err := strategy.Apply(input) + if err == nil { + t.Error("expected error for nil container") + } + }) + + t.Run("secret env var has correct postfix", func(t *testing.T) { + container := &corev1.Container{ + Name: "test-container", + Env: []corev1.EnvVar{}, + } + + input := StrategyInput{ + ResourceName: "my-secret", + ResourceType: ResourceTypeSecret, + Namespace: "default", + Hash: "abc123", + Container: container, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !changed { + t.Error("expected changed=true") + } + + // Verify env var name has SECRET postfix + found := false + for _, env := range container.Env { + if env.Name == "STAKATER_MY_SECRET_SECRET" && env.Value == "abc123" { + found = true + break + } + } + if !found { + t.Errorf("expected env var STAKATER_MY_SECRET_SECRET=abc123, got %+v", container.Env) + } + }) +} + +func TestEnvVarStrategy_EnvVarName(t *testing.T) { + strategy := NewEnvVarStrategy() + + tests := []struct { + resourceName string + 
resourceType ResourceType + expected string + }{ + {"my-config", ResourceTypeConfigMap, "STAKATER_MY_CONFIG_CONFIGMAP"}, + {"my-secret", ResourceTypeSecret, "STAKATER_MY_SECRET_SECRET"}, + {"app-config-v2", ResourceTypeConfigMap, "STAKATER_APP_CONFIG_V2_CONFIGMAP"}, + {"my.dotted.config", ResourceTypeConfigMap, "STAKATER_MY_DOTTED_CONFIG_CONFIGMAP"}, + {"MyMixedCase", ResourceTypeConfigMap, "STAKATER_MYMIXEDCASE_CONFIGMAP"}, + {"config-with-123-numbers", ResourceTypeConfigMap, "STAKATER_CONFIG_WITH_123_NUMBERS_CONFIGMAP"}, + } + + for _, tt := range tests { + t.Run(tt.resourceName, func(t *testing.T) { + got := strategy.envVarName(tt.resourceName, tt.resourceType) + if got != tt.expected { + t.Errorf("envVarName(%q, %q) = %q, want %q", + tt.resourceName, tt.resourceType, got, tt.expected) + } + }) + } +} + +func TestConvertToEnvVarName(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {"my-config", "MY_CONFIG"}, + {"my.config", "MY_CONFIG"}, + {"my_config", "MY_CONFIG"}, + {"MY-CONFIG", "MY_CONFIG"}, + {"config123", "CONFIG123"}, + {"123config", "123CONFIG"}, + {"my--config", "MY_CONFIG"}, + {"my..config", "MY_CONFIG"}, + {"", ""}, + {"-leading-dash", "LEADING_DASH"}, + {"trailing-dash-", "TRAILING_DASH_"}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := convertToEnvVarName(tt.input) + if got != tt.expected { + t.Errorf("convertToEnvVarName(%q) = %q, want %q", tt.input, got, tt.expected) + } + }) + } +} + +func TestAnnotationStrategy_Apply(t *testing.T) { + cfg := config.NewDefault() + strategy := NewAnnotationStrategy(cfg) + + t.Run("adds new annotation", func(t *testing.T) { + annotations := make(map[string]string) + container := &corev1.Container{Name: "test-container"} + + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "abc123", + Container: container, + PodAnnotations: annotations, + } + + changed, err := 
strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !changed { + t.Error("expected changed=true for new annotation") + } + + // Verify annotation was added + annotationValue := annotations[cfg.Annotations.LastReloadedFrom] + if annotationValue == "" { + t.Error("expected annotation to be set") + } + + // Verify annotation content + var source ReloadSource + if err := json.Unmarshal([]byte(annotationValue), &source); err != nil { + t.Fatalf("failed to unmarshal annotation: %v", err) + } + if source.Kind != string(ResourceTypeConfigMap) { + t.Errorf("expected kind=%s, got %s", ResourceTypeConfigMap, source.Kind) + } + if source.Name != "my-config" { + t.Errorf("expected name=my-config, got %s", source.Name) + } + if source.Hash != "abc123" { + t.Errorf("expected hash=abc123, got %s", source.Hash) + } + }) + + t.Run("error when annotations map is nil", func(t *testing.T) { + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "abc123", + PodAnnotations: nil, + } + + _, err := strategy.Apply(input) + if err == nil { + t.Error("expected error for nil annotations map") + } + }) +} + +func TestNewStrategy(t *testing.T) { + t.Run("default strategy is env-vars", func(t *testing.T) { + cfg := config.NewDefault() + strategy := NewStrategy(cfg) + + if strategy.Name() != string(config.ReloadStrategyEnvVars) { + t.Errorf("expected env-vars strategy, got %s", strategy.Name()) + } + }) + + t.Run("annotations strategy when configured", func(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyAnnotations + strategy := NewStrategy(cfg) + + if strategy.Name() != string(config.ReloadStrategyAnnotations) { + t.Errorf("expected annotations strategy, got %s", strategy.Name()) + } + }) +} diff --git a/internal/pkg/testutil/fixtures.go b/internal/pkg/testutil/fixtures.go new file mode 100644 index 000000000..6ba8587a7 --- /dev/null +++ 
b/internal/pkg/testutil/fixtures.go @@ -0,0 +1,388 @@ +package testutil + +import ( + openshiftv1 "github.com/openshift/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDeploymentConfig creates a minimal DeploymentConfig for unit testing. +func NewDeploymentConfig(name, namespace string, annotations map[string]string) *openshiftv1.DeploymentConfig { + replicas := int32(1) + return &openshiftv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: openshiftv1.DeploymentConfigSpec{ + Replicas: replicas, + Selector: map[string]string{"app": name}, + Template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, + }, + }, + }, + } +} + +// NewDeploymentConfigWithEnvFrom creates a DeploymentConfig with EnvFrom referencing a ConfigMap or Secret. +func NewDeploymentConfigWithEnvFrom(name, namespace string, configMapName, secretName string) *openshiftv1.DeploymentConfig { + dc := NewDeploymentConfig(name, namespace, nil) + if configMapName != "" { + dc.Spec.Template.Spec.Containers[0].EnvFrom = append( + dc.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + ) + } + if secretName != "" { + dc.Spec.Template.Spec.Containers[0].EnvFrom = append( + dc.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }, + ) + } + return dc +} + +// NewScheme creates a scheme with common types for testing. 
+func NewScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = batchv1.AddToScheme(scheme) + _ = openshiftv1.AddToScheme(scheme) + return scheme +} + +// NewDeployment creates a minimal Deployment for unit testing. +func NewDeployment(name, namespace string, annotations map[string]string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, + }, + }, + }, + } +} + +// NewDeploymentWithEnvFrom creates a Deployment with EnvFrom referencing a ConfigMap or Secret. +func NewDeploymentWithEnvFrom(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := NewDeployment(name, namespace, nil) + if configMapName != "" { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + ) + } + if secretName != "" { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }, + ) + } + return d +} + +// NewDeploymentWithVolume creates a Deployment with a volume from ConfigMap or Secret. 
+func NewDeploymentWithVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := NewDeployment(name, namespace, nil) + d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/etc/config", + }, + } + + if configMapName != "" { + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + }, + } + } + if secretName != "" { + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }, + } + } + return d +} + +// NewDeploymentWithProjectedVolume creates a Deployment with a projected volume. +func NewDeploymentWithProjectedVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := NewDeployment(name, namespace, nil) + d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/etc/config", + }, + } + + sources := []corev1.VolumeProjection{} + if configMapName != "" { + sources = append( + sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + ) + } + if secretName != "" { + sources = append( + sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }, + ) + } + + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{Sources: sources}, + }, + }, + } + return d +} + +// NewDaemonSet creates a minimal DaemonSet for unit testing. 
+func NewDaemonSet(name, namespace string, annotations map[string]string) *appsv1.DaemonSet { + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, + }, + }, + }, + } +} + +// NewStatefulSet creates a minimal StatefulSet for unit testing. +func NewStatefulSet(name, namespace string, annotations map[string]string) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, + }, + }, + }, + } +} + +// NewJob creates a minimal Job for unit testing. +func NewJob(name, namespace string) *batchv1.Job { + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox", + }, + }, + }, + }, + }, + } +} + +// NewJobWithAnnotations creates a Job with annotations. 
+func NewJobWithAnnotations(name, namespace string, annotations map[string]string) *batchv1.Job { + job := NewJob(name, namespace) + job.Annotations = annotations + return job +} + +// NewCronJob creates a minimal CronJob for unit testing. +func NewCronJob(name, namespace string) *batchv1.CronJob { + return &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: "test-uid", + }, + Spec: batchv1.CronJobSpec{ + Schedule: "*/5 * * * *", + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox", + }, + }, + }, + }, + }, + }, + }, + } +} + +// NewCronJobWithAnnotations creates a CronJob with annotations. +func NewCronJobWithAnnotations(name, namespace string, annotations map[string]string) *batchv1.CronJob { + cj := NewCronJob(name, namespace) + cj.Annotations = annotations + return cj +} + +// NewConfigMap creates a ConfigMap for unit testing. +func NewConfigMap(name, namespace string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string]string{"key": "value"}, + } +} + +// NewConfigMapWithAnnotations creates a ConfigMap with annotations. +func NewConfigMapWithAnnotations(name, namespace string, annotations map[string]string) *corev1.ConfigMap { + cm := NewConfigMap(name, namespace) + cm.Annotations = annotations + return cm +} + +// NewSecret creates a Secret for unit testing. +func NewSecret(name, namespace string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{"key": []byte("value")}, + } +} + +// NewSecretWithAnnotations creates a Secret with annotations. 
+func NewSecretWithAnnotations(name, namespace string, annotations map[string]string) *corev1.Secret { + secret := NewSecret(name, namespace) + secret.Annotations = annotations + return secret +} + +// NewNamespace creates a Namespace with optional labels. +func NewNamespace(name string, labels map[string]string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + } +} diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go deleted file mode 100644 index 1ad43e18d..000000000 --- a/internal/pkg/testutil/kube.go +++ /dev/null @@ -1,1231 +0,0 @@ -package testutil - -import ( - "context" - "encoding/json" - "fmt" - "math/rand" - "sort" - "strconv" - "strings" - "time" - - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" - openshiftv1 "github.com/openshift/api/apps/v1" - appsclient "github.com/openshift/client-go/apps/clientset/versioned" - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/crypto" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - core_v1 "k8s.io/client-go/kubernetes/typed/core/v1" -) - -var ( - letters = []rune("abcdefghijklmnopqrstuvwxyz") - // ConfigmapResourceType is a resource type which controller watches for changes - ConfigmapResourceType = "configMaps" - // SecretResourceType is a resource type which controller watches 
for changes - SecretResourceType = "secrets" -) - -var ( - Clients = kube.GetClients() - Pod = "test-reloader-" + RandSeq(5) - Namespace = "test-reloader-" + RandSeq(5) - ConfigmapNamePrefix = "testconfigmap-reloader" - SecretNamePrefix = "testsecret-reloader" - Data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - NewData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - UpdatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" - Collectors = metrics.NewCollectors() - SleepDuration = 3 * time.Second -) - -// CreateNamespace creates namespace for testing -func CreateNamespace(namespace string, client kubernetes.Interface) { - _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) - if err != nil { - logrus.Fatalf("Failed to create namespace for testing %v", err) - } else { - logrus.Infof("Creating namespace for testing = %s", namespace) - } -} - -// DeleteNamespace deletes namespace for testing -func DeleteNamespace(namespace string, client kubernetes.Interface) { - err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}) - if err != nil { - logrus.Fatalf("Failed to delete namespace that was created for testing %v", err) - } else { - logrus.Infof("Deleting namespace for testing = %s", namespace) - } -} - -func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) metav1.ObjectMeta { - return metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, extraAnnotations), - } -} - -func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) map[string]string { - annotations := make(map[string]string) - if autoReload { - 
annotations[options.ReloaderAutoAnnotation] = "true" - } - if secretAutoReload { - annotations[options.SecretReloaderAutoAnnotation] = "true" - } - if configmapAutoReload { - annotations[options.ConfigmapReloaderAutoAnnotation] = "true" - } - - if len(annotations) == 0 { - annotations = map[string]string{ - options.ConfigmapUpdateOnChangeAnnotation: name, - options.SecretUpdateOnChangeAnnotation: name} - } - for k, v := range extraAnnotations { - annotations[k] = v - } - return annotations -} - -func getEnvVarSources(name string) []v1.EnvFromSource { - return []v1.EnvFromSource{ - { - ConfigMapRef: &v1.ConfigMapEnvSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - { - SecretRef: &v1.SecretEnvSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - } -} - -func getVolumes(name string) []v1.Volume { - return []v1.Volume{ - { - Name: "projectedconfigmap", - VolumeSource: v1.VolumeSource{ - Projected: &v1.ProjectedVolumeSource{ - Sources: []v1.VolumeProjection{ - { - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - }, - }, - }, - }, - { - Name: "projectedsecret", - VolumeSource: v1.VolumeSource{ - Projected: &v1.ProjectedVolumeSource{ - Sources: []v1.VolumeProjection{ - { - Secret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - }, - }, - }, - }, - { - Name: "configmap", - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - }, - { - Name: "secret", - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: name, - }, - }, - }, - } -} - -func getVolumeMounts() []v1.VolumeMount { - return []v1.VolumeMount{ - { - MountPath: "etc/config", - Name: "configmap", - }, - { - MountPath: "etc/sec", - Name: "secret", - }, - { - MountPath: "etc/projectedconfig", - Name: 
"projectedconfigmap", - }, - { - MountPath: "etc/projectedsec", - Name: "projectedsecret", - }, - } -} - -func getPodTemplateSpecWithEnvVars(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - { - Name: "CONFIGMAP_" + util.ConvertToEnvVarName(name), - ValueFrom: &v1.EnvVarSource{ - ConfigMapKeyRef: &v1.ConfigMapKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - Key: "test.url", - }, - }, - }, - { - Name: "SECRET_" + util.ConvertToEnvVarName(name), - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - Key: "test.url", - }, - }, - }, - }, - }, - }, - }, - } -} - -func getPodTemplateSpecWithEnvVarSources(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - EnvFrom: getEnvVarSources(name), - }, - }, - }, - } -} - -func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - }, - VolumeMounts: getVolumeMounts(), - }, - }, - Volumes: getVolumes(name), - }, - } -} - -func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - InitContainers: 
[]v1.Container{ - { - Image: "busybox", - Name: "busyBox", - VolumeMounts: getVolumeMounts(), - }, - }, - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - }, - }, - }, - Volumes: getVolumes(name), - }, - } -} - -func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - { - Image: "busybox", - Name: "busyBox", - EnvFrom: getEnvVarSources(name), - }, - }, - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - }, - }, - }, - }, - } -} - -// GetDeployment provides deployment for testing -func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(deploymentName), - }, - } -} - -// GetDeploymentConfig provides deployment for testing -func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig { - replicaset := int32(1) - podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName) - return &openshiftv1.DeploymentConfig{ - ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}), - Spec: openshiftv1.DeploymentConfigSpec{ - Replicas: replicaset, - Strategy: openshiftv1.DeploymentStrategy{ - Type: 
openshiftv1.DeploymentStrategyTypeRolling, - }, - Template: &podTemplateSpecWithVolume, - }, - } -} - -// GetDeploymentWithInitContainer provides deployment with init container and volumeMounts -func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithInitContainer(deploymentName), - }, - } -} - -// GetDeploymentWithInitContainerAndEnv provides deployment with init container and EnvSource -func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithInitContainerAndEnv(deploymentName), - }, - } -} - -func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: 
getPodTemplateSpecWithEnvVars(deploymentName), - }, - } -} - -func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig { - replicaset := int32(1) - podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName) - return &openshiftv1.DeploymentConfig{ - ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}), - Spec: openshiftv1.DeploymentConfigSpec{ - Replicas: replicaset, - Strategy: openshiftv1.DeploymentStrategy{ - Type: openshiftv1.DeploymentStrategyTypeRolling, - }, - Template: &podTemplateSpecWithEnvVars, - }, - } -} - -func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithEnvVarSources(deploymentName), - }, - } -} - -func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment { - replicaset := int32(1) - deployment := &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithEnvVarSources(deploymentName), - }, - } - if !both { - deployment.Annotations = nil - } - deployment.Spec.Template.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{}) - return 
deployment -} - -func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment { - replicaset := int32(1) - var objectMeta metav1.ObjectMeta - switch resourceType { - case SecretResourceType: - objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{}) - case ConfigmapResourceType: - objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{}) - } - - return &appsv1.Deployment{ - ObjectMeta: objectMeta, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(deploymentName), - }, - } -} - -func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment { - replicaset := int32(1) - - annotation := map[string]string{} - - switch resourceType { - case SecretResourceType: - annotation[options.SecretExcludeReloaderAnnotation] = deploymentName - case ConfigmapResourceType: - annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName - } - - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - Annotations: annotation, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(deploymentName), - }, - } -} - -// GetDaemonSet provides daemonset for testing -func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet { - return &appsv1.DaemonSet{ - ObjectMeta: getObjectMeta(namespace, 
daemonsetName, false, false, false, map[string]string{}), - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ - Type: appsv1.RollingUpdateDaemonSetStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(daemonsetName), - }, - } -} - -func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet { - return &appsv1.DaemonSet{ - ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, map[string]string{}), - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ - Type: appsv1.RollingUpdateDaemonSetStrategyType, - }, - Template: getPodTemplateSpecWithEnvVars(daemonSetName), - }, - } -} - -// GetStatefulSet provides statefulset for testing -func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet { - return &appsv1.StatefulSet{ - ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, map[string]string{}), - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ - Type: appsv1.RollingUpdateStatefulSetStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(statefulsetName), - }, - } -} - -// GetStatefulSet provides statefulset for testing -func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet { - return &appsv1.StatefulSet{ - ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, map[string]string{}), - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ - Type: appsv1.RollingUpdateStatefulSetStrategyType, - }, - Template: 
getPodTemplateSpecWithEnvVars(statefulsetName), - }, - } -} - -// GetConfigmap provides configmap for testing -func GetConfigmap(namespace string, configmapName string, testData string) *v1.ConfigMap { - return &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configmapName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - }, - Data: map[string]string{"test.url": testData}, - } -} - -// GetConfigmapWithUpdatedLabel provides configmap for testing -func GetConfigmapWithUpdatedLabel(namespace string, configmapName string, testLabel string, testData string) *v1.ConfigMap { - return &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configmapName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": testLabel}, - }, - Data: map[string]string{"test.url": testData}, - } -} - -// GetSecret provides secret for testing -func GetSecret(namespace string, secretName string, data string) *v1.Secret { - return &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - }, - Data: map[string][]byte{"test.url": []byte(data)}, - } -} - -func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob { - return &batchv1.CronJob{ - ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}), - Spec: batchv1.CronJobSpec{ - Schedule: "*/5 * * * *", // Run every 5 minutes - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Template: getPodTemplateSpecWithVolumes(cronJobName), - }, - }, - }, - } -} - -func GetJob(namespace string, jobName string) *batchv1.Job { - return &batchv1.Job{ - ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, map[string]string{}), - Spec: batchv1.JobSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Template: 
getPodTemplateSpecWithVolumes(jobName), - }, - } -} - -func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob { - return &batchv1.CronJob{ - ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, map[string]string{}), - Spec: batchv1.CronJobSpec{ - Schedule: "*/5 * * * *", // Run every 5 minutes - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Template: getPodTemplateSpecWithEnvVars(cronJobName), - }, - }, - }, - } -} - -func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job { - return &batchv1.Job{ - ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, map[string]string{}), - Spec: batchv1.JobSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Template: getPodTemplateSpecWithEnvVars(jobName), - }, - } -} - -// GetSecretWithUpdatedLabel provides secret for testing -func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret { - return &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": label}, - }, - Data: map[string][]byte{"test.url": []byte(data)}, - } -} - -// GetResourceSHAFromEnvVar returns the SHA value of given environment variable -func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string { - for i := range containers { - envs := containers[i].Env - for j := range envs { - if envs[j].Name == envVar { - return envs[j].Value - } - } - } - return "" -} - -// GetResourceSHAFromAnnotation returns the SHA value of given environment variable -func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string { - lastReloadedResourceName := fmt.Sprintf("%s/%s", - constants.ReloaderAnnotationPrefix, - constants.LastReloadedFromAnnotation, - ) - - annotationJson, ok := 
podAnnotations[lastReloadedResourceName] - if !ok { - return "" - } - - var last common.ReloadSource - bytes := []byte(annotationJson) - err := json.Unmarshal(bytes, &last) - if err != nil { - return "" - } - - return last.Hash -} - -// ConvertResourceToSHA generates SHA from secret or configmap data -func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string { - values := []string{} - switch resourceType { - case SecretResourceType: - secret := GetSecret(namespace, resourceName, data) - for k, v := range secret.Data { - values = append(values, k+"="+string(v[:])) - } - case ConfigmapResourceType: - configmap := GetConfigmap(namespace, resourceName, data) - for k, v := range configmap.Data { - values = append(values, k+"="+v) - } - } - sort.Strings(values) - return crypto.GenerateSHA(strings.Join(values, ";")) -} - -// CreateConfigMap creates a configmap in given namespace and returns the ConfigMapInterface -func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) { - logrus.Infof("Creating configmap") - configmapClient := client.CoreV1().ConfigMaps(namespace) - _, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return configmapClient, err -} - -// CreateSecret creates a secret in given namespace and returns the SecretInterface -func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) { - logrus.Infof("Creating secret") - secretClient := client.CoreV1().Secrets(namespace) - _, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return secretClient, err -} - -// CreateDeployment creates a deployment in given namespace and returns the Deployment -func CreateDeployment(client 
kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - var deploymentObj *appsv1.Deployment - if volumeMount { - deploymentObj = GetDeployment(namespace, deploymentName) - } else { - deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName) - } - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeployment creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithAnnotations(client kubernetes.Interface, deploymentName string, namespace string, additionalAnnotations map[string]string, volumeMount bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - var deploymentObj *appsv1.Deployment - if volumeMount { - deploymentObj = GetDeployment(namespace, deploymentName) - } else { - deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName) - } - - for annotationKey, annotationValue := range additionalAnnotations { - deploymentObj.Annotations[annotationKey] = annotationValue - } - - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig -func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) { - logrus.Infof("Creating DeploymentConfig") - deploymentConfigsClient := client.AppsV1().DeploymentConfigs(namespace) - var deploymentConfigObj *openshiftv1.DeploymentConfig - if volumeMount { - deploymentConfigObj = GetDeploymentConfig(namespace, deploymentName) - } else { - 
deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName) - } - deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{}) - time.Sleep(5 * time.Second) - return deploymentConfig, err -} - -// CreateDeploymentWithInitContainer creates a deployment in given namespace with init container and returns the Deployment -func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - var deploymentObj *appsv1.Deployment - if volumeMount { - deploymentObj = GetDeploymentWithInitContainer(namespace, deploymentName) - } else { - deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName) - } - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithEnvVarSource creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err - -} - -// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := 
GetDeploymentWithPodAnnotations(namespace, deploymentName, both) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithEnvVarSourceAndAnnotations returns a deployment in given -// namespace with given annotations. -func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName) - deploymentObj.Annotations = annotations - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithTypedAutoAnnotation creates a deployment in given namespace and returns the Deployment with typed auto annotation -func CreateDeploymentWithTypedAutoAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithTypedAutoAnnotation(namespace, deploymentName, resourceType) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithExcludeAnnotation creates a deployment in given namespace and returns the Deployment with typed auto annotation -func CreateDeploymentWithExcludeAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := 
GetDeploymentWithExcludeAnnotation(namespace, deploymentName, resourceType) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - return deployment, err -} - -// CreateDaemonSet creates a deployment in given namespace and returns the DaemonSet -func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*appsv1.DaemonSet, error) { - logrus.Infof("Creating DaemonSet") - daemonsetClient := client.AppsV1().DaemonSets(namespace) - var daemonsetObj *appsv1.DaemonSet - if volumeMount { - daemonsetObj = GetDaemonSet(namespace, daemonsetName) - } else { - daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName) - } - daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return daemonset, err -} - -// CreateStatefulSet creates a deployment in given namespace and returns the StatefulSet -func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string, volumeMount bool) (*appsv1.StatefulSet, error) { - logrus.Infof("Creating StatefulSet") - statefulsetClient := client.AppsV1().StatefulSets(namespace) - var statefulsetObj *appsv1.StatefulSet - if volumeMount { - statefulsetObj = GetStatefulSet(namespace, statefulsetName) - } else { - statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName) - } - statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return statefulset, err -} - -// CreateCronJob creates a cronjob in given namespace and returns the CronJob -func CreateCronJob(client kubernetes.Interface, cronJobName string, namespace string, volumeMount bool) (*batchv1.CronJob, error) { - logrus.Infof("Creating CronJob") - cronJobClient := client.BatchV1().CronJobs(namespace) - var cronJobObj *batchv1.CronJob - if volumeMount { - cronJobObj = GetCronJob(namespace, cronJobName) - } else 
{ - cronJobObj = GetCronJobWithEnvVar(namespace, cronJobName) - } - cronJob, err := cronJobClient.Create(context.TODO(), cronJobObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return cronJob, err -} - -// CreateJob creates a job in given namespace and returns the Job -func CreateJob(client kubernetes.Interface, jobName string, namespace string, volumeMount bool) (*batchv1.Job, error) { - logrus.Infof("Creating Job") - jobClient := client.BatchV1().Jobs(namespace) - var jobObj *batchv1.Job - if volumeMount { - jobObj = GetJob(namespace, jobName) - } else { - jobObj = GetJobWithEnvVar(namespace, jobName) - } - job, err := jobClient.Create(context.TODO(), jobObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return job, err -} - -// DeleteDeployment creates a deployment in given namespace and returns the error if any -func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error { - logrus.Infof("Deleting Deployment") - deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return deploymentError -} - -// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any -func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error { - logrus.Infof("Deleting DeploymentConfig") - deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return deploymentConfigError -} - -// DeleteDaemonSet creates a daemonset in given namespace and returns the error if any -func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error { - logrus.Infof("Deleting DaemonSet %s", daemonsetName) - daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{}) 
- time.Sleep(3 * time.Second) - return daemonsetError -} - -// DeleteStatefulSet creates a statefulset in given namespace and returns the error if any -func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error { - logrus.Infof("Deleting StatefulSet %s", statefulsetName) - statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return statefulsetError -} - -// DeleteCronJob deletes a cronJob in given namespace and returns the error if any -func DeleteCronJob(client kubernetes.Interface, namespace string, cronJobName string) error { - logrus.Infof("Deleting CronJob %s", cronJobName) - cronJobError := client.BatchV1().CronJobs(namespace).Delete(context.TODO(), cronJobName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return cronJobError -} - -// Deleteob deletes a job in given namespace and returns the error if any -func DeleteJob(client kubernetes.Interface, namespace string, jobName string) error { - logrus.Infof("Deleting Job %s", jobName) - jobError := client.BatchV1().Jobs(namespace).Delete(context.TODO(), jobName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return jobError -} - -// UpdateConfigMap updates a configmap in given namespace and returns the error if any -func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace string, configmapName string, label string, data string) error { - logrus.Infof("Updating configmap %q.\n", configmapName) - var configmap *v1.ConfigMap - if label != "" { - configmap = GetConfigmapWithUpdatedLabel(namespace, configmapName, label, data) - } else { - configmap = GetConfigmap(namespace, configmapName, data) - } - _, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{}) - time.Sleep(3 * time.Second) - return updateErr -} - -// UpdateSecret updates a secret in given namespace and returns the error if any -func 
UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secretName string, label string, data string) error { - logrus.Infof("Updating secret %q.\n", secretName) - var secret *v1.Secret - if label != "" { - secret = GetSecretWithUpdatedLabel(namespace, secretName, label, data) - } else { - secret = GetSecret(namespace, secretName, data) - } - _, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{}) - time.Sleep(3 * time.Second) - return updateErr -} - -// DeleteConfigMap deletes a configmap in given namespace and returns the error if any -func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error { - logrus.Infof("Deleting configmap %q.\n", configmapName) - err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return err -} - -// DeleteSecret deletes a secret in given namespace and returns the error if any -func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error { - logrus.Infof("Deleting secret %q.\n", secretName) - err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return err -} - -// RandSeq generates a random sequence -func RandSeq(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] - } - return string(b) -} - -// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not -func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - for _, i := range items { - containers := upgradeFuncs.ContainersFunc(i) - accessor, err := meta.Accessor(i) - if err != nil { - return false - } - annotations := accessor.GetAnnotations() - // match statefulsets with the correct annotation - 
annotationValue := annotations[config.Annotation] - searchAnnotationValue := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation] - reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue) - matches := false - if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled { - matches = true - } else if annotationValue != "" { - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.Trim(value, " ") - if value == config.ResourceName { - matches = true - break - } - } - } else if searchAnnotationValue == "true" { - if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" { - matches = true - } - } - - if matches { - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - updated := GetResourceSHAFromEnvVar(containers, envName) - if updated == config.SHAValue { - return true - } - } - } - return false -} - -// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened or not and all Envvars SKAKATER_name_CONFIGMAP/SECRET are removed -func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - for _, i := range items { - containers := upgradeFuncs.ContainersFunc(i) - accessor, err := meta.Accessor(i) - if err != nil { - return false - } - - annotations := accessor.GetAnnotations() - // match statefulsets with the correct annotation - - annotationValue := annotations[config.Annotation] - searchAnnotationValue := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation] - 
typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation] - reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue) - - matches := false - if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled { - matches = true - } else if annotationValue != "" { - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.Trim(value, " ") - if value == config.ResourceName { - matches = true - break - } - } - } else if searchAnnotationValue == "true" { - if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" { - matches = true - } - } - - if matches { - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - value := GetResourceSHAFromEnvVar(containers, envName) - if value == "" { - return true - } - } - } - return false -} - -// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not -func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - for _, i := range items { - podAnnotations := upgradeFuncs.PodAnnotationsFunc(i) - accessor, err := meta.Accessor(i) - if err != nil { - return false - } - annotations := accessor.GetAnnotations() - // match statefulsets with the correct annotation - annotationValue := annotations[config.Annotation] - searchAnnotationValue := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation] - reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue) - matches := false - if reloaderEnabled || typedAutoAnnotationEnabled || 
reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll { - matches = true - } else if annotationValue != "" { - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.Trim(value, " ") - if value == config.ResourceName { - matches = true - break - } - } - } else if searchAnnotationValue == "true" { - if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" { - matches = true - } - } - - if matches { - updated := GetResourceSHAFromAnnotation(podAnnotations) - if updated == config.SHAValue { - return true - } - } - } - return false -} - -func GetSHAfromEmptyData() string { - return crypto.GenerateSHA("") -} - -// GetRollout provides rollout for testing -func GetRollout(namespace string, rolloutName string, annotations map[string]string) *argorolloutv1alpha1.Rollout { - replicaset := int32(1) - return &argorolloutv1alpha1.Rollout{ - ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, annotations), - Spec: argorolloutv1alpha1.RolloutSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Template: getPodTemplateSpecWithVolumes(rolloutName), - }, - } -} - -// CreateRollout creates a rolout in given namespace and returns the Rollout -func CreateRollout(client argorollout.Interface, rolloutName string, namespace string, annotations map[string]string) (*argorolloutv1alpha1.Rollout, error) { - logrus.Infof("Creating Rollout") - rolloutClient := client.ArgoprojV1alpha1().Rollouts(namespace) - rolloutObj := GetRollout(namespace, rolloutName, annotations) - rollout, err := rolloutClient.Create(context.TODO(), rolloutObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return rollout, err -} diff --git a/internal/pkg/testutil/rand.go b/internal/pkg/testutil/rand.go new file mode 100644 index 000000000..bf88d4261 --- /dev/null +++ b/internal/pkg/testutil/rand.go @@ -0,0 +1,16 @@ +package 
testutil + +import ( + "math/rand/v2" +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyz" + +// RandSeq generates a random string of the specified length. +func RandSeq(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand.IntN(len(letterBytes))] + } + return string(b) +} diff --git a/internal/pkg/testutil/testutil.go b/internal/pkg/testutil/testutil.go new file mode 100644 index 000000000..c15aacb41 --- /dev/null +++ b/internal/pkg/testutil/testutil.go @@ -0,0 +1,630 @@ +package testutil + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "fmt" + "strings" + "time" + + openshiftv1 "github.com/openshift/api/apps/v1" + openshiftclient "github.com/openshift/client-go/apps/clientset/versioned" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + // ConfigmapResourceType represents ConfigMap resource type + ConfigmapResourceType = "configmap" + // SecretResourceType represents Secret resource type + SecretResourceType = "secret" +) + +// CreateNamespace creates a namespace with the given name. +func CreateNamespace(name string, client kubernetes.Interface) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + _, err := client.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + return err +} + +// DeleteNamespace deletes the namespace with the given name. +func DeleteNamespace(name string, client kubernetes.Interface) error { + return client.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateConfigMap creates a ConfigMap with the given name and data. 
+func CreateConfigMap(client kubernetes.Interface, namespace, name, data string) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string]string{ + "url": data, + }, + } + return client.CoreV1().ConfigMaps(namespace).Create(context.Background(), cm, metav1.CreateOptions{}) +} + +// CreateConfigMapWithAnnotations creates a ConfigMap with the given name, data, and annotations. +func CreateConfigMapWithAnnotations(client kubernetes.Interface, namespace, name, data string, annotations map[string]string) ( + *corev1.ConfigMap, error, +) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Data: map[string]string{ + "url": data, + }, + } + return client.CoreV1().ConfigMaps(namespace).Create(context.Background(), cm, metav1.CreateOptions{}) +} + +// UpdateConfigMap updates the ConfigMap with new label and/or data. +func UpdateConfigMap(cm *corev1.ConfigMap, namespace, name, label, data string) error { + if label != "" { + if cm.Labels == nil { + cm.Labels = make(map[string]string) + } + cm.Labels["test-label"] = label + } + if data != "" { + cm.Data["url"] = data + } + return nil +} + +// UpdateConfigMapWithClient updates the ConfigMap with new label and/or data. +func UpdateConfigMapWithClient(client kubernetes.Interface, namespace, name, label, data string) error { + ctx := context.Background() + cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + if label != "" { + if cm.Labels == nil { + cm.Labels = make(map[string]string) + } + cm.Labels["test-label"] = label + } + if data != "" { + cm.Data["url"] = data + } + _, err = client.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) + return err +} + +// DeleteConfigMap deletes the ConfigMap with the given name. 
+func DeleteConfigMap(client kubernetes.Interface, namespace, name string) error { + return client.CoreV1().ConfigMaps(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateSecret creates a Secret with the given name and data. +func CreateSecret(client kubernetes.Interface, namespace, name, data string) (*corev1.Secret, error) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{ + "password": []byte(data), + }, + } + return client.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) +} + +// UpdateSecretWithClient updates the Secret with new label and/or data. +func UpdateSecretWithClient(client kubernetes.Interface, namespace, name, label, data string) error { + ctx := context.Background() + secret, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + if label != "" { + if secret.Labels == nil { + secret.Labels = make(map[string]string) + } + secret.Labels["test-label"] = label + } + if data != "" { + secret.Data["password"] = []byte(data) + } + _, err = client.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) + return err +} + +// DeleteSecret deletes the Secret with the given name. +func DeleteSecret(client kubernetes.Interface, namespace, name string) error { + return client.CoreV1().Secrets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateDeployment creates a Deployment that references a ConfigMap/Secret. 
+func CreateDeployment(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( + *appsv1.Deployment, error, +) { + var deployment *appsv1.Deployment + if useConfigMap { + deployment = NewDeploymentWithEnvFrom(name, namespace, name, "") + } else { + deployment = NewDeploymentWithEnvFrom(name, namespace, "", name) + } + deployment.Annotations = annotations + // Override image for integration tests + deployment.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + deployment.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} + + return client.AppsV1().Deployments(namespace).Create(context.Background(), deployment, metav1.CreateOptions{}) +} + +// DeleteDeployment deletes the Deployment with the given name. +func DeleteDeployment(client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().Deployments(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateDeploymentWithBoth creates a Deployment that references both a ConfigMap and a Secret. +func CreateDeploymentWithBoth(client kubernetes.Interface, name, namespace, configMapName, secretName string, annotations map[string]string) ( + *appsv1.Deployment, error, +) { + deployment := NewDeploymentWithEnvFrom(name, namespace, configMapName, secretName) + deployment.Annotations = annotations + // Override image for integration tests + deployment.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + deployment.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} + + return client.AppsV1().Deployments(namespace).Create(context.Background(), deployment, metav1.CreateOptions{}) +} + +// CreateDaemonSet creates a DaemonSet that references a ConfigMap/Secret. 
+func CreateDaemonSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( + *appsv1.DaemonSet, error, +) { + daemonset := NewDaemonSet(name, namespace, annotations) + // Override image for integration tests + daemonset.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + daemonset.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} + + if useConfigMap { + daemonset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + } + } else { + daemonset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + } + } + + return client.AppsV1().DaemonSets(namespace).Create(context.Background(), daemonset, metav1.CreateOptions{}) +} + +// DeleteDaemonSet deletes the DaemonSet with the given name. +func DeleteDaemonSet(client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().DaemonSets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateStatefulSet creates a StatefulSet that references a ConfigMap/Secret. 
+func CreateStatefulSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( + *appsv1.StatefulSet, error, +) { + statefulset := NewStatefulSet(name, namespace, annotations) + statefulset.Spec.ServiceName = name + // Override image for integration tests + statefulset.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + statefulset.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} + + if useConfigMap { + statefulset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + } + } else { + statefulset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + } + } + + return client.AppsV1().StatefulSets(namespace).Create(context.Background(), statefulset, metav1.CreateOptions{}) +} + +// DeleteStatefulSet deletes the StatefulSet with the given name. +func DeleteStatefulSet(client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().StatefulSets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateCronJob creates a CronJob that references a ConfigMap/Secret. 
+func CreateCronJob(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*batchv1.CronJob, error) { + cronjob := NewCronJob(name, namespace) + cronjob.Annotations = annotations + // Override image for integration tests + cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "echo hello"} + cronjob.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure + + if useConfigMap { + cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + } + } else { + cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + } + } + + return client.BatchV1().CronJobs(namespace).Create(context.Background(), cronjob, metav1.CreateOptions{}) +} + +// DeleteCronJob deletes the CronJob with the given name. +func DeleteCronJob(client kubernetes.Interface, namespace, name string) error { + return client.BatchV1().CronJobs(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// ConvertResourceToSHA converts a resource data to SHA256 hash. +func ConvertResourceToSHA(resourceType, namespace, name, data string) string { + content := fmt.Sprintf("%s/%s/%s:%s", resourceType, namespace, name, data) + hash := sha256.Sum256([]byte(content)) + return base64.StdEncoding.EncodeToString(hash[:]) +} + +// WaitForDeploymentAnnotation waits for a deployment to have the specified annotation value. 
+func WaitForDeploymentAnnotation(client kubernetes.Interface, namespace, name, annotation, expectedValue string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + value, ok := deployment.Spec.Template.Annotations[annotation] + if !ok { + return false, nil // Keep waiting + } + return value == expectedValue, nil + }, + ) +} + +// WaitForDeploymentReloadedAnnotation waits for a deployment to have the specified reloaded annotation. +func WaitForDeploymentReloadedAnnotation(client kubernetes.Interface, namespace, name, annotationName string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + // Check for the last-reloaded-from annotation in pod template + if deployment.Spec.Template.Annotations != nil { + if _, ok := deployment.Spec.Template.Annotations[annotationName]; ok { + found = true + return true, nil + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForDaemonSetReloadedAnnotation waits for a daemonset to have the specified reloaded annotation. 
+func WaitForDaemonSetReloadedAnnotation(client kubernetes.Interface, namespace, name, annotationName string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + daemonset, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + // Check for the last-reloaded-from annotation in pod template + if daemonset.Spec.Template.Annotations != nil { + if _, ok := daemonset.Spec.Template.Annotations[annotationName]; ok { + found = true + return true, nil + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForStatefulSetReloadedAnnotation waits for a statefulset to have the specified reloaded annotation. +func WaitForStatefulSetReloadedAnnotation(client kubernetes.Interface, namespace, name, annotationName string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + statefulset, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + // Check for the last-reloaded-from annotation in pod template + if statefulset.Spec.Template.Annotations != nil { + if _, ok := statefulset.Spec.Template.Annotations[annotationName]; ok { + found = true + return true, nil + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// NewOpenshiftClient creates an OpenShift client from the given rest config. 
+func NewOpenshiftClient(restCfg *rest.Config) (openshiftclient.Interface, error) { + return openshiftclient.NewForConfig(restCfg) +} + +// CreateDeploymentConfig creates a DeploymentConfig that references a ConfigMap/Secret. +func CreateDeploymentConfig(client openshiftclient.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( + *openshiftv1.DeploymentConfig, error, +) { + var dc *openshiftv1.DeploymentConfig + if useConfigMap { + dc = NewDeploymentConfigWithEnvFrom(name, namespace, name, "") + } else { + dc = NewDeploymentConfigWithEnvFrom(name, namespace, "", name) + } + dc.Annotations = annotations + dc.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + dc.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} + + return client.AppsV1().DeploymentConfigs(namespace).Create(context.Background(), dc, metav1.CreateOptions{}) +} + +// DeleteDeploymentConfig deletes the DeploymentConfig with the given name. +func DeleteDeploymentConfig(client openshiftclient.Interface, namespace, name string) error { + return client.AppsV1().DeploymentConfigs(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// WaitForDeploymentConfigReloadedAnnotation waits for a DeploymentConfig to have the specified reloaded annotation. 
+func WaitForDeploymentConfigReloadedAnnotation(client openshiftclient.Interface, namespace, name, annotationName string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + dc, err := client.AppsV1().DeploymentConfigs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + if dc.Spec.Template != nil && dc.Spec.Template.Annotations != nil { + if _, ok := dc.Spec.Template.Annotations[annotationName]; ok { + found = true + return true, nil + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForDeploymentPaused waits for a deployment to be paused (spec.Paused=true). +func WaitForDeploymentPaused(client kubernetes.Interface, namespace, name string, timeout time.Duration) (bool, error) { + var paused bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + if deployment.Spec.Paused { + paused = true + return true, nil + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return paused, nil + } + return paused, err +} + +// WaitForDeploymentUnpaused waits for a deployment to be unpaused (spec.Paused=false). 
+func WaitForDeploymentUnpaused(client kubernetes.Interface, namespace, name string, timeout time.Duration) (bool, error) { + var unpaused bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + if !deployment.Spec.Paused { + unpaused = true + return true, nil + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return unpaused, nil + } + return unpaused, err +} + +// WaitForCronJobTriggeredJob waits for a Job to be created by a CronJob (triggered by Reloader). +func WaitForCronJobTriggeredJob(client kubernetes.Interface, namespace, cronJobName string, timeout time.Duration) (bool, error) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + jobs, err := client.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, nil // Keep waiting + } + for _, job := range jobs.Items { + if strings.HasPrefix(job.Name, cronJobName+"-") { + if job.Annotations != nil { + if _, ok := job.Annotations["cronjob.kubernetes.io/instantiate"]; ok { + found = true + return true, nil + } + } + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForDeploymentEnvVar waits for a deployment's containers to have the specified env var with a non-empty value. 
+func WaitForDeploymentEnvVar(client kubernetes.Interface, namespace, name, envVarPrefix string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + for _, container := range deployment.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if strings.HasPrefix(env.Name, envVarPrefix) && env.Value != "" { + found = true + return true, nil + } + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForDaemonSetEnvVar waits for a daemonset's containers to have the specified env var with a non-empty value. +func WaitForDaemonSetEnvVar(client kubernetes.Interface, namespace, name, envVarPrefix string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + daemonset, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + for _, container := range daemonset.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if strings.HasPrefix(env.Name, envVarPrefix) && env.Value != "" { + found = true + return true, nil + } + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForStatefulSetEnvVar waits for a statefulset's containers to have the specified env var with a non-empty value. 
+func WaitForStatefulSetEnvVar(client kubernetes.Interface, namespace, name, envVarPrefix string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + statefulset, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + for _, container := range statefulset.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if strings.HasPrefix(env.Name, envVarPrefix) && env.Value != "" { + found = true + return true, nil + } + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} diff --git a/internal/pkg/util/interface.go b/internal/pkg/util/interface.go deleted file mode 100644 index ff261ab00..000000000 --- a/internal/pkg/util/interface.go +++ /dev/null @@ -1,50 +0,0 @@ -package util - -import ( - "reflect" - "strconv" - - "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// InterfaceSlice converts an interface to an interface array -func InterfaceSlice(slice interface{}) []interface{} { - s := reflect.ValueOf(slice) - if s.Kind() != reflect.Slice { - logrus.Errorf("InterfaceSlice() given a non-slice type") - } - - ret := make([]interface{}, s.Len()) - - for i := 0; i < s.Len(); i++ { - ret[i] = s.Index(i).Interface() - } - - return ret -} - -type ObjectMeta struct { - metav1.ObjectMeta -} - -func ToObjectMeta(kubernetesObject interface{}) ObjectMeta { - objectValue := reflect.ValueOf(kubernetesObject) - fieldName := reflect.TypeOf((*metav1.ObjectMeta)(nil)).Elem().Name() - field := objectValue.FieldByName(fieldName).Interface().(metav1.ObjectMeta) - - return ObjectMeta{ - ObjectMeta: field, - } -} - -// ParseBool returns result in bool format after parsing -func ParseBool(value interface{}) 
bool { - if reflect.Bool == reflect.TypeOf(value).Kind() { - return value.(bool) - } else if reflect.String == reflect.TypeOf(value).Kind() { - result, _ := strconv.ParseBool(value.(string)) - return result - } - return false -} diff --git a/internal/pkg/util/util.go b/internal/pkg/util/util.go deleted file mode 100644 index ec86d1c9c..000000000 --- a/internal/pkg/util/util.go +++ /dev/null @@ -1,128 +0,0 @@ -package util - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "sort" - "strings" - - "github.com/spf13/cobra" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/crypto" - "github.com/stakater/Reloader/internal/pkg/options" - v1 "k8s.io/api/core/v1" -) - -// ConvertToEnvVarName converts the given text into a usable env var -// removing any special chars with '_' and transforming text to upper case -func ConvertToEnvVarName(text string) string { - var buffer bytes.Buffer - upper := strings.ToUpper(text) - lastCharValid := false - for i := 0; i < len(upper); i++ { - ch := upper[i] - if (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') { - buffer.WriteString(string(ch)) - lastCharValid = true - } else { - if lastCharValid { - buffer.WriteString("_") - } - lastCharValid = false - } - } - return buffer.String() -} - -func GetSHAfromConfigmap(configmap *v1.ConfigMap) string { - values := []string{} - for k, v := range configmap.Data { - values = append(values, k+"="+v) - } - for k, v := range configmap.BinaryData { - values = append(values, k+"="+base64.StdEncoding.EncodeToString(v)) - } - sort.Strings(values) - return crypto.GenerateSHA(strings.Join(values, ";")) -} - -func GetSHAfromSecret(data map[string][]byte) string { - values := []string{} - for k, v := range data { - values = append(values, k+"="+string(v[:])) - } - sort.Strings(values) - return crypto.GenerateSHA(strings.Join(values, ";")) -} - -type List []string - -func (l *List) Contains(s string) bool { - for _, v := range *l { - if v == 
s { - return true - } - } - return false -} - -func ConfigureReloaderFlags(cmd *cobra.Command) { - cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources") - cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name") - cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name") - cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps") - cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps") - cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets") - cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation") - cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search") - cmd.PersistentFlags().StringVar(&options.PauseDeploymentAnnotation, "pause-deployment-annotation", "deployment.reloader.stakater.com/pause-period", "annotation to define the time period to pause a deployment after a configmap/secret change has been detected") - cmd.PersistentFlags().StringVar(&options.PauseDeploymentTimeAnnotation, "pause-deployment-time-annotation", "deployment.reloader.stakater.com/paused-at", "annotation to 
indicate when a deployment was paused by Reloader") - cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)") - cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)") - cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload") - cmd.PersistentFlags().StringSliceVar(&options.ResourcesToIgnore, "resources-to-ignore", options.ResourcesToIgnore, "list of resources to ignore (valid options 'configMaps' or 'secrets')") - cmd.PersistentFlags().StringSliceVar(&options.WorkloadTypesToIgnore, "ignored-workload-types", options.WorkloadTypesToIgnore, "list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)") - cmd.PersistentFlags().StringSliceVar(&options.NamespacesToIgnore, "namespaces-to-ignore", options.NamespacesToIgnore, "list of namespaces to ignore") - cmd.PersistentFlags().StringSliceVar(&options.NamespaceSelectors, "namespace-selector", options.NamespaceSelectors, "list of key:value labels to filter on for namespaces") - cmd.PersistentFlags().StringSliceVar(&options.ResourceSelectors, "resource-label-selector", options.ResourceSelectors, "list of key:value labels to filter on for configmaps and secrets") - cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts") - cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy") - cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events") - cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events") - cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running 
multiple replicas via leadership election") - cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts") - cmd.PersistentFlags().BoolVar(&options.EnablePProf, "enable-pprof", false, "Enable pprof for profiling") - cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060") -} - -func GetIgnoredResourcesList() (List, error) { - - ignoredResourcesList := options.ResourcesToIgnore // getStringSliceFromFlags(cmd, "resources-to-ignore") - - for _, v := range ignoredResourcesList { - if v != "configMaps" && v != "secrets" { - return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v) - } - } - - if len(ignoredResourcesList) > 1 { - return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both") - } - - return ignoredResourcesList, nil -} - -func GetIgnoredWorkloadTypesList() (List, error) { - - ignoredWorkloadTypesList := options.WorkloadTypesToIgnore - - for _, v := range ignoredWorkloadTypesList { - if v != "jobs" && v != "cronjobs" { - return nil, fmt.Errorf("'ignored-workload-types' accepts 'jobs', 'cronjobs', or both, not '%s'", v) - } - } - - return ignoredWorkloadTypesList, nil -} diff --git a/internal/pkg/util/util_test.go b/internal/pkg/util/util_test.go deleted file mode 100644 index 338f329f3..000000000 --- a/internal/pkg/util/util_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package util - -import ( - "testing" - - "github.com/stakater/Reloader/internal/pkg/options" - v1 "k8s.io/api/core/v1" -) - -func TestConvertToEnvVarName(t *testing.T) { - data := "www.stakater.com" - envVar := ConvertToEnvVarName(data) - if envVar != "WWW_STAKATER_COM" { - t.Errorf("Failed to convert data into environment variable") - } -} - -func TestGetHashFromConfigMap(t *testing.T) { - data := map[*v1.ConfigMap]string{ - { - Data: map[string]string{"test": "test"}, - }: "Only 
Data", - { - Data: map[string]string{"test": "test"}, - BinaryData: map[string][]byte{"bintest": []byte("test")}, - }: "Both Data and BinaryData", - { - BinaryData: map[string][]byte{"bintest": []byte("test")}, - }: "Only BinaryData", - } - converted := map[string]string{} - for cm, cmName := range data { - converted[cmName] = GetSHAfromConfigmap(cm) - } - - // Test that the has for each configmap is really unique - for cmName, cmHash := range converted { - count := 0 - for _, cmHash2 := range converted { - if cmHash == cmHash2 { - count++ - } - } - if count > 1 { - t.Errorf("Found duplicate hashes for %v", cmName) - } - } -} - -func TestGetIgnoredWorkloadTypesList(t *testing.T) { - // Save original state - originalWorkloadTypes := options.WorkloadTypesToIgnore - defer func() { - options.WorkloadTypesToIgnore = originalWorkloadTypes - }() - - tests := []struct { - name string - workloadTypes []string - expectError bool - expected []string - }{ - { - name: "Both jobs and cronjobs", - workloadTypes: []string{"jobs", "cronjobs"}, - expectError: false, - expected: []string{"jobs", "cronjobs"}, - }, - { - name: "Only jobs", - workloadTypes: []string{"jobs"}, - expectError: false, - expected: []string{"jobs"}, - }, - { - name: "Only cronjobs", - workloadTypes: []string{"cronjobs"}, - expectError: false, - expected: []string{"cronjobs"}, - }, - { - name: "Empty list", - workloadTypes: []string{}, - expectError: false, - expected: []string{}, - }, - { - name: "Invalid workload type", - workloadTypes: []string{"invalid"}, - expectError: true, - expected: nil, - }, - { - name: "Mixed valid and invalid", - workloadTypes: []string{"jobs", "invalid"}, - expectError: true, - expected: nil, - }, - { - name: "Duplicate values", - workloadTypes: []string{"jobs", "jobs"}, - expectError: false, - expected: []string{"jobs", "jobs"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set the global option - options.WorkloadTypesToIgnore = 
tt.workloadTypes - - result, err := GetIgnoredWorkloadTypesList() - - if tt.expectError && err == nil { - t.Errorf("Expected error but got none") - } - - if !tt.expectError && err != nil { - t.Errorf("Expected no error but got: %v", err) - } - - if !tt.expectError { - if len(result) != len(tt.expected) { - t.Errorf("Expected %v, got %v", tt.expected, result) - return - } - - for i, expected := range tt.expected { - if i >= len(result) || result[i] != expected { - t.Errorf("Expected %v, got %v", tt.expected, result) - break - } - } - } - }) - } -} - -func TestListContains(t *testing.T) { - tests := []struct { - name string - list List - item string - expected bool - }{ - { - name: "List contains item", - list: List{"jobs", "cronjobs"}, - item: "jobs", - expected: true, - }, - { - name: "List does not contain item", - list: List{"jobs"}, - item: "cronjobs", - expected: false, - }, - { - name: "Empty list", - list: List{}, - item: "jobs", - expected: false, - }, - { - name: "Case sensitive matching", - list: List{"jobs", "cronjobs"}, - item: "Jobs", - expected: false, - }, - { - name: "Multiple occurrences", - list: List{"jobs", "jobs", "cronjobs"}, - item: "jobs", - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := tt.list.Contains(tt.item) - if result != tt.expected { - t.Errorf("Expected %v, got %v", tt.expected, result) - } - }) - } -} diff --git a/internal/pkg/webhook/webhook.go b/internal/pkg/webhook/webhook.go new file mode 100644 index 000000000..3653b22e2 --- /dev/null +++ b/internal/pkg/webhook/webhook.go @@ -0,0 +1,95 @@ +// Package webhook handles sending reload notifications to external endpoints. +// When --webhook-url is set, Reloader sends HTTP POST requests instead of modifying workloads. 
+package webhook + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/go-logr/logr" + + httputil "github.com/stakater/Reloader/internal/pkg/http" +) + +// Payload represents the data sent to the webhook endpoint. +type Payload struct { + Kind string `json:"kind"` + Namespace string `json:"namespace"` + ResourceName string `json:"resourceName"` + ResourceType string `json:"resourceType"` + Hash string `json:"hash"` + Timestamp time.Time `json:"timestamp"` + + // Workloads contains the list of workloads that would be reloaded. + Workloads []WorkloadInfo `json:"workloads"` +} + +// WorkloadInfo describes a workload that would be reloaded. +type WorkloadInfo struct { + Kind string `json:"kind"` + Name string `json:"name"` + Namespace string `json:"namespace"` +} + +// Client sends reload notifications to webhook endpoints. +type Client struct { + httpClient *http.Client + url string + log logr.Logger +} + +// NewClient creates a new webhook client. +func NewClient(url string, log logr.Logger) *Client { + return &Client{ + httpClient: httputil.NewDefaultClient(), + url: url, + log: log, + } +} + +// Send posts the payload to the configured webhook URL. 
+func (c *Client) Send(ctx context.Context, payload Payload) error { + if c.url == "" { + return nil + } + + data, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("marshaling payload: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("creating request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", "Reloader/2.0") + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("sending request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("webhook returned status %d", resp.StatusCode) + } + + c.log.V(1).Info("webhook notification sent", + "url", c.url, + "resourceType", payload.ResourceType, + "resourceName", payload.ResourceName, + "workloadCount", len(payload.Workloads), + ) + + return nil +} + +// IsConfigured returns true if the webhook URL is set. 
+func (c *Client) IsConfigured() bool { + return c != nil && c.url != "" +} diff --git a/internal/pkg/webhook/webhook_test.go b/internal/pkg/webhook/webhook_test.go new file mode 100644 index 000000000..b88ed246a --- /dev/null +++ b/internal/pkg/webhook/webhook_test.go @@ -0,0 +1,283 @@ +package webhook + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/go-logr/logr" +) + +func TestNewClient_SetsURL(t *testing.T) { + c := NewClient("http://example.com/webhook", logr.Discard()) + + if c == nil { + t.Fatal("NewClient should not return nil") + } + if c.url != "http://example.com/webhook" { + t.Errorf("URL = %q, want %q", c.url, "http://example.com/webhook") + } + if c.httpClient == nil { + t.Error("httpClient should not be nil") + } + if c.httpClient.Timeout != 30*time.Second { + t.Errorf("Timeout = %v, want %v", c.httpClient.Timeout, 30*time.Second) + } +} + +func TestIsConfigured_NilClient(t *testing.T) { + var c *Client = nil + + if c.IsConfigured() { + t.Error("IsConfigured() should return false for nil client") + } +} + +func TestIsConfigured_EmptyURL(t *testing.T) { + c := NewClient("", logr.Discard()) + + if c.IsConfigured() { + t.Error("IsConfigured() should return false for empty URL") + } +} + +func TestIsConfigured_ValidURL(t *testing.T) { + c := NewClient("http://example.com/webhook", logr.Discard()) + + if !c.IsConfigured() { + t.Error("IsConfigured() should return true for valid URL") + } +} + +func TestSend_EmptyURL_ReturnsNil(t *testing.T) { + c := NewClient("", logr.Discard()) + + payload := Payload{ + Kind: "ConfigMap", + Namespace: "default", + ResourceName: "my-config", + ResourceType: "configmap", + } + + err := c.Send(context.Background(), payload) + if err != nil { + t.Errorf("Send() with empty URL should return nil, got %v", err) + } +} + +func TestSend_MarshalPayload(t *testing.T) { + var receivedPayload Payload + + server := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + _ = json.Unmarshal(body, &receivedPayload) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + + payload := Payload{ + Kind: "ConfigMap", + Namespace: "default", + ResourceName: "my-config", + ResourceType: "configmap", + Hash: "abc123", + Timestamp: time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC), + Workloads: []WorkloadInfo{ + {Kind: "Deployment", Name: "my-deploy", Namespace: "default"}, + }, + } + + err := c.Send(context.Background(), payload) + if err != nil { + t.Fatalf("Send() error = %v", err) + } + + if receivedPayload.Kind != "ConfigMap" { + t.Errorf("Received Kind = %q, want %q", receivedPayload.Kind, "ConfigMap") + } + if receivedPayload.Namespace != "default" { + t.Errorf("Received Namespace = %q, want %q", receivedPayload.Namespace, "default") + } + if receivedPayload.ResourceName != "my-config" { + t.Errorf("Received ResourceName = %q, want %q", receivedPayload.ResourceName, "my-config") + } + if receivedPayload.Hash != "abc123" { + t.Errorf("Received Hash = %q, want %q", receivedPayload.Hash, "abc123") + } + if len(receivedPayload.Workloads) != 1 { + t.Errorf("Received Workloads count = %d, want 1", len(receivedPayload.Workloads)) + } +} + +func TestSend_SetsCorrectHeaders(t *testing.T) { + var contentType, userAgent string + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + contentType = r.Header.Get("Content-Type") + userAgent = r.Header.Get("User-Agent") + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + + err := c.Send(context.Background(), Payload{}) + if err != nil { + t.Fatalf("Send() error = %v", err) + } + + if contentType != "application/json" { + t.Errorf("Content-Type = %q, want %q", contentType, "application/json") + } + if userAgent != "Reloader/2.0" { + t.Errorf("User-Agent = %q, want %q", userAgent, 
"Reloader/2.0") + } +} + +func TestSend_UsesPostMethod(t *testing.T) { + var method string + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + method = r.Method + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + + err := c.Send(context.Background(), Payload{}) + if err != nil { + t.Fatalf("Send() error = %v", err) + } + + if method != http.MethodPost { + t.Errorf("Method = %q, want %q", method, http.MethodPost) + } +} + +func TestSend_Non2xxResponse(t *testing.T) { + tests := []struct { + name string + statusCode int + wantErr bool + }{ + {"200 OK", 200, false}, + {"201 Created", 201, false}, + {"204 No Content", 204, false}, + {"299 upper bound", 299, false}, + {"300 redirect", 300, true}, + {"400 Bad Request", 400, true}, + {"404 Not Found", 404, true}, + {"500 Internal Error", 500, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tt.statusCode) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + err := c.Send(context.Background(), Payload{}) + + if (err != nil) != tt.wantErr { + t.Errorf("Send() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestSend_NetworkError(t *testing.T) { + // Use a URL that won't connect + c := NewClient("http://127.0.0.1:1", logr.Discard()) + + err := c.Send(context.Background(), Payload{}) + if err == nil { + t.Error("Send() should return error for network failure") + } +} + +func TestSend_ContextCancellation(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(100 * time.Millisecond) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel 
immediately + + err := c.Send(ctx, Payload{}) + if err == nil { + t.Error("Send() should return error for cancelled context") + } +} + +func TestPayload_JSONSerialization(t *testing.T) { + payload := Payload{ + Kind: "ConfigMap", + Namespace: "default", + ResourceName: "my-config", + ResourceType: "configmap", + Hash: "abc123", + Timestamp: time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC), + Workloads: []WorkloadInfo{ + {Kind: "Deployment", Name: "my-deploy", Namespace: "default"}, + {Kind: "StatefulSet", Name: "my-sts", Namespace: "default"}, + }, + } + + data, err := json.Marshal(payload) + if err != nil { + t.Fatalf("Failed to marshal payload: %v", err) + } + + var unmarshaled Payload + if err := json.Unmarshal(data, &unmarshaled); err != nil { + t.Fatalf("Failed to unmarshal payload: %v", err) + } + + if unmarshaled.Kind != payload.Kind { + t.Errorf("Kind = %q, want %q", unmarshaled.Kind, payload.Kind) + } + if len(unmarshaled.Workloads) != 2 { + t.Errorf("Workloads count = %d, want 2", len(unmarshaled.Workloads)) + } +} + +func TestWorkloadInfo_JSONSerialization(t *testing.T) { + info := WorkloadInfo{ + Kind: "Deployment", + Name: "my-deploy", + Namespace: "production", + } + + data, err := json.Marshal(info) + if err != nil { + t.Fatalf("Failed to marshal: %v", err) + } + + var unmarshaled WorkloadInfo + if err := json.Unmarshal(data, &unmarshaled); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + + if unmarshaled.Kind != "Deployment" { + t.Errorf("Kind = %q, want %q", unmarshaled.Kind, "Deployment") + } + if unmarshaled.Name != "my-deploy" { + t.Errorf("Name = %q, want %q", unmarshaled.Name, "my-deploy") + } + if unmarshaled.Namespace != "production" { + t.Errorf("Namespace = %q, want %q", unmarshaled.Namespace, "production") + } +} diff --git a/internal/pkg/workload/base.go b/internal/pkg/workload/base.go new file mode 100644 index 000000000..e8479bfb4 --- /dev/null +++ b/internal/pkg/workload/base.go @@ -0,0 +1,189 @@ +package workload + +import ( 
+ "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// PodTemplateAccessor provides access to a workload's pod template. +// Each workload type implements this to provide access to its specific template location. +type PodTemplateAccessor interface { + // GetPodTemplateSpec returns a pointer to the pod template spec. + // Returns nil if the workload doesn't have a pod template + GetPodTemplateSpec() *corev1.PodTemplateSpec + + // GetObjectMeta returns the workload's object metadata. + GetObjectMeta() *metav1.ObjectMeta +} + +// BaseWorkload provides common functionality for all workload types. +// It uses composition with a PodTemplateAccessor to access type-specific fields. +type BaseWorkload[T client.Object] struct { + object T + original T + accessor PodTemplateAccessor + kind Kind +} + +// NewBaseWorkload creates a new BaseWorkload with the given object and accessor. +func NewBaseWorkload[T client.Object](obj T, original T, accessor PodTemplateAccessor, kind Kind) *BaseWorkload[T] { + return &BaseWorkload[T]{ + object: obj, + original: original, + accessor: accessor, + kind: kind, + } +} + +func (b *BaseWorkload[T]) Kind() Kind { + return b.kind +} + +func (b *BaseWorkload[T]) GetObject() client.Object { + return b.object +} + +func (b *BaseWorkload[T]) GetName() string { + return b.accessor.GetObjectMeta().Name +} + +func (b *BaseWorkload[T]) GetNamespace() string { + return b.accessor.GetObjectMeta().Namespace +} + +func (b *BaseWorkload[T]) GetAnnotations() map[string]string { + return b.accessor.GetObjectMeta().Annotations +} + +func (b *BaseWorkload[T]) GetPodTemplateAnnotations() map[string]string { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + if template.Annotations == nil { + template.Annotations = make(map[string]string) + } + return template.Annotations +} + +func (b *BaseWorkload[T]) SetPodTemplateAnnotation(key, 
value string) { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return + } + if template.Annotations == nil { + template.Annotations = make(map[string]string) + } + template.Annotations[key] = value +} + +func (b *BaseWorkload[T]) GetContainers() []corev1.Container { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + return template.Spec.Containers +} + +func (b *BaseWorkload[T]) SetContainers(containers []corev1.Container) { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return + } + template.Spec.Containers = containers +} + +func (b *BaseWorkload[T]) GetInitContainers() []corev1.Container { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + return template.Spec.InitContainers +} + +func (b *BaseWorkload[T]) SetInitContainers(containers []corev1.Container) { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return + } + template.Spec.InitContainers = containers +} + +func (b *BaseWorkload[T]) GetVolumes() []corev1.Volume { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + return template.Spec.Volumes +} + +func (b *BaseWorkload[T]) GetEnvFromSources() []corev1.EnvFromSource { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + var sources []corev1.EnvFromSource + for _, container := range template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) 
+ } + return sources +} + +func (b *BaseWorkload[T]) UsesConfigMap(name string) bool { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return false + } + return SpecUsesConfigMap(&template.Spec, name) +} + +func (b *BaseWorkload[T]) UsesSecret(name string) bool { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return false + } + return SpecUsesSecret(&template.Spec, name) +} + +func (b *BaseWorkload[T]) GetOwnerReferences() []metav1.OwnerReference { + return b.accessor.GetObjectMeta().OwnerReferences +} + +// Update performs a strategic merge patch update. +func (b *BaseWorkload[T]) Update(ctx context.Context, c client.Client) error { + return c.Patch(ctx, b.object, client.StrategicMergeFrom(b.original), client.FieldOwner(FieldManager)) +} + +// ResetOriginal resets the original state to the current object state. +func (b *BaseWorkload[T]) ResetOriginal() { + //nolint:errcheck // Type assertion is safe: DeepCopyObject returns same type T + b.original = b.object.DeepCopyObject().(T) +} + +// UpdateStrategy returns the default patch strategy. +// Workloads with special update logic should override this. +func (b *BaseWorkload[T]) UpdateStrategy() UpdateStrategy { + return UpdateStrategyPatch +} + +// PerformSpecialUpdate returns false for standard workloads. +// Workloads with special update logic should override this. +func (b *BaseWorkload[T]) PerformSpecialUpdate(ctx context.Context, c client.Client) (bool, error) { + return false, nil +} + +// Object returns the underlying Kubernetes object. +func (b *BaseWorkload[T]) Object() T { + return b.object +} + +// Original returns the original state of the object. 
+func (b *BaseWorkload[T]) Original() T { + return b.original +} diff --git a/internal/pkg/workload/cronjob.go b/internal/pkg/workload/cronjob.go new file mode 100644 index 000000000..222d4c61e --- /dev/null +++ b/internal/pkg/workload/cronjob.go @@ -0,0 +1,98 @@ +package workload + +import ( + "context" + "maps" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// cronJobAccessor implements PodTemplateAccessor for CronJob. +type cronJobAccessor struct { + cronjob *batchv1.CronJob +} + +func (a *cronJobAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + // CronJob has the pod template nested under JobTemplate.Spec.Template + return &a.cronjob.Spec.JobTemplate.Spec.Template +} + +func (a *cronJobAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.cronjob.ObjectMeta +} + +// CronJobWorkload wraps a Kubernetes CronJob. +// Note: CronJobs have a special update mechanism - instead of updating the CronJob itself, +// Reloader creates a new Job from the CronJob's template. +type CronJobWorkload struct { + *BaseWorkload[*batchv1.CronJob] +} + +// NewCronJobWorkload creates a new CronJobWorkload. +func NewCronJobWorkload(c *batchv1.CronJob) *CronJobWorkload { + original := c.DeepCopy() + accessor := &cronJobAccessor{cronjob: c} + return &CronJobWorkload{ + BaseWorkload: NewBaseWorkload(c, original, accessor, KindCronJob), + } +} + +// Ensure CronJobWorkload implements Workload. +var _ Workload = (*CronJobWorkload)(nil) + +// Update for CronJob is a no-op - use PerformSpecialUpdate instead. +// CronJobs trigger reloads by creating a new Job from their template. 
+func (w *CronJobWorkload) Update(ctx context.Context, c client.Client) error { + // CronJobs don't get updated directly - a new Job is created instead + // This is handled by PerformSpecialUpdate + return nil +} + +// ResetOriginal is a no-op for CronJobs since they don't use strategic merge patch. +// CronJobs create new Jobs instead of being patched. +func (w *CronJobWorkload) ResetOriginal() {} + +func (w *CronJobWorkload) UpdateStrategy() UpdateStrategy { + return UpdateStrategyCreateNew +} + +// PerformSpecialUpdate creates a new Job from the CronJob's template. +// This triggers an immediate execution of the CronJob with updated config. +func (w *CronJobWorkload) PerformSpecialUpdate(ctx context.Context, c client.Client) (bool, error) { + cronJob := w.Object() + + annotations := make(map[string]string) + annotations["cronjob.kubernetes.io/instantiate"] = "manual" + maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations) + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: cronJob.Name + "-", + Namespace: cronJob.Namespace, + Annotations: annotations, + Labels: cronJob.Spec.JobTemplate.Labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob")), + }, + }, + Spec: cronJob.Spec.JobTemplate.Spec, + } + + if err := c.Create(ctx, job, client.FieldOwner(FieldManager)); err != nil { + return false, err + } + + return true, nil +} + +func (w *CronJobWorkload) DeepCopy() Workload { + return NewCronJobWorkload(w.Object().DeepCopy()) +} + +// GetCronJob returns the underlying CronJob for special handling. 
+func (w *CronJobWorkload) GetCronJob() *batchv1.CronJob { + return w.Object() +} diff --git a/internal/pkg/workload/daemonset.go b/internal/pkg/workload/daemonset.go new file mode 100644 index 000000000..ee6b121e7 --- /dev/null +++ b/internal/pkg/workload/daemonset.go @@ -0,0 +1,46 @@ +package workload + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// daemonSetAccessor implements PodTemplateAccessor for DaemonSet. +type daemonSetAccessor struct { + daemonset *appsv1.DaemonSet +} + +func (a *daemonSetAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.daemonset.Spec.Template +} + +func (a *daemonSetAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.daemonset.ObjectMeta +} + +// DaemonSetWorkload wraps a Kubernetes DaemonSet. +type DaemonSetWorkload struct { + *BaseWorkload[*appsv1.DaemonSet] +} + +// NewDaemonSetWorkload creates a new DaemonSetWorkload. +func NewDaemonSetWorkload(d *appsv1.DaemonSet) *DaemonSetWorkload { + original := d.DeepCopy() + accessor := &daemonSetAccessor{daemonset: d} + return &DaemonSetWorkload{ + BaseWorkload: NewBaseWorkload(d, original, accessor, KindDaemonSet), + } +} + +// Ensure DaemonSetWorkload implements Workload. +var _ Workload = (*DaemonSetWorkload)(nil) + +func (w *DaemonSetWorkload) DeepCopy() Workload { + return NewDaemonSetWorkload(w.Object().DeepCopy()) +} + +// GetDaemonSet returns the underlying DaemonSet for special handling. 
+func (w *DaemonSetWorkload) GetDaemonSet() *appsv1.DaemonSet { + return w.Object() +} diff --git a/internal/pkg/workload/deployment.go b/internal/pkg/workload/deployment.go new file mode 100644 index 000000000..ddb621cf3 --- /dev/null +++ b/internal/pkg/workload/deployment.go @@ -0,0 +1,46 @@ +package workload + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// deploymentAccessor implements PodTemplateAccessor for Deployment. +type deploymentAccessor struct { + deployment *appsv1.Deployment +} + +func (a *deploymentAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.deployment.Spec.Template +} + +func (a *deploymentAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.deployment.ObjectMeta +} + +// DeploymentWorkload wraps a Kubernetes Deployment. +type DeploymentWorkload struct { + *BaseWorkload[*appsv1.Deployment] +} + +// NewDeploymentWorkload creates a new DeploymentWorkload. +func NewDeploymentWorkload(d *appsv1.Deployment) *DeploymentWorkload { + original := d.DeepCopy() + accessor := &deploymentAccessor{deployment: d} + return &DeploymentWorkload{ + BaseWorkload: NewBaseWorkload(d, original, accessor, KindDeployment), + } +} + +// Ensure DeploymentWorkload implements Workload. +var _ Workload = (*DeploymentWorkload)(nil) + +func (w *DeploymentWorkload) DeepCopy() Workload { + return NewDeploymentWorkload(w.Object().DeepCopy()) +} + +// GetDeployment returns the underlying Deployment for special handling. 
+func (w *DeploymentWorkload) GetDeployment() *appsv1.Deployment { + return w.Object() +} diff --git a/internal/pkg/workload/deploymentconfig.go b/internal/pkg/workload/deploymentconfig.go new file mode 100644 index 000000000..736a486ed --- /dev/null +++ b/internal/pkg/workload/deploymentconfig.go @@ -0,0 +1,77 @@ +package workload + +import ( + openshiftv1 "github.com/openshift/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// deploymentConfigAccessor implements PodTemplateAccessor for DeploymentConfig. +type deploymentConfigAccessor struct { + dc *openshiftv1.DeploymentConfig +} + +func (a *deploymentConfigAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + // DeploymentConfig has a pointer to PodTemplateSpec which may be nil + return a.dc.Spec.Template +} + +func (a *deploymentConfigAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.dc.ObjectMeta +} + +// DeploymentConfigWorkload wraps an OpenShift DeploymentConfig. +type DeploymentConfigWorkload struct { + *BaseWorkload[*openshiftv1.DeploymentConfig] +} + +// NewDeploymentConfigWorkload creates a new DeploymentConfigWorkload. +func NewDeploymentConfigWorkload(dc *openshiftv1.DeploymentConfig) *DeploymentConfigWorkload { + original := dc.DeepCopy() + accessor := &deploymentConfigAccessor{dc: dc} + return &DeploymentConfigWorkload{ + BaseWorkload: NewBaseWorkload(dc, original, accessor, KindDeploymentConfig), + } +} + +// Ensure DeploymentConfigWorkload implements Workload. +var _ Workload = (*DeploymentConfigWorkload)(nil) + +// SetPodTemplateAnnotation overrides the base to ensure Template is initialized. 
+func (w *DeploymentConfigWorkload) SetPodTemplateAnnotation(key, value string) { + dc := w.Object() + if dc.Spec.Template == nil { + dc.Spec.Template = &corev1.PodTemplateSpec{} + } + if dc.Spec.Template.Annotations == nil { + dc.Spec.Template.Annotations = make(map[string]string) + } + dc.Spec.Template.Annotations[key] = value +} + +// SetContainers overrides the base to ensure Template is initialized. +func (w *DeploymentConfigWorkload) SetContainers(containers []corev1.Container) { + dc := w.Object() + if dc.Spec.Template == nil { + dc.Spec.Template = &corev1.PodTemplateSpec{} + } + dc.Spec.Template.Spec.Containers = containers +} + +// SetInitContainers overrides the base to ensure Template is initialized. +func (w *DeploymentConfigWorkload) SetInitContainers(containers []corev1.Container) { + dc := w.Object() + if dc.Spec.Template == nil { + dc.Spec.Template = &corev1.PodTemplateSpec{} + } + dc.Spec.Template.Spec.InitContainers = containers +} + +func (w *DeploymentConfigWorkload) DeepCopy() Workload { + return NewDeploymentConfigWorkload(w.Object().DeepCopy()) +} + +// GetDeploymentConfig returns the underlying DeploymentConfig for special handling. +func (w *DeploymentConfigWorkload) GetDeploymentConfig() *openshiftv1.DeploymentConfig { + return w.Object() +} diff --git a/internal/pkg/workload/interface.go b/internal/pkg/workload/interface.go new file mode 100644 index 000000000..6b4ccf7d4 --- /dev/null +++ b/internal/pkg/workload/interface.go @@ -0,0 +1,141 @@ +// Package workload provides an abstraction layer for Kubernetes workload types. +// It allows uniform handling of Deployments, DaemonSets, StatefulSets, Jobs, CronJobs, and Argo Rollouts. 
+// +// Note: Jobs and CronJobs have special update mechanisms: +// - Job: deleted and recreated with the same spec +// - CronJob: a new Job is created from the CronJob's template +package workload + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// FieldManager is the field manager name used for server-side apply and patch operations. +// This identifies Reloader as the actor making changes to workload resources. +const FieldManager = "reloader" + +// Kind represents the type of workload. +type Kind string + +const ( + KindDeployment Kind = "Deployment" + KindDaemonSet Kind = "DaemonSet" + KindStatefulSet Kind = "StatefulSet" + KindArgoRollout Kind = "Rollout" + KindJob Kind = "Job" + KindCronJob Kind = "CronJob" + KindDeploymentConfig Kind = "DeploymentConfig" +) + +// UpdateStrategy defines how a workload should be updated. +type UpdateStrategy int + +const ( + // UpdateStrategyPatch uses strategic merge patch (default for most workloads). + UpdateStrategyPatch UpdateStrategy = iota + // UpdateStrategyRecreate deletes and recreates the workload (Jobs). + UpdateStrategyRecreate + // UpdateStrategyCreateNew creates a new resource from template (CronJobs). + UpdateStrategyCreateNew +) + +// WorkloadIdentity provides basic identification for a workload. +type WorkloadIdentity interface { + // Kind returns the workload type. + Kind() Kind + + // GetObject returns the underlying Kubernetes object. + GetObject() client.Object + + // GetName returns the workload name. + GetName() string + + // GetNamespace returns the workload namespace. + GetNamespace() string +} + +// WorkloadReader provides read-only access to workload state. +type WorkloadReader interface { + WorkloadIdentity + + // GetAnnotations returns the workload's annotations. + GetAnnotations() map[string]string + + // GetPodTemplateAnnotations returns annotations from the pod template spec. 
+ GetPodTemplateAnnotations() map[string]string + + // GetContainers returns all containers (including init containers). + GetContainers() []corev1.Container + + // GetInitContainers returns all init containers. + GetInitContainers() []corev1.Container + + // GetVolumes returns the pod template volumes. + GetVolumes() []corev1.Volume + + // GetEnvFromSources returns all envFrom sources from all containers. + GetEnvFromSources() []corev1.EnvFromSource + + // GetOwnerReferences returns the owner references of the workload. + GetOwnerReferences() []metav1.OwnerReference +} + +// WorkloadMatcher provides methods for checking resource usage. +type WorkloadMatcher interface { + // UsesConfigMap checks if the workload uses a specific ConfigMap. + UsesConfigMap(name string) bool + + // UsesSecret checks if the workload uses a specific Secret. + UsesSecret(name string) bool +} + +// WorkloadMutator provides methods for modifying workload state. +type WorkloadMutator interface { + // SetPodTemplateAnnotation sets an annotation on the pod template. + SetPodTemplateAnnotation(key, value string) + + // SetContainers updates the containers. + SetContainers(containers []corev1.Container) + + // SetInitContainers updates the init containers. + SetInitContainers(containers []corev1.Container) +} + +// WorkloadUpdater provides methods for persisting workload changes. +type WorkloadUpdater interface { + // Update persists changes to the workload. + Update(ctx context.Context, c client.Client) error + + // UpdateStrategy returns how this workload should be updated. + // Most workloads use UpdateStrategyPatch (strategic merge patch). + // Jobs use UpdateStrategyRecreate (delete and recreate). + // CronJobs use UpdateStrategyCreateNew (create a new Job from template). + UpdateStrategy() UpdateStrategy + + // PerformSpecialUpdate handles non-standard update logic. + // This is called when UpdateStrategy() != UpdateStrategyPatch. 
+ // For UpdateStrategyPatch workloads, this returns (false, nil). + PerformSpecialUpdate(ctx context.Context, c client.Client) (updated bool, err error) + + // ResetOriginal resets the original state to the current object state. + // This should be called after re-fetching the object (e.g., after a conflict) + // to ensure strategic merge patch diffs are calculated correctly. + ResetOriginal() + + // DeepCopy returns a deep copy of the workload. + DeepCopy() Workload +} + +// Workload combines all workload interfaces for full workload access. +// Use specific interfaces (WorkloadReader, WorkloadMatcher, etc.) when possible +// to limit scope and improve testability. +type Workload interface { + WorkloadReader + WorkloadMatcher + WorkloadMutator + WorkloadUpdater +} diff --git a/internal/pkg/workload/job.go b/internal/pkg/workload/job.go new file mode 100644 index 000000000..557c8f6c4 --- /dev/null +++ b/internal/pkg/workload/job.go @@ -0,0 +1,107 @@ +package workload + +import ( + "context" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// jobAccessor implements PodTemplateAccessor for Job. +type jobAccessor struct { + job *batchv1.Job +} + +func (a *jobAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.job.Spec.Template +} + +func (a *jobAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.job.ObjectMeta +} + +// JobWorkload wraps a Kubernetes Job. +// Note: Jobs have a special update mechanism - instead of updating the Job, +// Reloader deletes and recreates it with the same spec. +type JobWorkload struct { + *BaseWorkload[*batchv1.Job] +} + +// NewJobWorkload creates a new JobWorkload. 
+func NewJobWorkload(j *batchv1.Job) *JobWorkload { + original := j.DeepCopy() + accessor := &jobAccessor{job: j} + return &JobWorkload{ + BaseWorkload: NewBaseWorkload(j, original, accessor, KindJob), + } +} + +// Ensure JobWorkload implements Workload. +var _ Workload = (*JobWorkload)(nil) + +// Update for Job is a no-op - use PerformSpecialUpdate instead. +// Jobs trigger reloads by being deleted and recreated. +func (w *JobWorkload) Update(ctx context.Context, c client.Client) error { + // Jobs don't get updated directly - they are deleted and recreated + // This is handled by PerformSpecialUpdate + return nil +} + +// ResetOriginal is a no-op for Jobs since they don't use strategic merge patch. +// Jobs are deleted and recreated instead of being patched. +func (w *JobWorkload) ResetOriginal() {} + +func (w *JobWorkload) UpdateStrategy() UpdateStrategy { + return UpdateStrategyRecreate +} + +// PerformSpecialUpdate deletes the Job and recreates it with the updated spec. +// This is necessary because Jobs are immutable after creation. 
+func (w *JobWorkload) PerformSpecialUpdate(ctx context.Context, c client.Client) (bool, error) { + oldJob := w.Object() + newJob := oldJob.DeepCopy() + + // Delete the old job with background propagation + policy := metav1.DeletePropagationBackground + if err := c.Delete(ctx, oldJob, &client.DeleteOptions{ + PropagationPolicy: &policy, + }); err != nil { + if !errors.IsNotFound(err) { + return false, err + } + } + + // Clear fields that should not be specified when creating a new Job + newJob.ResourceVersion = "" + newJob.UID = "" + newJob.CreationTimestamp = metav1.Time{} + newJob.Status = batchv1.JobStatus{} + + // Remove problematic labels that are auto-generated + delete(newJob.Spec.Template.Labels, "controller-uid") + delete(newJob.Spec.Template.Labels, batchv1.ControllerUidLabel) + delete(newJob.Spec.Template.Labels, batchv1.JobNameLabel) + delete(newJob.Spec.Template.Labels, "job-name") + + // Remove the selector to allow it to be auto-generated + newJob.Spec.Selector = nil + + // Create the new job with same spec + if err := c.Create(ctx, newJob, client.FieldOwner(FieldManager)); err != nil { + return false, err + } + + return true, nil +} + +func (w *JobWorkload) DeepCopy() Workload { + return NewJobWorkload(w.Object().DeepCopy()) +} + +// GetJob returns the underlying Job for special handling. +func (w *JobWorkload) GetJob() *batchv1.Job { + return w.Object() +} diff --git a/internal/pkg/workload/lister.go b/internal/pkg/workload/lister.go new file mode 100644 index 000000000..1b982fead --- /dev/null +++ b/internal/pkg/workload/lister.go @@ -0,0 +1,130 @@ +package workload + +import ( + "context" + + openshiftv1 "github.com/openshift/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// IgnoreChecker checks if a workload kind should be ignored. +type IgnoreChecker interface { + IsWorkloadIgnored(kind string) bool +} + +// Lister lists workloads from the cluster. 
+type Lister struct { + Client client.Client + Registry *Registry + Checker IgnoreChecker +} + +// NewLister creates a new workload lister. +func NewLister(c client.Client, registry *Registry, checker IgnoreChecker) *Lister { + return &Lister{ + Client: c, + Registry: registry, + Checker: checker, + } +} + +// List returns all workloads in the given namespace. +func (l *Lister) List(ctx context.Context, namespace string) ([]Workload, error) { + var result []Workload + + for _, kind := range l.Registry.SupportedKinds() { + if l.Checker != nil && l.Checker.IsWorkloadIgnored(string(kind)) { + continue + } + + workloads, err := l.listByKind(ctx, namespace, kind) + if err != nil { + return nil, err + } + result = append(result, workloads...) + } + + return result, nil +} + +func (l *Lister) listByKind(ctx context.Context, namespace string, kind Kind) ([]Workload, error) { + lister := l.Registry.ListerFor(kind) + if lister == nil { + return nil, nil + } + return lister(ctx, l.Client, namespace) +} + +func listDeployments(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { + var list appsv1.DeploymentList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]Workload, len(list.Items)) + for i := range list.Items { + result[i] = NewDeploymentWorkload(&list.Items[i]) + } + return result, nil +} + +func listDaemonSets(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { + var list appsv1.DaemonSetList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]Workload, len(list.Items)) + for i := range list.Items { + result[i] = NewDaemonSetWorkload(&list.Items[i]) + } + return result, nil +} + +func listStatefulSets(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { + var list appsv1.StatefulSetList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + 
return nil, err + } + result := make([]Workload, len(list.Items)) + for i := range list.Items { + result[i] = NewStatefulSetWorkload(&list.Items[i]) + } + return result, nil +} + +func listJobs(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { + var list batchv1.JobList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]Workload, len(list.Items)) + for i := range list.Items { + result[i] = NewJobWorkload(&list.Items[i]) + } + return result, nil +} + +func listCronJobs(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { + var list batchv1.CronJobList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]Workload, len(list.Items)) + for i := range list.Items { + result[i] = NewCronJobWorkload(&list.Items[i]) + } + return result, nil +} + +func listDeploymentConfigs(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { + var list openshiftv1.DeploymentConfigList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]Workload, len(list.Items)) + for i := range list.Items { + result[i] = NewDeploymentConfigWorkload(&list.Items[i]) + } + return result, nil +} diff --git a/internal/pkg/workload/registry.go b/internal/pkg/workload/registry.go new file mode 100644 index 000000000..5392eca1b --- /dev/null +++ b/internal/pkg/workload/registry.go @@ -0,0 +1,144 @@ +package workload + +import ( + "context" + "fmt" + "strings" + + argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + openshiftv1 "github.com/openshift/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// WorkloadLister is a function that lists workloads of a specific kind. 
+type WorkloadLister func(ctx context.Context, c client.Client, namespace string) ([]Workload, error) + +// RegistryOptions configures the workload registry. +type RegistryOptions struct { + ArgoRolloutsEnabled bool + DeploymentConfigEnabled bool + RolloutStrategyAnnotation string +} + +// Registry provides factory methods for creating Workload instances. +type Registry struct { + argoRolloutsEnabled bool + deploymentConfigEnabled bool + rolloutStrategyAnnotation string + listers map[Kind]WorkloadLister +} + +// NewRegistry creates a new workload registry. +func NewRegistry(opts RegistryOptions) *Registry { + r := &Registry{ + argoRolloutsEnabled: opts.ArgoRolloutsEnabled, + deploymentConfigEnabled: opts.DeploymentConfigEnabled, + rolloutStrategyAnnotation: opts.RolloutStrategyAnnotation, + listers: map[Kind]WorkloadLister{ + KindDeployment: listDeployments, + KindDaemonSet: listDaemonSets, + KindStatefulSet: listStatefulSets, + KindJob: listJobs, + KindCronJob: listCronJobs, + }, + } + if opts.ArgoRolloutsEnabled { + // Use closure to capture the strategy annotation + strategyAnnotation := opts.RolloutStrategyAnnotation + r.listers[KindArgoRollout] = func(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { + var list argorolloutv1alpha1.RolloutList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]Workload, len(list.Items)) + for i := range list.Items { + result[i] = NewRolloutWorkload(&list.Items[i], strategyAnnotation) + } + return result, nil + } + } + if opts.DeploymentConfigEnabled { + r.listers[KindDeploymentConfig] = listDeploymentConfigs + } + return r +} + +// ListerFor returns the lister function for the given kind, or nil if not found. +func (r *Registry) ListerFor(kind Kind) WorkloadLister { + return r.listers[kind] +} + +// SupportedKinds returns all supported workload kinds. 
+func (r *Registry) SupportedKinds() []Kind { + kinds := []Kind{ + KindDeployment, + KindDaemonSet, + KindStatefulSet, + KindJob, + KindCronJob, + } + if r.argoRolloutsEnabled { + kinds = append(kinds, KindArgoRollout) + } + if r.deploymentConfigEnabled { + kinds = append(kinds, KindDeploymentConfig) + } + return kinds +} + +// FromObject creates a Workload from a Kubernetes object. +func (r *Registry) FromObject(obj client.Object) (Workload, error) { + switch o := obj.(type) { + case *appsv1.Deployment: + return NewDeploymentWorkload(o), nil + case *appsv1.DaemonSet: + return NewDaemonSetWorkload(o), nil + case *appsv1.StatefulSet: + return NewStatefulSetWorkload(o), nil + case *batchv1.Job: + return NewJobWorkload(o), nil + case *batchv1.CronJob: + return NewCronJobWorkload(o), nil + case *argorolloutv1alpha1.Rollout: + if !r.argoRolloutsEnabled { + return nil, fmt.Errorf("argo Rollouts support is not enabled") + } + return NewRolloutWorkload(o, r.rolloutStrategyAnnotation), nil + case *openshiftv1.DeploymentConfig: + if !r.deploymentConfigEnabled { + return nil, fmt.Errorf("openShift DeploymentConfig support is not enabled") + } + return NewDeploymentConfigWorkload(o), nil + default: + return nil, fmt.Errorf("unsupported object type: %T", obj) + } +} + +// kindAliases maps string representations to Kind constants. +// Supports lowercase, title case, and plural forms for user convenience. +var kindAliases = map[string]Kind{ + "deployment": KindDeployment, + "deployments": KindDeployment, + "daemonset": KindDaemonSet, + "daemonsets": KindDaemonSet, + "statefulset": KindStatefulSet, + "statefulsets": KindStatefulSet, + "rollout": KindArgoRollout, + "rollouts": KindArgoRollout, + "job": KindJob, + "jobs": KindJob, + "cronjob": KindCronJob, + "cronjobs": KindCronJob, + "deploymentconfig": KindDeploymentConfig, + "deploymentconfigs": KindDeploymentConfig, +} + +// KindFromString converts a string to a Kind. 
+func KindFromString(s string) (Kind, error) { + if k, ok := kindAliases[strings.ToLower(s)]; ok { + return k, nil + } + return "", fmt.Errorf("unknown workload kind: %s", s) +} diff --git a/internal/pkg/workload/registry_test.go b/internal/pkg/workload/registry_test.go new file mode 100644 index 000000000..b84830fae --- /dev/null +++ b/internal/pkg/workload/registry_test.go @@ -0,0 +1,366 @@ +package workload + +import ( + "testing" + + argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + openshiftv1 "github.com/openshift/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestNewRegistry_WithoutArgoRollouts(t *testing.T) { + r := NewRegistry(RegistryOptions{ArgoRolloutsEnabled: false}) + + kinds := r.SupportedKinds() + if len(kinds) != 5 { + t.Errorf("SupportedKinds() = %d kinds, want 5", len(kinds)) + } + + for _, k := range kinds { + if k == KindArgoRollout { + t.Error("SupportedKinds() should not include ArgoRollout when disabled") + } + } + + if r.ListerFor(KindArgoRollout) != nil { + t.Error("ListerFor(KindArgoRollout) should return nil when disabled") + } +} + +func TestNewRegistry_WithArgoRollouts(t *testing.T) { + r := NewRegistry(RegistryOptions{ArgoRolloutsEnabled: true}) + + kinds := r.SupportedKinds() + if len(kinds) != 6 { + t.Errorf("SupportedKinds() = %d kinds, want 6", len(kinds)) + } + + found := false + for _, k := range kinds { + if k == KindArgoRollout { + found = true + break + } + } + if !found { + t.Error("SupportedKinds() should include ArgoRollout when enabled") + } + + if r.ListerFor(KindArgoRollout) == nil { + t.Error("ListerFor(KindArgoRollout) should return a function when enabled") + } +} + +func TestRegistry_ListerFor_AllKinds(t *testing.T) { + r := NewRegistry(RegistryOptions{ArgoRolloutsEnabled: true}) + + tests := []struct { + kind Kind + wantNil bool + }{ + {KindDeployment, false}, + 
{KindDaemonSet, false}, + {KindStatefulSet, false}, + {KindJob, false}, + {KindCronJob, false}, + {KindArgoRollout, false}, + {Kind("unknown"), true}, + } + + for _, tt := range tests { + lister := r.ListerFor(tt.kind) + if (lister == nil) != tt.wantNil { + t.Errorf("ListerFor(%s) = nil? %v, want nil? %v", tt.kind, lister == nil, tt.wantNil) + } + } +} + +func TestRegistry_FromObject_Deployment(t *testing.T) { + r := NewRegistry(RegistryOptions{}) + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(deploy) + if err != nil { + t.Fatalf("FromObject(Deployment) error = %v", err) + } + if w.Kind() != KindDeployment { + t.Errorf("FromObject(Deployment).Kind() = %v, want %v", w.Kind(), KindDeployment) + } +} + +func TestRegistry_FromObject_DaemonSet(t *testing.T) { + r := NewRegistry(RegistryOptions{}) + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(ds) + if err != nil { + t.Fatalf("FromObject(DaemonSet) error = %v", err) + } + if w.Kind() != KindDaemonSet { + t.Errorf("FromObject(DaemonSet).Kind() = %v, want %v", w.Kind(), KindDaemonSet) + } +} + +func TestRegistry_FromObject_StatefulSet(t *testing.T) { + r := NewRegistry(RegistryOptions{}) + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(sts) + if err != nil { + t.Fatalf("FromObject(StatefulSet) error = %v", err) + } + if w.Kind() != KindStatefulSet { + t.Errorf("FromObject(StatefulSet).Kind() = %v, want %v", w.Kind(), KindStatefulSet) + } +} + +func TestRegistry_FromObject_Job(t *testing.T) { + r := NewRegistry(RegistryOptions{}) + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(job) + if err != nil { + t.Fatalf("FromObject(Job) error = %v", err) + } + if w.Kind() != KindJob { + t.Errorf("FromObject(Job).Kind() 
= %v, want %v", w.Kind(), KindJob) + } +} + +func TestRegistry_FromObject_CronJob(t *testing.T) { + r := NewRegistry(RegistryOptions{}) + cj := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(cj) + if err != nil { + t.Fatalf("FromObject(CronJob) error = %v", err) + } + if w.Kind() != KindCronJob { + t.Errorf("FromObject(CronJob).Kind() = %v, want %v", w.Kind(), KindCronJob) + } +} + +func TestRegistry_FromObject_Rollout_Enabled(t *testing.T) { + r := NewRegistry(RegistryOptions{ArgoRolloutsEnabled: true}) + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(rollout) + if err != nil { + t.Fatalf("FromObject(Rollout) error = %v", err) + } + if w.Kind() != KindArgoRollout { + t.Errorf("FromObject(Rollout).Kind() = %v, want %v", w.Kind(), KindArgoRollout) + } +} + +func TestRegistry_FromObject_Rollout_Disabled(t *testing.T) { + r := NewRegistry(RegistryOptions{}) + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + _, err := r.FromObject(rollout) + if err == nil { + t.Error("FromObject(Rollout) should return error when Argo Rollouts disabled") + } +} + +func TestRegistry_FromObject_UnsupportedType(t *testing.T) { + r := NewRegistry(RegistryOptions{}) + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + _, err := r.FromObject(cm) + if err == nil { + t.Error("FromObject(ConfigMap) should return error for unsupported type") + } +} + +func TestKindFromString(t *testing.T) { + tests := []struct { + input string + want Kind + wantErr bool + }{ + // Lowercase + {"deployment", KindDeployment, false}, + {"daemonset", KindDaemonSet, false}, + {"statefulset", KindStatefulSet, false}, + {"job", KindJob, false}, + {"cronjob", KindCronJob, false}, + {"rollout", KindArgoRollout, false}, + // Plural forms + 
{"deployments", KindDeployment, false}, + {"daemonsets", KindDaemonSet, false}, + {"statefulsets", KindStatefulSet, false}, + {"jobs", KindJob, false}, + {"cronjobs", KindCronJob, false}, + {"rollouts", KindArgoRollout, false}, + // Mixed case + {"Deployment", KindDeployment, false}, + {"DAEMONSET", KindDaemonSet, false}, + {"StatefulSet", KindStatefulSet, false}, + // Unknown + {"unknown", "", true}, + {"replicaset", "", true}, + {"", "", true}, + } + + for _, tt := range tests { + got, err := KindFromString(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("KindFromString(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr) + continue + } + if got != tt.want { + t.Errorf("KindFromString(%q) = %v, want %v", tt.input, got, tt.want) + } + } +} + +func TestNewLister(t *testing.T) { + r := NewRegistry(RegistryOptions{}) + l := NewLister(nil, r, nil) + + if l == nil { + t.Fatal("NewLister should not return nil") + } + if l.Registry != r { + t.Error("NewLister should set Registry") + } +} + +// DeploymentConfig registry tests +func TestNewRegistry_WithDeploymentConfig(t *testing.T) { + r := NewRegistry(RegistryOptions{DeploymentConfigEnabled: true}) + + kinds := r.SupportedKinds() + if len(kinds) != 6 { + t.Errorf("SupportedKinds() = %d kinds, want 6", len(kinds)) + } + + found := false + for _, k := range kinds { + if k == KindDeploymentConfig { + found = true + break + } + } + if !found { + t.Error("SupportedKinds() should include DeploymentConfig when enabled") + } + + if r.ListerFor(KindDeploymentConfig) == nil { + t.Error("ListerFor(KindDeploymentConfig) should return a function when enabled") + } +} + +func TestNewRegistry_WithoutDeploymentConfig(t *testing.T) { + r := NewRegistry(RegistryOptions{DeploymentConfigEnabled: false}) + + for _, k := range r.SupportedKinds() { + if k == KindDeploymentConfig { + t.Error("SupportedKinds() should not include DeploymentConfig when disabled") + } + } + + if r.ListerFor(KindDeploymentConfig) != nil { + 
t.Error("ListerFor(KindDeploymentConfig) should return nil when disabled") + } +} + +func TestNewRegistry_WithBothOptionalWorkloads(t *testing.T) { + r := NewRegistry(RegistryOptions{ + ArgoRolloutsEnabled: true, + DeploymentConfigEnabled: true, + }) + + kinds := r.SupportedKinds() + if len(kinds) != 7 { + t.Errorf("SupportedKinds() = %d kinds, want 7 (5 base + ArgoRollout + DeploymentConfig)", len(kinds)) + } + + foundRollout := false + foundDC := false + for _, k := range kinds { + if k == KindArgoRollout { + foundRollout = true + } + if k == KindDeploymentConfig { + foundDC = true + } + } + if !foundRollout { + t.Error("SupportedKinds() should include ArgoRollout") + } + if !foundDC { + t.Error("SupportedKinds() should include DeploymentConfig") + } +} + +func TestRegistry_FromObject_DeploymentConfig_Enabled(t *testing.T) { + r := NewRegistry(RegistryOptions{DeploymentConfigEnabled: true}) + dc := &openshiftv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(dc) + if err != nil { + t.Fatalf("FromObject(DeploymentConfig) error = %v", err) + } + if w.Kind() != KindDeploymentConfig { + t.Errorf("FromObject(DeploymentConfig).Kind() = %v, want %v", w.Kind(), KindDeploymentConfig) + } +} + +func TestRegistry_FromObject_DeploymentConfig_Disabled(t *testing.T) { + r := NewRegistry(RegistryOptions{DeploymentConfigEnabled: false}) + dc := &openshiftv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + _, err := r.FromObject(dc) + if err == nil { + t.Error("FromObject(DeploymentConfig) should return error when DeploymentConfig disabled") + } +} + +func TestKindFromString_DeploymentConfig(t *testing.T) { + tests := []struct { + input string + want Kind + wantErr bool + }{ + {"deploymentconfig", KindDeploymentConfig, false}, + {"deploymentconfigs", KindDeploymentConfig, false}, + {"DeploymentConfig", KindDeploymentConfig, false}, + {"DEPLOYMENTCONFIG", 
KindDeploymentConfig, false}, + } + + for _, tt := range tests { + got, err := KindFromString(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("KindFromString(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr) + continue + } + if got != tt.want { + t.Errorf("KindFromString(%q) = %v, want %v", tt.input, got, tt.want) + } + } +} diff --git a/internal/pkg/workload/rollout.go b/internal/pkg/workload/rollout.go new file mode 100644 index 000000000..ad8d8989f --- /dev/null +++ b/internal/pkg/workload/rollout.go @@ -0,0 +1,126 @@ +package workload + +import ( + "context" + "fmt" + "time" + + argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// RolloutStrategy defines how Argo Rollouts are updated. +type RolloutStrategy string + +const ( + // RolloutStrategyRollout performs a standard rollout update. + RolloutStrategyRollout RolloutStrategy = "rollout" + + // RolloutStrategyRestart sets the restartAt field to trigger a restart. + RolloutStrategyRestart RolloutStrategy = "restart" +) + +// rolloutAccessor implements PodTemplateAccessor for Rollout. +type rolloutAccessor struct { + rollout *argorolloutv1alpha1.Rollout +} + +func (a *rolloutAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.rollout.Spec.Template +} + +func (a *rolloutAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.rollout.ObjectMeta +} + +// RolloutWorkload wraps an Argo Rollout. +type RolloutWorkload struct { + *BaseWorkload[*argorolloutv1alpha1.Rollout] + strategyAnnotation string +} + +// NewRolloutWorkload creates a new RolloutWorkload. +// The strategyAnnotation parameter specifies the annotation key used to determine +// the rollout strategy (from config.Annotations.RolloutStrategy). 
+func NewRolloutWorkload(r *argorolloutv1alpha1.Rollout, strategyAnnotation string) *RolloutWorkload { + original := r.DeepCopy() + accessor := &rolloutAccessor{rollout: r} + return &RolloutWorkload{ + BaseWorkload: NewBaseWorkload(r, original, accessor, KindArgoRollout), + strategyAnnotation: strategyAnnotation, + } +} + +// Ensure RolloutWorkload implements Workload. +var _ Workload = (*RolloutWorkload)(nil) + +// Update updates the Rollout. It uses the rollout strategy annotation to determine +// whether to do a standard rollout or set the restartAt field. +func (w *RolloutWorkload) Update(ctx context.Context, c client.Client) error { + strategy := w.getStrategy() + switch strategy { + case RolloutStrategyRestart: + // Set restartAt field to trigger a restart + restartAt := metav1.NewTime(time.Now()) + w.Object().Spec.RestartAt = &restartAt + } + return c.Patch(ctx, w.Object(), client.MergeFrom(w.Original()), client.FieldOwner(FieldManager)) +} + +// getStrategy returns the rollout strategy from the annotation. +func (w *RolloutWorkload) getStrategy() RolloutStrategy { + annotations := w.Object().GetAnnotations() + if annotations == nil { + return RolloutStrategyRollout + } + strategy := annotations[w.strategyAnnotation] + switch RolloutStrategy(strategy) { + case RolloutStrategyRestart: + return RolloutStrategyRestart + default: + return RolloutStrategyRollout + } +} + +func (w *RolloutWorkload) DeepCopy() Workload { + return NewRolloutWorkload(w.Object().DeepCopy(), w.strategyAnnotation) +} + +// GetRollout returns the underlying Rollout for special handling. +func (w *RolloutWorkload) GetRollout() *argorolloutv1alpha1.Rollout { + return w.Object() +} + +// GetStrategy returns the configured rollout strategy. +func (w *RolloutWorkload) GetStrategy() RolloutStrategy { + return w.getStrategy() +} + +// String returns a string representation of the strategy. 
+func (s RolloutStrategy) String() string { + return string(s) +} + +// ToRolloutStrategy converts a string to RolloutStrategy. +func ToRolloutStrategy(s string) RolloutStrategy { + switch RolloutStrategy(s) { + case RolloutStrategyRestart: + return RolloutStrategyRestart + case RolloutStrategyRollout: + return RolloutStrategyRollout + default: + return RolloutStrategyRollout + } +} + +// Validate checks if the rollout strategy is valid. +func (s RolloutStrategy) Validate() error { + switch s { + case RolloutStrategyRollout, RolloutStrategyRestart: + return nil + default: + return fmt.Errorf("invalid rollout strategy: %s", s) + } +} diff --git a/internal/pkg/workload/statefulset.go b/internal/pkg/workload/statefulset.go new file mode 100644 index 000000000..8e9d1e48c --- /dev/null +++ b/internal/pkg/workload/statefulset.go @@ -0,0 +1,46 @@ +package workload + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// statefulSetAccessor implements PodTemplateAccessor for StatefulSet. +type statefulSetAccessor struct { + statefulset *appsv1.StatefulSet +} + +func (a *statefulSetAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.statefulset.Spec.Template +} + +func (a *statefulSetAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.statefulset.ObjectMeta +} + +// StatefulSetWorkload wraps a Kubernetes StatefulSet. +type StatefulSetWorkload struct { + *BaseWorkload[*appsv1.StatefulSet] +} + +// NewStatefulSetWorkload creates a new StatefulSetWorkload. +func NewStatefulSetWorkload(s *appsv1.StatefulSet) *StatefulSetWorkload { + original := s.DeepCopy() + accessor := &statefulSetAccessor{statefulset: s} + return &StatefulSetWorkload{ + BaseWorkload: NewBaseWorkload(s, original, accessor, KindStatefulSet), + } +} + +// Ensure StatefulSetWorkload implements Workload. 
+var _ Workload = (*StatefulSetWorkload)(nil) + +func (w *StatefulSetWorkload) DeepCopy() Workload { + return NewStatefulSetWorkload(w.Object().DeepCopy()) +} + +// GetStatefulSet returns the underlying StatefulSet for special handling. +func (w *StatefulSetWorkload) GetStatefulSet() *appsv1.StatefulSet { + return w.Object() +} diff --git a/internal/pkg/workload/uses.go b/internal/pkg/workload/uses.go new file mode 100644 index 000000000..fd37a2f3c --- /dev/null +++ b/internal/pkg/workload/uses.go @@ -0,0 +1,77 @@ +package workload + +import corev1 "k8s.io/api/core/v1" + +// SpecUsesConfigMap checks if a PodSpec references the named ConfigMap. +func SpecUsesConfigMap(spec *corev1.PodSpec, name string) bool { + for _, vol := range spec.Volumes { + if vol.ConfigMap != nil && vol.ConfigMap.Name == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.ConfigMap != nil && source.ConfigMap.Name == name { + return true + } + } + } + } + + if containersUseConfigMap(spec.Containers, name) { + return true + } + return containersUseConfigMap(spec.InitContainers, name) +} + +func containersUseConfigMap(containers []corev1.Container, name string) bool { + for _, container := range containers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + return false +} + +// SpecUsesSecret checks if a PodSpec references the named Secret. 
+func SpecUsesSecret(spec *corev1.PodSpec, name string) bool {
+	for _, vol := range spec.Volumes {
+		if vol.Secret != nil && vol.Secret.SecretName == name {
+			return true
+		}
+		if vol.Projected != nil {
+			for _, source := range vol.Projected.Sources {
+				if source.Secret != nil && source.Secret.Name == name {
+					return true
+				}
+			}
+		}
+	}
+
+	if containersUseSecret(spec.Containers, name) {
+		return true
+	}
+	return containersUseSecret(spec.InitContainers, name)
+}
+
+func containersUseSecret(containers []corev1.Container, name string) bool {
+	for _, container := range containers {
+		for _, envFrom := range container.EnvFrom {
+			if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name {
+				return true
+			}
+		}
+		for _, env := range container.Env {
+			if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/internal/pkg/workload/workload_test.go b/internal/pkg/workload/workload_test.go
new file mode 100644
index 000000000..084eb1e57
--- /dev/null
+++ b/internal/pkg/workload/workload_test.go
@@ -0,0 +1,1768 @@
+package workload
+
+import (
+	"testing"
+
+	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/stakater/Reloader/internal/pkg/testutil"
+)
+
+// testRolloutStrategyAnnotation is the annotation key used in tests for rollout strategy.
+const testRolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy"
+
+// addEnvVarConfigMapRef adds an env var with a ConfigMapKeyRef to the first container; addEnvVarSecretRef does the same with a SecretKeyRef.
+func addEnvVarConfigMapRef(containers []corev1.Container, envName, configMapName, key string) { + if len(containers) > 0 { + containers[0].Env = append(containers[0].Env, corev1.EnvVar{ + Name: envName, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + Key: key, + }, + }, + }) + } +} + +func addEnvVarSecretRef(containers []corev1.Container, envName, secretName, key string) { + if len(containers) > 0 { + containers[0].Env = append(containers[0].Env, corev1.EnvVar{ + Name: envName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: key, + }, + }, + }) + } +} + +func TestDeploymentWorkload_BasicGetters(t *testing.T) { + deploy := testutil.NewDeployment("test-deploy", "test-ns", map[string]string{"key": "value"}) + + w := NewDeploymentWorkload(deploy) + + if w.Kind() != KindDeployment { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindDeployment) + } + if w.GetName() != "test-deploy" { + t.Errorf("GetName() = %v, want test-deploy", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != deploy { + t.Error("GetObject() should return the underlying deployment") + } +} + +func TestDeploymentWorkload_PodTemplateAnnotations(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Annotations["existing"] = "annotation" + + w := NewDeploymentWorkload(deploy) + + // Test get + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + // Test set + 
w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestDeploymentWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Annotations = nil + + w := NewDeploymentWorkload(deploy) + + // Should initialize nil map + annotations := w.GetPodTemplateAnnotations() + if annotations == nil { + t.Error("GetPodTemplateAnnotations should initialize nil map") + } + + // Should work with nil initial map + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil initial map") + } +} + +func TestDeploymentWorkload_Containers(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{{Name: "init", Image: "busybox"}} + + w := NewDeploymentWorkload(deploy) + + // Test get containers + containers := w.GetContainers() + if len(containers) != 1 || containers[0].Name != "main" { + t.Errorf("GetContainers() = %v, want [main]", containers) + } + + // Test get init containers + initContainers := w.GetInitContainers() + if len(initContainers) != 1 || initContainers[0].Name != "init" { + t.Errorf("GetInitContainers() = %v, want [init]", initContainers) + } + + // Test set containers + newContainers := []corev1.Container{{Name: "new-main", Image: "alpine"}} + w.SetContainers(newContainers) + if w.GetContainers()[0].Name != "new-main" { + t.Error("SetContainers should update containers") + } + + // Test set init containers + newInitContainers := []corev1.Container{{Name: "new-init", Image: "alpine"}} + w.SetInitContainers(newInitContainers) + if w.GetInitContainers()[0].Name != "new-init" { + t.Error("SetInitContainers should update init containers") + } +} + +func TestDeploymentWorkload_Volumes(t 
*testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, + } + + w := NewDeploymentWorkload(deploy) + + volumes := w.GetVolumes() + if len(volumes) != 2 { + t.Errorf("GetVolumes() length = %d, want 2", len(volumes)) + } +} + +func TestDeploymentWorkload_UsesConfigMap_Volume(t *testing.T) { + deploy := testutil.NewDeploymentWithVolume("test", "default", "my-config", "") + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("my-config") { + t.Error("UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestDeploymentWorkload_UsesConfigMap_ProjectedVolume(t *testing.T) { + deploy := testutil.NewDeploymentWithProjectedVolume("test", "default", "projected-config", "") + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("projected-config") { + t.Error("UsesConfigMap should return true for projected ConfigMap volume") + } +} + +func TestDeploymentWorkload_UsesConfigMap_EnvFrom(t *testing.T) { + deploy := testutil.NewDeploymentWithEnvFrom("test", "default", "env-config", "") + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("env-config") { + t.Error("UsesConfigMap should return true for envFrom ConfigMap") + } +} + +func TestDeploymentWorkload_UsesConfigMap_EnvVar(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + addEnvVarConfigMapRef(deploy.Spec.Template.Spec.Containers, "CONFIG_VALUE", "var-config", "some-key") + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("var-config") { + t.Error("UsesConfigMap should return true for env var ConfigMapKeyRef") + } +} + +func TestDeploymentWorkload_UsesConfigMap_InitContainer(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ 
+ { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "init-config"}, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("init-config") { + t.Error("UsesConfigMap should return true for init container ConfigMap") + } +} + +func TestDeploymentWorkload_UsesSecret_Volume(t *testing.T) { + deploy := testutil.NewDeploymentWithVolume("test", "default", "", "my-secret") + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("my-secret") { + t.Error("UsesSecret should return true for Secret volume") + } + if w.UsesSecret("other-secret") { + t.Error("UsesSecret should return false for non-existent Secret") + } +} + +func TestDeploymentWorkload_UsesSecret_ProjectedVolume(t *testing.T) { + deploy := testutil.NewDeploymentWithProjectedVolume("test", "default", "", "projected-secret") + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("projected-secret") { + t.Error("UsesSecret should return true for projected Secret volume") + } +} + +func TestDeploymentWorkload_UsesSecret_EnvFrom(t *testing.T) { + deploy := testutil.NewDeploymentWithEnvFrom("test", "default", "", "env-secret") + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("env-secret") { + t.Error("UsesSecret should return true for envFrom Secret") + } +} + +func TestDeploymentWorkload_UsesSecret_EnvVar(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + addEnvVarSecretRef(deploy.Spec.Template.Spec.Containers, "SECRET_VALUE", "var-secret", "some-key") + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("var-secret") { + t.Error("UsesSecret should return true for env var SecretKeyRef") + } +} + +func TestDeploymentWorkload_UsesSecret_InitContainer(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: 
[]corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "init-secret"}, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("init-secret") { + t.Error("UsesSecret should return true for init container Secret") + } +} + +func TestDeploymentWorkload_GetEnvFromSources(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{{ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}}, + }, + { + Name: "sidecar", + EnvFrom: []corev1.EnvFromSource{{SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}}, + }, + } + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{{ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "init-cm"}}}}, + }, + } + + w := NewDeploymentWorkload(deploy) + + sources := w.GetEnvFromSources() + if len(sources) != 3 { + t.Errorf("GetEnvFromSources() returned %d sources, want 3", len(sources)) + } +} + +func TestDeploymentWorkload_DeepCopy(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + + w := NewDeploymentWorkload(deploy) + copy := w.DeepCopy() + + // Modify original + w.SetPodTemplateAnnotation("modified", "true") + + // Copy should not be affected + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +func TestDeploymentWorkload_GetOwnerReferences(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.OwnerReferences = []metav1.OwnerReference{ + {APIVersion: "apps/v1", Kind: "ReplicaSet", Name: "test-rs"}, + } + + w := NewDeploymentWorkload(deploy) + + 
refs := w.GetOwnerReferences() + if len(refs) != 1 || refs[0].Name != "test-rs" { + t.Errorf("GetOwnerReferences() = %v, want owner ref to test-rs", refs) + } +} + +// DaemonSet tests +func TestDaemonSetWorkload_BasicGetters(t *testing.T) { + ds := testutil.NewDaemonSet("test-ds", "test-ns", map[string]string{"key": "value"}) + + w := NewDaemonSetWorkload(ds) + + if w.Kind() != KindDaemonSet { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindDaemonSet) + } + if w.GetName() != "test-ds" { + t.Errorf("GetName() = %v, want test-ds", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != ds { + t.Error("GetObject() should return the underlying daemonset") + } +} + +func TestDaemonSetWorkload_PodTemplateAnnotations(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Annotations["existing"] = "annotation" + + w := NewDaemonSetWorkload(ds) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestDaemonSetWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Annotations = nil + + w := NewDaemonSetWorkload(ds) + + annotations := w.GetPodTemplateAnnotations() + if annotations == nil { + t.Error("GetPodTemplateAnnotations should initialize nil map") + } + + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil 
initial map") + } +} + +func TestDaemonSetWorkload_Containers(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.InitContainers = []corev1.Container{{Name: "init", Image: "busybox"}} + + w := NewDaemonSetWorkload(ds) + + containers := w.GetContainers() + if len(containers) != 1 || containers[0].Name != "main" { + t.Errorf("GetContainers() = %v, want [main]", containers) + } + + initContainers := w.GetInitContainers() + if len(initContainers) != 1 || initContainers[0].Name != "init" { + t.Errorf("GetInitContainers() = %v, want [init]", initContainers) + } + + newContainers := []corev1.Container{{Name: "new-main", Image: "alpine"}} + w.SetContainers(newContainers) + if w.GetContainers()[0].Name != "new-main" { + t.Error("SetContainers should update containers") + } + + newInitContainers := []corev1.Container{{Name: "new-init", Image: "alpine"}} + w.SetInitContainers(newInitContainers) + if w.GetInitContainers()[0].Name != "new-init" { + t.Error("SetInitContainers should update init containers") + } +} + +func TestDaemonSetWorkload_Volumes(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Volumes = []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, + } + + w := NewDaemonSetWorkload(ds) + + volumes := w.GetVolumes() + if len(volumes) != 2 { + t.Errorf("GetVolumes() length = %d, want 2", len(volumes)) + } +} + +func TestDaemonSetWorkload_UsesConfigMap(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "ds-config"}, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + if !w.UsesConfigMap("ds-config") { + t.Error("DaemonSet UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + 
t.Error("UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestDaemonSetWorkload_UsesConfigMap_EnvFrom(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "ds-env-config"}}}, + } + + w := NewDaemonSetWorkload(ds) + + if !w.UsesConfigMap("ds-env-config") { + t.Error("DaemonSet UsesConfigMap should return true for envFrom ConfigMap") + } +} + +func TestDaemonSetWorkload_UsesSecret(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{SecretName: "ds-secret"}, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + if !w.UsesSecret("ds-secret") { + t.Error("DaemonSet UsesSecret should return true for Secret volume") + } + if w.UsesSecret("other-secret") { + t.Error("UsesSecret should return false for non-existent Secret") + } +} + +func TestDaemonSetWorkload_GetEnvFromSources(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + } + ds.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{{SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}}, + }, + } + + w := NewDaemonSetWorkload(ds) + + sources := w.GetEnvFromSources() + if len(sources) != 2 { + t.Errorf("GetEnvFromSources() returned %d sources, want 2", len(sources)) + } +} + +func TestDaemonSetWorkload_DeepCopy(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + + w := NewDaemonSetWorkload(ds) + copy := w.DeepCopy() + 
+ w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +func TestDaemonSetWorkload_GetOwnerReferences(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + ds.OwnerReferences = []metav1.OwnerReference{ + {APIVersion: "apps/v1", Kind: "DaemonSet", Name: "test-owner"}, + } + + w := NewDaemonSetWorkload(ds) + + refs := w.GetOwnerReferences() + if len(refs) != 1 || refs[0].Name != "test-owner" { + t.Errorf("GetOwnerReferences() = %v, want owner ref to test-owner", refs) + } +} + +// StatefulSet tests +func TestStatefulSetWorkload_BasicGetters(t *testing.T) { + sts := testutil.NewStatefulSet("test-sts", "test-ns", map[string]string{"key": "value"}) + + w := NewStatefulSetWorkload(sts) + + if w.Kind() != KindStatefulSet { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindStatefulSet) + } + if w.GetName() != "test-sts" { + t.Errorf("GetName() = %v, want test-sts", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != sts { + t.Error("GetObject() should return the underlying statefulset") + } +} + +func TestStatefulSetWorkload_PodTemplateAnnotations(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Annotations["existing"] = "annotation" + + w := NewStatefulSetWorkload(sts) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new 
annotation") + } +} + +func TestStatefulSetWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Annotations = nil + + w := NewStatefulSetWorkload(sts) + + annotations := w.GetPodTemplateAnnotations() + if annotations == nil { + t.Error("GetPodTemplateAnnotations should initialize nil map") + } + + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil initial map") + } +} + +func TestStatefulSetWorkload_Containers(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.InitContainers = []corev1.Container{{Name: "init", Image: "busybox"}} + + w := NewStatefulSetWorkload(sts) + + containers := w.GetContainers() + if len(containers) != 1 || containers[0].Name != "main" { + t.Errorf("GetContainers() = %v, want [main]", containers) + } + + initContainers := w.GetInitContainers() + if len(initContainers) != 1 || initContainers[0].Name != "init" { + t.Errorf("GetInitContainers() = %v, want [init]", initContainers) + } + + newContainers := []corev1.Container{{Name: "new-main", Image: "alpine"}} + w.SetContainers(newContainers) + if w.GetContainers()[0].Name != "new-main" { + t.Error("SetContainers should update containers") + } + + newInitContainers := []corev1.Container{{Name: "new-init", Image: "alpine"}} + w.SetInitContainers(newInitContainers) + if w.GetInitContainers()[0].Name != "new-init" { + t.Error("SetInitContainers should update init containers") + } +} + +func TestStatefulSetWorkload_Volumes(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Volumes = []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, + } + + w := NewStatefulSetWorkload(sts) + + volumes := w.GetVolumes() + if len(volumes) != 2 { + t.Errorf("GetVolumes() length = %d, want 2", len(volumes)) + } +} + +func 
TestStatefulSetWorkload_UsesConfigMap(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "sts-config"}, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesConfigMap("sts-config") { + t.Error("StatefulSet UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestStatefulSetWorkload_UsesConfigMap_EnvFrom(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "sts-env-config"}}}, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesConfigMap("sts-env-config") { + t.Error("StatefulSet UsesConfigMap should return true for envFrom ConfigMap") + } +} + +func TestStatefulSetWorkload_UsesSecret(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{SecretName: "sts-secret"}, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesSecret("sts-secret") { + t.Error("StatefulSet UsesSecret should return true for Secret volume") + } + if w.UsesSecret("other-secret") { + t.Error("UsesSecret should return false for non-existent Secret") + } +} + +func TestStatefulSetWorkload_UsesSecret_EnvFrom(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: 
"sts-env-secret"}}}, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesSecret("sts-env-secret") { + t.Error("StatefulSet UsesSecret should return true for envFrom Secret") + } +} + +func TestStatefulSetWorkload_GetEnvFromSources(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + } + sts.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{{SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}}, + }, + } + + w := NewStatefulSetWorkload(sts) + + sources := w.GetEnvFromSources() + if len(sources) != 2 { + t.Errorf("GetEnvFromSources() returned %d sources, want 2", len(sources)) + } +} + +func TestStatefulSetWorkload_DeepCopy(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + + w := NewStatefulSetWorkload(sts) + copy := w.DeepCopy() + + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +func TestStatefulSetWorkload_GetOwnerReferences(t *testing.T) { + sts := testutil.NewStatefulSet("test", "default", nil) + sts.OwnerReferences = []metav1.OwnerReference{ + {APIVersion: "apps/v1", Kind: "StatefulSet", Name: "test-owner"}, + } + + w := NewStatefulSetWorkload(sts) + + refs := w.GetOwnerReferences() + if len(refs) != 1 || refs[0].Name != "test-owner" { + t.Errorf("GetOwnerReferences() = %v, want owner ref to test-owner", refs) + } +} + +// Test that workloads implement the interface +func TestWorkloadInterface(t *testing.T) { + var _ Workload = (*DeploymentWorkload)(nil) + var _ Workload = (*DaemonSetWorkload)(nil) + var _ Workload = (*StatefulSetWorkload)(nil) + var _ Workload = 
(*RolloutWorkload)(nil) +} + +// RolloutWorkload tests +func TestRolloutWorkload_BasicGetters(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rollout", + Namespace: "test-ns", + Annotations: map[string]string{ + "key": "value", + }, + }, + } + + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + + if w.Kind() != KindArgoRollout { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindArgoRollout) + } + if w.GetName() != "test-rollout" { + t.Errorf("GetName() = %v, want test-rollout", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != rollout { + t.Error("GetObject() should return the underlying rollout") + } +} + +func TestRolloutWorkload_PodTemplateAnnotations(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: argorolloutv1alpha1.RolloutSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "existing": "annotation", + }, + }, + }, + }, + } + + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + + // Test get + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + // Test set + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestRolloutWorkload_GetStrategy_Default(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + } + + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + + if w.GetStrategy() != 
RolloutStrategyRollout { + t.Errorf("GetStrategy() = %v, want %v (default)", w.GetStrategy(), RolloutStrategyRollout) + } +} + +func TestRolloutWorkload_GetStrategy_Restart(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + testRolloutStrategyAnnotation: "restart", + }, + }, + } + + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + + if w.GetStrategy() != RolloutStrategyRestart { + t.Errorf("GetStrategy() = %v, want %v", w.GetStrategy(), RolloutStrategyRestart) + } +} + +func TestRolloutWorkload_UsesConfigMap_Volume(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: argorolloutv1alpha1.RolloutSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "rollout-config", + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + + if !w.UsesConfigMap("rollout-config") { + t.Error("Rollout UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("Rollout UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestRolloutWorkload_UsesSecret_EnvFrom(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: argorolloutv1alpha1.RolloutSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "rollout-secret", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + + 
if !w.UsesSecret("rollout-secret") { + t.Error("Rollout UsesSecret should return true for Secret envFrom") + } + if w.UsesSecret("other-secret") { + t.Error("Rollout UsesSecret should return false for non-existent Secret") + } +} + +func TestRolloutWorkload_DeepCopy(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: argorolloutv1alpha1.RolloutSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "original": "value", + }, + }, + }, + }, + } + + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + copy := w.DeepCopy() + + // Verify copy is independent + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.(*RolloutWorkload).GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +func TestRolloutStrategy_Validate(t *testing.T) { + tests := []struct { + strategy RolloutStrategy + wantErr bool + }{ + {RolloutStrategyRollout, false}, + {RolloutStrategyRestart, false}, + {RolloutStrategy("invalid"), true}, + {RolloutStrategy(""), true}, + } + + for _, tt := range tests { + err := tt.strategy.Validate() + if (err != nil) != tt.wantErr { + t.Errorf("Validate(%s) error = %v, wantErr %v", tt.strategy, err, tt.wantErr) + } + } +} + +func TestToRolloutStrategy(t *testing.T) { + tests := []struct { + input string + expected RolloutStrategy + }{ + {"rollout", RolloutStrategyRollout}, + {"restart", RolloutStrategyRestart}, + {"invalid", RolloutStrategyRollout}, // defaults to rollout + {"", RolloutStrategyRollout}, // defaults to rollout + } + + for _, tt := range tests { + result := ToRolloutStrategy(tt.input) + if result != tt.expected { + t.Errorf("ToRolloutStrategy(%s) = %v, want %v", tt.input, result, tt.expected) + } + } +} + +// Job tests +func TestJobWorkload_BasicGetters(t *testing.T) { + job := testutil.NewJobWithAnnotations("test-job", 
"test-ns", map[string]string{"key": "value"}) + + w := NewJobWorkload(job) + + if w.Kind() != KindJob { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindJob) + } + if w.GetName() != "test-job" { + t.Errorf("GetName() = %v, want test-job", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != job { + t.Error("GetObject() should return the underlying job") + } +} + +func TestJobWorkload_PodTemplateAnnotations(t *testing.T) { + job := testutil.NewJob("test", "default") + job.Spec.Template.Annotations["existing"] = "annotation" + + w := NewJobWorkload(job) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestJobWorkload_UsesConfigMap(t *testing.T) { + job := testutil.NewJob("test", "default") + job.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "job-config"}, + }, + }, + }, + } + + w := NewJobWorkload(job) + + if !w.UsesConfigMap("job-config") { + t.Error("Job UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("Job UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestJobWorkload_UsesSecret(t *testing.T) { + job := testutil.NewJob("test", "default") + job.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + 
LocalObjectReference: corev1.LocalObjectReference{Name: "job-secret"}, + }, + }, + } + + w := NewJobWorkload(job) + + if !w.UsesSecret("job-secret") { + t.Error("Job UsesSecret should return true for Secret envFrom") + } +} + +func TestJobWorkload_DeepCopy(t *testing.T) { + job := testutil.NewJob("test", "default") + job.Spec.Template.Annotations["original"] = "value" + + w := NewJobWorkload(job) + copy := w.DeepCopy() + + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +// CronJob tests +func TestCronJobWorkload_BasicGetters(t *testing.T) { + cj := testutil.NewCronJobWithAnnotations("test-cronjob", "test-ns", map[string]string{"key": "value"}) + + w := NewCronJobWorkload(cj) + + if w.Kind() != KindCronJob { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindCronJob) + } + if w.GetName() != "test-cronjob" { + t.Errorf("GetName() = %v, want test-cronjob", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != cj { + t.Error("GetObject() should return the underlying cronjob") + } +} + +func TestCronJobWorkload_PodTemplateAnnotations(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + cj.Spec.JobTemplate.Spec.Template.Annotations["existing"] = "annotation" + + w := NewCronJobWorkload(cj) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func 
TestCronJobWorkload_UsesConfigMap(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + cj.Spec.JobTemplate.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "cronjob-config"}, + }, + }, + }, + } + + w := NewCronJobWorkload(cj) + + if !w.UsesConfigMap("cronjob-config") { + t.Error("CronJob UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("CronJob UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestCronJobWorkload_UsesSecret(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + addEnvVarSecretRef(cj.Spec.JobTemplate.Spec.Template.Spec.Containers, "SECRET_VALUE", "cronjob-secret", "key") + + w := NewCronJobWorkload(cj) + + if !w.UsesSecret("cronjob-secret") { + t.Error("CronJob UsesSecret should return true for Secret envVar") + } +} + +func TestCronJobWorkload_DeepCopy(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + cj.Spec.JobTemplate.Spec.Template.Annotations["original"] = "value" + + w := NewCronJobWorkload(cj) + copy := w.DeepCopy() + + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +// Test that Job and CronJob implement the interface +func TestJobCronJobWorkloadInterface(t *testing.T) { + var _ Workload = (*JobWorkload)(nil) + var _ Workload = (*CronJobWorkload)(nil) +} + +// DeploymentConfig tests +func TestDeploymentConfigWorkload_BasicGetters(t *testing.T) { + dc := testutil.NewDeploymentConfig("test-dc", "test-ns", map[string]string{"key": "value"}) + + w := NewDeploymentConfigWorkload(dc) + + if w.Kind() != KindDeploymentConfig { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindDeploymentConfig) + } + if 
w.GetName() != "test-dc" { + t.Errorf("GetName() = %v, want test-dc", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != dc { + t.Error("GetObject() should return the underlying deploymentconfig") + } +} + +func TestDeploymentConfigWorkload_PodTemplateAnnotations(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Annotations = map[string]string{"existing": "annotation"} + + w := NewDeploymentConfigWorkload(dc) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestDeploymentConfigWorkload_PodTemplateAnnotations_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + // Should handle nil template gracefully + annotations := w.GetPodTemplateAnnotations() + if annotations != nil { + t.Error("GetPodTemplateAnnotations should return nil for nil template") + } + + // SetPodTemplateAnnotation should initialize template + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil template") + } +} + +func TestDeploymentConfigWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Annotations = nil + + w := NewDeploymentConfigWorkload(dc) + + // Should initialize nil map + annotations := 
w.GetPodTemplateAnnotations() + if annotations == nil { + t.Error("GetPodTemplateAnnotations should initialize nil map") + } + + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil initial map") + } +} + +func TestDeploymentConfigWorkload_Containers(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Containers = []corev1.Container{ + {Name: "main", Image: "nginx"}, + } + dc.Spec.Template.Spec.InitContainers = []corev1.Container{ + {Name: "init", Image: "busybox"}, + } + + w := NewDeploymentConfigWorkload(dc) + + containers := w.GetContainers() + if len(containers) != 1 || containers[0].Name != "main" { + t.Errorf("GetContainers() = %v, want [main]", containers) + } + + initContainers := w.GetInitContainers() + if len(initContainers) != 1 || initContainers[0].Name != "init" { + t.Errorf("GetInitContainers() = %v, want [init]", initContainers) + } + + newContainers := []corev1.Container{{Name: "new-main", Image: "alpine"}} + w.SetContainers(newContainers) + if w.GetContainers()[0].Name != "new-main" { + t.Error("SetContainers should update containers") + } + + newInitContainers := []corev1.Container{{Name: "new-init", Image: "alpine"}} + w.SetInitContainers(newInitContainers) + if w.GetInitContainers()[0].Name != "new-init" { + t.Error("SetInitContainers should update init containers") + } +} + +func TestDeploymentConfigWorkload_Containers_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.GetContainers() != nil { + t.Error("GetContainers should return nil for nil template") + } + if w.GetInitContainers() != nil { + t.Error("GetInitContainers should return nil for nil template") + } + + // SetContainers should initialize template + w.SetContainers([]corev1.Container{{Name: "main"}}) + if 
len(w.GetContainers()) != 1 { + t.Error("SetContainers should work with nil template") + } +} + +func TestDeploymentConfigWorkload_Volumes(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Volumes = []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, + } + + w := NewDeploymentConfigWorkload(dc) + + volumes := w.GetVolumes() + if len(volumes) != 2 { + t.Errorf("GetVolumes() length = %d, want 2", len(volumes)) + } +} + +func TestDeploymentConfigWorkload_Volumes_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.GetVolumes() != nil { + t.Error("GetVolumes should return nil for nil template") + } +} + +func TestDeploymentConfigWorkload_UsesConfigMap_Volume(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "dc-config", + }, + }, + }, + }, + } + + w := NewDeploymentConfigWorkload(dc) + + if !w.UsesConfigMap("dc-config") { + t.Error("DeploymentConfig UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestDeploymentConfigWorkload_UsesConfigMap_EnvFrom(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "dc-env-config", + }, + }, + }, + }, + }, + } + + w := NewDeploymentConfigWorkload(dc) + + if !w.UsesConfigMap("dc-env-config") { + t.Error("DeploymentConfig UsesConfigMap should 
return true for envFrom ConfigMap") + } +} + +func TestDeploymentConfigWorkload_UsesConfigMap_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.UsesConfigMap("any-config") { + t.Error("UsesConfigMap should return false for nil template") + } +} + +func TestDeploymentConfigWorkload_UsesSecret_Volume(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "dc-secret", + }, + }, + }, + } + + w := NewDeploymentConfigWorkload(dc) + + if !w.UsesSecret("dc-secret") { + t.Error("DeploymentConfig UsesSecret should return true for Secret volume") + } + if w.UsesSecret("other-secret") { + t.Error("UsesSecret should return false for non-existent Secret") + } +} + +func TestDeploymentConfigWorkload_UsesSecret_EnvFrom(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "dc-env-secret", + }, + }, + }, + }, + }, + } + + w := NewDeploymentConfigWorkload(dc) + + if !w.UsesSecret("dc-env-secret") { + t.Error("DeploymentConfig UsesSecret should return true for envFrom Secret") + } +} + +func TestDeploymentConfigWorkload_UsesSecret_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.UsesSecret("any-secret") { + t.Error("UsesSecret should return false for nil template") + } +} + +func TestDeploymentConfigWorkload_GetEnvFromSources(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Containers = 
[]corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + }, + }, + } + dc.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}, + }, + }, + } + + w := NewDeploymentConfigWorkload(dc) + + sources := w.GetEnvFromSources() + if len(sources) != 2 { + t.Errorf("GetEnvFromSources() returned %d sources, want 2", len(sources)) + } +} + +func TestDeploymentConfigWorkload_GetEnvFromSources_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.GetEnvFromSources() != nil { + t.Error("GetEnvFromSources should return nil for nil template") + } +} + +func TestDeploymentConfigWorkload_DeepCopy(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Annotations = map[string]string{"original": "value"} + + w := NewDeploymentConfigWorkload(dc) + copy := w.DeepCopy() + + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +func TestDeploymentConfigWorkload_GetOwnerReferences(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: "apps.openshift.io/v1", + Kind: "DeploymentConfig", + Name: "test-owner", + }, + } + + w := NewDeploymentConfigWorkload(dc) + + refs := w.GetOwnerReferences() + if len(refs) != 1 || refs[0].Name != "test-owner" { + t.Errorf("GetOwnerReferences() = %v, want owner ref to test-owner", refs) + } +} + +func TestDeploymentConfigWorkload_GetDeploymentConfig(t *testing.T) { + dc := 
testutil.NewDeploymentConfig("test", "default", nil) + + w := NewDeploymentConfigWorkload(dc) + + if w.GetDeploymentConfig() != dc { + t.Error("GetDeploymentConfig should return the underlying DeploymentConfig") + } +} + +// Test that DeploymentConfig implements the interface +func TestDeploymentConfigWorkloadInterface(t *testing.T) { + var _ Workload = (*DeploymentConfigWorkload)(nil) +} + +// Tests for UpdateStrategy +func TestWorkload_UpdateStrategy(t *testing.T) { + tests := []struct { + name string + workload Workload + expected UpdateStrategy + }{ + { + name: "Deployment uses Patch strategy", + workload: NewDeploymentWorkload(testutil.NewDeployment("test", "default", nil)), + expected: UpdateStrategyPatch, + }, + { + name: "DaemonSet uses Patch strategy", + workload: NewDaemonSetWorkload(testutil.NewDaemonSet("test", "default", nil)), + expected: UpdateStrategyPatch, + }, + { + name: "StatefulSet uses Patch strategy", + workload: NewStatefulSetWorkload(testutil.NewStatefulSet("test", "default", nil)), + expected: UpdateStrategyPatch, + }, + { + name: "Job uses Recreate strategy", + workload: NewJobWorkload(testutil.NewJob("test", "default")), + expected: UpdateStrategyRecreate, + }, + { + name: "CronJob uses CreateNew strategy", + workload: NewCronJobWorkload(testutil.NewCronJob("test", "default")), + expected: UpdateStrategyCreateNew, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.workload.UpdateStrategy(); got != tt.expected { + t.Errorf("UpdateStrategy() = %v, want %v", got, tt.expected) + } + }) + } +} + +// Tests for ResetOriginal +func TestDeploymentWorkload_ResetOriginal(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + w := NewDeploymentWorkload(deploy) + + // Modify the workload + w.SetPodTemplateAnnotation("modified", "true") + + // Original should still not have the annotation + originalAnnotations := w.Original().Spec.Template.Annotations + if originalAnnotations != nil && 
originalAnnotations["modified"] == "true" { + t.Error("Original should not be modified yet") + } + + // Reset original + w.ResetOriginal() + + // Now original should have the annotation + if w.Original().Spec.Template.Annotations["modified"] != "true" { + t.Error("ResetOriginal should update original to match current state") + } +} + +func TestJobWorkload_ResetOriginal(t *testing.T) { + job := testutil.NewJob("test", "default") + w := NewJobWorkload(job) + + // ResetOriginal should be a no-op for Jobs (they don't use strategic merge patch) + w.SetPodTemplateAnnotation("modified", "true") + w.ResetOriginal() // Should not panic or error +} + +func TestCronJobWorkload_ResetOriginal(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + w := NewCronJobWorkload(cj) + + // ResetOriginal should be a no-op for CronJobs + w.SetPodTemplateAnnotation("modified", "true") + w.ResetOriginal() // Should not panic or error +} + +// Tests for BaseWorkload.Original() +func TestDeploymentWorkload_Original(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Annotations = map[string]string{"initial": "value"} + + w := NewDeploymentWorkload(deploy) + + // Modify the current object + w.SetPodTemplateAnnotation("new", "annotation") + + // Original should still have only the initial annotation + original := w.Original() + if original.Spec.Template.Annotations["new"] == "annotation" { + t.Error("Original should not reflect changes to current object") + } + if original.Spec.Template.Annotations["initial"] != "value" { + t.Error("Original should retain initial state") + } +} + +// Tests for PerformSpecialUpdate returning false for standard workloads +func TestDeploymentWorkload_PerformSpecialUpdate(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + w := NewDeploymentWorkload(deploy) + + updated, err := w.PerformSpecialUpdate(t.Context(), nil) + if err != nil { + t.Errorf("PerformSpecialUpdate() error = %v", 
err) + } + if updated { + t.Error("PerformSpecialUpdate() should return false for Deployment") + } +} + +func TestDaemonSetWorkload_PerformSpecialUpdate(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + w := NewDaemonSetWorkload(ds) + + updated, err := w.PerformSpecialUpdate(t.Context(), nil) + if err != nil { + t.Errorf("PerformSpecialUpdate() error = %v", err) + } + if updated { + t.Error("PerformSpecialUpdate() should return false for DaemonSet") + } +} + +func TestStatefulSetWorkload_PerformSpecialUpdate(t *testing.T) { + ss := testutil.NewStatefulSet("test", "default", nil) + w := NewStatefulSetWorkload(ss) + + updated, err := w.PerformSpecialUpdate(t.Context(), nil) + if err != nil { + t.Errorf("PerformSpecialUpdate() error = %v", err) + } + if updated { + t.Error("PerformSpecialUpdate() should return false for StatefulSet") + } +} + +// Test Update returns nil for Job (no-op, uses PerformSpecialUpdate instead) +func TestJobWorkload_Update(t *testing.T) { + job := testutil.NewJob("test", "default") + w := NewJobWorkload(job) + + err := w.Update(t.Context(), nil) + if err != nil { + t.Errorf("Update() should return nil for Job, got %v", err) + } +} + +// Test Update returns nil for CronJob (no-op, uses PerformSpecialUpdate instead) +func TestCronJobWorkload_Update(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + w := NewCronJobWorkload(cj) + + err := w.Update(t.Context(), nil) + if err != nil { + t.Errorf("Update() should return nil for CronJob, got %v", err) + } +} + +// Test GetJob and GetCronJob accessors +func TestJobWorkload_GetJob(t *testing.T) { + job := testutil.NewJob("test", "default") + w := NewJobWorkload(job) + + if w.GetJob() != job { + t.Error("GetJob should return the underlying Job") + } +} + +func TestCronJobWorkload_GetCronJob(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + w := NewCronJobWorkload(cj) + + if w.GetCronJob() != cj { + t.Error("GetCronJob should return the underlying 
CronJob") + } +} + +func TestDeploymentWorkload_GetDeployment(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + w := NewDeploymentWorkload(deploy) + + if w.GetDeployment() != deploy { + t.Error("GetDeployment should return the underlying Deployment") + } +} + +func TestDaemonSetWorkload_GetDaemonSet(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + w := NewDaemonSetWorkload(ds) + + if w.GetDaemonSet() != ds { + t.Error("GetDaemonSet should return the underlying DaemonSet") + } +} + +func TestStatefulSetWorkload_GetStatefulSet(t *testing.T) { + ss := testutil.NewStatefulSet("test", "default", nil) + w := NewStatefulSetWorkload(ss) + + if w.GetStatefulSet() != ss { + t.Error("GetStatefulSet should return the underlying StatefulSet") + } +} + +func TestRolloutWorkload_GetRollout(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + + if w.GetRollout() != rollout { + t.Error("GetRollout should return the underlying Rollout") + } +} diff --git a/main.go b/main.go deleted file mode 100644 index 1c429710c..000000000 --- a/main.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import ( - "os" - - "github.com/stakater/Reloader/internal/pkg/app" -) - -func main() { - if err := app.Run(); err != nil { - os.Exit(1) - } - os.Exit(0) -} diff --git a/pkg/common/common.go b/pkg/common/common.go deleted file mode 100644 index 84d982748..000000000 --- a/pkg/common/common.go +++ /dev/null @@ -1,358 +0,0 @@ -package common - -import ( - "context" - "os" - "regexp" - "strconv" - "strings" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" 
-) - -type Map map[string]string - -type ReloadCheckResult struct { - ShouldReload bool - AutoReload bool -} - -// ReloaderOptions contains all configurable options for the Reloader controller. -// These options control how Reloader behaves when watching for changes in ConfigMaps and Secrets. -type ReloaderOptions struct { - // AutoReloadAll enables automatic reloading of all resources when their corresponding ConfigMaps/Secrets are updated - AutoReloadAll bool `json:"autoReloadAll"` - // ConfigmapUpdateOnChangeAnnotation is the annotation key used to detect changes in ConfigMaps specified by name - ConfigmapUpdateOnChangeAnnotation string `json:"configmapUpdateOnChangeAnnotation"` - // SecretUpdateOnChangeAnnotation is the annotation key used to detect changes in Secrets specified by name - SecretUpdateOnChangeAnnotation string `json:"secretUpdateOnChangeAnnotation"` - // ReloaderAutoAnnotation is the annotation key used to detect changes in any referenced ConfigMaps or Secrets - ReloaderAutoAnnotation string `json:"reloaderAutoAnnotation"` - // IgnoreResourceAnnotation is the annotation key used to ignore resources from being watched - IgnoreResourceAnnotation string `json:"ignoreResourceAnnotation"` - // ConfigmapReloaderAutoAnnotation is the annotation key used to detect changes in ConfigMaps only - ConfigmapReloaderAutoAnnotation string `json:"configmapReloaderAutoAnnotation"` - // SecretReloaderAutoAnnotation is the annotation key used to detect changes in Secrets only - SecretReloaderAutoAnnotation string `json:"secretReloaderAutoAnnotation"` - // ConfigmapExcludeReloaderAnnotation is the annotation key containing comma-separated list of ConfigMaps to exclude from watching - ConfigmapExcludeReloaderAnnotation string `json:"configmapExcludeReloaderAnnotation"` - // SecretExcludeReloaderAnnotation is the annotation key containing comma-separated list of Secrets to exclude from watching - SecretExcludeReloaderAnnotation string 
`json:"secretExcludeReloaderAnnotation"` - // AutoSearchAnnotation is the annotation key used to detect changes in ConfigMaps/Secrets tagged with SearchMatchAnnotation - AutoSearchAnnotation string `json:"autoSearchAnnotation"` - // SearchMatchAnnotation is the annotation key used to tag ConfigMaps/Secrets to be found by AutoSearchAnnotation - SearchMatchAnnotation string `json:"searchMatchAnnotation"` - // RolloutStrategyAnnotation is the annotation key used to define the rollout update strategy for workloads - RolloutStrategyAnnotation string `json:"rolloutStrategyAnnotation"` - // PauseDeploymentAnnotation is the annotation key used to define the time period to pause a deployment after - PauseDeploymentAnnotation string `json:"pauseDeploymentAnnotation"` - // PauseDeploymentTimeAnnotation is the annotation key used to indicate when a deployment was paused by Reloader - PauseDeploymentTimeAnnotation string `json:"pauseDeploymentTimeAnnotation"` - - // LogFormat specifies the log format to use (json, or empty string for default text format) - LogFormat string `json:"logFormat"` - // LogLevel specifies the log level to use (trace, debug, info, warning, error, fatal, panic) - LogLevel string `json:"logLevel"` - // IsArgoRollouts indicates whether support for Argo Rollouts is enabled - IsArgoRollouts bool `json:"isArgoRollouts"` - // ReloadStrategy specifies the strategy used to trigger resource reloads (env-vars or annotations) - ReloadStrategy string `json:"reloadStrategy"` - // ReloadOnCreate indicates whether to trigger reloads when ConfigMaps/Secrets are created - ReloadOnCreate bool `json:"reloadOnCreate"` - // ReloadOnDelete indicates whether to trigger reloads when ConfigMaps/Secrets are deleted - ReloadOnDelete bool `json:"reloadOnDelete"` - // SyncAfterRestart indicates whether to sync add events after Reloader restarts (only works when ReloadOnCreate is true) - SyncAfterRestart bool `json:"syncAfterRestart"` - // EnableHA indicates whether High 
Availability mode is enabled with leader election - EnableHA bool `json:"enableHA"` - // WebhookUrl is the URL to send webhook notifications to instead of performing reloads - WebhookUrl string `json:"webhookUrl"` - // ResourcesToIgnore is a list of resource types to ignore (e.g., "configmaps" or "secrets") - ResourcesToIgnore []string `json:"resourcesToIgnore"` - // WorkloadTypesToIgnore is a list of workload types to ignore (e.g., "jobs" or "cronjobs") - WorkloadTypesToIgnore []string `json:"workloadTypesToIgnore"` - // NamespaceSelectors is a list of label selectors to filter namespaces to watch - NamespaceSelectors []string `json:"namespaceSelectors"` - // ResourceSelectors is a list of label selectors to filter ConfigMaps and Secrets to watch - ResourceSelectors []string `json:"resourceSelectors"` - // NamespacesToIgnore is a list of namespace names to ignore when watching for changes - NamespacesToIgnore []string `json:"namespacesToIgnore"` - // EnablePProf enables pprof for profiling - EnablePProf bool `json:"enablePProf"` - // PProfAddr is the address to start pprof server on - PProfAddr string `json:"pprofAddr"` -} - -var CommandLineOptions *ReloaderOptions - -func PublishMetaInfoConfigmap(clientset kubernetes.Interface) { - namespace := os.Getenv("RELOADER_NAMESPACE") - if namespace == "" { - logrus.Warn("RELOADER_NAMESPACE is not set, skipping meta info configmap creation") - return - } - - metaInfo := &MetaInfo{ - BuildInfo: *NewBuildInfo(), - ReloaderOptions: *GetCommandLineOptions(), - DeploymentInfo: metav1.ObjectMeta{ - Name: os.Getenv("RELOADER_DEPLOYMENT_NAME"), - Namespace: namespace, - }, - } - - configMap := metaInfo.ToConfigMap() - - if _, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.Background(), configMap.Name, metav1.GetOptions{}); err == nil { - logrus.Info("Meta info configmap already exists, updating it") - _, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.Background(), configMap, metav1.UpdateOptions{}) - 
if err != nil { - logrus.Warn("Failed to update existing meta info configmap: ", err) - } - return - } - - _, err := clientset.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMap, metav1.CreateOptions{}) - if err != nil { - logrus.Warn("Failed to create meta info configmap: ", err) - } -} - -func GetNamespaceLabelSelector(slice []string) (string, error) { - for i, kv := range slice { - // Legacy support for ":" as a delimiter and "*" for wildcard. - if strings.Contains(kv, ":") { - split := strings.Split(kv, ":") - if split[1] == "*" { - slice[i] = split[0] - } else { - slice[i] = split[0] + "=" + split[1] - } - } - // Convert wildcard to valid apimachinery operator - if strings.Contains(kv, "=") { - split := strings.Split(kv, "=") - if split[1] == "*" { - slice[i] = split[0] - } - } - } - - namespaceLabelSelector := strings.Join(slice[:], ",") - _, err := labels.Parse(namespaceLabelSelector) - if err != nil { - logrus.Fatal(err) - } - - return namespaceLabelSelector, nil -} - -func GetResourceLabelSelector(slice []string) (string, error) { - for i, kv := range slice { - // Legacy support for ":" as a delimiter and "*" for wildcard. - if strings.Contains(kv, ":") { - split := strings.Split(kv, ":") - if split[1] == "*" { - slice[i] = split[0] - } else { - slice[i] = split[0] + "=" + split[1] - } - } - // Convert wildcard to valid apimachinery operator - if strings.Contains(kv, "=") { - split := strings.Split(kv, "=") - if split[1] == "*" { - slice[i] = split[0] - } - } - } - - resourceLabelSelector := strings.Join(slice[:], ",") - _, err := labels.Parse(resourceLabelSelector) - if err != nil { - logrus.Fatal(err) - } - - return resourceLabelSelector, nil -} - -// ShouldReload checks if a resource should be reloaded based on its annotations and the provided options. 
-func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult { - - // Check if this workload type should be ignored - if len(options.WorkloadTypesToIgnore) > 0 { - ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList() - if err != nil { - logrus.Errorf("Failed to parse ignored workload types: %v", err) - } else { - // Map Kubernetes resource types to CLI-friendly names for comparison - var resourceToCheck string - switch resourceType { - case "Job": - resourceToCheck = "jobs" - case "CronJob": - resourceToCheck = "cronjobs" - default: - resourceToCheck = resourceType // For other types, use as-is - } - - // Check if current resource type should be ignored - if ignoredWorkloadTypes.Contains(resourceToCheck) { - return ReloadCheckResult{ - ShouldReload: false, - } - } - } - } - - ignoreResourceAnnotatonValue := config.ResourceAnnotations[options.IgnoreResourceAnnotation] - if ignoreResourceAnnotatonValue == "true" { - return ReloadCheckResult{ - ShouldReload: false, - } - } - - annotationValue, found := annotations[config.Annotation] - searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation] - excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation] - excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation] - - if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn { - annotations = podAnnotations - annotationValue = annotations[config.Annotation] - searchAnnotationValue = annotations[options.AutoSearchAnnotation] - reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation] - } - - 
isResourceExcluded := false - - switch config.Type { - case constants.ConfigmapEnvVarPostfix: - if foundExcludeConfigmap { - isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue) - } - case constants.SecretEnvVarPostfix: - if foundExcludeSecret { - isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue) - } - } - - if isResourceExcluded { - return ReloadCheckResult{ - ShouldReload: false, - } - } - - reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue) - if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll { - return ReloadCheckResult{ - ShouldReload: true, - AutoReload: true, - } - } - - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.TrimSpace(value) - re := regexp.MustCompile("^" + value + "$") - if re.Match([]byte(config.ResourceName)) { - return ReloadCheckResult{ - ShouldReload: true, - AutoReload: false, - } - } - } - - if searchAnnotationValue == "true" { - matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation] - if matchAnnotationValue == "true" { - return ReloadCheckResult{ - ShouldReload: true, - AutoReload: true, - } - } - } - - return ReloadCheckResult{ - ShouldReload: false, - } -} - -func checkIfResourceIsExcluded(resourceName, excludedResources string) bool { - if excludedResources == "" { - return false - } - - excludedResourcesList := strings.Split(excludedResources, ",") - for _, excludedResource := range excludedResourcesList { - if strings.TrimSpace(excludedResource) == resourceName { - return true - } - } - - return false -} - -func init() { - GetCommandLineOptions() -} - -func GetCommandLineOptions() *ReloaderOptions { - if CommandLineOptions == nil { - CommandLineOptions = &ReloaderOptions{} - } 
- - CommandLineOptions.AutoReloadAll = options.AutoReloadAll - CommandLineOptions.ConfigmapUpdateOnChangeAnnotation = options.ConfigmapUpdateOnChangeAnnotation - CommandLineOptions.SecretUpdateOnChangeAnnotation = options.SecretUpdateOnChangeAnnotation - CommandLineOptions.ReloaderAutoAnnotation = options.ReloaderAutoAnnotation - CommandLineOptions.IgnoreResourceAnnotation = options.IgnoreResourceAnnotation - CommandLineOptions.ConfigmapReloaderAutoAnnotation = options.ConfigmapReloaderAutoAnnotation - CommandLineOptions.SecretReloaderAutoAnnotation = options.SecretReloaderAutoAnnotation - CommandLineOptions.ConfigmapExcludeReloaderAnnotation = options.ConfigmapExcludeReloaderAnnotation - CommandLineOptions.SecretExcludeReloaderAnnotation = options.SecretExcludeReloaderAnnotation - CommandLineOptions.AutoSearchAnnotation = options.AutoSearchAnnotation - CommandLineOptions.SearchMatchAnnotation = options.SearchMatchAnnotation - CommandLineOptions.RolloutStrategyAnnotation = options.RolloutStrategyAnnotation - CommandLineOptions.PauseDeploymentAnnotation = options.PauseDeploymentAnnotation - CommandLineOptions.PauseDeploymentTimeAnnotation = options.PauseDeploymentTimeAnnotation - CommandLineOptions.LogFormat = options.LogFormat - CommandLineOptions.LogLevel = options.LogLevel - CommandLineOptions.ReloadStrategy = options.ReloadStrategy - CommandLineOptions.SyncAfterRestart = options.SyncAfterRestart - CommandLineOptions.EnableHA = options.EnableHA - CommandLineOptions.WebhookUrl = options.WebhookUrl - CommandLineOptions.ResourcesToIgnore = options.ResourcesToIgnore - CommandLineOptions.WorkloadTypesToIgnore = options.WorkloadTypesToIgnore - CommandLineOptions.NamespaceSelectors = options.NamespaceSelectors - CommandLineOptions.ResourceSelectors = options.ResourceSelectors - CommandLineOptions.NamespacesToIgnore = options.NamespacesToIgnore - CommandLineOptions.IsArgoRollouts = parseBool(options.IsArgoRollouts) - CommandLineOptions.ReloadOnCreate = 
parseBool(options.ReloadOnCreate) - CommandLineOptions.ReloadOnDelete = parseBool(options.ReloadOnDelete) - CommandLineOptions.EnablePProf = options.EnablePProf - CommandLineOptions.PProfAddr = options.PProfAddr - - return CommandLineOptions -} - -func parseBool(value string) bool { - if value == "" { - return false - } - result, err := strconv.ParseBool(value) - if err != nil { - return false // Default to false if parsing fails - } - return result -} diff --git a/pkg/common/common_test.go b/pkg/common/common_test.go deleted file mode 100644 index 532d3adfa..000000000 --- a/pkg/common/common_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stakater/Reloader/internal/pkg/options" -) - -func TestShouldReload_IgnoredWorkloadTypes(t *testing.T) { - // Save original state - originalWorkloadTypes := options.WorkloadTypesToIgnore - defer func() { - options.WorkloadTypesToIgnore = originalWorkloadTypes - }() - - tests := []struct { - name string - ignoredWorkloadTypes []string - resourceType string - shouldReload bool - description string - }{ - { - name: "Jobs ignored - Job should not reload", - ignoredWorkloadTypes: []string{"jobs"}, - resourceType: "Job", - shouldReload: false, - description: "When jobs are ignored, Job resources should not be reloaded", - }, - { - name: "Jobs ignored - CronJob should reload", - ignoredWorkloadTypes: []string{"jobs"}, - resourceType: "CronJob", - shouldReload: true, - description: "When jobs are ignored, CronJob resources should still be processed", - }, - { - name: "CronJobs ignored - CronJob should not reload", - ignoredWorkloadTypes: []string{"cronjobs"}, - resourceType: "CronJob", - shouldReload: false, - description: "When cronjobs are ignored, CronJob resources should not be reloaded", - }, - { - name: "CronJobs ignored - Job should reload", - ignoredWorkloadTypes: []string{"cronjobs"}, - resourceType: "Job", - shouldReload: true, - description: "When cronjobs are ignored, Job 
resources should still be processed", - }, - { - name: "Both ignored - Job should not reload", - ignoredWorkloadTypes: []string{"jobs", "cronjobs"}, - resourceType: "Job", - shouldReload: false, - description: "When both are ignored, Job resources should not be reloaded", - }, - { - name: "Both ignored - CronJob should not reload", - ignoredWorkloadTypes: []string{"jobs", "cronjobs"}, - resourceType: "CronJob", - shouldReload: false, - description: "When both are ignored, CronJob resources should not be reloaded", - }, - { - name: "Both ignored - Deployment should reload", - ignoredWorkloadTypes: []string{"jobs", "cronjobs"}, - resourceType: "Deployment", - shouldReload: true, - description: "When both are ignored, other workload types should still be processed", - }, - { - name: "None ignored - Job should reload", - ignoredWorkloadTypes: []string{}, - resourceType: "Job", - shouldReload: true, - description: "When nothing is ignored, all workload types should be processed", - }, - { - name: "None ignored - CronJob should reload", - ignoredWorkloadTypes: []string{}, - resourceType: "CronJob", - shouldReload: true, - description: "When nothing is ignored, all workload types should be processed", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set the ignored workload types - options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes - - // Create minimal test config and options - config := Config{ - ResourceName: "test-resource", - Annotation: "configmap.reloader.stakater.com/reload", - } - - annotations := Map{ - "configmap.reloader.stakater.com/reload": "test-config", - } - - // Create ReloaderOptions with the ignored workload types - opts := &ReloaderOptions{ - WorkloadTypesToIgnore: tt.ignoredWorkloadTypes, - AutoReloadAll: true, // Enable auto-reload to simplify test - ReloaderAutoAnnotation: "reloader.stakater.com/auto", - } - - // Call ShouldReload - result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts) - - 
// Check the result - if result.ShouldReload != tt.shouldReload { - t.Errorf("For resource type %s with ignored types %v, expected ShouldReload=%v, got=%v", - tt.resourceType, tt.ignoredWorkloadTypes, tt.shouldReload, result.ShouldReload) - } - - t.Logf("✓ %s", tt.description) - }) - } -} - -func TestShouldReload_IgnoredWorkloadTypes_ValidationError(t *testing.T) { - // Save original state - originalWorkloadTypes := options.WorkloadTypesToIgnore - defer func() { - options.WorkloadTypesToIgnore = originalWorkloadTypes - }() - - // Test with invalid workload type - should still continue processing - options.WorkloadTypesToIgnore = []string{"invalid"} - - config := Config{ - ResourceName: "test-resource", - Annotation: "configmap.reloader.stakater.com/reload", - } - - annotations := Map{ - "configmap.reloader.stakater.com/reload": "test-config", - } - - opts := &ReloaderOptions{ - WorkloadTypesToIgnore: []string{"invalid"}, - AutoReloadAll: true, // Enable auto-reload to simplify test - ReloaderAutoAnnotation: "reloader.stakater.com/auto", - } - - // Should not panic and should continue with normal processing - result := ShouldReload(config, "Job", annotations, Map{}, opts) - - // Since validation failed, it should continue with normal processing (should reload) - if !result.ShouldReload { - t.Errorf("Expected ShouldReload=true when validation fails, got=%v", result.ShouldReload) - } -} - -// Test that validates the fix for issue #996 -func TestShouldReload_IssueRBACPermissionFixed(t *testing.T) { - // Save original state - originalWorkloadTypes := options.WorkloadTypesToIgnore - defer func() { - options.WorkloadTypesToIgnore = originalWorkloadTypes - }() - - tests := []struct { - name string - ignoredWorkloadTypes []string - resourceType string - description string - }{ - { - name: "Issue #996 - ignoreJobs prevents Job processing", - ignoredWorkloadTypes: []string{"jobs"}, - resourceType: "Job", - description: "Job resources are skipped entirely, preventing RBAC 
permission errors", - }, - { - name: "Issue #996 - ignoreCronJobs prevents CronJob processing", - ignoredWorkloadTypes: []string{"cronjobs"}, - resourceType: "CronJob", - description: "CronJob resources are skipped entirely, preventing RBAC permission errors", - }, - { - name: "Issue #996 - both ignored prevent both types", - ignoredWorkloadTypes: []string{"jobs", "cronjobs"}, - resourceType: "Job", - description: "Job resources are skipped entirely when both types are ignored", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set the ignored workload types - options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes - - config := Config{ - ResourceName: "test-resource", - Annotation: "configmap.reloader.stakater.com/reload", - } - - annotations := Map{ - "configmap.reloader.stakater.com/reload": "test-config", - } - - opts := &ReloaderOptions{ - WorkloadTypesToIgnore: tt.ignoredWorkloadTypes, - AutoReloadAll: true, // Enable auto-reload to simplify test - ReloaderAutoAnnotation: "reloader.stakater.com/auto", - } - - // Call ShouldReload - result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts) - - // Should not reload when workload type is ignored - if result.ShouldReload { - t.Errorf("Expected ShouldReload=false for ignored workload type %s, got=%v", - tt.resourceType, result.ShouldReload) - } - - t.Logf("✓ %s", tt.description) - }) - } -} diff --git a/pkg/common/config.go b/pkg/common/config.go deleted file mode 100644 index 4227c2bc3..000000000 --- a/pkg/common/config.go +++ /dev/null @@ -1,48 +0,0 @@ -package common - -import ( - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - v1 "k8s.io/api/core/v1" -) - -// Config contains rolling upgrade configuration parameters -type Config struct { - Namespace string - ResourceName string - ResourceAnnotations map[string]string - Annotation string - 
TypedAutoAnnotation string - SHAValue string - Type string - Labels map[string]string -} - -// GetConfigmapConfig provides utility config for configmap -func GetConfigmapConfig(configmap *v1.ConfigMap) Config { - return Config{ - Namespace: configmap.Namespace, - ResourceName: configmap.Name, - ResourceAnnotations: configmap.Annotations, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation, - SHAValue: util.GetSHAfromConfigmap(configmap), - Type: constants.ConfigmapEnvVarPostfix, - Labels: configmap.Labels, - } -} - -// GetSecretConfig provides utility config for secret -func GetSecretConfig(secret *v1.Secret) Config { - return Config{ - Namespace: secret.Namespace, - ResourceName: secret.Name, - ResourceAnnotations: secret.Annotations, - Annotation: options.SecretUpdateOnChangeAnnotation, - TypedAutoAnnotation: options.SecretReloaderAutoAnnotation, - SHAValue: util.GetSHAfromSecret(secret.Data), - Type: constants.SecretEnvVarPostfix, - Labels: secret.Labels, - } -} diff --git a/pkg/common/metainfo.go b/pkg/common/metainfo.go deleted file mode 100644 index b792c5297..000000000 --- a/pkg/common/metainfo.go +++ /dev/null @@ -1,129 +0,0 @@ -package common - -import ( - "encoding/json" - "fmt" - "runtime" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Version, Commit, and BuildDate are set during the build process -// using the -X linker flag to inject these values into the binary. -// They provide metadata about the build version, commit hash, build date, and whether there are -// uncommitted changes in the source code at the time of build. -// This information is useful for debugging and tracking the specific build of the Reloader binary. 
-var Version = "dev" -var Commit = "unknown" -var BuildDate = "unknown" - -const ( - MetaInfoConfigmapName = "reloader-meta-info" - MetaInfoConfigmapLabelKey = "reloader.stakater.com/meta-info" - MetaInfoConfigmapLabelValue = "reloader-oss" -) - -// MetaInfo contains comprehensive metadata about the Reloader instance. -// This includes build information, configuration options, and deployment details. -type MetaInfo struct { - // BuildInfo contains information about the build version, commit, and compilation details - BuildInfo BuildInfo `json:"buildInfo"` - // ReloaderOptions contains all the configuration options and flags used by this Reloader instance - ReloaderOptions ReloaderOptions `json:"reloaderOptions"` - // DeploymentInfo contains metadata about the Kubernetes deployment of this Reloader instance - DeploymentInfo metav1.ObjectMeta `json:"deploymentInfo"` -} - -// BuildInfo contains information about the build and version of the Reloader binary. -// This includes Go version, release version, commit details, and build timestamp. 
-type BuildInfo struct { - // GoVersion is the version of Go used to compile the binary - GoVersion string `json:"goVersion"` - // ReleaseVersion is the version tag or branch of the Reloader release - ReleaseVersion string `json:"releaseVersion"` - // CommitHash is the Git commit hash of the source code used to build this binary - CommitHash string `json:"commitHash"` - // CommitTime is the timestamp of the Git commit used to build this binary - CommitTime time.Time `json:"commitTime"` -} - -func NewBuildInfo() *BuildInfo { - metaInfo := &BuildInfo{ - GoVersion: runtime.Version(), - ReleaseVersion: Version, - CommitHash: Commit, - CommitTime: ParseUTCTime(BuildDate), - } - - return metaInfo -} - -func (m *MetaInfo) ToConfigMap() *v1.ConfigMap { - return &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: MetaInfoConfigmapName, - Namespace: m.DeploymentInfo.Namespace, - Labels: map[string]string{ - MetaInfoConfigmapLabelKey: MetaInfoConfigmapLabelValue, - }, - }, - Data: map[string]string{ - "buildInfo": toJson(m.BuildInfo), - "reloaderOptions": toJson(m.ReloaderOptions), - "deploymentInfo": toJson(m.DeploymentInfo), - }, - } -} - -func NewMetaInfo(configmap *v1.ConfigMap) (*MetaInfo, error) { - var buildInfo BuildInfo - if val, ok := configmap.Data["buildInfo"]; ok { - err := json.Unmarshal([]byte(val), &buildInfo) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal buildInfo: %w", err) - } - } - - var reloaderOptions ReloaderOptions - if val, ok := configmap.Data["reloaderOptions"]; ok { - err := json.Unmarshal([]byte(val), &reloaderOptions) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal reloaderOptions: %w", err) - } - } - - var deploymentInfo metav1.ObjectMeta - if val, ok := configmap.Data["deploymentInfo"]; ok { - err := json.Unmarshal([]byte(val), &deploymentInfo) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal deploymentInfo: %w", err) - } - } - - return &MetaInfo{ - BuildInfo: buildInfo, - ReloaderOptions: 
reloaderOptions, - DeploymentInfo: deploymentInfo, - }, nil -} - -func toJson(data interface{}) string { - jsonData, err := json.Marshal(data) - if err != nil { - return "" - } - return string(jsonData) -} - -func ParseUTCTime(value string) time.Time { - if value == "" { - return time.Time{} // Return zero time if value is empty - } - t, err := time.Parse(time.RFC3339, value) - if err != nil { - return time.Time{} // Return zero time if parsing fails - } - return t -} diff --git a/pkg/common/reload_source.go b/pkg/common/reload_source.go deleted file mode 100644 index 093826132..000000000 --- a/pkg/common/reload_source.go +++ /dev/null @@ -1,39 +0,0 @@ -package common - -import "time" - -type ReloadSource struct { - Type string `json:"type"` - Name string `json:"name"` - Namespace string `json:"namespace"` - Hash string `json:"hash"` - ContainerRefs []string `json:"containerRefs"` - ObservedAt int64 `json:"observedAt"` -} - -func NewReloadSource( - resourceName string, - resourceNamespace string, - resourceType string, - resourceHash string, - containerRefs []string, -) ReloadSource { - return ReloadSource{ - ObservedAt: time.Now().Unix(), - Name: resourceName, - Namespace: resourceNamespace, - Type: resourceType, - Hash: resourceHash, - ContainerRefs: containerRefs, - } -} - -func NewReloadSourceFromConfig(config Config, containerRefs []string) ReloadSource { - return NewReloadSource( - config.ResourceName, - config.Namespace, - config.Type, - config.SHAValue, - containerRefs, - ) -} diff --git a/pkg/kube/client.go b/pkg/kube/client.go deleted file mode 100644 index 423006392..000000000 --- a/pkg/kube/client.go +++ /dev/null @@ -1,118 +0,0 @@ -package kube - -import ( - "context" - "os" - - "k8s.io/client-go/tools/clientcmd" - - argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" - appsclient "github.com/openshift/client-go/apps/clientset/versioned" - "github.com/sirupsen/logrus" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" 
-) - -// Clients struct exposes interfaces for kubernetes as well as openshift if available -type Clients struct { - KubernetesClient kubernetes.Interface - OpenshiftAppsClient appsclient.Interface - ArgoRolloutClient argorollout.Interface -} - -var ( - // IsOpenshift is true if environment is Openshift, it is false if environment is Kubernetes - IsOpenshift = isOpenshift() -) - -// GetClients returns a `Clients` object containing both openshift and kubernetes clients with an openshift identifier -func GetClients() Clients { - client, err := GetKubernetesClient() - if err != nil { - logrus.Fatalf("Unable to create Kubernetes client error = %v", err) - } - - var appsClient *appsclient.Clientset - - if IsOpenshift { - appsClient, err = GetOpenshiftAppsClient() - if err != nil { - logrus.Warnf("Unable to create Openshift Apps client error = %v", err) - } - } - - var rolloutClient *argorollout.Clientset - - rolloutClient, err = GetArgoRolloutClient() - if err != nil { - logrus.Warnf("Unable to create ArgoRollout client error = %v", err) - } - - return Clients{ - KubernetesClient: client, - OpenshiftAppsClient: appsClient, - ArgoRolloutClient: rolloutClient, - } -} - -func GetArgoRolloutClient() (*argorollout.Clientset, error) { - config, err := getConfig() - if err != nil { - return nil, err - } - return argorollout.NewForConfig(config) -} - -func isOpenshift() bool { - client, err := GetKubernetesClient() - if err != nil { - logrus.Fatalf("Unable to create Kubernetes client error = %v", err) - } - _, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw() - if err == nil { - logrus.Info("Environment: Openshift") - return true - } - logrus.Info("Environment: Kubernetes") - return false -} - -// GetOpenshiftAppsClient returns an Openshift Client that can query on Apps -func GetOpenshiftAppsClient() (*appsclient.Clientset, error) { - config, err := getConfig() - if err != nil { - return nil, err - } - return 
appsclient.NewForConfig(config) -} - -// GetKubernetesClient gets the client for k8s, if ~/.kube/config exists so get that config else incluster config -func GetKubernetesClient() (*kubernetes.Clientset, error) { - config, err := getConfig() - if err != nil { - return nil, err - } - return kubernetes.NewForConfig(config) -} - -func getConfig() (*rest.Config, error) { - var config *rest.Config - kubeconfigPath := os.Getenv("KUBECONFIG") - if kubeconfigPath == "" { - kubeconfigPath = os.Getenv("HOME") + "/.kube/config" - } - //If file exists so use that config settings - if _, err := os.Stat(kubeconfigPath); err == nil { - config, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - return nil, err - } - } else { //Use Incluster Configuration - config, err = rest.InClusterConfig() - if err != nil { - return nil, err - } - } - - return config, nil -} diff --git a/pkg/kube/resourcemapper.go b/pkg/kube/resourcemapper.go deleted file mode 100644 index 89ac2afc4..000000000 --- a/pkg/kube/resourcemapper.go +++ /dev/null @@ -1,13 +0,0 @@ -package kube - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// ResourceMap are resources from where changes are going to be detected -var ResourceMap = map[string]runtime.Object{ - "configmaps": &v1.ConfigMap{}, - "secrets": &v1.Secret{}, - "namespaces": &v1.Namespace{}, -}