From b9a7b8997ce0f0bca8c07a176ec8df8f33506258 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:51 +0100 Subject: [PATCH 01/35] feat: add v2 foundation packages for config and workload abstraction --- go.mod | 4 +- go.sum | 14 ++ internal/pkg/config/config.go | 252 +++++++++++++++++++++++++++ internal/pkg/config/flags.go | 157 +++++++++++++++++ internal/pkg/config/validation.go | 148 ++++++++++++++++ internal/pkg/workload/cronjob.go | 208 ++++++++++++++++++++++ internal/pkg/workload/daemonset.go | 192 ++++++++++++++++++++ internal/pkg/workload/deployment.go | 194 +++++++++++++++++++++ internal/pkg/workload/interface.go | 120 +++++++++++++ internal/pkg/workload/job.go | 203 +++++++++++++++++++++ internal/pkg/workload/registry.go | 74 ++++++++ internal/pkg/workload/statefulset.go | 192 ++++++++++++++++++++ 12 files changed, 1757 insertions(+), 1 deletion(-) create mode 100644 internal/pkg/config/config.go create mode 100644 internal/pkg/config/flags.go create mode 100644 internal/pkg/config/validation.go create mode 100644 internal/pkg/workload/cronjob.go create mode 100644 internal/pkg/workload/daemonset.go create mode 100644 internal/pkg/workload/deployment.go create mode 100644 internal/pkg/workload/interface.go create mode 100644 internal/pkg/workload/job.go create mode 100644 internal/pkg/workload/registry.go create mode 100644 internal/pkg/workload/statefulset.go diff --git a/go.mod b/go.mod index 05edeccd1..af8cb9622 100644 --- a/go.mod +++ b/go.mod @@ -10,12 +10,14 @@ require ( github.com/prometheus/client_golang v1.22.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.10.1 + github.com/spf13/pflag v1.0.9 github.com/stretchr/testify v1.10.0 k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 k8s.io/kubectl v0.32.3 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + sigs.k8s.io/controller-runtime v0.19.4 ) require ( @@ -24,6 +26,7 @@ require ( 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fxamacker/cbor/v2 v2.8.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.1 // indirect @@ -50,7 +53,6 @@ require ( github.com/prometheus/common v0.63.0 // indirect github.com/prometheus/procfs v0.16.0 // indirect github.com/smartystreets/goconvey v1.7.2 // indirect - github.com/spf13/pflag v1.0.9 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.39.0 // indirect golang.org/x/oauth2 v0.29.0 // indirect diff --git a/go.sum b/go.sum index 59339eaf7..dd99ea92f 100644 --- a/go.sum +++ b/go.sum @@ -13,10 +13,14 @@ github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.1 
h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= @@ -119,9 +123,15 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= +golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -175,6 +185,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.32.3/go.mod 
h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= @@ -187,6 +199,8 @@ k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= +sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go new file mode 100644 index 000000000..7a228284c --- /dev/null +++ b/internal/pkg/config/config.go @@ -0,0 +1,252 @@ +// Package config provides configuration management for Reloader. +// It replaces the old global variables pattern with an immutable Config struct. +package config + +import ( + "time" + + "k8s.io/apimachinery/pkg/labels" +) + +// ReloadStrategy defines how Reloader triggers workload restarts. +type ReloadStrategy string + +const ( + // ReloadStrategyEnvVars adds/updates environment variables to trigger restart. 
+ // This is the default and recommended strategy for GitOps compatibility. + ReloadStrategyEnvVars ReloadStrategy = "env-vars" + + // ReloadStrategyAnnotations adds/updates pod template annotations to trigger restart. + ReloadStrategyAnnotations ReloadStrategy = "annotations" +) + +// ArgoRolloutStrategy defines the strategy for Argo Rollout updates. +type ArgoRolloutStrategy string + +const ( + // ArgoRolloutStrategyRestart uses the restart mechanism for Argo Rollouts. + ArgoRolloutStrategyRestart ArgoRolloutStrategy = "restart" + + // ArgoRolloutStrategyRollout uses the rollout mechanism for Argo Rollouts. + ArgoRolloutStrategyRollout ArgoRolloutStrategy = "rollout" +) + +// Config holds all configuration for Reloader. +// This struct is immutable after creation - all fields should be set during initialization. +type Config struct { + // Annotations holds customizable annotation keys. + Annotations AnnotationConfig + + // AutoReloadAll enables automatic reload for all resources without requiring annotations. + AutoReloadAll bool + + // ReloadStrategy determines how workload restarts are triggered. + ReloadStrategy ReloadStrategy + + // ArgoRolloutsEnabled enables support for Argo Rollouts workload type. + ArgoRolloutsEnabled bool + + // ArgoRolloutStrategy determines how Argo Rollouts are updated. + ArgoRolloutStrategy ArgoRolloutStrategy + + // ReloadOnCreate enables watching for resource creation events. + ReloadOnCreate bool + + // ReloadOnDelete enables watching for resource deletion events. + ReloadOnDelete bool + + // SyncAfterRestart triggers a sync operation after a restart is performed. + SyncAfterRestart bool + + // EnableHA enables high-availability mode with leader election. + EnableHA bool + + // WebhookURL is an optional URL to send notifications to instead of triggering reload. 
+ WebhookURL string + + // Filtering configuration + IgnoredResources []string // ConfigMaps/Secrets to ignore (case-insensitive) + IgnoredWorkloads []string // Workload types to ignore + IgnoredNamespaces []string // Namespaces to ignore + NamespaceSelectors []labels.Selector + ResourceSelectors []labels.Selector + + // Logging configuration + LogFormat string // "json" or "" for default + LogLevel string // trace, debug, info, warning, error, fatal, panic + + // Metrics configuration + MetricsAddr string // Address to serve metrics on (default :9090) + + // Profiling configuration + EnablePProf bool + PProfAddr string + + // Alerting configuration + Alerting AlertingConfig + + // Leader election configuration + LeaderElection LeaderElectionConfig + + // WatchedNamespace limits watching to a specific namespace (empty = all namespaces) + WatchedNamespace string + + // SyncPeriod is the period for re-syncing watched resources + SyncPeriod time.Duration +} + +// AnnotationConfig holds all customizable annotation keys. 
+type AnnotationConfig struct { + // Prefix is the base prefix for all annotations (default: reloader.stakater.com) + Prefix string + + // Auto annotations + Auto string // reloader.stakater.com/auto + ConfigmapAuto string // configmap.reloader.stakater.com/auto + SecretAuto string // secret.reloader.stakater.com/auto + + // Reload annotations (explicit resource names) + ConfigmapReload string // configmap.reloader.stakater.com/reload + SecretReload string // secret.reloader.stakater.com/reload + + // Exclude annotations + ConfigmapExclude string // configmaps.exclude.reloader.stakater.com/reload + SecretExclude string // secrets.exclude.reloader.stakater.com/reload + + // Ignore annotation + Ignore string // reloader.stakater.com/ignore + + // Search/Match annotations + Search string // reloader.stakater.com/search + Match string // reloader.stakater.com/match + + // Rollout strategy annotation + RolloutStrategy string // reloader.stakater.com/rollout-strategy + + // Pause annotations + PausePeriod string // deployment.reloader.stakater.com/pause-period + PausedAt string // deployment.reloader.stakater.com/paused-at + + // Last reloaded from annotation (set by Reloader) + LastReloadedFrom string // reloader.stakater.com/last-reloaded-from +} + +// AlertingConfig holds configuration for alerting integrations. +type AlertingConfig struct { + SlackWebhookURL string + TeamsWebhookURL string + GChatWebhookURL string +} + +// LeaderElectionConfig holds configuration for leader election. +type LeaderElectionConfig struct { + LockName string + Namespace string + Identity string +} + +// NewDefault creates a Config with default values. 
+func NewDefault() *Config { + return &Config{ + Annotations: DefaultAnnotations(), + AutoReloadAll: false, + ReloadStrategy: ReloadStrategyEnvVars, + ArgoRolloutsEnabled: false, + ArgoRolloutStrategy: ArgoRolloutStrategyRollout, + ReloadOnCreate: false, + ReloadOnDelete: false, + SyncAfterRestart: false, + EnableHA: false, + WebhookURL: "", + IgnoredResources: []string{}, + IgnoredWorkloads: []string{}, + IgnoredNamespaces: []string{}, + NamespaceSelectors: []labels.Selector{}, + ResourceSelectors: []labels.Selector{}, + LogFormat: "", + LogLevel: "info", + MetricsAddr: ":9090", + EnablePProf: false, + PProfAddr: ":6060", + Alerting: AlertingConfig{}, + LeaderElection: LeaderElectionConfig{ + LockName: "stakater-reloader-lock", + }, + WatchedNamespace: "", + SyncPeriod: 0, + } +} + +// DefaultAnnotations returns the default annotation configuration. +func DefaultAnnotations() AnnotationConfig { + return AnnotationConfig{ + Prefix: "reloader.stakater.com", + Auto: "reloader.stakater.com/auto", + ConfigmapAuto: "configmap.reloader.stakater.com/auto", + SecretAuto: "secret.reloader.stakater.com/auto", + ConfigmapReload: "configmap.reloader.stakater.com/reload", + SecretReload: "secret.reloader.stakater.com/reload", + ConfigmapExclude: "configmaps.exclude.reloader.stakater.com/reload", + SecretExclude: "secrets.exclude.reloader.stakater.com/reload", + Ignore: "reloader.stakater.com/ignore", + Search: "reloader.stakater.com/search", + Match: "reloader.stakater.com/match", + RolloutStrategy: "reloader.stakater.com/rollout-strategy", + PausePeriod: "deployment.reloader.stakater.com/pause-period", + PausedAt: "deployment.reloader.stakater.com/paused-at", + LastReloadedFrom: "reloader.stakater.com/last-reloaded-from", + } +} + +// IsResourceIgnored checks if a resource name should be ignored (case-insensitive). 
+func (c *Config) IsResourceIgnored(name string) bool { + for _, ignored := range c.IgnoredResources { + if equalFold(ignored, name) { + return true + } + } + return false +} + +// IsWorkloadIgnored checks if a workload type should be ignored (case-insensitive). +func (c *Config) IsWorkloadIgnored(workloadType string) bool { + for _, ignored := range c.IgnoredWorkloads { + if equalFold(ignored, workloadType) { + return true + } + } + return false +} + +// IsNamespaceIgnored checks if a namespace should be ignored. +func (c *Config) IsNamespaceIgnored(namespace string) bool { + for _, ignored := range c.IgnoredNamespaces { + if ignored == namespace { + return true + } + } + return false +} + +// equalFold is a simple case-insensitive string comparison. +func equalFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + c1, c2 := s[i], t[i] + if c1 != c2 { + // Convert to lowercase for comparison + if 'A' <= c1 && c1 <= 'Z' { + c1 += 'a' - 'A' + } + if 'A' <= c2 && c2 <= 'Z' { + c2 += 'a' - 'A' + } + if c1 != c2 { + return false + } + } + } + return true +} diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go new file mode 100644 index 000000000..e43637649 --- /dev/null +++ b/internal/pkg/config/flags.go @@ -0,0 +1,157 @@ +package config + +import ( + "strings" + + "github.com/spf13/pflag" +) + +// flagValues holds intermediate string values from CLI flags +// that need further parsing into the Config struct. +type flagValues struct { + namespaceSelectors string + resourceSelectors string + ignoredResources string + ignoredWorkloads string + ignoredNamespaces string + isArgoRollouts string + reloadOnCreate string + reloadOnDelete string +} + +var fv flagValues + +// BindFlags binds configuration flags to the provided flag set. +// Call this before parsing flags, then call ApplyFlags after parsing. 
+func BindFlags(fs *pflag.FlagSet, cfg *Config) { + // Auto reload + fs.BoolVar(&cfg.AutoReloadAll, "auto-reload-all", cfg.AutoReloadAll, + "Automatically reload all resources when their configmaps/secrets are updated, without requiring annotations") + + // Reload strategy + fs.StringVar((*string)(&cfg.ReloadStrategy), "reload-strategy", string(cfg.ReloadStrategy), + "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'") + + // Argo Rollouts + fs.StringVar(&fv.isArgoRollouts, "is-argo-rollouts", "false", + "Enable Argo Rollouts support (true/false)") + + // Event watching + fs.StringVar(&fv.reloadOnCreate, "reload-on-create", "false", + "Reload when configmaps/secrets are created (true/false)") + fs.StringVar(&fv.reloadOnDelete, "reload-on-delete", "false", + "Reload when configmaps/secrets are deleted (true/false)") + + // Sync after restart + fs.BoolVar(&cfg.SyncAfterRestart, "sync-after-restart", cfg.SyncAfterRestart, + "Trigger sync operation after restart") + + // High availability + fs.BoolVar(&cfg.EnableHA, "enable-ha", cfg.EnableHA, + "Enable high-availability mode with leader election") + + // Webhook + fs.StringVar(&cfg.WebhookURL, "webhook-url", cfg.WebhookURL, + "URL to send notification instead of triggering reload") + + // Filtering - resources + fs.StringVar(&fv.ignoredResources, "resources-to-ignore", "", + "Comma-separated list of configmap/secret names to ignore (case-insensitive)") + fs.StringVar(&fv.ignoredWorkloads, "workload-types-to-ignore", "", + "Comma-separated list of workload types to ignore (Deployment, DaemonSet, StatefulSet)") + fs.StringVar(&fv.ignoredNamespaces, "namespaces-to-ignore", "", + "Comma-separated list of namespaces to ignore") + + // Filtering - selectors + fs.StringVar(&fv.namespaceSelectors, "namespace-selector", "", + "Comma-separated list of namespace label selectors") + fs.StringVar(&fv.resourceSelectors, "resource-label-selector", "", + "Comma-separated list of resource 
label selectors") + + // Logging + fs.StringVar(&cfg.LogFormat, "log-format", cfg.LogFormat, + "Log format: 'json' or empty for default") + fs.StringVar(&cfg.LogLevel, "log-level", cfg.LogLevel, + "Log level: trace, debug, info, warning, error, fatal, panic") + + // Metrics + fs.StringVar(&cfg.MetricsAddr, "metrics-addr", cfg.MetricsAddr, + "Address to serve metrics on") + + // Profiling + fs.BoolVar(&cfg.EnablePProf, "enable-pprof", cfg.EnablePProf, + "Enable pprof profiling server") + fs.StringVar(&cfg.PProfAddr, "pprof-addr", cfg.PProfAddr, + "Address for pprof server") + + // Annotation customization + fs.StringVar(&cfg.Annotations.Auto, "auto-annotation", cfg.Annotations.Auto, + "Custom annotation for auto-reload") + fs.StringVar(&cfg.Annotations.ConfigmapAuto, "configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, + "Custom annotation for configmap auto-reload") + fs.StringVar(&cfg.Annotations.SecretAuto, "secret-auto-annotation", cfg.Annotations.SecretAuto, + "Custom annotation for secret auto-reload") + fs.StringVar(&cfg.Annotations.ConfigmapReload, "configmap-reload-annotation", cfg.Annotations.ConfigmapReload, + "Custom annotation for configmap reload") + fs.StringVar(&cfg.Annotations.SecretReload, "secret-reload-annotation", cfg.Annotations.SecretReload, + "Custom annotation for secret reload") + fs.StringVar(&cfg.Annotations.Ignore, "ignore-annotation", cfg.Annotations.Ignore, + "Custom annotation for ignoring resources") + fs.StringVar(&cfg.Annotations.Search, "search-annotation", cfg.Annotations.Search, + "Custom annotation for search-based matching") + fs.StringVar(&cfg.Annotations.Match, "match-annotation", cfg.Annotations.Match, + "Custom annotation for match-based matching") + + // Watched namespace (for single-namespace mode) + fs.StringVar(&cfg.WatchedNamespace, "watch-namespace", cfg.WatchedNamespace, + "Namespace to watch (empty for all namespaces)") +} + +// ApplyFlags applies flag values that need post-processing. 
+// Call this after parsing flags. +func ApplyFlags(cfg *Config) error { + // Parse boolean string flags + cfg.ArgoRolloutsEnabled = parseBoolString(fv.isArgoRollouts) + cfg.ReloadOnCreate = parseBoolString(fv.reloadOnCreate) + cfg.ReloadOnDelete = parseBoolString(fv.reloadOnDelete) + + // Parse comma-separated lists + cfg.IgnoredResources = splitAndTrim(fv.ignoredResources) + cfg.IgnoredWorkloads = splitAndTrim(fv.ignoredWorkloads) + cfg.IgnoredNamespaces = splitAndTrim(fv.ignoredNamespaces) + + // Parse selectors + var err error + cfg.NamespaceSelectors, err = ParseSelectors(splitAndTrim(fv.namespaceSelectors)) + if err != nil { + return err + } + cfg.ResourceSelectors, err = ParseSelectors(splitAndTrim(fv.resourceSelectors)) + if err != nil { + return err + } + + return nil +} + +// parseBoolString parses a string as a boolean, defaulting to false. +func parseBoolString(s string) bool { + s = strings.ToLower(strings.TrimSpace(s)) + return s == "true" || s == "1" || s == "yes" +} + +// splitAndTrim splits a comma-separated string and trims whitespace. +func splitAndTrim(s string) []string { + if s == "" { + return nil + } + parts := strings.Split(s, ",") + result := make([]string, 0, len(parts)) + for _, p := range parts { + p = strings.TrimSpace(p) + if p != "" { + result = append(result, p) + } + } + return result +} diff --git a/internal/pkg/config/validation.go b/internal/pkg/config/validation.go new file mode 100644 index 000000000..8a3bbfe54 --- /dev/null +++ b/internal/pkg/config/validation.go @@ -0,0 +1,148 @@ +package config + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/labels" +) + +// ValidationError represents a configuration validation error. +type ValidationError struct { + Field string + Message string +} + +func (e ValidationError) Error() string { + return fmt.Sprintf("config.%s: %s", e.Field, e.Message) +} + +// ValidationErrors is a collection of validation errors. 
+type ValidationErrors []ValidationError + +func (e ValidationErrors) Error() string { + if len(e) == 0 { + return "" + } + if len(e) == 1 { + return e[0].Error() + } + var b strings.Builder + b.WriteString("multiple configuration errors:\n") + for _, err := range e { + b.WriteString(" - ") + b.WriteString(err.Error()) + b.WriteString("\n") + } + return b.String() +} + +// Validate checks the configuration for errors and normalizes values. +func (c *Config) Validate() error { + var errs ValidationErrors + + // Validate ReloadStrategy + switch c.ReloadStrategy { + case ReloadStrategyEnvVars, ReloadStrategyAnnotations: + // valid + case "": + c.ReloadStrategy = ReloadStrategyEnvVars + default: + errs = append(errs, ValidationError{ + Field: "ReloadStrategy", + Message: fmt.Sprintf("invalid value %q, must be %q or %q", c.ReloadStrategy, ReloadStrategyEnvVars, ReloadStrategyAnnotations), + }) + } + + // Validate ArgoRolloutStrategy + switch c.ArgoRolloutStrategy { + case ArgoRolloutStrategyRestart, ArgoRolloutStrategyRollout: + // valid + case "": + c.ArgoRolloutStrategy = ArgoRolloutStrategyRollout + default: + errs = append(errs, ValidationError{ + Field: "ArgoRolloutStrategy", + Message: fmt.Sprintf("invalid value %q, must be %q or %q", c.ArgoRolloutStrategy, ArgoRolloutStrategyRestart, ArgoRolloutStrategyRollout), + }) + } + + // Validate LogLevel + switch strings.ToLower(c.LogLevel) { + case "trace", "debug", "info", "warn", "warning", "error", "fatal", "panic", "": + // valid + default: + errs = append(errs, ValidationError{ + Field: "LogLevel", + Message: fmt.Sprintf("invalid log level %q", c.LogLevel), + }) + } + + // Validate LogFormat + switch strings.ToLower(c.LogFormat) { + case "json", "": + // valid + default: + errs = append(errs, ValidationError{ + Field: "LogFormat", + Message: fmt.Sprintf("invalid log format %q, must be \"json\" or empty", c.LogFormat), + }) + } + + // Normalize IgnoredResources to lowercase for consistent comparison + 
c.IgnoredResources = normalizeToLower(c.IgnoredResources) + + // Normalize IgnoredWorkloads to lowercase + c.IgnoredWorkloads = normalizeToLower(c.IgnoredWorkloads) + + if len(errs) > 0 { + return errs + } + return nil +} + +// normalizeToLower converts all strings in the slice to lowercase and removes empty strings. +func normalizeToLower(items []string) []string { + if len(items) == 0 { + return items + } + result := make([]string, 0, len(items)) + for _, item := range items { + item = strings.TrimSpace(strings.ToLower(item)) + if item != "" { + result = append(result, item) + } + } + return result +} + +// ParseSelectors parses a slice of selector strings into label selectors. +func ParseSelectors(selectorStrings []string) ([]labels.Selector, error) { + if len(selectorStrings) == 0 { + return nil, nil + } + + selectors := make([]labels.Selector, 0, len(selectorStrings)) + for _, s := range selectorStrings { + s = strings.TrimSpace(s) + if s == "" { + continue + } + selector, err := labels.Parse(s) + if err != nil { + return nil, fmt.Errorf("invalid selector %q: %w", s, err) + } + selectors = append(selectors, selector) + } + return selectors, nil +} + +// MustParseSelectors parses selectors and panics on error. +// Use only when selectors are known to be valid (e.g., from validated config). +func MustParseSelectors(selectorStrings []string) []labels.Selector { + selectors, err := ParseSelectors(selectorStrings) + if err != nil { + panic(err) + } + return selectors +} diff --git a/internal/pkg/workload/cronjob.go b/internal/pkg/workload/cronjob.go new file mode 100644 index 000000000..42df8eca6 --- /dev/null +++ b/internal/pkg/workload/cronjob.go @@ -0,0 +1,208 @@ +package workload + +import ( + "context" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CronJobWorkload wraps a Kubernetes CronJob. 
+// Note: CronJobs have a special update mechanism - instead of updating the CronJob itself, +// Reloader creates a new Job from the CronJob's template. +type CronJobWorkload struct { + cronjob *batchv1.CronJob +} + +// NewCronJobWorkload creates a new CronJobWorkload. +func NewCronJobWorkload(c *batchv1.CronJob) *CronJobWorkload { + return &CronJobWorkload{cronjob: c} +} + +// Ensure CronJobWorkload implements WorkloadAccessor. +var _ WorkloadAccessor = (*CronJobWorkload)(nil) + +func (w *CronJobWorkload) Kind() Kind { + return KindCronJob +} + +func (w *CronJobWorkload) GetObject() client.Object { + return w.cronjob +} + +func (w *CronJobWorkload) GetName() string { + return w.cronjob.Name +} + +func (w *CronJobWorkload) GetNamespace() string { + return w.cronjob.Namespace +} + +func (w *CronJobWorkload) GetAnnotations() map[string]string { + return w.cronjob.Annotations +} + +// GetPodTemplateAnnotations returns annotations from the JobTemplate's pod template. +func (w *CronJobWorkload) GetPodTemplateAnnotations() map[string]string { + if w.cronjob.Spec.JobTemplate.Spec.Template.Annotations == nil { + w.cronjob.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) + } + return w.cronjob.Spec.JobTemplate.Spec.Template.Annotations +} + +func (w *CronJobWorkload) SetPodTemplateAnnotation(key, value string) { + if w.cronjob.Spec.JobTemplate.Spec.Template.Annotations == nil { + w.cronjob.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) + } + w.cronjob.Spec.JobTemplate.Spec.Template.Annotations[key] = value +} + +func (w *CronJobWorkload) GetContainers() []corev1.Container { + return w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers +} + +func (w *CronJobWorkload) SetContainers(containers []corev1.Container) { + w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers = containers +} + +func (w *CronJobWorkload) GetInitContainers() []corev1.Container { + return w.cronjob.Spec.JobTemplate.Spec.Template.Spec.InitContainers +} + 
+func (w *CronJobWorkload) SetInitContainers(containers []corev1.Container) { + w.cronjob.Spec.JobTemplate.Spec.Template.Spec.InitContainers = containers +} + +func (w *CronJobWorkload) GetVolumes() []corev1.Volume { + return w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Volumes +} + +// Update for CronJob is a no-op - use CreateJobFromCronJob instead. +// CronJobs trigger reloads by creating a new Job from their template. +func (w *CronJobWorkload) Update(ctx context.Context, c client.Client) error { + // CronJobs don't get updated directly - a new Job is created instead + // This is handled by the reload package's special CronJob logic + return nil +} + +func (w *CronJobWorkload) DeepCopy() Workload { + return &CronJobWorkload{cronjob: w.cronjob.DeepCopy()} +} + +func (w *CronJobWorkload) GetEnvFromSources() []corev1.EnvFromSource { + var sources []corev1.EnvFromSource + for _, container := range w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range w.cronjob.Spec.JobTemplate.Spec.Template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) 
+ } + return sources +} + +func (w *CronJobWorkload) UsesConfigMap(name string) bool { + spec := &w.cronjob.Spec.JobTemplate.Spec.Template.Spec + + // Check volumes + for _, vol := range spec.Volumes { + if vol.ConfigMap != nil && vol.ConfigMap.Name == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.ConfigMap != nil && source.ConfigMap.Name == name { + return true + } + } + } + } + + // Check containers + for _, container := range spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *CronJobWorkload) UsesSecret(name string) bool { + spec := &w.cronjob.Spec.JobTemplate.Spec.Template.Spec + + // Check volumes + for _, vol := range spec.Volumes { + if vol.Secret != nil && vol.Secret.SecretName == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.Secret != nil && source.Secret.Name == name { + return true + } + } + } + } + + // Check containers + for _, container := range spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && 
env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *CronJobWorkload) GetOwnerReferences() []metav1.OwnerReference { + return w.cronjob.OwnerReferences +} + +// GetCronJob returns the underlying CronJob for special handling. +func (w *CronJobWorkload) GetCronJob() *batchv1.CronJob { + return w.cronjob +} diff --git a/internal/pkg/workload/daemonset.go b/internal/pkg/workload/daemonset.go new file mode 100644 index 000000000..ca51f4b5d --- /dev/null +++ b/internal/pkg/workload/daemonset.go @@ -0,0 +1,192 @@ +package workload + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// DaemonSetWorkload wraps a Kubernetes DaemonSet. +type DaemonSetWorkload struct { + daemonset *appsv1.DaemonSet +} + +// NewDaemonSetWorkload creates a new DaemonSetWorkload. +func NewDaemonSetWorkload(d *appsv1.DaemonSet) *DaemonSetWorkload { + return &DaemonSetWorkload{daemonset: d} +} + +// Ensure DaemonSetWorkload implements WorkloadAccessor. 
+var _ WorkloadAccessor = (*DaemonSetWorkload)(nil) + +func (w *DaemonSetWorkload) Kind() Kind { + return KindDaemonSet +} + +func (w *DaemonSetWorkload) GetObject() client.Object { + return w.daemonset +} + +func (w *DaemonSetWorkload) GetName() string { + return w.daemonset.Name +} + +func (w *DaemonSetWorkload) GetNamespace() string { + return w.daemonset.Namespace +} + +func (w *DaemonSetWorkload) GetAnnotations() map[string]string { + return w.daemonset.Annotations +} + +func (w *DaemonSetWorkload) GetPodTemplateAnnotations() map[string]string { + if w.daemonset.Spec.Template.Annotations == nil { + w.daemonset.Spec.Template.Annotations = make(map[string]string) + } + return w.daemonset.Spec.Template.Annotations +} + +func (w *DaemonSetWorkload) SetPodTemplateAnnotation(key, value string) { + if w.daemonset.Spec.Template.Annotations == nil { + w.daemonset.Spec.Template.Annotations = make(map[string]string) + } + w.daemonset.Spec.Template.Annotations[key] = value +} + +func (w *DaemonSetWorkload) GetContainers() []corev1.Container { + return w.daemonset.Spec.Template.Spec.Containers +} + +func (w *DaemonSetWorkload) SetContainers(containers []corev1.Container) { + w.daemonset.Spec.Template.Spec.Containers = containers +} + +func (w *DaemonSetWorkload) GetInitContainers() []corev1.Container { + return w.daemonset.Spec.Template.Spec.InitContainers +} + +func (w *DaemonSetWorkload) SetInitContainers(containers []corev1.Container) { + w.daemonset.Spec.Template.Spec.InitContainers = containers +} + +func (w *DaemonSetWorkload) GetVolumes() []corev1.Volume { + return w.daemonset.Spec.Template.Spec.Volumes +} + +func (w *DaemonSetWorkload) Update(ctx context.Context, c client.Client) error { + return c.Update(ctx, w.daemonset) +} + +func (w *DaemonSetWorkload) DeepCopy() Workload { + return &DaemonSetWorkload{daemonset: w.daemonset.DeepCopy()} +} + +func (w *DaemonSetWorkload) GetEnvFromSources() []corev1.EnvFromSource { + var sources []corev1.EnvFromSource + for _, 
container := range w.daemonset.Spec.Template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range w.daemonset.Spec.Template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) + } + return sources +} + +func (w *DaemonSetWorkload) UsesConfigMap(name string) bool { + // Check volumes + for _, vol := range w.daemonset.Spec.Template.Spec.Volumes { + if vol.ConfigMap != nil && vol.ConfigMap.Name == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.ConfigMap != nil && source.ConfigMap.Name == name { + return true + } + } + } + } + + // Check envFrom + for _, container := range w.daemonset.Spec.Template.Spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range w.daemonset.Spec.Template.Spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *DaemonSetWorkload) UsesSecret(name string) bool { + // Check volumes + for _, vol := range w.daemonset.Spec.Template.Spec.Volumes { + if vol.Secret != nil && vol.Secret.SecretName == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.Secret != nil && source.Secret.Name == name { + return true + } + } + } + } + + // Check envFrom + for _, container := range w.daemonset.Spec.Template.Spec.Containers { + for 
_, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range w.daemonset.Spec.Template.Spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *DaemonSetWorkload) GetOwnerReferences() []metav1.OwnerReference { + return w.daemonset.OwnerReferences +} diff --git a/internal/pkg/workload/deployment.go b/internal/pkg/workload/deployment.go new file mode 100644 index 000000000..0ddb5b45f --- /dev/null +++ b/internal/pkg/workload/deployment.go @@ -0,0 +1,194 @@ +package workload + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// DeploymentWorkload wraps a Kubernetes Deployment. +type DeploymentWorkload struct { + deployment *appsv1.Deployment +} + +// NewDeploymentWorkload creates a new DeploymentWorkload. +func NewDeploymentWorkload(d *appsv1.Deployment) *DeploymentWorkload { + return &DeploymentWorkload{deployment: d} +} + +// Ensure DeploymentWorkload implements WorkloadAccessor. 
+var _ WorkloadAccessor = (*DeploymentWorkload)(nil) + +func (w *DeploymentWorkload) Kind() Kind { + return KindDeployment +} + +func (w *DeploymentWorkload) GetObject() client.Object { + return w.deployment +} + +func (w *DeploymentWorkload) GetName() string { + return w.deployment.Name +} + +func (w *DeploymentWorkload) GetNamespace() string { + return w.deployment.Namespace +} + +func (w *DeploymentWorkload) GetAnnotations() map[string]string { + return w.deployment.Annotations +} + +func (w *DeploymentWorkload) GetPodTemplateAnnotations() map[string]string { + if w.deployment.Spec.Template.Annotations == nil { + w.deployment.Spec.Template.Annotations = make(map[string]string) + } + return w.deployment.Spec.Template.Annotations +} + +func (w *DeploymentWorkload) SetPodTemplateAnnotation(key, value string) { + if w.deployment.Spec.Template.Annotations == nil { + w.deployment.Spec.Template.Annotations = make(map[string]string) + } + w.deployment.Spec.Template.Annotations[key] = value +} + +func (w *DeploymentWorkload) GetContainers() []corev1.Container { + return w.deployment.Spec.Template.Spec.Containers +} + +func (w *DeploymentWorkload) SetContainers(containers []corev1.Container) { + w.deployment.Spec.Template.Spec.Containers = containers +} + +func (w *DeploymentWorkload) GetInitContainers() []corev1.Container { + return w.deployment.Spec.Template.Spec.InitContainers +} + +func (w *DeploymentWorkload) SetInitContainers(containers []corev1.Container) { + w.deployment.Spec.Template.Spec.InitContainers = containers +} + +func (w *DeploymentWorkload) GetVolumes() []corev1.Volume { + return w.deployment.Spec.Template.Spec.Volumes +} + +func (w *DeploymentWorkload) Update(ctx context.Context, c client.Client) error { + return c.Update(ctx, w.deployment) +} + +func (w *DeploymentWorkload) DeepCopy() Workload { + return &DeploymentWorkload{deployment: w.deployment.DeepCopy()} +} + +func (w *DeploymentWorkload) GetEnvFromSources() []corev1.EnvFromSource { + var 
sources []corev1.EnvFromSource + for _, container := range w.deployment.Spec.Template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range w.deployment.Spec.Template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) + } + return sources +} + +func (w *DeploymentWorkload) UsesConfigMap(name string) bool { + // Check volumes + for _, vol := range w.deployment.Spec.Template.Spec.Volumes { + if vol.ConfigMap != nil && vol.ConfigMap.Name == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.ConfigMap != nil && source.ConfigMap.Name == name { + return true + } + } + } + } + + // Check envFrom + for _, container := range w.deployment.Spec.Template.Spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + // Check individual env vars + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range w.deployment.Spec.Template.Spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *DeploymentWorkload) UsesSecret(name string) bool { + // Check volumes + for _, vol := range w.deployment.Spec.Template.Spec.Volumes { + if vol.Secret != nil && vol.Secret.SecretName == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.Secret != nil && source.Secret.Name == name { + return true + } + } + } + } + + // Check envFrom 
+ for _, container := range w.deployment.Spec.Template.Spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + // Check individual env vars + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range w.deployment.Spec.Template.Spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *DeploymentWorkload) GetOwnerReferences() []metav1.OwnerReference { + return w.deployment.OwnerReferences +} diff --git a/internal/pkg/workload/interface.go b/internal/pkg/workload/interface.go new file mode 100644 index 000000000..6f805af40 --- /dev/null +++ b/internal/pkg/workload/interface.go @@ -0,0 +1,120 @@ +// Package workload provides an abstraction layer for Kubernetes workload types. +// It allows uniform handling of Deployments, DaemonSets, StatefulSets, Jobs, CronJobs, and Argo Rollouts. +// +// Note: Jobs and CronJobs have special update mechanisms: +// - Job: deleted and recreated with the same spec +// - CronJob: a new Job is created from the CronJob's template +package workload + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Kind represents the type of workload. 
+type Kind string + +const ( + KindDeployment Kind = "Deployment" + KindDaemonSet Kind = "DaemonSet" + KindStatefulSet Kind = "StatefulSet" + KindArgoRollout Kind = "Rollout" + KindJob Kind = "Job" + KindCronJob Kind = "CronJob" +) + +// Workload provides a uniform interface for managing Kubernetes workloads. +// All implementations must be safe for concurrent use. +type Workload interface { + // Kind returns the workload type. + Kind() Kind + + // GetObject returns the underlying Kubernetes object. + GetObject() client.Object + + // GetName returns the workload name. + GetName() string + + // GetNamespace returns the workload namespace. + GetNamespace() string + + // GetAnnotations returns the workload's annotations. + GetAnnotations() map[string]string + + // GetPodTemplateAnnotations returns annotations from the pod template spec. + GetPodTemplateAnnotations() map[string]string + + // SetPodTemplateAnnotation sets an annotation on the pod template. + SetPodTemplateAnnotation(key, value string) + + // GetContainers returns all containers (including init containers). + GetContainers() []corev1.Container + + // SetContainers updates the containers. + SetContainers(containers []corev1.Container) + + // GetInitContainers returns all init containers. + GetInitContainers() []corev1.Container + + // SetInitContainers updates the init containers. + SetInitContainers(containers []corev1.Container) + + // GetVolumes returns the pod template volumes. + GetVolumes() []corev1.Volume + + // Update persists changes to the workload. + Update(ctx context.Context, c client.Client) error + + // DeepCopy returns a deep copy of the workload. + DeepCopy() Workload +} + +// Accessor provides read-only access to workload configuration. +// Use this interface when you only need to inspect workload state. +type Accessor interface { + // Kind returns the workload type. + Kind() Kind + + // GetName returns the workload name. 
+ GetName() string + + // GetNamespace returns the workload namespace. + GetNamespace() string + + // GetAnnotations returns the workload's annotations. + GetAnnotations() map[string]string + + // GetPodTemplateAnnotations returns annotations from the pod template spec. + GetPodTemplateAnnotations() map[string]string + + // GetContainers returns all containers (including init containers). + GetContainers() []corev1.Container + + // GetInitContainers returns all init containers. + GetInitContainers() []corev1.Container + + // GetVolumes returns the pod template volumes. + GetVolumes() []corev1.Volume + + // GetEnvFromSources returns all envFrom sources from all containers. + GetEnvFromSources() []corev1.EnvFromSource + + // UsesConfigMap checks if the workload uses a specific ConfigMap. + UsesConfigMap(name string) bool + + // UsesSecret checks if the workload uses a specific Secret. + UsesSecret(name string) bool + + // GetOwnerReferences returns the owner references of the workload. + GetOwnerReferences() []metav1.OwnerReference +} + +// WorkloadAccessor provides both Workload and Accessor interfaces. +// This is the primary type returned by the registry. +type WorkloadAccessor interface { + Workload + Accessor +} diff --git a/internal/pkg/workload/job.go b/internal/pkg/workload/job.go new file mode 100644 index 000000000..85b01e9b3 --- /dev/null +++ b/internal/pkg/workload/job.go @@ -0,0 +1,203 @@ +package workload + +import ( + "context" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// JobWorkload wraps a Kubernetes Job. +// Note: Jobs have a special update mechanism - instead of updating the Job, +// Reloader deletes and recreates it with the same spec. +type JobWorkload struct { + job *batchv1.Job +} + +// NewJobWorkload creates a new JobWorkload. 
+func NewJobWorkload(j *batchv1.Job) *JobWorkload { + return &JobWorkload{job: j} +} + +// Ensure JobWorkload implements WorkloadAccessor. +var _ WorkloadAccessor = (*JobWorkload)(nil) + +func (w *JobWorkload) Kind() Kind { + return KindJob +} + +func (w *JobWorkload) GetObject() client.Object { + return w.job +} + +func (w *JobWorkload) GetName() string { + return w.job.Name +} + +func (w *JobWorkload) GetNamespace() string { + return w.job.Namespace +} + +func (w *JobWorkload) GetAnnotations() map[string]string { + return w.job.Annotations +} + +func (w *JobWorkload) GetPodTemplateAnnotations() map[string]string { + if w.job.Spec.Template.Annotations == nil { + w.job.Spec.Template.Annotations = make(map[string]string) + } + return w.job.Spec.Template.Annotations +} + +func (w *JobWorkload) SetPodTemplateAnnotation(key, value string) { + if w.job.Spec.Template.Annotations == nil { + w.job.Spec.Template.Annotations = make(map[string]string) + } + w.job.Spec.Template.Annotations[key] = value +} + +func (w *JobWorkload) GetContainers() []corev1.Container { + return w.job.Spec.Template.Spec.Containers +} + +func (w *JobWorkload) SetContainers(containers []corev1.Container) { + w.job.Spec.Template.Spec.Containers = containers +} + +func (w *JobWorkload) GetInitContainers() []corev1.Container { + return w.job.Spec.Template.Spec.InitContainers +} + +func (w *JobWorkload) SetInitContainers(containers []corev1.Container) { + w.job.Spec.Template.Spec.InitContainers = containers +} + +func (w *JobWorkload) GetVolumes() []corev1.Volume { + return w.job.Spec.Template.Spec.Volumes +} + +// Update for Job is a no-op - use RecreateJob instead. +// Jobs trigger reloads by being deleted and recreated. 
+func (w *JobWorkload) Update(ctx context.Context, c client.Client) error { + // Jobs don't get updated directly - they are deleted and recreated + // This is handled by the reload package's special Job logic + return nil +} + +func (w *JobWorkload) DeepCopy() Workload { + return &JobWorkload{job: w.job.DeepCopy()} +} + +func (w *JobWorkload) GetEnvFromSources() []corev1.EnvFromSource { + var sources []corev1.EnvFromSource + for _, container := range w.job.Spec.Template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range w.job.Spec.Template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) + } + return sources +} + +func (w *JobWorkload) UsesConfigMap(name string) bool { + // Check volumes + for _, vol := range w.job.Spec.Template.Spec.Volumes { + if vol.ConfigMap != nil && vol.ConfigMap.Name == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.ConfigMap != nil && source.ConfigMap.Name == name { + return true + } + } + } + } + + // Check containers + for _, container := range w.job.Spec.Template.Spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range w.job.Spec.Template.Spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *JobWorkload) UsesSecret(name string) bool { + // Check volumes + for 
_, vol := range w.job.Spec.Template.Spec.Volumes { + if vol.Secret != nil && vol.Secret.SecretName == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.Secret != nil && source.Secret.Name == name { + return true + } + } + } + } + + // Check containers + for _, container := range w.job.Spec.Template.Spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range w.job.Spec.Template.Spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *JobWorkload) GetOwnerReferences() []metav1.OwnerReference { + return w.job.OwnerReferences +} + +// GetJob returns the underlying Job for special handling. +func (w *JobWorkload) GetJob() *batchv1.Job { + return w.job +} diff --git a/internal/pkg/workload/registry.go b/internal/pkg/workload/registry.go new file mode 100644 index 000000000..55f55b2d6 --- /dev/null +++ b/internal/pkg/workload/registry.go @@ -0,0 +1,74 @@ +package workload + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Registry provides factory methods for creating Workload instances. +type Registry struct { + argoRolloutsEnabled bool +} + +// NewRegistry creates a new workload registry. 
+func NewRegistry(argoRolloutsEnabled bool) *Registry { + return &Registry{ + argoRolloutsEnabled: argoRolloutsEnabled, + } +} + +// SupportedKinds returns all supported workload kinds. +func (r *Registry) SupportedKinds() []Kind { + kinds := []Kind{ + KindDeployment, + KindDaemonSet, + KindStatefulSet, + KindJob, + KindCronJob, + } + if r.argoRolloutsEnabled { + kinds = append(kinds, KindArgoRollout) + } + return kinds +} + +// FromObject creates a WorkloadAccessor from a Kubernetes object. +func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { + switch o := obj.(type) { + case *appsv1.Deployment: + return NewDeploymentWorkload(o), nil + case *appsv1.DaemonSet: + return NewDaemonSetWorkload(o), nil + case *appsv1.StatefulSet: + return NewStatefulSetWorkload(o), nil + case *batchv1.Job: + return NewJobWorkload(o), nil + case *batchv1.CronJob: + return NewCronJobWorkload(o), nil + default: + return nil, fmt.Errorf("unsupported object type: %T", obj) + } +} + +// KindFromString converts a string to a Kind. 
+func KindFromString(s string) (Kind, error) { + switch s { + case "Deployment", "deployment", "deployments": + return KindDeployment, nil + case "DaemonSet", "daemonset", "daemonsets": + return KindDaemonSet, nil + case "StatefulSet", "statefulset", "statefulsets": + return KindStatefulSet, nil + case "Rollout", "rollout", "rollouts": + return KindArgoRollout, nil + case "Job", "job", "jobs": + return KindJob, nil + case "CronJob", "cronjob", "cronjobs": + return KindCronJob, nil + default: + return "", fmt.Errorf("unknown workload kind: %s", s) + } +} diff --git a/internal/pkg/workload/statefulset.go b/internal/pkg/workload/statefulset.go new file mode 100644 index 000000000..003cef3d3 --- /dev/null +++ b/internal/pkg/workload/statefulset.go @@ -0,0 +1,192 @@ +package workload + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// StatefulSetWorkload wraps a Kubernetes StatefulSet. +type StatefulSetWorkload struct { + statefulset *appsv1.StatefulSet +} + +// NewStatefulSetWorkload creates a new StatefulSetWorkload. +func NewStatefulSetWorkload(s *appsv1.StatefulSet) *StatefulSetWorkload { + return &StatefulSetWorkload{statefulset: s} +} + +// Ensure StatefulSetWorkload implements WorkloadAccessor. 
+var _ WorkloadAccessor = (*StatefulSetWorkload)(nil) + +func (w *StatefulSetWorkload) Kind() Kind { + return KindStatefulSet +} + +func (w *StatefulSetWorkload) GetObject() client.Object { + return w.statefulset +} + +func (w *StatefulSetWorkload) GetName() string { + return w.statefulset.Name +} + +func (w *StatefulSetWorkload) GetNamespace() string { + return w.statefulset.Namespace +} + +func (w *StatefulSetWorkload) GetAnnotations() map[string]string { + return w.statefulset.Annotations +} + +func (w *StatefulSetWorkload) GetPodTemplateAnnotations() map[string]string { + if w.statefulset.Spec.Template.Annotations == nil { + w.statefulset.Spec.Template.Annotations = make(map[string]string) + } + return w.statefulset.Spec.Template.Annotations +} + +func (w *StatefulSetWorkload) SetPodTemplateAnnotation(key, value string) { + if w.statefulset.Spec.Template.Annotations == nil { + w.statefulset.Spec.Template.Annotations = make(map[string]string) + } + w.statefulset.Spec.Template.Annotations[key] = value +} + +func (w *StatefulSetWorkload) GetContainers() []corev1.Container { + return w.statefulset.Spec.Template.Spec.Containers +} + +func (w *StatefulSetWorkload) SetContainers(containers []corev1.Container) { + w.statefulset.Spec.Template.Spec.Containers = containers +} + +func (w *StatefulSetWorkload) GetInitContainers() []corev1.Container { + return w.statefulset.Spec.Template.Spec.InitContainers +} + +func (w *StatefulSetWorkload) SetInitContainers(containers []corev1.Container) { + w.statefulset.Spec.Template.Spec.InitContainers = containers +} + +func (w *StatefulSetWorkload) GetVolumes() []corev1.Volume { + return w.statefulset.Spec.Template.Spec.Volumes +} + +func (w *StatefulSetWorkload) Update(ctx context.Context, c client.Client) error { + return c.Update(ctx, w.statefulset) +} + +func (w *StatefulSetWorkload) DeepCopy() Workload { + return &StatefulSetWorkload{statefulset: w.statefulset.DeepCopy()} +} + +func (w *StatefulSetWorkload) GetEnvFromSources() 
[]corev1.EnvFromSource { + var sources []corev1.EnvFromSource + for _, container := range w.statefulset.Spec.Template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range w.statefulset.Spec.Template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) + } + return sources +} + +func (w *StatefulSetWorkload) UsesConfigMap(name string) bool { + // Check volumes + for _, vol := range w.statefulset.Spec.Template.Spec.Volumes { + if vol.ConfigMap != nil && vol.ConfigMap.Name == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.ConfigMap != nil && source.ConfigMap.Name == name { + return true + } + } + } + } + + // Check envFrom + for _, container := range w.statefulset.Spec.Template.Spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range w.statefulset.Spec.Template.Spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *StatefulSetWorkload) UsesSecret(name string) bool { + // Check volumes + for _, vol := range w.statefulset.Spec.Template.Spec.Volumes { + if vol.Secret != nil && vol.Secret.SecretName == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.Secret != nil && source.Secret.Name == name { + return true + } + } + } + } + + // Check 
envFrom + for _, container := range w.statefulset.Spec.Template.Spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range w.statefulset.Spec.Template.Spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *StatefulSetWorkload) GetOwnerReferences() []metav1.OwnerReference { + return w.statefulset.OwnerReferences +} From dce45a454a2d15f58f529c9fddd340d9e5545398 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:51 +0100 Subject: [PATCH 02/35] refactor: migrate reloader command to use new config package --- internal/pkg/cmd/reloader.go | 114 +++++++++++++++++++++++++--------- internal/pkg/config/config.go | 4 ++ internal/pkg/config/flags.go | 52 +++++++++------- 3 files changed, 118 insertions(+), 52 deletions(-) diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go index f54d75717..3b86bd20b 100644 --- a/internal/pkg/cmd/reloader.go +++ b/internal/pkg/cmd/reloader.go @@ -2,13 +2,12 @@ package cmd import ( "context" - "errors" "fmt" "net/http" _ "net/http/pprof" "os" - "strings" + "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/constants" "github.com/stakater/Reloader/internal/pkg/leadership" @@ -24,8 +23,15 @@ import ( "github.com/stakater/Reloader/pkg/kube" ) +// cfg holds the configuration for this 
reloader instance. +// It is populated by flag parsing and used throughout the application. +var cfg *config.Config + // NewReloaderCommand starts the reloader controller func NewReloaderCommand() *cobra.Command { + // Create config with defaults + cfg = config.NewDefault() + cmd := &cobra.Command{ Use: "reloader", Short: "A watcher for your Kubernetes cluster", @@ -33,29 +39,29 @@ func NewReloaderCommand() *cobra.Command { Run: startReloader, } - // options - util.ConfigureReloaderFlags(cmd) + // Bind flags to the new config package + config.BindFlags(cmd.PersistentFlags(), cfg) return cmd } func validateFlags(*cobra.Command, []string) error { - // Ensure the reload strategy is one of the following... - var validReloadStrategy bool - valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy} - for _, s := range valid { - if s == options.ReloadStrategy { - validReloadStrategy = true - } + // Apply post-parse flag processing (converts string flags to proper types) + if err := config.ApplyFlags(cfg); err != nil { + return fmt.Errorf("applying flags: %w", err) } - if !validReloadStrategy { - err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", ")) - return errors.New(err) + // Validate the configuration + if err := cfg.Validate(); err != nil { + return fmt.Errorf("validating config: %w", err) } + // Sync new config to old options package for backward compatibility + // This bridge allows existing code to keep working during migration + syncConfigToOptions(cfg) + // Validate that HA options are correct - if options.EnableHA { + if cfg.EnableHA { if err := validateHAEnvs(); err != nil { return err } @@ -64,6 +70,58 @@ func validateFlags(*cobra.Command, []string) error { return nil } +// syncConfigToOptions bridges the new Config struct to the old options package. +// This allows existing code to continue working during the migration period. 
+// TODO: Remove this once all code is migrated to use Config directly. +func syncConfigToOptions(cfg *config.Config) { + options.AutoReloadAll = cfg.AutoReloadAll + options.ConfigmapUpdateOnChangeAnnotation = cfg.Annotations.ConfigmapReload + options.SecretUpdateOnChangeAnnotation = cfg.Annotations.SecretReload + options.ReloaderAutoAnnotation = cfg.Annotations.Auto + options.ConfigmapReloaderAutoAnnotation = cfg.Annotations.ConfigmapAuto + options.SecretReloaderAutoAnnotation = cfg.Annotations.SecretAuto + options.IgnoreResourceAnnotation = cfg.Annotations.Ignore + options.ConfigmapExcludeReloaderAnnotation = cfg.Annotations.ConfigmapExclude + options.SecretExcludeReloaderAnnotation = cfg.Annotations.SecretExclude + options.AutoSearchAnnotation = cfg.Annotations.Search + options.SearchMatchAnnotation = cfg.Annotations.Match + options.RolloutStrategyAnnotation = cfg.Annotations.RolloutStrategy + options.PauseDeploymentAnnotation = cfg.Annotations.PausePeriod + options.PauseDeploymentTimeAnnotation = cfg.Annotations.PausedAt + options.LogFormat = cfg.LogFormat + options.LogLevel = cfg.LogLevel + options.WebhookUrl = cfg.WebhookURL + options.ResourcesToIgnore = cfg.IgnoredResources + options.WorkloadTypesToIgnore = cfg.IgnoredWorkloads + options.NamespacesToIgnore = cfg.IgnoredNamespaces + options.NamespaceSelectors = cfg.NamespaceSelectorStrings + options.ResourceSelectors = cfg.ResourceSelectorStrings + options.EnableHA = cfg.EnableHA + options.SyncAfterRestart = cfg.SyncAfterRestart + options.EnablePProf = cfg.EnablePProf + options.PProfAddr = cfg.PProfAddr + + // Convert ReloadStrategy to string for old options + options.ReloadStrategy = string(cfg.ReloadStrategy) + + // Convert bool flags to string for old options (IsArgoRollouts, ReloadOnCreate, ReloadOnDelete) + if cfg.ArgoRolloutsEnabled { + options.IsArgoRollouts = "true" + } else { + options.IsArgoRollouts = "false" + } + if cfg.ReloadOnCreate { + options.ReloadOnCreate = "true" + } else { + 
options.ReloadOnCreate = "false" + } + if cfg.ReloadOnDelete { + options.ReloadOnDelete = "true" + } else { + options.ReloadOnDelete = "false" + } +} + func configureLogging(logFormat, logLevel string) error { switch logFormat { case "json": @@ -104,7 +162,7 @@ func getHAEnvs() (string, string) { func startReloader(cmd *cobra.Command, args []string) { common.GetCommandLineOptions() - err := configureLogging(options.LogFormat, options.LogLevel) + err := configureLogging(cfg.LogFormat, cfg.LogLevel) if err != nil { logrus.Warn(err) } @@ -124,12 +182,10 @@ func startReloader(cmd *cobra.Command, args []string) { logrus.Fatal(err) } - ignoredResourcesList, err := util.GetIgnoredResourcesList() - if err != nil { - logrus.Fatal(err) - } + // Use config's IgnoredResources (already validated and normalized to lowercase) + ignoredResourcesList := util.List(cfg.IgnoredResources) - ignoredNamespacesList := options.NamespacesToIgnore + ignoredNamespacesList := cfg.IgnoredNamespaces namespaceLabelSelector := "" if isGlobal { @@ -152,7 +208,7 @@ func startReloader(cmd *cobra.Command, args []string) { logrus.Warnf("resource-label-selector is set, will only detect changes on resources with these labels: %s.", resourceLabelSelector) } - if options.WebhookUrl != "" { + if cfg.WebhookURL != "" { logrus.Warnf("webhook-url is set, will only send webhook, no resources will be reloaded") } @@ -171,8 +227,8 @@ func startReloader(cmd *cobra.Command, args []string) { controllers = append(controllers, c) - // If HA is enabled we only run the controller when - if options.EnableHA { + // If HA is enabled we only run the controller when we're the leader + if cfg.EnableHA { continue } // Now let's start the controller @@ -183,7 +239,7 @@ func startReloader(cmd *cobra.Command, args []string) { } // Run leadership election - if options.EnableHA { + if cfg.EnableHA { podName, podNamespace := getHAEnvs() lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, 
podNamespace) ctx, cancel := context.WithCancel(context.Background()) @@ -193,17 +249,17 @@ func startReloader(cmd *cobra.Command, args []string) { common.PublishMetaInfoConfigmap(clientset) - if options.EnablePProf { + if cfg.EnablePProf { go startPProfServer() } leadership.SetupLivenessEndpoint() - logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil)) + logrus.Fatal(http.ListenAndServe(cfg.MetricsAddr, nil)) } func startPProfServer() { - logrus.Infof("Starting pprof server on %s", options.PProfAddr) - if err := http.ListenAndServe(options.PProfAddr, nil); err != nil { + logrus.Infof("Starting pprof server on %s", cfg.PProfAddr) + if err := http.ListenAndServe(cfg.PProfAddr, nil); err != nil { logrus.Errorf("Failed to start pprof server: %v", err) } } diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 7a228284c..8861fb5be 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -71,6 +71,10 @@ type Config struct { NamespaceSelectors []labels.Selector ResourceSelectors []labels.Selector + // Raw selector strings (for backward compatibility with old code) + NamespaceSelectorStrings []string + ResourceSelectorStrings []string + // Logging configuration LogFormat string // "json" or "" for default LogLevel string // trace, debug, info, warning, error, fatal, panic diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index e43637649..bf242bca3 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -32,8 +32,8 @@ func BindFlags(fs *pflag.FlagSet, cfg *Config) { fs.StringVar((*string)(&cfg.ReloadStrategy), "reload-strategy", string(cfg.ReloadStrategy), "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'") - // Argo Rollouts - fs.StringVar(&fv.isArgoRollouts, "is-argo-rollouts", "false", + // Argo Rollouts (note: capital A in Argo for backward compatibility) + fs.StringVar(&fv.isArgoRollouts, 
"is-Argo-Rollouts", "false", "Enable Argo Rollouts support (true/false)") // Event watching @@ -54,11 +54,11 @@ func BindFlags(fs *pflag.FlagSet, cfg *Config) { fs.StringVar(&cfg.WebhookURL, "webhook-url", cfg.WebhookURL, "URL to send notification instead of triggering reload") - // Filtering - resources + // Filtering - resources (use StringVar not StringSliceVar for simpler parsing) fs.StringVar(&fv.ignoredResources, "resources-to-ignore", "", - "Comma-separated list of configmap/secret names to ignore (case-insensitive)") - fs.StringVar(&fv.ignoredWorkloads, "workload-types-to-ignore", "", - "Comma-separated list of workload types to ignore (Deployment, DaemonSet, StatefulSet)") + "Comma-separated list of resources to ignore (valid options: 'configMaps' or 'secrets')") + fs.StringVar(&fv.ignoredWorkloads, "ignored-workload-types", "", + "Comma-separated list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)") fs.StringVar(&fv.ignoredNamespaces, "namespaces-to-ignore", "", "Comma-separated list of namespaces to ignore") @@ -84,23 +84,25 @@ func BindFlags(fs *pflag.FlagSet, cfg *Config) { fs.StringVar(&cfg.PProfAddr, "pprof-addr", cfg.PProfAddr, "Address for pprof server") - // Annotation customization + // Annotation customization (flag names match v1 for backward compatibility) fs.StringVar(&cfg.Annotations.Auto, "auto-annotation", cfg.Annotations.Auto, - "Custom annotation for auto-reload") + "Annotation to detect changes in secrets/configmaps") fs.StringVar(&cfg.Annotations.ConfigmapAuto, "configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, - "Custom annotation for configmap auto-reload") + "Annotation to detect changes in configmaps") fs.StringVar(&cfg.Annotations.SecretAuto, "secret-auto-annotation", cfg.Annotations.SecretAuto, - "Custom annotation for secret auto-reload") - fs.StringVar(&cfg.Annotations.ConfigmapReload, "configmap-reload-annotation", cfg.Annotations.ConfigmapReload, - "Custom annotation for configmap reload") - 
fs.StringVar(&cfg.Annotations.SecretReload, "secret-reload-annotation", cfg.Annotations.SecretReload, - "Custom annotation for secret reload") - fs.StringVar(&cfg.Annotations.Ignore, "ignore-annotation", cfg.Annotations.Ignore, - "Custom annotation for ignoring resources") - fs.StringVar(&cfg.Annotations.Search, "search-annotation", cfg.Annotations.Search, - "Custom annotation for search-based matching") - fs.StringVar(&cfg.Annotations.Match, "match-annotation", cfg.Annotations.Match, - "Custom annotation for match-based matching") + "Annotation to detect changes in secrets") + fs.StringVar(&cfg.Annotations.ConfigmapReload, "configmap-annotation", cfg.Annotations.ConfigmapReload, + "Annotation to detect changes in configmaps, specified by name") + fs.StringVar(&cfg.Annotations.SecretReload, "secret-annotation", cfg.Annotations.SecretReload, + "Annotation to detect changes in secrets, specified by name") + fs.StringVar(&cfg.Annotations.Search, "auto-search-annotation", cfg.Annotations.Search, + "Annotation to detect changes in configmaps or secrets tagged with special match annotation") + fs.StringVar(&cfg.Annotations.Match, "search-match-annotation", cfg.Annotations.Match, + "Annotation to mark secrets or configmaps to match the search") + fs.StringVar(&cfg.Annotations.PausePeriod, "pause-deployment-annotation", cfg.Annotations.PausePeriod, + "Annotation to define the time period to pause a deployment after a configmap/secret change") + fs.StringVar(&cfg.Annotations.PausedAt, "pause-deployment-time-annotation", cfg.Annotations.PausedAt, + "Annotation to indicate when a deployment was paused by Reloader") // Watched namespace (for single-namespace mode) fs.StringVar(&cfg.WatchedNamespace, "watch-namespace", cfg.WatchedNamespace, @@ -120,13 +122,17 @@ func ApplyFlags(cfg *Config) error { cfg.IgnoredWorkloads = splitAndTrim(fv.ignoredWorkloads) cfg.IgnoredNamespaces = splitAndTrim(fv.ignoredNamespaces) - // Parse selectors + // Store raw selector strings (for backward 
compatibility) + cfg.NamespaceSelectorStrings = splitAndTrim(fv.namespaceSelectors) + cfg.ResourceSelectorStrings = splitAndTrim(fv.resourceSelectors) + + // Parse selectors into labels.Selector var err error - cfg.NamespaceSelectors, err = ParseSelectors(splitAndTrim(fv.namespaceSelectors)) + cfg.NamespaceSelectors, err = ParseSelectors(cfg.NamespaceSelectorStrings) if err != nil { return err } - cfg.ResourceSelectors, err = ParseSelectors(splitAndTrim(fv.resourceSelectors)) + cfg.ResourceSelectors, err = ParseSelectors(cfg.ResourceSelectorStrings) if err != nil { return err } From 94e3fcd702b635187e6dd56cd1a6919312819ae0 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:51 +0100 Subject: [PATCH 03/35] feat: Add reload package with core matching and strategy logic --- internal/pkg/reload/hasher.go | 85 +++++++ internal/pkg/reload/matcher.go | 320 ++++++++++++++++++++++++ internal/pkg/reload/predicate.go | 146 +++++++++++ internal/pkg/reload/service.go | 401 +++++++++++++++++++++++++++++++ internal/pkg/reload/strategy.go | 203 ++++++++++++++++ 5 files changed, 1155 insertions(+) create mode 100644 internal/pkg/reload/hasher.go create mode 100644 internal/pkg/reload/matcher.go create mode 100644 internal/pkg/reload/predicate.go create mode 100644 internal/pkg/reload/service.go create mode 100644 internal/pkg/reload/strategy.go diff --git a/internal/pkg/reload/hasher.go b/internal/pkg/reload/hasher.go new file mode 100644 index 000000000..6c1b1613a --- /dev/null +++ b/internal/pkg/reload/hasher.go @@ -0,0 +1,85 @@ +// Package reload provides core reload logic for ConfigMaps and Secrets. +package reload + +import ( + "crypto/sha1" + "encoding/base64" + "fmt" + "io" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +// Hasher computes content hashes for ConfigMaps and Secrets. +// The hash is used to detect changes and trigger workload reloads. 
+type Hasher struct{} + +// NewHasher creates a new Hasher instance. +func NewHasher() *Hasher { + return &Hasher{} +} + +// HashConfigMap computes a SHA1 hash of the ConfigMap's data and binaryData. +// The hash is deterministic - same content always produces the same hash. +func (h *Hasher) HashConfigMap(cm *corev1.ConfigMap) string { + if cm == nil { + return h.computeSHA("") + } + return h.hashConfigMapData(cm.Data, cm.BinaryData) +} + +// HashSecret computes a SHA1 hash of the Secret's data. +// The hash is deterministic - same content always produces the same hash. +func (h *Hasher) HashSecret(secret *corev1.Secret) string { + if secret == nil { + return h.computeSHA("") + } + return h.hashSecretData(secret.Data) +} + +// hashConfigMapData computes a hash from ConfigMap data and binary data. +// Keys are sorted to ensure deterministic output. +func (h *Hasher) hashConfigMapData(data map[string]string, binaryData map[string][]byte) string { + values := make([]string, 0, len(data)+len(binaryData)) + + for k, v := range data { + values = append(values, k+"="+v) + } + + for k, v := range binaryData { + // Binary data is base64 encoded for consistent hashing + values = append(values, k+"="+base64.StdEncoding.EncodeToString(v)) + } + + sort.Strings(values) + return h.computeSHA(strings.Join(values, ";")) +} + +// hashSecretData computes a hash from Secret data. +// Keys are sorted to ensure deterministic output. +func (h *Hasher) hashSecretData(data map[string][]byte) string { + values := make([]string, 0, len(data)) + + for k, v := range data { + // Secret data is stored as raw bytes, not base64 encoded + values = append(values, k+"="+string(v)) + } + + sort.Strings(values) + return h.computeSHA(strings.Join(values, ";")) +} + +// computeSHA generates a SHA1 hash from a string. 
+func (h *Hasher) computeSHA(data string) string { + hasher := sha1.New() + _, _ = io.WriteString(hasher, data) + return fmt.Sprintf("%x", hasher.Sum(nil)) +} + +// EmptyHash returns the hash of empty content. +// This is useful for comparison when resources are deleted. +func (h *Hasher) EmptyHash() string { + return h.computeSHA("") +} diff --git a/internal/pkg/reload/matcher.go b/internal/pkg/reload/matcher.go new file mode 100644 index 000000000..51bd7d6c5 --- /dev/null +++ b/internal/pkg/reload/matcher.go @@ -0,0 +1,320 @@ +package reload + +import ( + "regexp" + "strings" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +// ResourceType represents the type of Kubernetes resource. +type ResourceType string + +const ( + // ResourceTypeConfigMap represents a ConfigMap resource. + ResourceTypeConfigMap ResourceType = "configmap" + // ResourceTypeSecret represents a Secret resource. + ResourceTypeSecret ResourceType = "secret" +) + +// MatchResult contains the result of checking if a workload should be reloaded. +type MatchResult struct { + // ShouldReload indicates whether the workload should be reloaded. + ShouldReload bool + // AutoReload indicates if this is an auto-reload (vs explicit annotation). + // This affects which container to target for env var injection. + AutoReload bool + // Reason provides a human-readable explanation of the decision. + Reason string +} + +// Matcher determines whether a workload should be reloaded based on annotations. +type Matcher struct { + cfg *config.Config +} + +// NewMatcher creates a new Matcher with the given configuration. +func NewMatcher(cfg *config.Config) *Matcher { + return &Matcher{cfg: cfg} +} + +// MatchInput contains all the information needed to determine if a reload should occur. +type MatchInput struct { + // ResourceName is the name of the ConfigMap or Secret that changed. + ResourceName string + // ResourceNamespace is the namespace of the ConfigMap or Secret. 
+ ResourceNamespace string + // ResourceType is whether this is a ConfigMap or Secret. + ResourceType ResourceType + // ResourceAnnotations are the annotations on the ConfigMap or Secret. + ResourceAnnotations map[string]string + // WorkloadAnnotations are the annotations on the workload (Deployment, etc.). + WorkloadAnnotations map[string]string + // PodAnnotations are the annotations on the pod template. + PodAnnotations map[string]string +} + +// ShouldReload determines if a workload should be reloaded based on its annotations. +// +// The matching logic follows this precedence (BUG FIX: explicit annotations checked first): +// 1. If the resource has the ignore annotation, skip it +// 2. If the resource is in the exclude list for this workload, skip it +// 3. If explicit reload annotation matches the resource name, reload (not auto) +// 4. If search annotation is enabled and resource has match annotation, reload (auto) +// 5. If auto annotation is "true", reload (auto) +// 6. If typed auto annotation is "true", reload (auto) +// 7. 
If AutoReloadAll is enabled and no explicit "false" annotations, reload (auto) +func (m *Matcher) ShouldReload(input MatchInput) MatchResult { + // Check resource-level ignore annotation + if m.isResourceIgnored(input.ResourceAnnotations) { + return MatchResult{ + ShouldReload: false, + Reason: "resource has ignore annotation", + } + } + + // Determine which annotations to use (workload or pod template) + annotations := m.selectAnnotations(input) + + // Check if resource is excluded + if m.isResourceExcluded(input.ResourceName, input.ResourceType, annotations) { + return MatchResult{ + ShouldReload: false, + Reason: "resource is in exclude list", + } + } + + // Check explicit reload annotation (e.g., configmap.reloader.stakater.com/reload: "my-config") + // BUG FIX: Check this BEFORE auto annotations to ensure explicit references take precedence + if m.matchesExplicitAnnotation(input.ResourceName, input.ResourceType, annotations) { + return MatchResult{ + ShouldReload: true, + AutoReload: false, + Reason: "matches explicit reload annotation", + } + } + + // Check search/match pattern + if m.matchesSearchPattern(input.ResourceAnnotations, annotations) { + return MatchResult{ + ShouldReload: true, + AutoReload: true, + Reason: "matches search/match pattern", + } + } + + // Check auto annotations + if m.matchesAutoAnnotation(input.ResourceType, annotations) { + return MatchResult{ + ShouldReload: true, + AutoReload: true, + Reason: "auto annotation enabled", + } + } + + // Check global auto-reload-all setting + if m.matchesAutoReloadAll(input.ResourceType, annotations) { + return MatchResult{ + ShouldReload: true, + AutoReload: true, + Reason: "auto-reload-all enabled", + } + } + + return MatchResult{ + ShouldReload: false, + Reason: "no matching annotations", + } +} + +// isResourceIgnored checks if the resource has the ignore annotation set to true. 
+func (m *Matcher) isResourceIgnored(resourceAnnotations map[string]string) bool { + if resourceAnnotations == nil { + return false + } + return resourceAnnotations[m.cfg.Annotations.Ignore] == "true" +} + +// selectAnnotations determines which set of annotations to use for matching. +// If workload annotations don't have relevant annotations, fall back to pod annotations. +func (m *Matcher) selectAnnotations(input MatchInput) map[string]string { + // Check if any relevant annotation exists on workload annotations + if m.hasRelevantAnnotations(input.WorkloadAnnotations, input.ResourceType) { + return input.WorkloadAnnotations + } + // Fall back to pod annotations + if m.hasRelevantAnnotations(input.PodAnnotations, input.ResourceType) { + return input.PodAnnotations + } + // Default to workload annotations even if empty + return input.WorkloadAnnotations +} + +// hasRelevantAnnotations checks if the annotations contain any reload-related annotation. +func (m *Matcher) hasRelevantAnnotations(annotations map[string]string, resourceType ResourceType) bool { + if annotations == nil { + return false + } + + // Check for explicit annotation + explicitAnn := m.getExplicitAnnotation(resourceType) + if _, ok := annotations[explicitAnn]; ok { + return true + } + + // Check for search annotation + if _, ok := annotations[m.cfg.Annotations.Search]; ok { + return true + } + + // Check for auto annotation + if _, ok := annotations[m.cfg.Annotations.Auto]; ok { + return true + } + + // Check for typed auto annotation + typedAutoAnn := m.getTypedAutoAnnotation(resourceType) + if _, ok := annotations[typedAutoAnn]; ok { + return true + } + + return false +} + +// isResourceExcluded checks if the resource is in the exclude list. 
+func (m *Matcher) isResourceExcluded(resourceName string, resourceType ResourceType, annotations map[string]string) bool { + if annotations == nil { + return false + } + + var excludeAnn string + switch resourceType { + case ResourceTypeConfigMap: + excludeAnn = m.cfg.Annotations.ConfigmapExclude + case ResourceTypeSecret: + excludeAnn = m.cfg.Annotations.SecretExclude + } + + excludeList, ok := annotations[excludeAnn] + if !ok || excludeList == "" { + return false + } + + for _, excluded := range strings.Split(excludeList, ",") { + if strings.TrimSpace(excluded) == resourceName { + return true + } + } + + return false +} + +// matchesExplicitAnnotation checks if the resource name matches the explicit reload annotation. +func (m *Matcher) matchesExplicitAnnotation(resourceName string, resourceType ResourceType, annotations map[string]string) bool { + if annotations == nil { + return false + } + + explicitAnn := m.getExplicitAnnotation(resourceType) + annotationValue, ok := annotations[explicitAnn] + if !ok || annotationValue == "" { + return false + } + + // Support comma-separated list of resource names with regex matching + for _, value := range strings.Split(annotationValue, ",") { + value = strings.TrimSpace(value) + if value == "" { + continue + } + // Support regex patterns + re, err := regexp.Compile("^" + value + "$") + if err != nil { + // If regex is invalid, fall back to exact match + if value == resourceName { + return true + } + continue + } + if re.MatchString(resourceName) { + return true + } + } + + return false +} + +// matchesSearchPattern checks if the search/match pattern is enabled. 
+func (m *Matcher) matchesSearchPattern(resourceAnnotations, workloadAnnotations map[string]string) bool { + if workloadAnnotations == nil || resourceAnnotations == nil { + return false + } + + searchValue, ok := workloadAnnotations[m.cfg.Annotations.Search] + if !ok || searchValue != "true" { + return false + } + + matchValue, ok := resourceAnnotations[m.cfg.Annotations.Match] + return ok && matchValue == "true" +} + +// matchesAutoAnnotation checks if auto reload is enabled via annotations. +func (m *Matcher) matchesAutoAnnotation(resourceType ResourceType, annotations map[string]string) bool { + if annotations == nil { + return false + } + + // Check generic auto annotation + if annotations[m.cfg.Annotations.Auto] == "true" { + return true + } + + // Check typed auto annotation + typedAutoAnn := m.getTypedAutoAnnotation(resourceType) + return annotations[typedAutoAnn] == "true" +} + +// matchesAutoReloadAll checks if global auto-reload-all is enabled. +func (m *Matcher) matchesAutoReloadAll(resourceType ResourceType, annotations map[string]string) bool { + if !m.cfg.AutoReloadAll { + return false + } + + // If auto annotation is explicitly set to false, don't auto-reload + if annotations != nil { + if annotations[m.cfg.Annotations.Auto] == "false" { + return false + } + typedAutoAnn := m.getTypedAutoAnnotation(resourceType) + if annotations[typedAutoAnn] == "false" { + return false + } + } + + return true +} + +// getExplicitAnnotation returns the explicit reload annotation for the resource type. +func (m *Matcher) getExplicitAnnotation(resourceType ResourceType) string { + switch resourceType { + case ResourceTypeConfigMap: + return m.cfg.Annotations.ConfigmapReload + case ResourceTypeSecret: + return m.cfg.Annotations.SecretReload + default: + return "" + } +} + +// getTypedAutoAnnotation returns the typed auto annotation for the resource type. 
+func (m *Matcher) getTypedAutoAnnotation(resourceType ResourceType) string { + switch resourceType { + case ResourceTypeConfigMap: + return m.cfg.Annotations.ConfigmapAuto + case ResourceTypeSecret: + return m.cfg.Annotations.SecretAuto + default: + return "" + } +} diff --git a/internal/pkg/reload/predicate.go b/internal/pkg/reload/predicate.go new file mode 100644 index 000000000..7e6381e2e --- /dev/null +++ b/internal/pkg/reload/predicate.go @@ -0,0 +1,146 @@ +package reload + +import ( + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// ConfigMapPredicates returns predicates for filtering ConfigMap events. +func ConfigMapPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // Only process create events if ReloadOnCreate is enabled + // or if SyncAfterRestart is enabled (for initial sync) + return cfg.ReloadOnCreate || cfg.SyncAfterRestart + }, + UpdateFunc: func(e event.UpdateEvent) bool { + // Always process updates, but filter by content change + oldCM, okOld := e.ObjectOld.(*corev1.ConfigMap) + newCM, okNew := e.ObjectNew.(*corev1.ConfigMap) + if !okOld || !okNew { + return false + } + + // Check if the data actually changed + oldHash := hasher.HashConfigMap(oldCM) + newHash := hasher.HashConfigMap(newCM) + return oldHash != newHash + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Only process delete events if ReloadOnDelete is enabled + return cfg.ReloadOnDelete + }, + GenericFunc: func(e event.GenericEvent) bool { + // Ignore generic events + return false + }, + } +} + +// SecretPredicates returns predicates for filtering Secret events. 
+func SecretPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // Only process create events if ReloadOnCreate is enabled + // or if SyncAfterRestart is enabled (for initial sync) + return cfg.ReloadOnCreate || cfg.SyncAfterRestart + }, + UpdateFunc: func(e event.UpdateEvent) bool { + // Always process updates, but filter by content change + oldSecret, okOld := e.ObjectOld.(*corev1.Secret) + newSecret, okNew := e.ObjectNew.(*corev1.Secret) + if !okOld || !okNew { + return false + } + + // Check if the data actually changed + oldHash := hasher.HashSecret(oldSecret) + newHash := hasher.HashSecret(newSecret) + return oldHash != newHash + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Only process delete events if ReloadOnDelete is enabled + return cfg.ReloadOnDelete + }, + GenericFunc: func(e event.GenericEvent) bool { + // Ignore generic events + return false + }, + } +} + +// NamespaceFilterPredicate returns a predicate that filters resources by namespace. +func NamespaceFilterPredicate(cfg *config.Config) predicate.Predicate { + return predicate.NewPredicateFuncs(func(obj client.Object) bool { + namespace := obj.GetNamespace() + + // Check if namespace should be ignored + if cfg.IsNamespaceIgnored(namespace) { + return false + } + + // Check namespace selectors + // Note: For now, we pass through and let the controller handle selector matching + // A more efficient implementation would check labels here + return true + }) +} + +// LabelSelectorPredicate returns a predicate that filters resources by labels. 
+func LabelSelectorPredicate(cfg *config.Config) predicate.Predicate { + if len(cfg.ResourceSelectors) == 0 { + // No selectors configured, allow all + return predicate.NewPredicateFuncs(func(obj client.Object) bool { + return true + }) + } + + return predicate.NewPredicateFuncs(func(obj client.Object) bool { + labels := obj.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + // Check if any selector matches + for _, selector := range cfg.ResourceSelectors { + if selector.Matches(labelsSet(labels)) { + return true + } + } + + return false + }) +} + +// labelsSet implements labels.Labels interface for a map. +type labelsSet map[string]string + +func (ls labelsSet) Has(key string) bool { + _, ok := ls[key] + return ok +} + +func (ls labelsSet) Get(key string) string { + return ls[key] +} + +// IgnoreAnnotationPredicate returns a predicate that filters out resources with the ignore annotation. +func IgnoreAnnotationPredicate(cfg *config.Config) predicate.Predicate { + return predicate.NewPredicateFuncs(func(obj client.Object) bool { + annotations := obj.GetAnnotations() + if annotations == nil { + return true + } + + // Check for ignore annotation + return annotations[cfg.Annotations.Ignore] != "true" + }) +} + +// CombinedPredicates combines multiple predicates with AND logic. +func CombinedPredicates(predicates ...predicate.Predicate) predicate.Predicate { + return predicate.And(predicates...) +} diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go new file mode 100644 index 000000000..169ae9ea0 --- /dev/null +++ b/internal/pkg/reload/service.go @@ -0,0 +1,401 @@ +package reload + +import ( + "context" + "fmt" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Service orchestrates the reload logic for ConfigMaps and Secrets. 
+type Service struct { + cfg *config.Config + hasher *Hasher + matcher *Matcher + strategy Strategy +} + +// NewService creates a new reload Service with the given configuration. +func NewService(cfg *config.Config) *Service { + return &Service{ + cfg: cfg, + hasher: NewHasher(), + matcher: NewMatcher(cfg), + strategy: NewStrategy(cfg), + } +} + +// ConfigMapChange represents a change event for a ConfigMap. +type ConfigMapChange struct { + ConfigMap *corev1.ConfigMap + EventType EventType +} + +// SecretChange represents a change event for a Secret. +type SecretChange struct { + Secret *corev1.Secret + EventType EventType +} + +// EventType represents the type of change event. +type EventType string + +const ( + // EventTypeCreate indicates a resource was created. + EventTypeCreate EventType = "create" + // EventTypeUpdate indicates a resource was updated. + EventTypeUpdate EventType = "update" + // EventTypeDelete indicates a resource was deleted. + EventTypeDelete EventType = "delete" +) + +// ReloadDecision contains the result of evaluating whether to reload a workload. +type ReloadDecision struct { + // Workload is the workload accessor. + Workload workload.WorkloadAccessor + // ShouldReload indicates whether the workload should be reloaded. + ShouldReload bool + // AutoReload indicates if this is an auto-reload. + AutoReload bool + // Reason provides a human-readable explanation. + Reason string + // Hash is the computed hash of the resource content. + Hash string +} + +// ProcessConfigMap evaluates all workloads to determine which should be reloaded. +// This method does not modify any workloads - it only returns decisions. 
+func (s *Service) ProcessConfigMap(change ConfigMapChange, workloads []workload.WorkloadAccessor) []ReloadDecision { + if change.ConfigMap == nil { + return nil + } + + // Check if we should process this event type + if !s.shouldProcessEvent(change.EventType) { + return nil + } + + // Compute hash + hash := s.hasher.HashConfigMap(change.ConfigMap) + if change.EventType == EventTypeDelete { + hash = s.hasher.EmptyHash() + } + + return s.processResource( + change.ConfigMap.Name, + change.ConfigMap.Namespace, + change.ConfigMap.Annotations, + ResourceTypeConfigMap, + hash, + workloads, + ) +} + +// ProcessSecret evaluates all workloads to determine which should be reloaded. +// This method does not modify any workloads - it only returns decisions. +func (s *Service) ProcessSecret(change SecretChange, workloads []workload.WorkloadAccessor) []ReloadDecision { + if change.Secret == nil { + return nil + } + + // Check if we should process this event type + if !s.shouldProcessEvent(change.EventType) { + return nil + } + + // Compute hash + hash := s.hasher.HashSecret(change.Secret) + if change.EventType == EventTypeDelete { + hash = s.hasher.EmptyHash() + } + + return s.processResource( + change.Secret.Name, + change.Secret.Namespace, + change.Secret.Annotations, + ResourceTypeSecret, + hash, + workloads, + ) +} + +// processResource processes a resource change against all workloads. 
+func (s *Service) processResource( + resourceName string, + resourceNamespace string, + resourceAnnotations map[string]string, + resourceType ResourceType, + hash string, + workloads []workload.WorkloadAccessor, +) []ReloadDecision { + var decisions []ReloadDecision + + for _, wl := range workloads { + // Skip workloads in different namespaces + if wl.GetNamespace() != resourceNamespace { + continue + } + + // Check if workload should be ignored based on type + if s.cfg.IsWorkloadIgnored(string(wl.Kind())) { + continue + } + + // Check if workload uses this resource (via volumes or env) + var usesResource bool + switch resourceType { + case ResourceTypeConfigMap: + usesResource = wl.UsesConfigMap(resourceName) + case ResourceTypeSecret: + usesResource = wl.UsesSecret(resourceName) + } + + // Build match input + input := MatchInput{ + ResourceName: resourceName, + ResourceNamespace: resourceNamespace, + ResourceType: resourceType, + ResourceAnnotations: resourceAnnotations, + WorkloadAnnotations: wl.GetAnnotations(), + PodAnnotations: wl.GetPodTemplateAnnotations(), + } + + // Check if we should reload + matchResult := s.matcher.ShouldReload(input) + + // For auto-reload, the workload must actually use the resource + // For explicit annotation, the user explicitly requested it + shouldReload := matchResult.ShouldReload + if matchResult.AutoReload && !usesResource { + shouldReload = false + } + + decisions = append(decisions, ReloadDecision{ + Workload: wl, + ShouldReload: shouldReload, + AutoReload: matchResult.AutoReload, + Reason: matchResult.Reason, + Hash: hash, + }) + } + + return decisions +} + +// shouldProcessEvent checks if the event type should be processed. 
+func (s *Service) shouldProcessEvent(eventType EventType) bool { + switch eventType { + case EventTypeCreate: + return s.cfg.ReloadOnCreate + case EventTypeDelete: + return s.cfg.ReloadOnDelete + case EventTypeUpdate: + return true + default: + return false + } +} + +// ApplyReload applies the reload strategy to a workload. +// This modifies the workload in-place but does not persist the changes. +// Returns true if changes were made, false otherwise. +func (s *Service) ApplyReload( + ctx context.Context, + wl workload.WorkloadAccessor, + resourceName string, + resourceType ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + // Find the target container + container := s.findTargetContainer(wl, resourceName, resourceType, autoReload) + + input := StrategyInput{ + ResourceName: resourceName, + ResourceType: resourceType, + Namespace: namespace, + Hash: hash, + Container: container, + PodAnnotations: wl.GetPodTemplateAnnotations(), + AutoReload: autoReload, + } + + return s.strategy.Apply(input) +} + +// findTargetContainer finds the container to target for the reload. +// For auto-reload, it finds the container that uses the resource. +// For explicit annotation, it returns the first container. 
+func (s *Service) findTargetContainer( + wl workload.WorkloadAccessor, + resourceName string, + resourceType ResourceType, + autoReload bool, +) *corev1.Container { + containers := wl.GetContainers() + if len(containers) == 0 { + return nil + } + + // For explicit annotation, return the first container + if !autoReload { + return &containers[0] + } + + volumes := wl.GetVolumes() + initContainers := wl.GetInitContainers() + + // For auto-reload, find the container that uses the resource + // Check volumes first + volumeName := s.findVolumeUsingResource(volumes, resourceName, resourceType) + if volumeName != "" { + container := s.findContainerWithVolumeMount(containers, volumeName) + if container != nil { + return container + } + // Check init containers + container = s.findContainerWithVolumeMount(initContainers, volumeName) + if container != nil { + // Return the first regular container for init container refs + return &containers[0] + } + } + + // Check env references + container := s.findContainerWithEnvRef(containers, resourceName, resourceType) + if container != nil { + return container + } + + // Check init container env references + container = s.findContainerWithEnvRef(initContainers, resourceName, resourceType) + if container != nil { + // Return the first regular container for init container refs + return &containers[0] + } + + // Default to first container + return &containers[0] +} + +// findVolumeUsingResource finds a volume that uses the given resource. 
+func (s *Service) findVolumeUsingResource(volumes []corev1.Volume, resourceName string, resourceType ResourceType) string { + for _, vol := range volumes { + switch resourceType { + case ResourceTypeConfigMap: + if vol.ConfigMap != nil && vol.ConfigMap.Name == resourceName { + return vol.Name + } + if vol.Projected != nil { + for _, src := range vol.Projected.Sources { + if src.ConfigMap != nil && src.ConfigMap.Name == resourceName { + return vol.Name + } + } + } + case ResourceTypeSecret: + if vol.Secret != nil && vol.Secret.SecretName == resourceName { + return vol.Name + } + if vol.Projected != nil { + for _, src := range vol.Projected.Sources { + if src.Secret != nil && src.Secret.Name == resourceName { + return vol.Name + } + } + } + } + } + return "" +} + +// findContainerWithVolumeMount finds a container that mounts the given volume. +func (s *Service) findContainerWithVolumeMount(containers []corev1.Container, volumeName string) *corev1.Container { + for i := range containers { + for _, mount := range containers[i].VolumeMounts { + if mount.Name == volumeName { + return &containers[i] + } + } + } + return nil +} + +// findContainerWithEnvRef finds a container that references the resource via env. 
+func (s *Service) findContainerWithEnvRef(containers []corev1.Container, resourceName string, resourceType ResourceType) *corev1.Container { + for i := range containers { + // Check env vars + for _, env := range containers[i].Env { + if env.ValueFrom == nil { + continue + } + switch resourceType { + case ResourceTypeConfigMap: + if env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == resourceName { + return &containers[i] + } + case ResourceTypeSecret: + if env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == resourceName { + return &containers[i] + } + } + } + + // Check envFrom + for _, envFrom := range containers[i].EnvFrom { + switch resourceType { + case ResourceTypeConfigMap: + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == resourceName { + return &containers[i] + } + case ResourceTypeSecret: + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == resourceName { + return &containers[i] + } + } + } + } + return nil +} + +// Hasher returns the hasher used by this service. +func (s *Service) Hasher() *Hasher { + return s.hasher +} + +// Matcher returns the matcher used by this service. +func (s *Service) Matcher() *Matcher { + return s.matcher +} + +// Strategy returns the strategy used by this service. +func (s *Service) Strategy() Strategy { + return s.strategy +} + +// ListWorkloads lists all workloads in the given namespace. +// If namespace is empty, lists workloads in all namespaces. +func ListWorkloads(ctx context.Context, c client.Client, namespace string, registry *workload.Registry) ([]workload.WorkloadAccessor, error) { + var workloads []workload.WorkloadAccessor + + for _, kind := range registry.SupportedKinds() { + list, err := listWorkloadsByKind(ctx, c, namespace, kind) + if err != nil { + return nil, fmt.Errorf("listing %s: %w", kind, err) + } + workloads = append(workloads, list...) + } + + return workloads, nil +} + +// listWorkloadsByKind lists workloads of a specific kind. 
+func listWorkloadsByKind(ctx context.Context, c client.Client, namespace string, kind workload.Kind) ([]workload.WorkloadAccessor, error) { + // This will be implemented by the controller using the appropriate list functions + // For now, return empty slice as the controller will handle this + return nil, nil +} diff --git a/internal/pkg/reload/strategy.go b/internal/pkg/reload/strategy.go new file mode 100644 index 000000000..ce938a7ef --- /dev/null +++ b/internal/pkg/reload/strategy.go @@ -0,0 +1,203 @@ +package reload + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" +) + +const ( + // EnvVarPrefix is the prefix for environment variables added by Reloader. + EnvVarPrefix = "STAKATER_" + // ConfigmapEnvVarPostfix is the postfix for ConfigMap environment variables. + ConfigmapEnvVarPostfix = "CONFIGMAP" + // SecretEnvVarPostfix is the postfix for Secret environment variables. + SecretEnvVarPostfix = "SECRET" +) + +// Strategy defines how workload restarts are triggered. +type Strategy interface { + // Apply applies the reload strategy to the pod spec. + // Returns true if changes were made, false otherwise. + Apply(input StrategyInput) (bool, error) + + // Name returns the strategy name for logging purposes. + Name() string +} + +// StrategyInput contains the information needed to apply a reload strategy. +type StrategyInput struct { + // ResourceName is the name of the ConfigMap or Secret that changed. + ResourceName string + // ResourceType is the type of resource (configmap or secret). + ResourceType ResourceType + // Namespace is the namespace of the resource. + Namespace string + // Hash is the SHA hash of the resource content. + Hash string + // Container is the container to target for env var injection. + // If nil, the first container is used. 
+ Container *corev1.Container + // PodAnnotations is the pod template annotations map (for annotation strategy). + PodAnnotations map[string]string + // AutoReload indicates if this is an auto-reload (affects container selection). + AutoReload bool +} + +// ReloadSource contains metadata about what triggered a reload. +// This is stored in the annotation when using annotation strategy. +type ReloadSource struct { + Kind string `json:"kind"` + Name string `json:"name"` + Namespace string `json:"namespace"` + Hash string `json:"hash"` + Containers []string `json:"containers"` + ReloadedAt time.Time `json:"reloadedAt"` +} + +// EnvVarStrategy triggers reloads by adding/updating environment variables. +// This is the default strategy and is GitOps-friendly. +type EnvVarStrategy struct{} + +// NewEnvVarStrategy creates a new EnvVarStrategy. +func NewEnvVarStrategy() *EnvVarStrategy { + return &EnvVarStrategy{} +} + +// Name returns the strategy name. +func (s *EnvVarStrategy) Name() string { + return string(config.ReloadStrategyEnvVars) +} + +// Apply adds or updates an environment variable to trigger a restart. +func (s *EnvVarStrategy) Apply(input StrategyInput) (bool, error) { + if input.Container == nil { + return false, fmt.Errorf("container is required for env-var strategy") + } + + envVarName := s.envVarName(input.ResourceName, input.ResourceType) + + // Check if env var already exists + for i := range input.Container.Env { + if input.Container.Env[i].Name == envVarName { + if input.Container.Env[i].Value == input.Hash { + // Already up to date + return false, nil + } + // Update existing + input.Container.Env[i].Value = input.Hash + return true, nil + } + } + + // Add new env var + input.Container.Env = append(input.Container.Env, corev1.EnvVar{ + Name: envVarName, + Value: input.Hash, + }) + + return true, nil +} + +// envVarName generates the environment variable name for a resource. 
+func (s *EnvVarStrategy) envVarName(resourceName string, resourceType ResourceType) string { + var postfix string + switch resourceType { + case ResourceTypeConfigMap: + postfix = ConfigmapEnvVarPostfix + case ResourceTypeSecret: + postfix = SecretEnvVarPostfix + } + return EnvVarPrefix + convertToEnvVarName(resourceName) + "_" + postfix +} + +// convertToEnvVarName converts a string to a valid environment variable name. +// Invalid characters are replaced with underscores, and the result is uppercased. +func convertToEnvVarName(text string) string { + var buffer bytes.Buffer + upper := strings.ToUpper(text) + lastCharValid := false + + for i := 0; i < len(upper); i++ { + ch := upper[i] + if (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') { + buffer.WriteByte(ch) + lastCharValid = true + } else { + if lastCharValid { + buffer.WriteByte('_') + } + lastCharValid = false + } + } + + return buffer.String() +} + +// AnnotationStrategy triggers reloads by adding/updating pod template annotations. +type AnnotationStrategy struct { + cfg *config.Config +} + +// NewAnnotationStrategy creates a new AnnotationStrategy. +func NewAnnotationStrategy(cfg *config.Config) *AnnotationStrategy { + return &AnnotationStrategy{cfg: cfg} +} + +// Name returns the strategy name. +func (s *AnnotationStrategy) Name() string { + return string(config.ReloadStrategyAnnotations) +} + +// Apply adds or updates a pod annotation to trigger a restart. 
+func (s *AnnotationStrategy) Apply(input StrategyInput) (bool, error) { + if input.PodAnnotations == nil { + return false, fmt.Errorf("pod annotations map is required for annotation strategy") + } + + containerName := "" + if input.Container != nil { + containerName = input.Container.Name + } + + // Create reload source metadata + source := ReloadSource{ + Kind: string(input.ResourceType), + Name: input.ResourceName, + Namespace: input.Namespace, + Hash: input.Hash, + Containers: []string{containerName}, + ReloadedAt: time.Now().UTC(), + } + + sourceJSON, err := json.Marshal(source) + if err != nil { + return false, fmt.Errorf("failed to marshal reload source: %w", err) + } + + annotationKey := s.cfg.Annotations.LastReloadedFrom + existingValue := input.PodAnnotations[annotationKey] + + if existingValue == string(sourceJSON) { + // Already up to date + return false, nil + } + + input.PodAnnotations[annotationKey] = string(sourceJSON) + return true, nil +} + +// NewStrategy creates a Strategy based on the configuration. 
+func NewStrategy(cfg *config.Config) Strategy {
+	switch cfg.ReloadStrategy {
+	case config.ReloadStrategyAnnotations:
+		return NewAnnotationStrategy(cfg)
+	default:
+		return NewEnvVarStrategy()
+	}
+}

From 0a2aa122f520346eafde0d3d7f5e5c27cc104cb5 Mon Sep 17 00:00:00 2001
From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com>
Date: Sun, 28 Dec 2025 08:47:52 +0100
Subject: [PATCH 04/35] feat: Add controller-runtime reconcilers and unit
 tests for reload package

---
 go.mod                                     |   6 +-
 go.sum                                     |   6 +
 .../pkg/controller/configmap_reconciler.go | 340 ++++++++++++
 internal/pkg/controller/secret_reconciler.go | 340 ++++++++++++
 internal/pkg/reload/hasher_test.go         | 231 +++++++++
 internal/pkg/reload/matcher_test.go        | 488 ++++++++++++++++++
 internal/pkg/reload/strategy_test.go       | 292 +++++++++++
 7 files changed, 1702 insertions(+), 1 deletion(-)
 create mode 100644 internal/pkg/controller/configmap_reconciler.go
 create mode 100644 internal/pkg/controller/secret_reconciler.go
 create mode 100644 internal/pkg/reload/hasher_test.go
 create mode 100644 internal/pkg/reload/matcher_test.go
 create mode 100644 internal/pkg/reload/strategy_test.go

diff --git a/go.mod b/go.mod
index af8cb9622..bff72edbf 100644
--- a/go.mod
+++ b/go.mod
@@ -4,6 +4,7 @@ go 1.25.5
 
 require (
 	github.com/argoproj/argo-rollouts v1.8.3
+	github.com/go-logr/logr v1.4.2
 	github.com/openshift/api v0.0.0-20250411135543-10a8fa583797
 	github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2
 	github.com/parnurzeal/gorequest v0.3.0
@@ -27,8 +28,8 @@
 	github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect
 	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-openapi/jsonpointer v0.21.1 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.1 // indirect @@ -54,16 +55,19 @@ require ( github.com/prometheus/procfs v0.16.0 // indirect github.com/smartystreets/goconvey v1.7.2 // indirect github.com/x448/float16 v0.8.4 // indirect + golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect golang.org/x/net v0.39.0 // indirect golang.org/x/oauth2 v0.29.0 // indirect golang.org/x/sys v0.32.0 // indirect golang.org/x/term v0.31.0 // indirect golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.11.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/go.sum b/go.sum index dd99ea92f..945a27578 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,12 @@ github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod 
h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -171,6 +175,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go new file mode 100644 index 000000000..2dcd33381 --- /dev/null +++ b/internal/pkg/controller/configmap_reconciler.go @@ -0,0 +1,340 @@ +package controller + +import ( + "context" + "sync" + + "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + 
"sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ConfigMapReconciler watches ConfigMaps and triggers workload reloads. +type ConfigMapReconciler struct { + client.Client + Log logr.Logger + Config *config.Config + ReloadService *reload.Service + Registry *workload.Registry + Collectors *metrics.Collectors + + // initialized tracks whether initial sync has completed. + // Used to skip create events during startup unless SyncAfterRestart is enabled. + initialized bool + initOnce sync.Once +} + +// Reconcile handles ConfigMap events and triggers workload reloads as needed. +func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("configmap", req.NamespacedName) + + // Mark as initialized after first reconcile (caches are synced at this point) + r.initOnce.Do(func() { + r.initialized = true + log.Info("ConfigMap controller initialized") + }) + + // Fetch the ConfigMap + var cm corev1.ConfigMap + if err := r.Get(ctx, req.NamespacedName, &cm); err != nil { + if errors.IsNotFound(err) { + // ConfigMap was deleted - handle if ReloadOnDelete is enabled + if r.Config.ReloadOnDelete { + return r.handleDelete(ctx, req, log) + } + return ctrl.Result{}, nil + } + log.Error(err, "failed to get ConfigMap") + return ctrl.Result{}, err + } + + // Check if namespace should be ignored + if r.Config.IsNamespaceIgnored(cm.Namespace) { + log.V(1).Info("skipping ConfigMap in ignored namespace") + return ctrl.Result{}, nil + } + + // Get all workloads in the same namespace + workloads, err := r.listWorkloads(ctx, cm.Namespace) + if err != nil { + log.Error(err, "failed to list workloads") + return ctrl.Result{}, err + } + + // Evaluate which workloads should be reloaded + change := reload.ConfigMapChange{ + ConfigMap: &cm, + EventType: reload.EventTypeUpdate, + } + decisions := r.ReloadService.ProcessConfigMap(change, workloads) + + // Apply reloads + for _, 
decision := range decisions { + if !decision.ShouldReload { + continue + } + + log.Info("reloading workload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + "reason", decision.Reason, + ) + + updated, err := r.ReloadService.ApplyReload( + ctx, + decision.Workload, + cm.Name, + reload.ResourceTypeConfigMap, + cm.Namespace, + decision.Hash, + decision.AutoReload, + ) + if err != nil { + log.Error(err, "failed to apply reload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + r.recordMetrics(false, cm.Namespace) + continue + } + + if updated { + // Persist the changes + if err := r.Update(ctx, decision.Workload.GetObject()); err != nil { + log.Error(err, "failed to update workload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + r.recordMetrics(false, cm.Namespace) + continue + } + r.recordMetrics(true, cm.Namespace) + log.Info("workload reloaded successfully", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + } + } + + return ctrl.Result{}, nil +} + +// handleDelete handles ConfigMap deletion events. 
+func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { + log.Info("handling ConfigMap deletion") + + // Get all workloads in the namespace + workloads, err := r.listWorkloads(ctx, req.Namespace) + if err != nil { + log.Error(err, "failed to list workloads") + return ctrl.Result{}, err + } + + // For delete events, we create a change with nil ConfigMap + // The service will use an empty hash + change := reload.ConfigMapChange{ + ConfigMap: &corev1.ConfigMap{}, + EventType: reload.EventTypeDelete, + } + change.ConfigMap.Name = req.Name + change.ConfigMap.Namespace = req.Namespace + + decisions := r.ReloadService.ProcessConfigMap(change, workloads) + + // Apply reloads for delete + for _, decision := range decisions { + if !decision.ShouldReload { + continue + } + + log.Info("reloading workload due to ConfigMap deletion", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + + updated, err := r.ReloadService.ApplyReload( + ctx, + decision.Workload, + req.Name, + reload.ResourceTypeConfigMap, + req.Namespace, + decision.Hash, + decision.AutoReload, + ) + if err != nil { + log.Error(err, "failed to apply reload for deletion") + r.recordMetrics(false, req.Namespace) + continue + } + + if updated { + if err := r.Update(ctx, decision.Workload.GetObject()); err != nil { + log.Error(err, "failed to update workload") + r.recordMetrics(false, req.Namespace) + continue + } + r.recordMetrics(true, req.Namespace) + } + } + + return ctrl.Result{}, nil +} + +// listWorkloads returns all workloads in the given namespace. 
+func (r *ConfigMapReconciler) listWorkloads(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var result []workload.WorkloadAccessor + + for _, kind := range r.Registry.SupportedKinds() { + // Skip ignored workload types + if r.Config.IsWorkloadIgnored(string(kind)) { + continue + } + + workloads, err := r.listWorkloadsByKind(ctx, namespace, kind) + if err != nil { + return nil, err + } + result = append(result, workloads...) + } + + return result, nil +} + +// listWorkloadsByKind lists workloads of a specific kind in the namespace. +func (r *ConfigMapReconciler) listWorkloadsByKind(ctx context.Context, namespace string, kind workload.Kind) ([]workload.WorkloadAccessor, error) { + switch kind { + case workload.KindDeployment: + return r.listDeployments(ctx, namespace) + case workload.KindDaemonSet: + return r.listDaemonSets(ctx, namespace) + case workload.KindStatefulSet: + return r.listStatefulSets(ctx, namespace) + case workload.KindJob: + return r.listJobs(ctx, namespace) + case workload.KindCronJob: + return r.listCronJobs(ctx, namespace) + default: + return nil, nil + } +} + +func (r *ConfigMapReconciler) listDeployments(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list appsv1.DeploymentList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewDeploymentWorkload(&list.Items[i]) + } + return result, nil +} + +func (r *ConfigMapReconciler) listDaemonSets(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list appsv1.DaemonSetList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewDaemonSetWorkload(&list.Items[i]) + } + return result, nil 
+} + +func (r *ConfigMapReconciler) listStatefulSets(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list appsv1.StatefulSetList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewStatefulSetWorkload(&list.Items[i]) + } + return result, nil +} + +func (r *ConfigMapReconciler) listJobs(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list batchv1.JobList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewJobWorkload(&list.Items[i]) + } + return result, nil +} + +func (r *ConfigMapReconciler) listCronJobs(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list batchv1.CronJobList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewCronJobWorkload(&list.Items[i]) + } + return result, nil +} + +// recordMetrics records reload metrics. +func (r *ConfigMapReconciler) recordMetrics(success bool, namespace string) { + if r.Collectors == nil { + return + } + // TODO: Integrate with existing metrics collectors +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error { + hasher := r.ReloadService.Hasher() + + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.ConfigMap{}). 
+ WithEventFilter(predicate.And( + reload.ConfigMapPredicates(r.Config, hasher), + reload.NamespaceFilterPredicate(r.Config), + reload.LabelSelectorPredicate(r.Config), + reload.IgnoreAnnotationPredicate(r.Config), + r.createEventFilter(), + )). + Complete(r) +} + +// createEventFilter filters create events based on initialization state. +func (r *ConfigMapReconciler) createEventFilter() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // During startup, skip create events unless SyncAfterRestart is enabled + if !r.initialized && !r.Config.SyncAfterRestart { + return false + } + // After initialization, only process creates if ReloadOnCreate is enabled + return r.Config.ReloadOnCreate + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return r.Config.ReloadOnDelete + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } +} + +// Ensure ConfigMapReconciler implements reconcile.Reconciler +var _ reconcile.Reconciler = &ConfigMapReconciler{} diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go new file mode 100644 index 000000000..54e4aa8ec --- /dev/null +++ b/internal/pkg/controller/secret_reconciler.go @@ -0,0 +1,340 @@ +package controller + +import ( + "context" + "sync" + + "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// 
SecretReconciler watches Secrets and triggers workload reloads. +type SecretReconciler struct { + client.Client + Log logr.Logger + Config *config.Config + ReloadService *reload.Service + Registry *workload.Registry + Collectors *metrics.Collectors + + // initialized tracks whether initial sync has completed. + // Used to skip create events during startup unless SyncAfterRestart is enabled. + initialized bool + initOnce sync.Once +} + +// Reconcile handles Secret events and triggers workload reloads as needed. +func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("secret", req.NamespacedName) + + // Mark as initialized after first reconcile (caches are synced at this point) + r.initOnce.Do(func() { + r.initialized = true + log.Info("Secret controller initialized") + }) + + // Fetch the Secret + var secret corev1.Secret + if err := r.Get(ctx, req.NamespacedName, &secret); err != nil { + if errors.IsNotFound(err) { + // Secret was deleted - handle if ReloadOnDelete is enabled + if r.Config.ReloadOnDelete { + return r.handleDelete(ctx, req, log) + } + return ctrl.Result{}, nil + } + log.Error(err, "failed to get Secret") + return ctrl.Result{}, err + } + + // Check if namespace should be ignored + if r.Config.IsNamespaceIgnored(secret.Namespace) { + log.V(1).Info("skipping Secret in ignored namespace") + return ctrl.Result{}, nil + } + + // Get all workloads in the same namespace + workloads, err := r.listWorkloads(ctx, secret.Namespace) + if err != nil { + log.Error(err, "failed to list workloads") + return ctrl.Result{}, err + } + + // Evaluate which workloads should be reloaded + change := reload.SecretChange{ + Secret: &secret, + EventType: reload.EventTypeUpdate, + } + decisions := r.ReloadService.ProcessSecret(change, workloads) + + // Apply reloads + for _, decision := range decisions { + if !decision.ShouldReload { + continue + } + + log.Info("reloading workload", + "workload", 
decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + "reason", decision.Reason, + ) + + updated, err := r.ReloadService.ApplyReload( + ctx, + decision.Workload, + secret.Name, + reload.ResourceTypeSecret, + secret.Namespace, + decision.Hash, + decision.AutoReload, + ) + if err != nil { + log.Error(err, "failed to apply reload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + r.recordMetrics(false, secret.Namespace) + continue + } + + if updated { + // Persist the changes + if err := r.Update(ctx, decision.Workload.GetObject()); err != nil { + log.Error(err, "failed to update workload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + r.recordMetrics(false, secret.Namespace) + continue + } + r.recordMetrics(true, secret.Namespace) + log.Info("workload reloaded successfully", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + } + } + + return ctrl.Result{}, nil +} + +// handleDelete handles Secret deletion events. 
+func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { + log.Info("handling Secret deletion") + + // Get all workloads in the namespace + workloads, err := r.listWorkloads(ctx, req.Namespace) + if err != nil { + log.Error(err, "failed to list workloads") + return ctrl.Result{}, err + } + + // For delete events, we create a change with empty Secret + // The service will use an empty hash + change := reload.SecretChange{ + Secret: &corev1.Secret{}, + EventType: reload.EventTypeDelete, + } + change.Secret.Name = req.Name + change.Secret.Namespace = req.Namespace + + decisions := r.ReloadService.ProcessSecret(change, workloads) + + // Apply reloads for delete + for _, decision := range decisions { + if !decision.ShouldReload { + continue + } + + log.Info("reloading workload due to Secret deletion", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + + updated, err := r.ReloadService.ApplyReload( + ctx, + decision.Workload, + req.Name, + reload.ResourceTypeSecret, + req.Namespace, + decision.Hash, + decision.AutoReload, + ) + if err != nil { + log.Error(err, "failed to apply reload for deletion") + r.recordMetrics(false, req.Namespace) + continue + } + + if updated { + if err := r.Update(ctx, decision.Workload.GetObject()); err != nil { + log.Error(err, "failed to update workload") + r.recordMetrics(false, req.Namespace) + continue + } + r.recordMetrics(true, req.Namespace) + } + } + + return ctrl.Result{}, nil +} + +// listWorkloads returns all workloads in the given namespace. 
+func (r *SecretReconciler) listWorkloads(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var result []workload.WorkloadAccessor + + for _, kind := range r.Registry.SupportedKinds() { + // Skip ignored workload types + if r.Config.IsWorkloadIgnored(string(kind)) { + continue + } + + workloads, err := r.listWorkloadsByKind(ctx, namespace, kind) + if err != nil { + return nil, err + } + result = append(result, workloads...) + } + + return result, nil +} + +// listWorkloadsByKind lists workloads of a specific kind in the namespace. +func (r *SecretReconciler) listWorkloadsByKind(ctx context.Context, namespace string, kind workload.Kind) ([]workload.WorkloadAccessor, error) { + switch kind { + case workload.KindDeployment: + return r.listDeployments(ctx, namespace) + case workload.KindDaemonSet: + return r.listDaemonSets(ctx, namespace) + case workload.KindStatefulSet: + return r.listStatefulSets(ctx, namespace) + case workload.KindJob: + return r.listJobs(ctx, namespace) + case workload.KindCronJob: + return r.listCronJobs(ctx, namespace) + default: + return nil, nil + } +} + +func (r *SecretReconciler) listDeployments(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list appsv1.DeploymentList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewDeploymentWorkload(&list.Items[i]) + } + return result, nil +} + +func (r *SecretReconciler) listDaemonSets(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list appsv1.DaemonSetList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewDaemonSetWorkload(&list.Items[i]) + } + return result, nil +} + +func 
(r *SecretReconciler) listStatefulSets(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list appsv1.StatefulSetList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewStatefulSetWorkload(&list.Items[i]) + } + return result, nil +} + +func (r *SecretReconciler) listJobs(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list batchv1.JobList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewJobWorkload(&list.Items[i]) + } + return result, nil +} + +func (r *SecretReconciler) listCronJobs(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { + var list batchv1.CronJobList + if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]workload.WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = workload.NewCronJobWorkload(&list.Items[i]) + } + return result, nil +} + +// recordMetrics records reload metrics. +func (r *SecretReconciler) recordMetrics(success bool, namespace string) { + if r.Collectors == nil { + return + } + // TODO: Integrate with existing metrics collectors +} + +// SetupWithManager sets up the controller with the Manager. +func (r *SecretReconciler) SetupWithManager(mgr ctrl.Manager) error { + hasher := r.ReloadService.Hasher() + + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Secret{}). + WithEventFilter(predicate.And( + reload.SecretPredicates(r.Config, hasher), + reload.NamespaceFilterPredicate(r.Config), + reload.LabelSelectorPredicate(r.Config), + reload.IgnoreAnnotationPredicate(r.Config), + r.createEventFilter(), + )). 
+ Complete(r) +} + +// createEventFilter filters create events based on initialization state. +func (r *SecretReconciler) createEventFilter() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // During startup, skip create events unless SyncAfterRestart is enabled + if !r.initialized && !r.Config.SyncAfterRestart { + return false + } + // After initialization, only process creates if ReloadOnCreate is enabled + return r.Config.ReloadOnCreate + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return r.Config.ReloadOnDelete + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } +} + +// Ensure SecretReconciler implements reconcile.Reconciler +var _ reconcile.Reconciler = &SecretReconciler{} diff --git a/internal/pkg/reload/hasher_test.go b/internal/pkg/reload/hasher_test.go new file mode 100644 index 000000000..0b892adca --- /dev/null +++ b/internal/pkg/reload/hasher_test.go @@ -0,0 +1,231 @@ +package reload + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" +) + +func TestHasher_HashConfigMap(t *testing.T) { + hasher := NewHasher() + + tests := []struct { + name string + cm *corev1.ConfigMap + wantHash string + }{ + { + name: "empty configmap", + cm: &corev1.ConfigMap{ + Data: nil, + BinaryData: nil, + }, + wantHash: hasher.EmptyHash(), + }, + { + name: "configmap with data", + cm: &corev1.ConfigMap{ + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + // Hash should be deterministic + wantHash: hasher.HashConfigMap(&corev1.ConfigMap{ + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }), + }, + { + name: "configmap with binary data", + cm: &corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "binary1": []byte("binaryvalue1"), + }, + }, + wantHash: hasher.HashConfigMap(&corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "binary1": []byte("binaryvalue1"), + }, + }), + }, + } 
+ + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := hasher.HashConfigMap(tt.cm) + if got != tt.wantHash { + t.Errorf("HashConfigMap() = %v, want %v", got, tt.wantHash) + } + }) + } +} + +func TestHasher_HashConfigMap_Deterministic(t *testing.T) { + hasher := NewHasher() + + cm := &corev1.ConfigMap{ + Data: map[string]string{ + "z-key": "value-z", + "a-key": "value-a", + "m-key": "value-m", + }, + } + + // Hash should be the same regardless of iteration order + hash1 := hasher.HashConfigMap(cm) + hash2 := hasher.HashConfigMap(cm) + hash3 := hasher.HashConfigMap(cm) + + if hash1 != hash2 || hash2 != hash3 { + t.Errorf("Hash is not deterministic: %s, %s, %s", hash1, hash2, hash3) + } +} + +func TestHasher_HashConfigMap_DifferentValues(t *testing.T) { + hasher := NewHasher() + + cm1 := &corev1.ConfigMap{ + Data: map[string]string{ + "key": "value1", + }, + } + + cm2 := &corev1.ConfigMap{ + Data: map[string]string{ + "key": "value2", + }, + } + + hash1 := hasher.HashConfigMap(cm1) + hash2 := hasher.HashConfigMap(cm2) + + if hash1 == hash2 { + t.Errorf("Different values should produce different hashes") + } +} + +func TestHasher_HashSecret(t *testing.T) { + hasher := NewHasher() + + tests := []struct { + name string + secret *corev1.Secret + wantHash string + }{ + { + name: "empty secret", + secret: &corev1.Secret{ + Data: nil, + }, + wantHash: hasher.EmptyHash(), + }, + { + name: "secret with data", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + }, + }, + wantHash: hasher.HashSecret(&corev1.Secret{ + Data: map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + }, + }), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := hasher.HashSecret(tt.secret) + if got != tt.wantHash { + t.Errorf("HashSecret() = %v, want %v", got, tt.wantHash) + } + }) + } +} + +func TestHasher_HashSecret_Deterministic(t *testing.T) { + hasher := 
NewHasher() + + secret := &corev1.Secret{ + Data: map[string][]byte{ + "z-key": []byte("value-z"), + "a-key": []byte("value-a"), + "m-key": []byte("value-m"), + }, + } + + // Hash should be the same regardless of iteration order + hash1 := hasher.HashSecret(secret) + hash2 := hasher.HashSecret(secret) + hash3 := hasher.HashSecret(secret) + + if hash1 != hash2 || hash2 != hash3 { + t.Errorf("Hash is not deterministic: %s, %s, %s", hash1, hash2, hash3) + } +} + +func TestHasher_HashSecret_DifferentValues(t *testing.T) { + hasher := NewHasher() + + secret1 := &corev1.Secret{ + Data: map[string][]byte{ + "key": []byte("value1"), + }, + } + + secret2 := &corev1.Secret{ + Data: map[string][]byte{ + "key": []byte("value2"), + }, + } + + hash1 := hasher.HashSecret(secret1) + hash2 := hasher.HashSecret(secret2) + + if hash1 == hash2 { + t.Errorf("Different values should produce different hashes") + } +} + +func TestHasher_EmptyHash(t *testing.T) { + hasher := NewHasher() + + emptyHash := hasher.EmptyHash() + if emptyHash == "" { + t.Error("EmptyHash should not be empty string") + } + + // Empty ConfigMap should match EmptyHash + cm := &corev1.ConfigMap{} + if hasher.HashConfigMap(cm) != emptyHash { + t.Error("Empty ConfigMap hash should equal EmptyHash") + } + + // Empty Secret should match EmptyHash + secret := &corev1.Secret{} + if hasher.HashSecret(secret) != emptyHash { + t.Error("Empty Secret hash should equal EmptyHash") + } +} + +func TestHasher_NilInput(t *testing.T) { + hasher := NewHasher() + + // Test nil ConfigMap + cmHash := hasher.HashConfigMap(nil) + if cmHash != hasher.EmptyHash() { + t.Errorf("nil ConfigMap should return EmptyHash, got %s", cmHash) + } + + // Test nil Secret + secretHash := hasher.HashSecret(nil) + if secretHash != hasher.EmptyHash() { + t.Errorf("nil Secret should return EmptyHash, got %s", secretHash) + } +} diff --git a/internal/pkg/reload/matcher_test.go b/internal/pkg/reload/matcher_test.go new file mode 100644 index 
000000000..5e4f8b8f7 --- /dev/null +++ b/internal/pkg/reload/matcher_test.go @@ -0,0 +1,488 @@ +package reload + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +func TestMatcher_ShouldReload(t *testing.T) { + defaultCfg := config.NewDefault() + matcher := NewMatcher(defaultCfg) + + tests := []struct { + name string + input MatchInput + wantReload bool + wantAutoReload bool + description string + }{ + // Ignore annotation tests + { + name: "ignore annotation on resource skips reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "true"}, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Resources with ignore annotation should never trigger reload", + }, + { + name: "ignore annotation false allows reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "false"}, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "Resources with ignore=false should allow reload", + }, + + // Exclude annotation tests + { + name: "exclude annotation skips reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "configmaps.exclude.reloader.stakater.com/reload": "my-config", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Excluded ConfigMaps should not trigger reload", + }, + { + name: "exclude 
annotation with multiple values", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "configmaps.exclude.reloader.stakater.com/reload": "other-config,my-config,another-config", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "ConfigMaps in comma-separated exclude list should not trigger reload", + }, + + // BUG FIX: Explicit annotation checked BEFORE auto + { + name: "explicit reload annotation with auto enabled - should reload", + input: MatchInput{ + ResourceName: "external-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "configmap.reloader.stakater.com/reload": "external-config", + }, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: false, // Explicit, not auto + description: "BUG FIX: Explicit reload annotation should work even when auto is enabled", + }, + { + name: "explicit reload annotation matches pattern - should reload", + input: MatchInput{ + ResourceName: "app-config-v2", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "app-config-.*", + }, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: false, + description: "Regex pattern in reload annotation should match", + }, + { + name: "explicit reload annotation does not match - should not reload", + input: MatchInput{ + ResourceName: "other-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "app-config", + }, + PodAnnotations: nil, + }, 
+ wantReload: false, + wantAutoReload: false, + description: "ConfigMaps not in reload list should not trigger reload", + }, + + // Auto annotation tests + { + name: "auto annotation on workload triggers reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "Auto annotation on workload should trigger reload", + }, + { + name: "auto annotation on pod template triggers reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: nil, + PodAnnotations: map[string]string{"reloader.stakater.com/auto": "true"}, + }, + wantReload: true, + wantAutoReload: true, + description: "Auto annotation on pod template should trigger reload", + }, + { + name: "configmap-specific auto annotation", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"configmap.reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "ConfigMap-specific auto annotation should trigger reload", + }, + { + name: "secret-specific auto annotation for secret", + input: MatchInput{ + ResourceName: "my-secret", + ResourceNamespace: "default", + ResourceType: ResourceTypeSecret, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"secret.reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "Secret-specific auto annotation should trigger reload for secrets", + }, + { + name: "configmap-specific auto annotation does not match secret", + 
input: MatchInput{ + ResourceName: "my-secret", + ResourceNamespace: "default", + ResourceType: ResourceTypeSecret, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"configmap.reloader.stakater.com/auto": "true"}, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "ConfigMap-specific auto annotation should not match secrets", + }, + + // Search/Match annotation tests + { + name: "search annotation with matching resource", + input: MatchInput{ + ResourceName: "app-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/match": "true"}, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/search": "true"}, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, // Search mode is an auto-discovery mechanism + description: "Search annotation with matching resource should trigger reload", + }, + { + name: "search annotation without matching resource", + input: MatchInput{ + ResourceName: "app-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{"reloader.stakater.com/search": "true"}, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Search annotation without matching resource should not trigger reload", + }, + + // No annotations - should not reload + { + name: "no annotations does not trigger reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: nil, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Without any annotations, should not trigger reload", + }, + + // Secret tests + { + name: "secret reload annotation", + input: MatchInput{ + ResourceName: "my-secret", + ResourceNamespace: "default", + 
ResourceType: ResourceTypeSecret, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "secret.reloader.stakater.com/reload": "my-secret", + }, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: false, + description: "Secret reload annotation should trigger reload", + }, + { + name: "secret exclude annotation", + input: MatchInput{ + ResourceName: "my-secret", + ResourceNamespace: "default", + ResourceType: ResourceTypeSecret, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "secrets.exclude.reloader.stakater.com/reload": "my-secret", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Secret exclude annotation should prevent reload", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := matcher.ShouldReload(tt.input) + + if result.ShouldReload != tt.wantReload { + t.Errorf("ShouldReload = %v, want %v (%s)", result.ShouldReload, tt.wantReload, tt.description) + } + + if result.AutoReload != tt.wantAutoReload { + t.Errorf("AutoReload = %v, want %v (%s)", result.AutoReload, tt.wantAutoReload, tt.description) + } + + t.Logf("✓ %s", tt.description) + }) + } +} + +func TestMatcher_ShouldReload_AutoReloadAll(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + matcher := NewMatcher(cfg) + + tests := []struct { + name string + input MatchInput + wantReload bool + wantAutoReload bool + description string + }{ + { + name: "auto-reload-all triggers reload", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: nil, + PodAnnotations: nil, + }, + wantReload: true, + wantAutoReload: true, + description: "With auto-reload-all enabled, all ConfigMaps should trigger reload", + }, + { + name: "auto-reload-all respects ignore annotation", + input: MatchInput{ + 
ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "true"}, + WorkloadAnnotations: nil, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Even with auto-reload-all, ignore annotation should be respected", + }, + { + name: "auto-reload-all respects exclude annotation", + input: MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "configmaps.exclude.reloader.stakater.com/reload": "my-config", + }, + PodAnnotations: nil, + }, + wantReload: false, + wantAutoReload: false, + description: "Even with auto-reload-all, exclude annotation should be respected", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := matcher.ShouldReload(tt.input) + + if result.ShouldReload != tt.wantReload { + t.Errorf("ShouldReload = %v, want %v (%s)", result.ShouldReload, tt.wantReload, tt.description) + } + + if result.AutoReload != tt.wantAutoReload { + t.Errorf("AutoReload = %v, want %v (%s)", result.AutoReload, tt.wantAutoReload, tt.description) + } + + t.Logf("✓ %s", tt.description) + }) + } +} + +// TestMatcher_BugFix_AutoDoesNotIgnoreExplicit tests the fix for the bug where +// having reloader.stakater.com/auto: "true" would cause explicit reload annotations +// to be ignored due to an early return. 
+func TestMatcher_BugFix_AutoDoesNotIgnoreExplicit(t *testing.T) { + cfg := config.NewDefault() + matcher := NewMatcher(cfg) + + // This is the exact scenario from the bug report: + // Workload has: + // reloader.stakater.com/auto: "true" (watches all referenced CMs) + // configmap.reloader.stakater.com/reload: "external-config" (ALSO watches this one) + // Container references: app-config + // + // When "external-config" changes: + // - Expected: Reload (explicitly listed) + // - Bug behavior: No reload (auto annotation causes early return) + + input := MatchInput{ + ResourceName: "external-config", // Not referenced by workload + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: nil, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", // Enables auto-reload + "configmap.reloader.stakater.com/reload": "external-config", // Explicit list + }, + PodAnnotations: nil, + } + + result := matcher.ShouldReload(input) + + if !result.ShouldReload { + t.Errorf("BUG: Explicit reload annotation ignored when auto is enabled") + t.Errorf("Expected ShouldReload=true for explicitly listed ConfigMap, got false") + } + + // Should be marked as non-auto since it matched the explicit list + if result.AutoReload { + t.Errorf("Expected AutoReload=false for explicit match, got true") + } + + t.Log("✓ Bug fixed: Explicit reload annotation works even when auto is enabled") +} + +// TestMatcher_PrecedenceOrder verifies the correct order of precedence: +// 1. Ignore annotation → skip +// 2. Exclude annotation → skip +// 3. Explicit reload annotation → reload (BUG FIX: before auto!) +// 4. Search/Match → reload +// 5. Auto annotation → reload +// 6. 
Auto-reload-all → reload +func TestMatcher_PrecedenceOrder(t *testing.T) { + cfg := config.NewDefault() + matcher := NewMatcher(cfg) + + t.Run("explicit takes precedence over auto", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "configmap.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.AutoReload { + t.Error("Expected explicit match (AutoReload=false), got auto match") + } + if !result.ShouldReload { + t.Error("Expected ShouldReload=true") + } + }) + + t.Run("ignore takes precedence over explicit", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "true"}, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.ShouldReload { + t.Error("Expected ignore to take precedence, but got ShouldReload=true") + } + }) + + t.Run("exclude takes precedence over explicit", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "my-config", + "configmaps.exclude.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.ShouldReload { + t.Error("Expected exclude to take precedence, but got ShouldReload=true") + } + }) +} diff --git a/internal/pkg/reload/strategy_test.go b/internal/pkg/reload/strategy_test.go new file mode 100644 index 000000000..41c54538d --- /dev/null +++ b/internal/pkg/reload/strategy_test.go @@ -0,0 +1,292 @@ +package reload + +import ( + 
"encoding/json" + "testing" + + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" +) + +func TestEnvVarStrategy_Apply(t *testing.T) { + strategy := NewEnvVarStrategy() + + t.Run("adds new env var", func(t *testing.T) { + container := &corev1.Container{ + Name: "test-container", + Env: []corev1.EnvVar{}, + } + + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "abc123", + Container: container, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !changed { + t.Error("expected changed=true for new env var") + } + + // Verify env var was added + found := false + for _, env := range container.Env { + if env.Name == "STAKATER_MY_CONFIG_CONFIGMAP" && env.Value == "abc123" { + found = true + break + } + } + if !found { + t.Errorf("expected env var STAKATER_MY_CONFIG_CONFIGMAP=abc123, got %+v", container.Env) + } + }) + + t.Run("updates existing env var", func(t *testing.T) { + container := &corev1.Container{ + Name: "test-container", + Env: []corev1.EnvVar{ + {Name: "STAKATER_MY_CONFIG_CONFIGMAP", Value: "old-hash"}, + }, + } + + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "new-hash", + Container: container, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !changed { + t.Error("expected changed=true for updated env var") + } + + // Verify env var was updated + if container.Env[0].Value != "new-hash" { + t.Errorf("expected env var value=new-hash, got %s", container.Env[0].Value) + } + }) + + t.Run("no change when hash is same", func(t *testing.T) { + container := &corev1.Container{ + Name: "test-container", + Env: []corev1.EnvVar{ + {Name: "STAKATER_MY_CONFIG_CONFIGMAP", Value: "same-hash"}, + }, + } + + input := StrategyInput{ + ResourceName: "my-config", + 
ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "same-hash", + Container: container, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if changed { + t.Error("expected changed=false when hash is unchanged") + } + }) + + t.Run("error when container is nil", func(t *testing.T) { + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "abc123", + Container: nil, + } + + _, err := strategy.Apply(input) + if err == nil { + t.Error("expected error for nil container") + } + }) + + t.Run("secret env var has correct postfix", func(t *testing.T) { + container := &corev1.Container{ + Name: "test-container", + Env: []corev1.EnvVar{}, + } + + input := StrategyInput{ + ResourceName: "my-secret", + ResourceType: ResourceTypeSecret, + Namespace: "default", + Hash: "abc123", + Container: container, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !changed { + t.Error("expected changed=true") + } + + // Verify env var name has SECRET postfix + found := false + for _, env := range container.Env { + if env.Name == "STAKATER_MY_SECRET_SECRET" && env.Value == "abc123" { + found = true + break + } + } + if !found { + t.Errorf("expected env var STAKATER_MY_SECRET_SECRET=abc123, got %+v", container.Env) + } + }) +} + +func TestEnvVarStrategy_EnvVarName(t *testing.T) { + strategy := NewEnvVarStrategy() + + tests := []struct { + resourceName string + resourceType ResourceType + expected string + }{ + {"my-config", ResourceTypeConfigMap, "STAKATER_MY_CONFIG_CONFIGMAP"}, + {"my-secret", ResourceTypeSecret, "STAKATER_MY_SECRET_SECRET"}, + {"app-config-v2", ResourceTypeConfigMap, "STAKATER_APP_CONFIG_V2_CONFIGMAP"}, + {"my.dotted.config", ResourceTypeConfigMap, "STAKATER_MY_DOTTED_CONFIG_CONFIGMAP"}, + {"MyMixedCase", ResourceTypeConfigMap, "STAKATER_MYMIXEDCASE_CONFIGMAP"}, + 
{"config-with-123-numbers", ResourceTypeConfigMap, "STAKATER_CONFIG_WITH_123_NUMBERS_CONFIGMAP"}, + } + + for _, tt := range tests { + t.Run(tt.resourceName, func(t *testing.T) { + got := strategy.envVarName(tt.resourceName, tt.resourceType) + if got != tt.expected { + t.Errorf("envVarName(%q, %q) = %q, want %q", + tt.resourceName, tt.resourceType, got, tt.expected) + } + }) + } +} + +func TestConvertToEnvVarName(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {"my-config", "MY_CONFIG"}, + {"my.config", "MY_CONFIG"}, + {"my_config", "MY_CONFIG"}, + {"MY-CONFIG", "MY_CONFIG"}, + {"config123", "CONFIG123"}, + {"123config", "123CONFIG"}, + {"my--config", "MY_CONFIG"}, + {"my..config", "MY_CONFIG"}, + {"", ""}, + {"-leading-dash", "LEADING_DASH"}, + {"trailing-dash-", "TRAILING_DASH_"}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := convertToEnvVarName(tt.input) + if got != tt.expected { + t.Errorf("convertToEnvVarName(%q) = %q, want %q", tt.input, got, tt.expected) + } + }) + } +} + +func TestAnnotationStrategy_Apply(t *testing.T) { + cfg := config.NewDefault() + strategy := NewAnnotationStrategy(cfg) + + t.Run("adds new annotation", func(t *testing.T) { + annotations := make(map[string]string) + container := &corev1.Container{Name: "test-container"} + + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "abc123", + Container: container, + PodAnnotations: annotations, + } + + changed, err := strategy.Apply(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !changed { + t.Error("expected changed=true for new annotation") + } + + // Verify annotation was added + annotationValue := annotations[cfg.Annotations.LastReloadedFrom] + if annotationValue == "" { + t.Error("expected annotation to be set") + } + + // Verify annotation content + var source ReloadSource + if err := json.Unmarshal([]byte(annotationValue), 
&source); err != nil { + t.Fatalf("failed to unmarshal annotation: %v", err) + } + if source.Kind != string(ResourceTypeConfigMap) { + t.Errorf("expected kind=%s, got %s", ResourceTypeConfigMap, source.Kind) + } + if source.Name != "my-config" { + t.Errorf("expected name=my-config, got %s", source.Name) + } + if source.Hash != "abc123" { + t.Errorf("expected hash=abc123, got %s", source.Hash) + } + }) + + t.Run("error when annotations map is nil", func(t *testing.T) { + input := StrategyInput{ + ResourceName: "my-config", + ResourceType: ResourceTypeConfigMap, + Namespace: "default", + Hash: "abc123", + PodAnnotations: nil, + } + + _, err := strategy.Apply(input) + if err == nil { + t.Error("expected error for nil annotations map") + } + }) +} + +func TestNewStrategy(t *testing.T) { + t.Run("default strategy is env-vars", func(t *testing.T) { + cfg := config.NewDefault() + strategy := NewStrategy(cfg) + + if strategy.Name() != string(config.ReloadStrategyEnvVars) { + t.Errorf("expected env-vars strategy, got %s", strategy.Name()) + } + }) + + t.Run("annotations strategy when configured", func(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyAnnotations + strategy := NewStrategy(cfg) + + if strategy.Name() != string(config.ReloadStrategyAnnotations) { + t.Errorf("expected annotations strategy, got %s", strategy.Name()) + } + }) +} From ce1e7dfafb6dd9f488670ad649c315fa0aec71de Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:52 +0100 Subject: [PATCH 05/35] feat: Add webhook notification and conflict retry for Reloader v2 --- internal/pkg/config/config.go | 4 + internal/pkg/config/flags.go | 4 + .../pkg/controller/configmap_reconciler.go | 121 ++++++++++++----- internal/pkg/controller/manager.go | 124 ++++++++++++++++++ internal/pkg/controller/retry.go | 68 ++++++++++ internal/pkg/controller/secret_reconciler.go | 116 +++++++++++----- 
internal/pkg/events/recorder.go | 60 +++++++++ internal/pkg/metrics/prometheus.go | 24 ++++ internal/pkg/webhook/webhook.go | 95 ++++++++++++++ 9 files changed, 555 insertions(+), 61 deletions(-) create mode 100644 internal/pkg/controller/manager.go create mode 100644 internal/pkg/controller/retry.go create mode 100644 internal/pkg/events/recorder.go create mode 100644 internal/pkg/webhook/webhook.go diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 8861fb5be..6288d5f9a 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -82,6 +82,9 @@ type Config struct { // Metrics configuration MetricsAddr string // Address to serve metrics on (default :9090) + // Health probe configuration + HealthAddr string // Address to serve health probes on (default :8081) + // Profiling configuration EnablePProf bool PProfAddr string @@ -170,6 +173,7 @@ func NewDefault() *Config { LogFormat: "", LogLevel: "info", MetricsAddr: ":9090", + HealthAddr: ":8081", EnablePProf: false, PProfAddr: ":6060", Alerting: AlertingConfig{}, diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index bf242bca3..ef423082c 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -78,6 +78,10 @@ func BindFlags(fs *pflag.FlagSet, cfg *Config) { fs.StringVar(&cfg.MetricsAddr, "metrics-addr", cfg.MetricsAddr, "Address to serve metrics on") + // Health probes + fs.StringVar(&cfg.HealthAddr, "health-addr", cfg.HealthAddr, + "Address to serve health probes on") + // Profiling fs.BoolVar(&cfg.EnablePProf, "enable-pprof", cfg.EnablePProf, "Enable pprof profiling server") diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index 2dcd33381..cb78a7ba9 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -3,11 +3,14 @@ package controller import ( "context" "sync" + "time" "github.com/go-logr/logr" 
"github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -28,9 +31,9 @@ type ConfigMapReconciler struct { ReloadService *reload.Service Registry *workload.Registry Collectors *metrics.Collectors + EventRecorder *events.Recorder + WebhookClient *webhook.Client - // initialized tracks whether initial sync has completed. - // Used to skip create events during startup unless SyncAfterRestart is enabled. initialized bool initOnce sync.Once } @@ -79,20 +82,31 @@ func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } decisions := r.ReloadService.ProcessConfigMap(change, workloads) - // Apply reloads + // Collect workloads that should be reloaded + var workloadsToReload []reload.ReloadDecision for _, decision := range decisions { - if !decision.ShouldReload { - continue + if decision.ShouldReload { + workloadsToReload = append(workloadsToReload, decision) } + } + + // If webhook is configured, send notification instead of modifying workloads + if r.WebhookClient.IsConfigured() && len(workloadsToReload) > 0 { + return r.sendWebhookNotification(ctx, cm.Name, cm.Namespace, reload.ResourceTypeConfigMap, workloadsToReload, log) + } + // Apply reloads with conflict retry + for _, decision := range workloadsToReload { log.Info("reloading workload", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), "reason", decision.Reason, ) - updated, err := r.ReloadService.ApplyReload( + updated, err := UpdateWorkloadWithRetry( ctx, + r.Client, + r.ReloadService, decision.Workload, cm.Name, reload.ResourceTypeConfigMap, @@ -101,24 +115,17 @@ func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( 
decision.AutoReload, ) if err != nil { - log.Error(err, "failed to apply reload", + log.Error(err, "failed to update workload", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) + r.EventRecorder.ReloadFailed(decision.Workload.GetObject(), "ConfigMap", cm.Name, err) r.recordMetrics(false, cm.Namespace) continue } if updated { - // Persist the changes - if err := r.Update(ctx, decision.Workload.GetObject()); err != nil { - log.Error(err, "failed to update workload", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - ) - r.recordMetrics(false, cm.Namespace) - continue - } + r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "ConfigMap", cm.Name) r.recordMetrics(true, cm.Namespace) log.Info("workload reloaded successfully", "workload", decision.Workload.GetName(), @@ -130,6 +137,9 @@ func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } +// FieldManager is the field manager name used for server-side apply. +const FieldManager = "reloader" + // handleDelete handles ConfigMap deletion events. 
func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { log.Info("handling ConfigMap deletion") @@ -141,8 +151,7 @@ func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request return ctrl.Result{}, err } - // For delete events, we create a change with nil ConfigMap - // The service will use an empty hash + // For delete events, we create a change with empty ConfigMap change := reload.ConfigMapChange{ ConfigMap: &corev1.ConfigMap{}, EventType: reload.EventTypeDelete, @@ -152,19 +161,30 @@ func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request decisions := r.ReloadService.ProcessConfigMap(change, workloads) - // Apply reloads for delete + // Collect workloads that should be reloaded + var workloadsToReload []reload.ReloadDecision for _, decision := range decisions { - if !decision.ShouldReload { - continue + if decision.ShouldReload { + workloadsToReload = append(workloadsToReload, decision) } + } + // If webhook is configured, send notification instead of modifying workloads + if r.WebhookClient.IsConfigured() && len(workloadsToReload) > 0 { + return r.sendWebhookNotification(ctx, req.Name, req.Namespace, reload.ResourceTypeConfigMap, workloadsToReload, log) + } + + // Apply reloads for delete with conflict retry + for _, decision := range workloadsToReload { log.Info("reloading workload due to ConfigMap deletion", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) - updated, err := r.ReloadService.ApplyReload( + updated, err := UpdateWorkloadWithRetry( ctx, + r.Client, + r.ReloadService, decision.Workload, req.Name, reload.ResourceTypeConfigMap, @@ -173,17 +193,14 @@ func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request decision.AutoReload, ) if err != nil { - log.Error(err, "failed to apply reload for deletion") + log.Error(err, "failed to update workload") + 
r.EventRecorder.ReloadFailed(decision.Workload.GetObject(), "ConfigMap", req.Name, err) r.recordMetrics(false, req.Namespace) continue } if updated { - if err := r.Update(ctx, decision.Workload.GetObject()); err != nil { - log.Error(err, "failed to update workload") - r.recordMetrics(false, req.Namespace) - continue - } + r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "ConfigMap", req.Name) r.recordMetrics(true, req.Namespace) } } @@ -291,10 +308,52 @@ func (r *ConfigMapReconciler) listCronJobs(ctx context.Context, namespace string // recordMetrics records reload metrics. func (r *ConfigMapReconciler) recordMetrics(success bool, namespace string) { - if r.Collectors == nil { - return + r.Collectors.RecordReload(success, namespace) +} + +// sendWebhookNotification sends a webhook notification instead of modifying workloads. +func (r *ConfigMapReconciler) sendWebhookNotification( + ctx context.Context, + resourceName, namespace string, + resourceType reload.ResourceType, + decisions []reload.ReloadDecision, + log logr.Logger, +) (ctrl.Result, error) { + var workloads []webhook.WorkloadInfo + var hash string + for _, d := range decisions { + workloads = append(workloads, webhook.WorkloadInfo{ + Kind: string(d.Workload.Kind()), + Name: d.Workload.GetName(), + Namespace: d.Workload.GetNamespace(), + }) + if hash == "" { + hash = d.Hash + } + } + + payload := webhook.Payload{ + Kind: string(resourceType), + Namespace: namespace, + ResourceName: resourceName, + ResourceType: string(resourceType), + Hash: hash, + Timestamp: time.Now().UTC(), + Workloads: workloads, } - // TODO: Integrate with existing metrics collectors + + if err := r.WebhookClient.Send(ctx, payload); err != nil { + log.Error(err, "failed to send webhook notification") + r.recordMetrics(false, namespace) + return ctrl.Result{}, err + } + + log.Info("webhook notification sent", + "resource", resourceName, + "workloadCount", len(workloads), + ) + r.recordMetrics(true, namespace) + return 
ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go new file mode 100644 index 000000000..d4914cdd3 --- /dev/null +++ b/internal/pkg/controller/manager.go @@ -0,0 +1,124 @@ +package controller + +import ( + "context" + "fmt" + "time" + + argorolloutsv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +var runtimeScheme = runtime.NewScheme() + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(runtimeScheme)) + utilruntime.Must(argorolloutsv1alpha1.AddToScheme(runtimeScheme)) +} + +// ManagerOptions contains options for creating a new Manager. +type ManagerOptions struct { + Config *config.Config + Log logr.Logger + Collectors *metrics.Collectors +} + +// NewManager creates a new controller-runtime manager with the given options. 
+func NewManager(opts ManagerOptions) (ctrl.Manager, error) { + cfg := opts.Config + + leaseDuration := 15 * time.Second + renewDeadline := 10 * time.Second + retryPeriod := 2 * time.Second + + mgrOpts := ctrl.Options{ + Scheme: runtimeScheme, + Metrics: ctrlmetrics.Options{ + BindAddress: cfg.MetricsAddr, + }, + HealthProbeBindAddress: cfg.HealthAddr, + LeaderElection: cfg.EnableHA, + LeaderElectionID: "reloader-leader-election", + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, + RetryPeriod: &retryPeriod, + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), mgrOpts) + if err != nil { + return nil, fmt.Errorf("creating manager: %w", err) + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + return nil, fmt.Errorf("setting up health check: %w", err) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + return nil, fmt.Errorf("setting up ready check: %w", err) + } + + return mgr, nil +} + +// SetupReconcilers sets up all reconcilers with the manager. 
+func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, collectors *metrics.Collectors) error { + registry := workload.NewRegistry(cfg.ArgoRolloutsEnabled) + reloadService := reload.NewService(cfg) + eventRecorder := events.NewRecorder(mgr.GetEventRecorderFor("reloader")) + + // Create webhook client if URL is configured + var webhookClient *webhook.Client + if cfg.WebhookURL != "" { + webhookClient = webhook.NewClient(cfg.WebhookURL, log.WithName("webhook")) + log.Info("webhook mode enabled", "url", cfg.WebhookURL) + } + + if !cfg.IsResourceIgnored("configmaps") { + if err := (&ConfigMapReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("configmap-reconciler"), + Config: cfg, + ReloadService: reloadService, + Registry: registry, + Collectors: collectors, + EventRecorder: eventRecorder, + WebhookClient: webhookClient, + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("setting up configmap reconciler: %w", err) + } + } + + if !cfg.IsResourceIgnored("secrets") { + if err := (&SecretReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("secret-reconciler"), + Config: cfg, + ReloadService: reloadService, + Registry: registry, + Collectors: collectors, + EventRecorder: eventRecorder, + WebhookClient: webhookClient, + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("setting up secret reconciler: %w", err) + } + } + + return nil +} + +// RunManager starts the manager and blocks until it stops. 
+func RunManager(ctx context.Context, mgr ctrl.Manager, log logr.Logger) error { + log.Info("starting manager") + return mgr.Start(ctx) +} diff --git a/internal/pkg/controller/retry.go b/internal/pkg/controller/retry.go new file mode 100644 index 000000000..f81e8e923 --- /dev/null +++ b/internal/pkg/controller/retry.go @@ -0,0 +1,68 @@ +package controller + +import ( + "context" + + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/workload" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// UpdateWorkloadWithRetry updates a workload with exponential backoff on conflict. +// On conflict, it re-fetches the object, re-applies the reload changes, and retries. +func UpdateWorkloadWithRetry( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.WorkloadAccessor, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + var updated bool + isFirstAttempt := true + + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + // On retry, re-fetch the object to get the latest ResourceVersion + if !isFirstAttempt { + obj := wl.GetObject() + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if errors.IsNotFound(err) { + // Object was deleted, nothing to update + return nil + } + return err + } + } + isFirstAttempt = false + + // Apply reload changes (this modifies the workload in-place) + var applyErr error + updated, applyErr = reloadService.ApplyReload( + ctx, + wl, + resourceName, + resourceType, + namespace, + hash, + autoReload, + ) + if applyErr != nil { + return applyErr + } + + if !updated { + return nil + } + + // Attempt update with field ownership + return c.Update(ctx, wl.GetObject(), client.FieldOwner(FieldManager)) + }) + + return updated, err +} diff --git 
a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index 54e4aa8ec..0d68ae131 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ b/internal/pkg/controller/secret_reconciler.go @@ -3,11 +3,14 @@ package controller import ( "context" "sync" + "time" "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -28,9 +31,9 @@ type SecretReconciler struct { ReloadService *reload.Service Registry *workload.Registry Collectors *metrics.Collectors + EventRecorder *events.Recorder + WebhookClient *webhook.Client - // initialized tracks whether initial sync has completed. - // Used to skip create events during startup unless SyncAfterRestart is enabled. 
initialized bool initOnce sync.Once } @@ -79,20 +82,31 @@ func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } decisions := r.ReloadService.ProcessSecret(change, workloads) - // Apply reloads + // Collect workloads that should be reloaded + var workloadsToReload []reload.ReloadDecision for _, decision := range decisions { - if !decision.ShouldReload { - continue + if decision.ShouldReload { + workloadsToReload = append(workloadsToReload, decision) } + } + // If webhook is configured, send notification instead of modifying workloads + if r.WebhookClient.IsConfigured() && len(workloadsToReload) > 0 { + return r.sendWebhookNotification(ctx, secret.Name, secret.Namespace, reload.ResourceTypeSecret, workloadsToReload, log) + } + + // Apply reloads with conflict retry + for _, decision := range workloadsToReload { log.Info("reloading workload", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), "reason", decision.Reason, ) - updated, err := r.ReloadService.ApplyReload( + updated, err := UpdateWorkloadWithRetry( ctx, + r.Client, + r.ReloadService, decision.Workload, secret.Name, reload.ResourceTypeSecret, @@ -101,24 +115,17 @@ func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr decision.AutoReload, ) if err != nil { - log.Error(err, "failed to apply reload", + log.Error(err, "failed to update workload", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) + r.EventRecorder.ReloadFailed(decision.Workload.GetObject(), "Secret", secret.Name, err) r.recordMetrics(false, secret.Namespace) continue } if updated { - // Persist the changes - if err := r.Update(ctx, decision.Workload.GetObject()); err != nil { - log.Error(err, "failed to update workload", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - ) - r.recordMetrics(false, secret.Namespace) - continue - } + r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "Secret", 
secret.Name) r.recordMetrics(true, secret.Namespace) log.Info("workload reloaded successfully", "workload", decision.Workload.GetName(), @@ -142,7 +149,6 @@ func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, l } // For delete events, we create a change with empty Secret - // The service will use an empty hash change := reload.SecretChange{ Secret: &corev1.Secret{}, EventType: reload.EventTypeDelete, @@ -152,19 +158,30 @@ func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, l decisions := r.ReloadService.ProcessSecret(change, workloads) - // Apply reloads for delete + // Collect workloads that should be reloaded + var workloadsToReload []reload.ReloadDecision for _, decision := range decisions { - if !decision.ShouldReload { - continue + if decision.ShouldReload { + workloadsToReload = append(workloadsToReload, decision) } + } + // If webhook is configured, send notification instead of modifying workloads + if r.WebhookClient.IsConfigured() && len(workloadsToReload) > 0 { + return r.sendWebhookNotification(ctx, req.Name, req.Namespace, reload.ResourceTypeSecret, workloadsToReload, log) + } + + // Apply reloads for delete with conflict retry + for _, decision := range workloadsToReload { log.Info("reloading workload due to Secret deletion", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) - updated, err := r.ReloadService.ApplyReload( + updated, err := UpdateWorkloadWithRetry( ctx, + r.Client, + r.ReloadService, decision.Workload, req.Name, reload.ResourceTypeSecret, @@ -173,17 +190,14 @@ func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, l decision.AutoReload, ) if err != nil { - log.Error(err, "failed to apply reload for deletion") + log.Error(err, "failed to update workload") + r.EventRecorder.ReloadFailed(decision.Workload.GetObject(), "Secret", req.Name, err) r.recordMetrics(false, req.Namespace) continue } if updated { - if err := r.Update(ctx, 
decision.Workload.GetObject()); err != nil { - log.Error(err, "failed to update workload") - r.recordMetrics(false, req.Namespace) - continue - } + r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "Secret", req.Name) r.recordMetrics(true, req.Namespace) } } @@ -291,10 +305,52 @@ func (r *SecretReconciler) listCronJobs(ctx context.Context, namespace string) ( // recordMetrics records reload metrics. func (r *SecretReconciler) recordMetrics(success bool, namespace string) { - if r.Collectors == nil { - return + r.Collectors.RecordReload(success, namespace) +} + +// sendWebhookNotification sends a webhook notification instead of modifying workloads. +func (r *SecretReconciler) sendWebhookNotification( + ctx context.Context, + resourceName, namespace string, + resourceType reload.ResourceType, + decisions []reload.ReloadDecision, + log logr.Logger, +) (ctrl.Result, error) { + var workloads []webhook.WorkloadInfo + var hash string + for _, d := range decisions { + workloads = append(workloads, webhook.WorkloadInfo{ + Kind: string(d.Workload.Kind()), + Name: d.Workload.GetName(), + Namespace: d.Workload.GetNamespace(), + }) + if hash == "" { + hash = d.Hash + } + } + + payload := webhook.Payload{ + Kind: string(resourceType), + Namespace: namespace, + ResourceName: resourceName, + ResourceType: string(resourceType), + Hash: hash, + Timestamp: time.Now().UTC(), + Workloads: workloads, } - // TODO: Integrate with existing metrics collectors + + if err := r.WebhookClient.Send(ctx, payload); err != nil { + log.Error(err, "failed to send webhook notification") + r.recordMetrics(false, namespace) + return ctrl.Result{}, err + } + + log.Info("webhook notification sent", + "resource", resourceName, + "workloadCount", len(workloads), + ) + r.recordMetrics(true, namespace) + return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. 
diff --git a/internal/pkg/events/recorder.go b/internal/pkg/events/recorder.go new file mode 100644 index 000000000..1f3eef58f --- /dev/null +++ b/internal/pkg/events/recorder.go @@ -0,0 +1,60 @@ +package events + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" +) + +const ( + // EventTypeNormal represents a normal event. + EventTypeNormal = corev1.EventTypeNormal + // EventTypeWarning represents a warning event. + EventTypeWarning = corev1.EventTypeWarning + + // ReasonReloaded indicates a workload was successfully reloaded. + ReasonReloaded = "Reloaded" + // ReasonReloadFailed indicates a workload reload failed. + ReasonReloadFailed = "ReloadFailed" +) + +// Recorder wraps the Kubernetes event recorder. +type Recorder struct { + recorder record.EventRecorder +} + +// NewRecorder creates a new event Recorder. +func NewRecorder(recorder record.EventRecorder) *Recorder { + if recorder == nil { + return nil + } + return &Recorder{recorder: recorder} +} + +// ReloadSuccess records a successful reload event. +func (r *Recorder) ReloadSuccess(object runtime.Object, resourceType, resourceName string) { + if r == nil || r.recorder == nil { + return + } + r.recorder.Event( + object, + EventTypeNormal, + ReasonReloaded, + fmt.Sprintf("Reloaded due to %s %s change", resourceType, resourceName), + ) +} + +// ReloadFailed records a failed reload event. 
+func (r *Recorder) ReloadFailed(object runtime.Object, resourceType, resourceName string, err error) { + if r == nil || r.recorder == nil { + return + } + r.recorder.Event( + object, + EventTypeWarning, + ReasonReloadFailed, + fmt.Sprintf("Failed to reload due to %s %s change: %v", resourceType, resourceName, err), + ) +} diff --git a/internal/pkg/metrics/prometheus.go b/internal/pkg/metrics/prometheus.go index 94153eace..f78ef03d0 100644 --- a/internal/pkg/metrics/prometheus.go +++ b/internal/pkg/metrics/prometheus.go @@ -8,9 +8,32 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" ) +// Collectors holds Prometheus metrics collectors for Reloader. type Collectors struct { Reloaded *prometheus.CounterVec ReloadedByNamespace *prometheus.CounterVec + countByNamespace bool +} + +// RecordReload records a reload event with the given success status and namespace. +func (c *Collectors) RecordReload(success bool, namespace string) { + if c == nil { + return + } + + successLabel := "false" + if success { + successLabel = "true" + } + + c.Reloaded.With(prometheus.Labels{"success": successLabel}).Inc() + + if c.countByNamespace { + c.ReloadedByNamespace.With(prometheus.Labels{ + "success": successLabel, + "namespace": namespace, + }).Inc() + } } func NewCollectors() Collectors { @@ -43,6 +66,7 @@ func NewCollectors() Collectors { return Collectors{ Reloaded: reloaded, ReloadedByNamespace: reloaded_by_namespace, + countByNamespace: os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled", } } diff --git a/internal/pkg/webhook/webhook.go b/internal/pkg/webhook/webhook.go new file mode 100644 index 000000000..a40c5a5c6 --- /dev/null +++ b/internal/pkg/webhook/webhook.go @@ -0,0 +1,95 @@ +// Package webhook handles sending reload notifications to external endpoints. +// When --webhook-url is set, Reloader sends HTTP POST requests instead of modifying workloads. 
+package webhook + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/go-logr/logr" +) + +// Payload represents the data sent to the webhook endpoint. +type Payload struct { + Kind string `json:"kind"` + Namespace string `json:"namespace"` + ResourceName string `json:"resourceName"` + ResourceType string `json:"resourceType"` + Hash string `json:"hash"` + Timestamp time.Time `json:"timestamp"` + + // Workloads contains the list of workloads that would be reloaded. + Workloads []WorkloadInfo `json:"workloads"` +} + +// WorkloadInfo describes a workload that would be reloaded. +type WorkloadInfo struct { + Kind string `json:"kind"` + Name string `json:"name"` + Namespace string `json:"namespace"` +} + +// Client sends reload notifications to webhook endpoints. +type Client struct { + httpClient *http.Client + url string + log logr.Logger +} + +// NewClient creates a new webhook client. +func NewClient(url string, log logr.Logger) *Client { + return &Client{ + httpClient: &http.Client{ + Timeout: 30 * time.Second, + }, + url: url, + log: log, + } +} + +// Send posts the payload to the configured webhook URL. 
+func (c *Client) Send(ctx context.Context, payload Payload) error { + if c.url == "" { + return nil + } + + data, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("marshaling payload: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("creating request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", "Reloader/2.0") + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("sending request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("webhook returned status %d", resp.StatusCode) + } + + c.log.V(1).Info("webhook notification sent", + "url", c.url, + "resourceType", payload.ResourceType, + "resourceName", payload.ResourceName, + "workloadCount", len(payload.Workloads), + ) + + return nil +} + +// IsConfigured returns true if the webhook URL is set. 
+func (c *Client) IsConfigured() bool { + return c != nil && c.url != "" +} From b5df945fffb009c76cbe2fbfe9e52400ff22f31f Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:52 +0100 Subject: [PATCH 06/35] feat: reload execution and observability --- internal/pkg/metadata/metadata.go | 283 +++++++++++ internal/pkg/metadata/metadata_test.go | 330 +++++++++++++ internal/pkg/reload/hasher.go | 6 +- internal/pkg/reload/hasher_test.go | 37 +- internal/pkg/reload/predicate_test.go | 502 ++++++++++++++++++++ internal/pkg/reload/service.go | 48 +- internal/pkg/reload/service_test.go | 620 +++++++++++++++++++++++++ internal/pkg/reload/strategy.go | 22 +- 8 files changed, 1827 insertions(+), 21 deletions(-) create mode 100644 internal/pkg/metadata/metadata.go create mode 100644 internal/pkg/metadata/metadata_test.go create mode 100644 internal/pkg/reload/predicate_test.go create mode 100644 internal/pkg/reload/service_test.go diff --git a/internal/pkg/metadata/metadata.go b/internal/pkg/metadata/metadata.go new file mode 100644 index 000000000..09db4e8e9 --- /dev/null +++ b/internal/pkg/metadata/metadata.go @@ -0,0 +1,283 @@ +// Package metadata provides metadata ConfigMap creation for Reloader. +// The metadata ConfigMap contains build info, configuration options, and deployment info. +package metadata + +import ( + "context" + "encoding/json" + "fmt" + "os" + "runtime" + "time" + + "github.com/sirupsen/logrus" + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // ConfigMapName is the name of the metadata ConfigMap. + ConfigMapName = "reloader-meta-info" + // ConfigMapLabelKey is the label key for the metadata ConfigMap. 
+ ConfigMapLabelKey = "reloader.stakater.com/meta-info" + // ConfigMapLabelValue is the label value for the metadata ConfigMap. + ConfigMapLabelValue = "reloader-oss" + // FieldManager is the field manager name for server-side apply. + FieldManager = "reloader" + + // Environment variables for deployment info. + EnvReloaderNamespace = "RELOADER_NAMESPACE" + EnvReloaderDeploymentName = "RELOADER_DEPLOYMENT_NAME" +) + +// Version, Commit, and BuildDate are set during the build process +// using the -X linker flag to inject these values into the binary. +var ( + Version = "dev" + Commit = "unknown" + BuildDate = "unknown" +) + +// MetaInfo contains comprehensive metadata about the Reloader instance. +type MetaInfo struct { + // BuildInfo contains information about the build version, commit, and compilation details. + BuildInfo BuildInfo `json:"buildInfo"` + // ReloaderOptions contains all the configuration options used by this Reloader instance. + ReloaderOptions ReloaderOptions `json:"reloaderOptions"` + // DeploymentInfo contains metadata about the Kubernetes deployment of this instance. + DeploymentInfo DeploymentInfo `json:"deploymentInfo"` +} + +// BuildInfo contains information about the build and version of the Reloader binary. +type BuildInfo struct { + // GoVersion is the version of Go used to compile the binary. + GoVersion string `json:"goVersion"` + // ReleaseVersion is the version tag or branch of the Reloader release. + ReleaseVersion string `json:"releaseVersion"` + // CommitHash is the Git commit hash of the source code used to build this binary. + CommitHash string `json:"commitHash"` + // CommitTime is the timestamp of the Git commit used to build this binary. + CommitTime time.Time `json:"commitTime"` +} + +// DeploymentInfo contains metadata about the Reloader deployment. +type DeploymentInfo struct { + // Name is the name of the Reloader deployment. + Name string `json:"name"` + // Namespace is the namespace where Reloader is deployed. 
+ Namespace string `json:"namespace"` +} + +// ReloaderOptions contains the configuration options for Reloader. +// This is a subset of config.Config that's relevant for the metadata ConfigMap. +type ReloaderOptions struct { + // AutoReloadAll enables automatic reloading of all resources. + AutoReloadAll bool `json:"autoReloadAll"` + // ReloadStrategy specifies the strategy used to trigger resource reloads. + ReloadStrategy string `json:"reloadStrategy"` + // IsArgoRollouts indicates whether support for Argo Rollouts is enabled. + IsArgoRollouts bool `json:"isArgoRollouts"` + // ReloadOnCreate indicates whether to trigger reloads when resources are created. + ReloadOnCreate bool `json:"reloadOnCreate"` + // ReloadOnDelete indicates whether to trigger reloads when resources are deleted. + ReloadOnDelete bool `json:"reloadOnDelete"` + // SyncAfterRestart indicates whether to sync add events after Reloader restarts. + SyncAfterRestart bool `json:"syncAfterRestart"` + // EnableHA indicates whether High Availability mode is enabled. + EnableHA bool `json:"enableHA"` + // WebhookURL is the URL to send webhook notifications to. + WebhookURL string `json:"webhookUrl"` + // LogFormat specifies the log format to use. + LogFormat string `json:"logFormat"` + // LogLevel specifies the log level to use. + LogLevel string `json:"logLevel"` + // ResourcesToIgnore is a list of resource types to ignore. + ResourcesToIgnore []string `json:"resourcesToIgnore"` + // WorkloadTypesToIgnore is a list of workload types to ignore. + WorkloadTypesToIgnore []string `json:"workloadTypesToIgnore"` + // NamespacesToIgnore is a list of namespaces to ignore. + NamespacesToIgnore []string `json:"namespacesToIgnore"` + // NamespaceSelectors is a list of namespace label selectors. + NamespaceSelectors []string `json:"namespaceSelectors"` + // ResourceSelectors is a list of resource label selectors. 
+ ResourceSelectors []string `json:"resourceSelectors"` + + // Annotations + ConfigmapUpdateOnChangeAnnotation string `json:"configmapUpdateOnChangeAnnotation"` + SecretUpdateOnChangeAnnotation string `json:"secretUpdateOnChangeAnnotation"` + ReloaderAutoAnnotation string `json:"reloaderAutoAnnotation"` + ConfigmapReloaderAutoAnnotation string `json:"configmapReloaderAutoAnnotation"` + SecretReloaderAutoAnnotation string `json:"secretReloaderAutoAnnotation"` + IgnoreResourceAnnotation string `json:"ignoreResourceAnnotation"` + ConfigmapExcludeReloaderAnnotation string `json:"configmapExcludeReloaderAnnotation"` + SecretExcludeReloaderAnnotation string `json:"secretExcludeReloaderAnnotation"` + AutoSearchAnnotation string `json:"autoSearchAnnotation"` + SearchMatchAnnotation string `json:"searchMatchAnnotation"` + RolloutStrategyAnnotation string `json:"rolloutStrategyAnnotation"` + PauseDeploymentAnnotation string `json:"pauseDeploymentAnnotation"` + PauseDeploymentTimeAnnotation string `json:"pauseDeploymentTimeAnnotation"` +} + +// NewBuildInfo creates a new BuildInfo with current build information. +func NewBuildInfo() BuildInfo { + return BuildInfo{ + GoVersion: runtime.Version(), + ReleaseVersion: Version, + CommitHash: Commit, + CommitTime: parseUTCTime(BuildDate), + } +} + +// NewReloaderOptions creates ReloaderOptions from a Config. 
+func NewReloaderOptions(cfg *config.Config) ReloaderOptions { + return ReloaderOptions{ + AutoReloadAll: cfg.AutoReloadAll, + ReloadStrategy: string(cfg.ReloadStrategy), + IsArgoRollouts: cfg.ArgoRolloutsEnabled, + ReloadOnCreate: cfg.ReloadOnCreate, + ReloadOnDelete: cfg.ReloadOnDelete, + SyncAfterRestart: cfg.SyncAfterRestart, + EnableHA: cfg.EnableHA, + WebhookURL: cfg.WebhookURL, + LogFormat: cfg.LogFormat, + LogLevel: cfg.LogLevel, + ResourcesToIgnore: cfg.IgnoredResources, + WorkloadTypesToIgnore: cfg.IgnoredWorkloads, + NamespacesToIgnore: cfg.IgnoredNamespaces, + NamespaceSelectors: cfg.NamespaceSelectorStrings, + ResourceSelectors: cfg.ResourceSelectorStrings, + ConfigmapUpdateOnChangeAnnotation: cfg.Annotations.ConfigmapReload, + SecretUpdateOnChangeAnnotation: cfg.Annotations.SecretReload, + ReloaderAutoAnnotation: cfg.Annotations.Auto, + ConfigmapReloaderAutoAnnotation: cfg.Annotations.ConfigmapAuto, + SecretReloaderAutoAnnotation: cfg.Annotations.SecretAuto, + IgnoreResourceAnnotation: cfg.Annotations.Ignore, + ConfigmapExcludeReloaderAnnotation: cfg.Annotations.ConfigmapExclude, + SecretExcludeReloaderAnnotation: cfg.Annotations.SecretExclude, + AutoSearchAnnotation: cfg.Annotations.Search, + SearchMatchAnnotation: cfg.Annotations.Match, + RolloutStrategyAnnotation: cfg.Annotations.RolloutStrategy, + PauseDeploymentAnnotation: cfg.Annotations.PausePeriod, + PauseDeploymentTimeAnnotation: cfg.Annotations.PausedAt, + } +} + +// NewMetaInfo creates a new MetaInfo from configuration. +func NewMetaInfo(cfg *config.Config) *MetaInfo { + return &MetaInfo{ + BuildInfo: NewBuildInfo(), + ReloaderOptions: NewReloaderOptions(cfg), + DeploymentInfo: DeploymentInfo{ + Name: os.Getenv(EnvReloaderDeploymentName), + Namespace: os.Getenv(EnvReloaderNamespace), + }, + } +} + +// ToConfigMap converts MetaInfo to a Kubernetes ConfigMap. 
+func (m *MetaInfo) ToConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName, + Namespace: m.DeploymentInfo.Namespace, + Labels: map[string]string{ + ConfigMapLabelKey: ConfigMapLabelValue, + }, + }, + Data: map[string]string{ + "buildInfo": toJSON(m.BuildInfo), + "reloaderOptions": toJSON(m.ReloaderOptions), + "deploymentInfo": toJSON(m.DeploymentInfo), + }, + } +} + +// Publisher handles creating and updating the metadata ConfigMap. +type Publisher struct { + client client.Client + cfg *config.Config +} + +// NewPublisher creates a new Publisher. +func NewPublisher(c client.Client, cfg *config.Config) *Publisher { + return &Publisher{ + client: c, + cfg: cfg, + } +} + +// Publish creates or updates the metadata ConfigMap. +// Returns an error if the operation fails, or nil on success. +// If RELOADER_NAMESPACE is not set, this is a no-op. +func (p *Publisher) Publish(ctx context.Context) error { + namespace := os.Getenv(EnvReloaderNamespace) + if namespace == "" { + logrus.Warn("RELOADER_NAMESPACE is not set, skipping meta info configmap creation") + return nil + } + + metaInfo := NewMetaInfo(p.cfg) + configMap := metaInfo.ToConfigMap() + + // Try to get existing ConfigMap + existing := &corev1.ConfigMap{} + err := p.client.Get(ctx, client.ObjectKey{ + Name: ConfigMapName, + Namespace: namespace, + }, existing) + + if err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to get existing meta info configmap: %w", err) + } + // ConfigMap doesn't exist, create it + logrus.Info("Creating meta info configmap") + if err := p.client.Create(ctx, configMap, client.FieldOwner(FieldManager)); err != nil { + return fmt.Errorf("failed to create meta info configmap: %w", err) + } + logrus.Info("Meta info configmap created successfully") + return nil + } + + // ConfigMap exists, update it + logrus.Info("Meta info configmap already exists, updating it") + existing.Data = configMap.Data + 
existing.Labels = configMap.Labels + if err := p.client.Update(ctx, existing, client.FieldOwner(FieldManager)); err != nil { + return fmt.Errorf("failed to update meta info configmap: %w", err) + } + logrus.Info("Meta info configmap updated successfully") + return nil +} + +// PublishMetaInfoConfigMap is a convenience function that creates a Publisher and calls Publish. +// This provides a simple API similar to the v1 PublishMetaInfoConfigmap function. +func PublishMetaInfoConfigMap(ctx context.Context, c client.Client, cfg *config.Config) error { + publisher := NewPublisher(c, cfg) + return publisher.Publish(ctx) +} + +// toJSON marshals data to JSON string. Returns empty string on error. +func toJSON(data interface{}) string { + jsonData, err := json.Marshal(data) + if err != nil { + return "" + } + return string(jsonData) +} + +// parseUTCTime parses a time string in RFC3339 format. +// Returns zero time if value is empty or parsing fails. +func parseUTCTime(value string) time.Time { + if value == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC3339, value) + if err != nil { + return time.Time{} + } + return t +} diff --git a/internal/pkg/metadata/metadata_test.go b/internal/pkg/metadata/metadata_test.go new file mode 100644 index 000000000..94fce1838 --- /dev/null +++ b/internal/pkg/metadata/metadata_test.go @@ -0,0 +1,330 @@ +package metadata + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestNewBuildInfo(t *testing.T) { + // Set build variables for testing + oldVersion := Version + oldCommit := Commit + oldBuildDate := BuildDate + defer func() { + Version = oldVersion + Commit = oldCommit + BuildDate = oldBuildDate + }() + + Version = "1.0.0" + Commit = "abc123" + BuildDate = "2024-01-01T12:00:00Z" + + info 
:= NewBuildInfo() + + if info.ReleaseVersion != "1.0.0" { + t.Errorf("ReleaseVersion = %s, want 1.0.0", info.ReleaseVersion) + } + if info.CommitHash != "abc123" { + t.Errorf("CommitHash = %s, want abc123", info.CommitHash) + } + if info.GoVersion == "" { + t.Error("GoVersion should not be empty") + } + if info.CommitTime.IsZero() { + t.Error("CommitTime should not be zero") + } +} + +func TestNewReloaderOptions(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + cfg.ReloadStrategy = config.ReloadStrategyAnnotations + cfg.ArgoRolloutsEnabled = true + cfg.ReloadOnCreate = true + cfg.ReloadOnDelete = true + cfg.EnableHA = true + cfg.WebhookURL = "https://example.com/webhook" + cfg.LogFormat = "json" + cfg.LogLevel = "debug" + cfg.IgnoredResources = []string{"configmaps"} + cfg.IgnoredWorkloads = []string{"jobs"} + cfg.IgnoredNamespaces = []string{"kube-system"} + + opts := NewReloaderOptions(cfg) + + if !opts.AutoReloadAll { + t.Error("AutoReloadAll should be true") + } + if opts.ReloadStrategy != "annotations" { + t.Errorf("ReloadStrategy = %s, want annotations", opts.ReloadStrategy) + } + if !opts.IsArgoRollouts { + t.Error("IsArgoRollouts should be true") + } + if !opts.ReloadOnCreate { + t.Error("ReloadOnCreate should be true") + } + if !opts.ReloadOnDelete { + t.Error("ReloadOnDelete should be true") + } + if !opts.EnableHA { + t.Error("EnableHA should be true") + } + if opts.WebhookURL != "https://example.com/webhook" { + t.Errorf("WebhookURL = %s, want https://example.com/webhook", opts.WebhookURL) + } + if opts.LogFormat != "json" { + t.Errorf("LogFormat = %s, want json", opts.LogFormat) + } + if opts.LogLevel != "debug" { + t.Errorf("LogLevel = %s, want debug", opts.LogLevel) + } + if len(opts.ResourcesToIgnore) != 1 || opts.ResourcesToIgnore[0] != "configmaps" { + t.Errorf("ResourcesToIgnore = %v, want [configmaps]", opts.ResourcesToIgnore) + } + if len(opts.WorkloadTypesToIgnore) != 1 || opts.WorkloadTypesToIgnore[0] != "jobs" { + 
t.Errorf("WorkloadTypesToIgnore = %v, want [jobs]", opts.WorkloadTypesToIgnore) + } + if len(opts.NamespacesToIgnore) != 1 || opts.NamespacesToIgnore[0] != "kube-system" { + t.Errorf("NamespacesToIgnore = %v, want [kube-system]", opts.NamespacesToIgnore) + } + + // Check annotations + if opts.ReloaderAutoAnnotation != "reloader.stakater.com/auto" { + t.Errorf("ReloaderAutoAnnotation = %s, want reloader.stakater.com/auto", opts.ReloaderAutoAnnotation) + } +} + +func TestMetaInfo_ToConfigMap(t *testing.T) { + // Set environment variables + os.Setenv(EnvReloaderNamespace, "reloader-ns") + os.Setenv(EnvReloaderDeploymentName, "reloader-deploy") + defer func() { + os.Unsetenv(EnvReloaderNamespace) + os.Unsetenv(EnvReloaderDeploymentName) + }() + + cfg := config.NewDefault() + metaInfo := NewMetaInfo(cfg) + cm := metaInfo.ToConfigMap() + + if cm.Name != ConfigMapName { + t.Errorf("Name = %s, want %s", cm.Name, ConfigMapName) + } + if cm.Namespace != "reloader-ns" { + t.Errorf("Namespace = %s, want reloader-ns", cm.Namespace) + } + if cm.Labels[ConfigMapLabelKey] != ConfigMapLabelValue { + t.Errorf("Label = %s, want %s", cm.Labels[ConfigMapLabelKey], ConfigMapLabelValue) + } + + // Check data fields exist + if _, ok := cm.Data["buildInfo"]; !ok { + t.Error("buildInfo data key missing") + } + if _, ok := cm.Data["reloaderOptions"]; !ok { + t.Error("reloaderOptions data key missing") + } + if _, ok := cm.Data["deploymentInfo"]; !ok { + t.Error("deploymentInfo data key missing") + } + + // Verify buildInfo is valid JSON + var buildInfo BuildInfo + if err := json.Unmarshal([]byte(cm.Data["buildInfo"]), &buildInfo); err != nil { + t.Errorf("buildInfo is not valid JSON: %v", err) + } + + // Verify deploymentInfo contains expected values + var deployInfo DeploymentInfo + if err := json.Unmarshal([]byte(cm.Data["deploymentInfo"]), &deployInfo); err != nil { + t.Errorf("deploymentInfo is not valid JSON: %v", err) + } + if deployInfo.Namespace != "reloader-ns" { + 
t.Errorf("DeploymentInfo.Namespace = %s, want reloader-ns", deployInfo.Namespace) + } + if deployInfo.Name != "reloader-deploy" { + t.Errorf("DeploymentInfo.Name = %s, want reloader-deploy", deployInfo.Name) + } +} + +func TestPublisher_Publish_NoNamespace(t *testing.T) { + // Ensure RELOADER_NAMESPACE is not set + os.Unsetenv(EnvReloaderNamespace) + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + + cfg := config.NewDefault() + publisher := NewPublisher(fakeClient, cfg) + + err := publisher.Publish(context.Background()) + if err != nil { + t.Errorf("Publish() with no namespace should not error, got: %v", err) + } +} + +func TestPublisher_Publish_CreateNew(t *testing.T) { + // Set environment variables + os.Setenv(EnvReloaderNamespace, "test-ns") + os.Setenv(EnvReloaderDeploymentName, "test-deploy") + defer func() { + os.Unsetenv(EnvReloaderNamespace) + os.Unsetenv(EnvReloaderDeploymentName) + }() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + + cfg := config.NewDefault() + publisher := NewPublisher(fakeClient, cfg) + + ctx := context.Background() + err := publisher.Publish(ctx) + if err != nil { + t.Errorf("Publish() error = %v", err) + } + + // Verify ConfigMap was created + cm := &corev1.ConfigMap{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) + if err != nil { + t.Errorf("Failed to get created ConfigMap: %v", err) + } + if cm.Name != ConfigMapName { + t.Errorf("ConfigMap.Name = %s, want %s", cm.Name, ConfigMapName) + } +} + +func TestPublisher_Publish_UpdateExisting(t *testing.T) { + // Set environment variables + os.Setenv(EnvReloaderNamespace, "test-ns") + os.Setenv(EnvReloaderDeploymentName, "test-deploy") + defer func() { + os.Unsetenv(EnvReloaderNamespace) + os.Unsetenv(EnvReloaderDeploymentName) + }() + + scheme := runtime.NewScheme() + 
_ = corev1.AddToScheme(scheme) + + // Create existing ConfigMap with old data + existingCM := &corev1.ConfigMap{} + existingCM.Name = ConfigMapName + existingCM.Namespace = "test-ns" + existingCM.Data = map[string]string{ + "buildInfo": `{"goVersion":"old"}`, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(existingCM). + Build() + + cfg := config.NewDefault() + publisher := NewPublisher(fakeClient, cfg) + + ctx := context.Background() + err := publisher.Publish(ctx) + if err != nil { + t.Errorf("Publish() error = %v", err) + } + + // Verify ConfigMap was updated + cm := &corev1.ConfigMap{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) + if err != nil { + t.Errorf("Failed to get updated ConfigMap: %v", err) + } + + // Check that all data keys are present + if _, ok := cm.Data["buildInfo"]; !ok { + t.Error("buildInfo data key missing after update") + } + if _, ok := cm.Data["reloaderOptions"]; !ok { + t.Error("reloaderOptions data key missing after update") + } + if _, ok := cm.Data["deploymentInfo"]; !ok { + t.Error("deploymentInfo data key missing after update") + } + + // Verify labels were added + if cm.Labels[ConfigMapLabelKey] != ConfigMapLabelValue { + t.Errorf("Label not updated: %s", cm.Labels[ConfigMapLabelKey]) + } +} + +func TestPublishMetaInfoConfigMap(t *testing.T) { + // Set environment variables + os.Setenv(EnvReloaderNamespace, "test-ns") + defer os.Unsetenv(EnvReloaderNamespace) + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + + cfg := config.NewDefault() + ctx := context.Background() + + err := PublishMetaInfoConfigMap(ctx, fakeClient, cfg) + if err != nil { + t.Errorf("PublishMetaInfoConfigMap() error = %v", err) + } + + // Verify ConfigMap was created + cm := &corev1.ConfigMap{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) + if err 
!= nil { + t.Errorf("Failed to get created ConfigMap: %v", err) + } +} + +func TestParseUTCTime(t *testing.T) { + tests := []struct { + name string + input string + wantErr bool + }{ + { + name: "valid RFC3339 time", + input: "2024-01-01T12:00:00Z", + wantErr: false, + }, + { + name: "empty string", + input: "", + wantErr: true, // returns zero time + }, + { + name: "invalid format", + input: "not-a-time", + wantErr: true, // returns zero time + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseUTCTime(tt.input) + if tt.wantErr { + if !result.IsZero() { + t.Errorf("parseUTCTime(%s) should return zero time", tt.input) + } + } else { + if result.IsZero() { + t.Errorf("parseUTCTime(%s) should not return zero time", tt.input) + } + } + }) + } +} diff --git a/internal/pkg/reload/hasher.go b/internal/pkg/reload/hasher.go index 6c1b1613a..3839fd1a9 100644 --- a/internal/pkg/reload/hasher.go +++ b/internal/pkg/reload/hasher.go @@ -78,8 +78,8 @@ func (h *Hasher) computeSHA(data string) string { return fmt.Sprintf("%x", hasher.Sum(nil)) } -// EmptyHash returns the hash of empty content. -// This is useful for comparison when resources are deleted. +// EmptyHash returns an empty string to signal resource deletion. +// This triggers env var removal when using the env-vars strategy. 
func (h *Hasher) EmptyHash() string { - return h.computeSHA("") + return "" } diff --git a/internal/pkg/reload/hasher_test.go b/internal/pkg/reload/hasher_test.go index 0b892adca..92edbf34e 100644 --- a/internal/pkg/reload/hasher_test.go +++ b/internal/pkg/reload/hasher_test.go @@ -20,7 +20,8 @@ func TestHasher_HashConfigMap(t *testing.T) { Data: nil, BinaryData: nil, }, - wantHash: hasher.EmptyHash(), + // Empty configmap gets a valid hash (hash of empty data) + wantHash: hasher.HashConfigMap(&corev1.ConfigMap{}), }, { name: "configmap with data", @@ -120,7 +121,8 @@ func TestHasher_HashSecret(t *testing.T) { secret: &corev1.Secret{ Data: nil, }, - wantHash: hasher.EmptyHash(), + // Empty secret gets a valid hash (hash of empty data) + wantHash: hasher.HashSecret(&corev1.Secret{}), }, { name: "secret with data", @@ -196,36 +198,39 @@ func TestHasher_HashSecret_DifferentValues(t *testing.T) { func TestHasher_EmptyHash(t *testing.T) { hasher := NewHasher() + // EmptyHash returns empty string to signal deletion emptyHash := hasher.EmptyHash() - if emptyHash == "" { - t.Error("EmptyHash should not be empty string") + if emptyHash != "" { + t.Errorf("EmptyHash should be empty string, got %s", emptyHash) } - // Empty ConfigMap should match EmptyHash + // Empty ConfigMap should have a valid hash (not empty) cm := &corev1.ConfigMap{} - if hasher.HashConfigMap(cm) != emptyHash { - t.Error("Empty ConfigMap hash should equal EmptyHash") + cmHash := hasher.HashConfigMap(cm) + if cmHash == "" { + t.Error("Empty ConfigMap should have a non-empty hash") } - // Empty Secret should match EmptyHash + // Empty Secret should have a valid hash (not empty) secret := &corev1.Secret{} - if hasher.HashSecret(secret) != emptyHash { - t.Error("Empty Secret hash should equal EmptyHash") + secretHash := hasher.HashSecret(secret) + if secretHash == "" { + t.Error("Empty Secret should have a non-empty hash") } } func TestHasher_NilInput(t *testing.T) { hasher := NewHasher() - // Test nil 
ConfigMap + // Test nil ConfigMap - returns hash of empty content (not EmptyHash) cmHash := hasher.HashConfigMap(nil) - if cmHash != hasher.EmptyHash() { - t.Errorf("nil ConfigMap should return EmptyHash, got %s", cmHash) + if cmHash == "" { + t.Error("nil ConfigMap should return a valid hash") } - // Test nil Secret + // Test nil Secret - returns hash of empty content (not EmptyHash) secretHash := hasher.HashSecret(nil) - if secretHash != hasher.EmptyHash() { - t.Errorf("nil Secret should return EmptyHash, got %s", secretHash) + if secretHash == "" { + t.Error("nil Secret should return a valid hash") } } diff --git a/internal/pkg/reload/predicate_test.go b/internal/pkg/reload/predicate_test.go new file mode 100644 index 000000000..5386121ea --- /dev/null +++ b/internal/pkg/reload/predicate_test.go @@ -0,0 +1,502 @@ +package reload + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func TestNamespaceFilterPredicate_Create(t *testing.T) { + tests := []struct { + name string + ignoredNamespaces []string + eventNamespace string + wantAllow bool + }{ + { + name: "allow non-ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + eventNamespace: "default", + wantAllow: true, + }, + { + name: "block ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + eventNamespace: "kube-system", + wantAllow: false, + }, + { + name: "allow when no namespaces ignored", + ignoredNamespaces: []string{}, + eventNamespace: "kube-system", + wantAllow: true, + }, + { + name: "block multiple ignored namespaces", + ignoredNamespaces: []string{"kube-system", "kube-public", "test-ns"}, + eventNamespace: "test-ns", + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = 
tt.ignoredNamespaces + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.eventNamespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }) + } +} + +func TestNamespaceFilterPredicate_Update(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + e := event.UpdateEvent{ObjectNew: cm} + if !predicate.Update(e) { + t.Error("Update() should allow non-ignored namespace") + } + + cm.Namespace = "kube-system" + e = event.UpdateEvent{ObjectNew: cm} + if predicate.Update(e) { + t.Error("Update() should block ignored namespace") + } +} + +func TestNamespaceFilterPredicate_Delete(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + e := event.DeleteEvent{Object: cm} + if !predicate.Delete(e) { + t.Error("Delete() should allow non-ignored namespace") + } +} + +func TestNamespaceFilterPredicate_Generic(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + e := event.GenericEvent{Object: cm} + if !predicate.Generic(e) { + t.Error("Generic() should allow non-ignored namespace") + } +} + +func TestLabelSelectorPredicate_Create(t *testing.T) { + tests := []struct { + name string + selector string + objectLabels map[string]string + wantAllow bool + }{ + { + name: "match single 
label", + selector: "app=reloader", + objectLabels: map[string]string{"app": "reloader"}, + wantAllow: true, + }, + { + name: "no match single label", + selector: "app=reloader", + objectLabels: map[string]string{"app": "other"}, + wantAllow: false, + }, + { + name: "match multiple labels", + selector: "app=reloader,env=prod", + objectLabels: map[string]string{"app": "reloader", "env": "prod", "extra": "value"}, + wantAllow: true, + }, + { + name: "partial match fails", + selector: "app=reloader,env=prod", + objectLabels: map[string]string{"app": "reloader"}, + wantAllow: false, + }, + { + name: "empty labels no match", + selector: "app=reloader", + objectLabels: map[string]string{}, + wantAllow: false, + }, + { + name: "nil labels no match", + selector: "app=reloader", + objectLabels: nil, + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := config.NewDefault() + selector, err := labels.Parse(tt.selector) + if err != nil { + t.Fatalf("Failed to parse selector: %v", err) + } + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.objectLabels, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }) + } +} + +func TestLabelSelectorPredicate_NoSelectors(t *testing.T) { + cfg := config.NewDefault() + // No selectors configured + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"any": "label"}, + }, + } + + e := event.CreateEvent{Object: cm} + if !predicate.Create(e) { + t.Error("Create() should allow all when no selectors configured") + } +} + +func TestLabelSelectorPredicate_MultipleSelectors(t *testing.T) { + cfg := 
config.NewDefault() + selector1, _ := labels.Parse("app=reloader") + selector2, _ := labels.Parse("type=config") + cfg.ResourceSelectors = []labels.Selector{selector1, selector2} + predicate := LabelSelectorPredicate(cfg) + + tests := []struct { + name string + labels map[string]string + wantAllow bool + }{ + { + name: "matches first selector", + labels: map[string]string{"app": "reloader"}, + wantAllow: true, + }, + { + name: "matches second selector", + labels: map[string]string{"type": "config"}, + wantAllow: true, + }, + { + name: "matches both selectors", + labels: map[string]string{"app": "reloader", "type": "config"}, + wantAllow: true, + }, + { + name: "matches neither selector", + labels: map[string]string{"other": "value"}, + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }) + } +} + +func TestLabelSelectorPredicate_Update(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("app=reloader") + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cmMatching := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"app": "reloader"}, + }, + } + + cmNotMatching := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"app": "other"}, + }, + } + + e := event.UpdateEvent{ObjectNew: cmMatching} + if !predicate.Update(e) { + t.Error("Update() should allow matching labels") + } + + e = event.UpdateEvent{ObjectNew: cmNotMatching} + if predicate.Update(e) { + t.Error("Update() should block non-matching labels") + } +} + +func 
TestLabelSelectorPredicate_Delete(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("app=reloader") + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"app": "reloader"}, + }, + } + + e := event.DeleteEvent{Object: cm} + if !predicate.Delete(e) { + t.Error("Delete() should allow matching labels") + } +} + +func TestLabelSelectorPredicate_Generic(t *testing.T) { + cfg := config.NewDefault() + selector, _ := labels.Parse("app=reloader") + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: map[string]string{"app": "reloader"}, + }, + } + + e := event.GenericEvent{Object: cm} + if !predicate.Generic(e) { + t.Error("Generic() should allow matching labels") + } +} + +func TestCombinedFiltering(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + selector, _ := labels.Parse("managed=true") + cfg.ResourceSelectors = []labels.Selector{selector} + + nsPredicate := NamespaceFilterPredicate(cfg) + labelPredicate := LabelSelectorPredicate(cfg) + + tests := []struct { + name string + namespace string + labels map[string]string + wantNSAllow bool + wantLabelAllow bool + }{ + { + name: "allowed namespace and matching labels", + namespace: "default", + labels: map[string]string{"managed": "true"}, + wantNSAllow: true, + wantLabelAllow: true, + }, + { + name: "allowed namespace but non-matching labels", + namespace: "default", + labels: map[string]string{"managed": "false"}, + wantNSAllow: true, + wantLabelAllow: false, + }, + { + name: "ignored namespace with matching labels", + namespace: "kube-system", + labels: map[string]string{"managed": "true"}, + wantNSAllow: false, + 
wantLabelAllow: true, + }, + { + name: "ignored namespace and non-matching labels", + namespace: "kube-system", + labels: map[string]string{"managed": "false"}, + wantNSAllow: false, + wantLabelAllow: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + + gotNS := nsPredicate.Create(e) + if gotNS != tt.wantNSAllow { + t.Errorf("Namespace predicate Create() = %v, want %v", gotNS, tt.wantNSAllow) + } + + gotLabel := labelPredicate.Create(e) + if gotLabel != tt.wantLabelAllow { + t.Errorf("Label predicate Create() = %v, want %v", gotLabel, tt.wantLabelAllow) + } + + // Both must be true for the event to pass through + combinedAllow := gotNS && gotLabel + expectedCombined := tt.wantNSAllow && tt.wantLabelAllow + if combinedAllow != expectedCombined { + t.Errorf("Combined allow = %v, want %v", combinedAllow, expectedCombined) + } + }) + } +} + +func TestFilteringWithSecrets(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + nsPredicate := NamespaceFilterPredicate(cfg) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + } + + e := event.CreateEvent{Object: secret} + if !nsPredicate.Create(e) { + t.Error("Should allow secret in non-ignored namespace") + } + + secret.Namespace = "kube-system" + e = event.CreateEvent{Object: secret} + if nsPredicate.Create(e) { + t.Error("Should block secret in ignored namespace") + } +} + +func TestExistsLabelSelector(t *testing.T) { + cfg := config.NewDefault() + // Selector that checks if label exists (any value) + selector, _ := labels.Parse("managed") + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + tests := []struct { + name string + labels map[string]string + wantAllow 
bool + }{ + { + name: "label exists with value true", + labels: map[string]string{"managed": "true"}, + wantAllow: true, + }, + { + name: "label exists with value false", + labels: map[string]string{"managed": "false"}, + wantAllow: true, + }, + { + name: "label exists with empty value", + labels: map[string]string{"managed": ""}, + wantAllow: true, + }, + { + name: "label does not exist", + labels: map[string]string{"other": "value"}, + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }) + } +} diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go index 169ae9ea0..d0f681d08 100644 --- a/internal/pkg/reload/service.go +++ b/internal/pkg/reload/service.go @@ -2,7 +2,9 @@ package reload import ( "context" + "encoding/json" "fmt" + "time" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/workload" @@ -224,7 +226,51 @@ func (s *Service) ApplyReload( AutoReload: autoReload, } - return s.strategy.Apply(input) + // Apply the strategy-specific changes + updated, err := s.strategy.Apply(input) + if err != nil { + return false, err + } + + // Always set the attribution annotation regardless of strategy + if updated { + s.setAttributionAnnotation(wl, resourceName, resourceType, namespace, hash, container) + } + + return updated, nil +} + +// setAttributionAnnotation sets the last-reloaded-from annotation on the pod template. +// This is always set regardless of the reload strategy for audit purposes. 
+func (s *Service) setAttributionAnnotation( + wl workload.WorkloadAccessor, + resourceName string, + resourceType ResourceType, + namespace string, + hash string, + container *corev1.Container, +) { + containerName := "" + if container != nil { + containerName = container.Name + } + + source := ReloadSource{ + Kind: string(resourceType), + Name: resourceName, + Namespace: namespace, + Hash: hash, + Containers: []string{containerName}, + ReloadedAt: time.Now().UTC(), + } + + sourceJSON, err := json.Marshal(source) + if err != nil { + // Non-fatal: skip annotation if marshaling fails + return + } + + wl.SetPodTemplateAnnotation(s.cfg.Annotations.LastReloadedFrom, string(sourceJSON)) } // findTargetContainer finds the container to target for the reload. diff --git a/internal/pkg/reload/service_test.go b/internal/pkg/reload/service_test.go new file mode 100644 index 000000000..068804249 --- /dev/null +++ b/internal/pkg/reload/service_test.go @@ -0,0 +1,620 @@ +package reload + +import ( + "context" + "testing" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestService_ProcessConfigMap_AutoReload(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Create a deployment with auto annotation that uses the configmap + deploy := createTestDeployment("test-deploy", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }, + }, + } + + workloads := []workload.WorkloadAccessor{ + workload.NewDeploymentWorkload(deploy), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + 
Namespace: "default", + }, + Data: map[string]string{ + "key": "value", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.ProcessConfigMap(change, workloads) + + if len(decisions) != 1 { + t.Fatalf("Expected 1 decision, got %d", len(decisions)) + } + + if !decisions[0].ShouldReload { + t.Error("Expected ShouldReload to be true") + } + + if !decisions[0].AutoReload { + t.Error("Expected AutoReload to be true") + } + + if decisions[0].Hash == "" { + t.Error("Expected Hash to be non-empty") + } +} + +func TestService_ProcessConfigMap_ExplicitAnnotation(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Create a deployment with explicit configmap annotation + deploy := createTestDeployment("test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }) + + workloads := []workload.WorkloadAccessor{ + workload.NewDeploymentWorkload(deploy), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{ + "key": "value", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.ProcessConfigMap(change, workloads) + + if len(decisions) != 1 { + t.Fatalf("Expected 1 decision, got %d", len(decisions)) + } + + if !decisions[0].ShouldReload { + t.Error("Expected ShouldReload to be true for explicit annotation") + } + + if decisions[0].AutoReload { + t.Error("Expected AutoReload to be false for explicit annotation") + } +} + +func TestService_ProcessConfigMap_IgnoredResource(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Create a deployment with auto annotation + deploy := createTestDeployment("test-deploy", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: 
corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }, + }, + } + + workloads := []workload.WorkloadAccessor{ + workload.NewDeploymentWorkload(deploy), + } + + // ConfigMap with ignore annotation + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Annotations: map[string]string{ + "reloader.stakater.com/ignore": "true", + }, + }, + Data: map[string]string{ + "key": "value", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.ProcessConfigMap(change, workloads) + + // Should still get a decision, but ShouldReload should be false + for _, d := range decisions { + if d.ShouldReload { + t.Error("Expected ShouldReload to be false for ignored resource") + } + } +} + +func TestService_ProcessSecret_AutoReload(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Create a deployment with auto annotation that uses the secret + deploy := createTestDeployment("test-deploy", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "test-secret", + }, + }, + }, + } + + workloads := []workload.WorkloadAccessor{ + workload.NewDeploymentWorkload(deploy), + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "key": []byte("value"), + }, + } + + change := SecretChange{ + Secret: secret, + EventType: EventTypeUpdate, + } + + decisions := svc.ProcessSecret(change, workloads) + + if len(decisions) != 1 { + t.Fatalf("Expected 1 decision, got %d", len(decisions)) + } + + if !decisions[0].ShouldReload { + t.Error("Expected ShouldReload to be true") + } + + if !decisions[0].AutoReload 
{ + t.Error("Expected AutoReload to be true") + } +} + +func TestService_ProcessConfigMap_DeleteEvent(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnDelete = true + svc := NewService(cfg) + + // Create a deployment with explicit configmap annotation + deploy := createTestDeployment("test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }) + + workloads := []workload.WorkloadAccessor{ + workload.NewDeploymentWorkload(deploy), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeDelete, + } + + decisions := svc.ProcessConfigMap(change, workloads) + + if len(decisions) != 1 { + t.Fatalf("Expected 1 decision, got %d", len(decisions)) + } + + if !decisions[0].ShouldReload { + t.Error("Expected ShouldReload to be true for delete event") + } + + // Hash should be empty for delete events + if decisions[0].Hash != "" { + t.Errorf("Expected empty hash for delete event, got %s", decisions[0].Hash) + } +} + +func TestService_ProcessConfigMap_DeleteEventDisabled(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnDelete = false // Disabled by default + svc := NewService(cfg) + + deploy := createTestDeployment("test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }) + + workloads := []workload.WorkloadAccessor{ + workload.NewDeploymentWorkload(deploy), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeDelete, + } + + decisions := svc.ProcessConfigMap(change, workloads) + + // Should return nil when delete events are disabled + if decisions != nil { + t.Error("Expected nil decisions when delete events are disabled") + } +} + +func TestService_ApplyReload_EnvVarStrategy(t *testing.T) { + cfg := 
config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyEnvVars + svc := NewService(cfg) + + deploy := createTestDeployment("test-deploy", "default", nil) + accessor := workload.NewDeploymentWorkload(deploy) + + ctx := context.Background() + updated, err := svc.ApplyReload(ctx, accessor, "test-cm", ResourceTypeConfigMap, "default", "abc123hash", false) + + if err != nil { + t.Fatalf("ApplyReload failed: %v", err) + } + + if !updated { + t.Error("Expected updated to be true") + } + + // Verify env var was added + containers := accessor.GetContainers() + if len(containers) == 0 { + t.Fatal("No containers found") + } + + found := false + for _, env := range containers[0].Env { + if env.Name == "STAKATER_TEST_CM_CONFIGMAP" && env.Value == "abc123hash" { + found = true + break + } + } + + if !found { + t.Error("Expected env var STAKATER_TEST_CM_CONFIGMAP to be set") + } + + // Verify attribution annotation was set + annotations := accessor.GetPodTemplateAnnotations() + if annotations["reloader.stakater.com/last-reloaded-from"] == "" { + t.Error("Expected last-reloaded-from annotation to be set") + } +} + +func TestService_ApplyReload_AnnotationStrategy(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyAnnotations + svc := NewService(cfg) + + deploy := createTestDeployment("test-deploy", "default", nil) + accessor := workload.NewDeploymentWorkload(deploy) + + ctx := context.Background() + updated, err := svc.ApplyReload(ctx, accessor, "test-cm", ResourceTypeConfigMap, "default", "abc123hash", false) + + if err != nil { + t.Fatalf("ApplyReload failed: %v", err) + } + + if !updated { + t.Error("Expected updated to be true") + } + + // Verify annotation was added + annotations := accessor.GetPodTemplateAnnotations() + if annotations["reloader.stakater.com/last-reloaded-from"] == "" { + t.Error("Expected last-reloaded-from annotation to be set") + } +} + +func TestService_ApplyReload_EnvVarDeletion(t *testing.T) { + cfg := 
config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyEnvVars + svc := NewService(cfg) + + deploy := createTestDeployment("test-deploy", "default", nil) + // Pre-add an env var + deploy.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ + {Name: "STAKATER_TEST_CM_CONFIGMAP", Value: "oldhash"}, + {Name: "OTHER_VAR", Value: "keep"}, + } + accessor := workload.NewDeploymentWorkload(deploy) + + ctx := context.Background() + // Empty hash signals deletion + updated, err := svc.ApplyReload(ctx, accessor, "test-cm", ResourceTypeConfigMap, "default", "", false) + + if err != nil { + t.Fatalf("ApplyReload failed: %v", err) + } + + if !updated { + t.Error("Expected updated to be true for env var removal") + } + + // Verify env var was removed + containers := accessor.GetContainers() + for _, env := range containers[0].Env { + if env.Name == "STAKATER_TEST_CM_CONFIGMAP" { + t.Error("Expected env var STAKATER_TEST_CM_CONFIGMAP to be removed") + } + } + + // Verify other env var was kept + found := false + for _, env := range containers[0].Env { + if env.Name == "OTHER_VAR" { + found = true + break + } + } + if !found { + t.Error("Expected OTHER_VAR to be kept") + } +} + +func TestService_ApplyReload_NoChangeIfSameHash(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = config.ReloadStrategyEnvVars + svc := NewService(cfg) + + deploy := createTestDeployment("test-deploy", "default", nil) + // Pre-add env var with same hash + deploy.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ + {Name: "STAKATER_TEST_CM_CONFIGMAP", Value: "abc123hash"}, + } + accessor := workload.NewDeploymentWorkload(deploy) + + ctx := context.Background() + updated, err := svc.ApplyReload(ctx, accessor, "test-cm", ResourceTypeConfigMap, "default", "abc123hash", false) + + if err != nil { + t.Fatalf("ApplyReload failed: %v", err) + } + + if updated { + t.Error("Expected updated to be false when hash is unchanged") + } +} + +func 
TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Create multiple workloads + deploy1 := createTestDeployment("deploy1", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + deploy1.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "shared-cm", + }, + }, + }, + }, + } + + deploy2 := createTestDeployment("deploy2", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + deploy2.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "shared-cm", + }, + }, + }, + }, + } + + // Deploy3 doesn't use the configmap + deploy3 := createTestDeployment("deploy3", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + + workloads := []workload.WorkloadAccessor{ + workload.NewDeploymentWorkload(deploy1), + workload.NewDeploymentWorkload(deploy2), + workload.NewDeploymentWorkload(deploy3), + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shared-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.ProcessConfigMap(change, workloads) + + if len(decisions) != 3 { + t.Fatalf("Expected 3 decisions, got %d", len(decisions)) + } + + // Count how many should reload + reloadCount := 0 + for _, d := range decisions { + if d.ShouldReload { + reloadCount++ + } + } + + // Only deploy1 and deploy2 should reload (they use the configmap) + if reloadCount != 2 { + t.Errorf("Expected 2 workloads to reload, got %d", reloadCount) + } +} + +func 
TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Create deployments in different namespaces + deploy1 := createTestDeployment("deploy1", "namespace-a", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + deploy1.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }, + }, + } + + deploy2 := createTestDeployment("deploy2", "namespace-b", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + deploy2.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }, + }, + } + + workloads := []workload.WorkloadAccessor{ + workload.NewDeploymentWorkload(deploy1), + workload.NewDeploymentWorkload(deploy2), + } + + // ConfigMap in namespace-a + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "namespace-a", + }, + Data: map[string]string{"key": "value"}, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeUpdate, + } + + decisions := svc.ProcessConfigMap(change, workloads) + + // Should only affect deploy1 (same namespace) + reloadCount := 0 + for _, d := range decisions { + if d.ShouldReload { + reloadCount++ + } + } + + if reloadCount != 1 { + t.Errorf("Expected 1 workload to reload (same namespace), got %d", reloadCount) + } +} + +// Helper function to create a test deployment +func createTestDeployment(name, namespace string, annotations map[string]string) *appsv1.Deployment { + replicas := int32(1) + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: 
appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx:latest", + }, + }, + }, + }, + }, + } +} diff --git a/internal/pkg/reload/strategy.go b/internal/pkg/reload/strategy.go index ce938a7ef..9a147fe2e 100644 --- a/internal/pkg/reload/strategy.go +++ b/internal/pkg/reload/strategy.go @@ -74,7 +74,8 @@ func (s *EnvVarStrategy) Name() string { return string(config.ReloadStrategyEnvVars) } -// Apply adds or updates an environment variable to trigger a restart. +// Apply adds, updates, or removes an environment variable to trigger a restart. +// When hash is empty (resource deleted), the env var is removed. func (s *EnvVarStrategy) Apply(input StrategyInput) (bool, error) { if input.Container == nil { return false, fmt.Errorf("container is required for env-var strategy") @@ -82,6 +83,11 @@ func (s *EnvVarStrategy) Apply(input StrategyInput) (bool, error) { envVarName := s.envVarName(input.ResourceName, input.ResourceType) + // Handle deletion: remove the env var when hash is empty + if input.Hash == "" { + return s.removeEnvVar(input.Container, envVarName), nil + } + // Check if env var already exists for i := range input.Container.Env { if input.Container.Env[i].Name == envVarName { @@ -104,6 +110,20 @@ func (s *EnvVarStrategy) Apply(input StrategyInput) (bool, error) { return true, nil } +// removeEnvVar removes an environment variable from a container. +// Returns true if a variable was removed. 
+func (s *EnvVarStrategy) removeEnvVar(container *corev1.Container, name string) bool { + for i := range container.Env { + if container.Env[i].Name == name { + // Remove by replacing with last element and truncating + container.Env[i] = container.Env[len(container.Env)-1] + container.Env = container.Env[:len(container.Env)-1] + return true + } + } + return false +} + // envVarName generates the environment variable name for a resource. func (s *EnvVarStrategy) envVarName(resourceName string, resourceType ResourceType) string { var postfix string From 3cb45e8dc79cc30c245d5322ee21f5b6c1e37321 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:53 +0100 Subject: [PATCH 07/35] feat: Implement NamespaceReconciler for namespace label selector filtering --- internal/pkg/controller/controller_test.go | 3 + .../pkg/controller/namespace_reconciler.go | 155 ++++ .../controller/namespace_reconciler_test.go | 295 ++++++++ internal/pkg/reload/predicate.go | 19 +- internal/pkg/reload/predicate_test.go | 107 +++ internal/pkg/workload/workload_test.go | 694 ++++++++++++++++++ 6 files changed, 1270 insertions(+), 3 deletions(-) create mode 100644 internal/pkg/controller/namespace_reconciler.go create mode 100644 internal/pkg/controller/namespace_reconciler_test.go create mode 100644 internal/pkg/workload/workload_test.go diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go index 63e6be3e2..58580c795 100644 --- a/internal/pkg/controller/controller_test.go +++ b/internal/pkg/controller/controller_test.go @@ -1,3 +1,6 @@ +//go:build integration +// +build integration + package controller import ( diff --git a/internal/pkg/controller/namespace_reconciler.go b/internal/pkg/controller/namespace_reconciler.go new file mode 100644 index 000000000..ab25fff0c --- /dev/null +++ b/internal/pkg/controller/namespace_reconciler.go @@ -0,0 +1,155 @@ +package controller 
+ +import ( + "context" + "sync" + + "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// NamespaceCache provides thread-safe access to the set of namespaces +// that match the configured namespace label selector. +type NamespaceCache struct { + mu sync.RWMutex + namespaces map[string]struct{} + enabled bool +} + +// NewNamespaceCache creates a new NamespaceCache. +// If enabled is false, all namespace checks return true (allow all). +func NewNamespaceCache(enabled bool) *NamespaceCache { + return &NamespaceCache{ + namespaces: make(map[string]struct{}), + enabled: enabled, + } +} + +// Add adds a namespace to the cache. +func (c *NamespaceCache) Add(name string) { + c.mu.Lock() + defer c.mu.Unlock() + c.namespaces[name] = struct{}{} +} + +// Remove removes a namespace from the cache. +func (c *NamespaceCache) Remove(name string) { + c.mu.Lock() + defer c.mu.Unlock() + delete(c.namespaces, name) +} + +// Contains checks if a namespace is in the cache. +// If namespace selectors are not enabled, always returns true. +func (c *NamespaceCache) Contains(name string) bool { + if !c.enabled { + return true + } + c.mu.RLock() + defer c.mu.RUnlock() + _, ok := c.namespaces[name] + return ok +} + +// List returns a copy of all namespace names in the cache. +func (c *NamespaceCache) List() []string { + c.mu.RLock() + defer c.mu.RUnlock() + result := make([]string, 0, len(c.namespaces)) + for name := range c.namespaces { + result = append(result, name) + } + return result +} + +// IsEnabled returns whether namespace selector filtering is enabled. 
+func (c *NamespaceCache) IsEnabled() bool { + return c.enabled +} + +// NamespaceReconciler watches Namespace objects and maintains a cache +// of namespaces that match the configured label selector. +type NamespaceReconciler struct { + client.Client + Log logr.Logger + Config *config.Config + Cache *NamespaceCache +} + +// Reconcile handles Namespace events and updates the namespace cache. +func (r *NamespaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("namespace", req.Name) + + var ns corev1.Namespace + if err := r.Get(ctx, req.NamespacedName, &ns); err != nil { + if errors.IsNotFound(err) { + // Namespace was deleted - remove from cache + r.Cache.Remove(req.Name) + log.V(1).Info("removed namespace from cache (deleted)") + return ctrl.Result{}, nil + } + log.Error(err, "failed to get Namespace") + return ctrl.Result{}, err + } + + // Check if namespace matches any of the configured selectors + if r.matchesSelectors(&ns) { + r.Cache.Add(ns.Name) + log.V(1).Info("added namespace to cache") + } else { + // Labels might have changed, remove from cache if no longer matches + r.Cache.Remove(ns.Name) + log.V(1).Info("removed namespace from cache (labels no longer match)") + } + + return ctrl.Result{}, nil +} + +// matchesSelectors checks if the namespace matches any configured label selector. +func (r *NamespaceReconciler) matchesSelectors(ns *corev1.Namespace) bool { + if len(r.Config.NamespaceSelectors) == 0 { + // No selectors configured - should not happen since reconciler is only + // set up when selectors are configured, but handle gracefully + return true + } + + nsLabels := ns.GetLabels() + if nsLabels == nil { + nsLabels = make(map[string]string) + } + + for _, selector := range r.Config.NamespaceSelectors { + if selector.Matches(nsLabelsSet(nsLabels)) { + return true + } + } + + return false +} + +// nsLabelsSet implements labels.Labels interface for a map. 
+type nsLabelsSet map[string]string + +func (ls nsLabelsSet) Has(key string) bool { + _, ok := ls[key] + return ok +} + +func (ls nsLabelsSet) Get(key string) string { + return ls[key] +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Namespace{}). + Complete(r) +} + +// Ensure NamespaceReconciler implements reconcile.Reconciler +var _ reconcile.Reconciler = &NamespaceReconciler{} diff --git a/internal/pkg/controller/namespace_reconciler_test.go b/internal/pkg/controller/namespace_reconciler_test.go new file mode 100644 index 000000000..2ad83d1e1 --- /dev/null +++ b/internal/pkg/controller/namespace_reconciler_test.go @@ -0,0 +1,295 @@ +package controller_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr/testr" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestNamespaceCache_Basic(t *testing.T) { + cache := controller.NewNamespaceCache(true) + + // Test Add and Contains + cache.Add("namespace-1") + if !cache.Contains("namespace-1") { + t.Error("Cache should contain namespace-1") + } + if cache.Contains("namespace-2") { + t.Error("Cache should not contain namespace-2") + } + + // Test Remove + cache.Remove("namespace-1") + if cache.Contains("namespace-1") { + t.Error("Cache should not contain namespace-1 after removal") + } +} + +func TestNamespaceCache_Disabled(t *testing.T) { + cache := controller.NewNamespaceCache(false) + + // When disabled, Contains should always return true + if !cache.Contains("any-namespace") { + t.Error("Disabled cache should return 
true for any namespace") + } + if !cache.Contains("other-namespace") { + t.Error("Disabled cache should return true for any namespace") + } +} + +func TestNamespaceCache_List(t *testing.T) { + cache := controller.NewNamespaceCache(true) + cache.Add("ns-1") + cache.Add("ns-2") + cache.Add("ns-3") + + list := cache.List() + if len(list) != 3 { + t.Errorf("Expected 3 namespaces, got %d", len(list)) + } + + // Check all namespaces are in the list + found := make(map[string]bool) + for _, ns := range list { + found[ns] = true + } + for _, expected := range []string{"ns-1", "ns-2", "ns-3"} { + if !found[expected] { + t.Errorf("Expected %s in list", expected) + } + } +} + +func TestNamespaceCache_IsEnabled(t *testing.T) { + enabledCache := controller.NewNamespaceCache(true) + disabledCache := controller.NewNamespaceCache(false) + + if !enabledCache.IsEnabled() { + t.Error("EnabledCache.IsEnabled() should return true") + } + if disabledCache.IsEnabled() { + t.Error("DisabledCache.IsEnabled() should return false") + } +} + +func TestNamespaceReconciler_Add(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ns", + Labels: map[string]string{"env": "production"}, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(ns). 
+ Build() + + cfg := config.NewDefault() + selector, _ := labels.Parse("env=production") + cfg.NamespaceSelectors = []labels.Selector{selector} + + cache := controller.NewNamespaceCache(true) + reconciler := &controller.NamespaceReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + Cache: cache, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-ns"}, + } + + _, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + + if !cache.Contains("test-ns") { + t.Error("Cache should contain test-ns after reconcile") + } +} + +func TestNamespaceReconciler_Remove_LabelChange(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + // Namespace with non-matching labels + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ns", + Labels: map[string]string{"env": "staging"}, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(ns). + Build() + + cfg := config.NewDefault() + selector, _ := labels.Parse("env=production") + cfg.NamespaceSelectors = []labels.Selector{selector} + + cache := controller.NewNamespaceCache(true) + // Pre-populate cache + cache.Add("test-ns") + + reconciler := &controller.NamespaceReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + Cache: cache, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-ns"}, + } + + _, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + + if cache.Contains("test-ns") { + t.Error("Cache should not contain test-ns after reconcile (labels no longer match)") + } +} + +func TestNamespaceReconciler_Remove_Delete(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + // No namespace in cluster (simulates delete) + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + cfg := config.NewDefault() + selector, _ := labels.Parse("env=production") + cfg.NamespaceSelectors = []labels.Selector{selector} + + cache := controller.NewNamespaceCache(true) + // Pre-populate cache + cache.Add("deleted-ns") + + reconciler := &controller.NamespaceReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + Cache: cache, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "deleted-ns"}, + } + + _, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + + if cache.Contains("deleted-ns") { + t.Error("Cache should not contain deleted-ns after reconcile") + } +} + +func TestNamespaceReconciler_MultipleSelectors(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ns", + Labels: map[string]string{"team": "platform"}, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(ns). 
+ Build() + + cfg := config.NewDefault() + selector1, _ := labels.Parse("env=production") + selector2, _ := labels.Parse("team=platform") + cfg.NamespaceSelectors = []labels.Selector{selector1, selector2} + + cache := controller.NewNamespaceCache(true) + reconciler := &controller.NamespaceReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + Cache: cache, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-ns"}, + } + + _, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + + // Should be added because it matches second selector (team=platform) + if !cache.Contains("test-ns") { + t.Error("Cache should contain test-ns (matches second selector)") + } +} + +func TestNamespaceReconciler_NoLabels(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + // Namespace with no labels + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ns", + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(ns). 
+ Build() + + cfg := config.NewDefault() + selector, _ := labels.Parse("env=production") + cfg.NamespaceSelectors = []labels.Selector{selector} + + cache := controller.NewNamespaceCache(true) + reconciler := &controller.NamespaceReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + Cache: cache, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-ns"}, + } + + _, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + + if cache.Contains("test-ns") { + t.Error("Cache should not contain test-ns (no labels)") + } +} diff --git a/internal/pkg/reload/predicate.go b/internal/pkg/reload/predicate.go index 7e6381e2e..030ded5dd 100644 --- a/internal/pkg/reload/predicate.go +++ b/internal/pkg/reload/predicate.go @@ -72,8 +72,19 @@ func SecretPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { } } +// NamespaceChecker defines the interface for checking if a namespace is allowed. +type NamespaceChecker interface { + Contains(name string) bool +} + // NamespaceFilterPredicate returns a predicate that filters resources by namespace. func NamespaceFilterPredicate(cfg *config.Config) predicate.Predicate { + return NamespaceFilterPredicateWithCache(cfg, nil) +} + +// NamespaceFilterPredicateWithCache returns a predicate that filters resources by namespace, +// using the provided NamespaceChecker for namespace selector filtering. 
+func NamespaceFilterPredicateWithCache(cfg *config.Config, nsCache NamespaceChecker) predicate.Predicate { return predicate.NewPredicateFuncs(func(obj client.Object) bool { namespace := obj.GetNamespace() @@ -82,9 +93,11 @@ func NamespaceFilterPredicate(cfg *config.Config) predicate.Predicate { return false } - // Check namespace selectors - // Note: For now, we pass through and let the controller handle selector matching - // A more efficient implementation would check labels here + // Check namespace selector cache if provided + if nsCache != nil && !nsCache.Contains(namespace) { + return false + } + return true }) } diff --git a/internal/pkg/reload/predicate_test.go b/internal/pkg/reload/predicate_test.go index 5386121ea..450590027 100644 --- a/internal/pkg/reload/predicate_test.go +++ b/internal/pkg/reload/predicate_test.go @@ -500,3 +500,110 @@ func TestExistsLabelSelector(t *testing.T) { }) } } + +// mockNamespaceChecker implements NamespaceChecker for testing. +type mockNamespaceChecker struct { + allowed map[string]bool +} + +func (m *mockNamespaceChecker) Contains(name string) bool { + return m.allowed[name] +} + +func TestNamespaceFilterPredicateWithCache(t *testing.T) { + tests := []struct { + name string + ignoredNamespaces []string + cacheAllowed map[string]bool + eventNamespace string + wantAllow bool + }{ + { + name: "allowed by cache and not ignored", + ignoredNamespaces: []string{"kube-system"}, + cacheAllowed: map[string]bool{"production": true}, + eventNamespace: "production", + wantAllow: true, + }, + { + name: "blocked by cache", + ignoredNamespaces: []string{}, + cacheAllowed: map[string]bool{"production": true}, + eventNamespace: "staging", + wantAllow: false, + }, + { + name: "blocked by ignore list even if in cache", + ignoredNamespaces: []string{"kube-system"}, + cacheAllowed: map[string]bool{"kube-system": true}, + eventNamespace: "kube-system", + wantAllow: false, + }, + { + name: "ignore list checked before cache", + ignoredNamespaces: 
[]string{"blocked-ns"}, + cacheAllowed: map[string]bool{"blocked-ns": true}, + eventNamespace: "blocked-ns", + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = tt.ignoredNamespaces + + cache := &mockNamespaceChecker{allowed: tt.cacheAllowed} + predicate := NamespaceFilterPredicateWithCache(cfg, cache) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.eventNamespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }) + } +} + +func TestNamespaceFilterPredicateWithCache_NilCache(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + + // Nil cache should allow all namespaces (only check ignore list) + predicate := NamespaceFilterPredicateWithCache(cfg, nil) + + tests := []struct { + namespace string + wantAllow bool + }{ + {"default", true}, + {"production", true}, + {"kube-system", false}, // Should still respect ignore list + } + + for _, tt := range tests { + t.Run(tt.namespace, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v for namespace %s", got, tt.wantAllow, tt.namespace) + } + }) + } +} diff --git a/internal/pkg/workload/workload_test.go b/internal/pkg/workload/workload_test.go new file mode 100644 index 000000000..b7cb25104 --- /dev/null +++ b/internal/pkg/workload/workload_test.go @@ -0,0 +1,694 @@ +package workload + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestDeploymentWorkload_BasicGetters(t *testing.T) { + deploy := 
&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deploy", + Namespace: "test-ns", + Annotations: map[string]string{ + "key": "value", + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if w.Kind() != KindDeployment { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindDeployment) + } + if w.GetName() != "test-deploy" { + t.Errorf("GetName() = %v, want test-deploy", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != deploy { + t.Error("GetObject() should return the underlying deployment") + } +} + +func TestDeploymentWorkload_PodTemplateAnnotations(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "existing": "annotation", + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + // Test get + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + // Test set + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestDeploymentWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + // No annotations set + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + // Should initialize nil map + annotations := w.GetPodTemplateAnnotations() + if annotations == nil { + t.Error("GetPodTemplateAnnotations should initialize nil 
map") + } + + // Should work with nil initial map + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil initial map") + } +} + +func TestDeploymentWorkload_Containers(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "main", Image: "nginx"}, + }, + InitContainers: []corev1.Container{ + {Name: "init", Image: "busybox"}, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + // Test get containers + containers := w.GetContainers() + if len(containers) != 1 || containers[0].Name != "main" { + t.Errorf("GetContainers() = %v, want [main]", containers) + } + + // Test get init containers + initContainers := w.GetInitContainers() + if len(initContainers) != 1 || initContainers[0].Name != "init" { + t.Errorf("GetInitContainers() = %v, want [init]", initContainers) + } + + // Test set containers + newContainers := []corev1.Container{{Name: "new-main", Image: "alpine"}} + w.SetContainers(newContainers) + if w.GetContainers()[0].Name != "new-main" { + t.Error("SetContainers should update containers") + } + + // Test set init containers + newInitContainers := []corev1.Container{{Name: "new-init", Image: "alpine"}} + w.SetInitContainers(newInitContainers) + if w.GetInitContainers()[0].Name != "new-init" { + t.Error("SetInitContainers should update init containers") + } +} + +func TestDeploymentWorkload_Volumes(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + volumes := w.GetVolumes() + if len(volumes) != 2 { + 
t.Errorf("GetVolumes() length = %d, want 2", len(volumes)) + } +} + +func TestDeploymentWorkload_UsesConfigMap_Volume(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "my-config", + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("my-config") { + t.Error("UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestDeploymentWorkload_UsesConfigMap_ProjectedVolume(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "projected-vol", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "projected-config", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("projected-config") { + t.Error("UsesConfigMap should return true for projected ConfigMap volume") + } +} + +func TestDeploymentWorkload_UsesConfigMap_EnvFrom(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + 
LocalObjectReference: corev1.LocalObjectReference{ + Name: "env-config", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("env-config") { + t.Error("UsesConfigMap should return true for envFrom ConfigMap") + } +} + +func TestDeploymentWorkload_UsesConfigMap_EnvVar(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Env: []corev1.EnvVar{ + { + Name: "CONFIG_VALUE", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "var-config", + }, + Key: "some-key", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("var-config") { + t.Error("UsesConfigMap should return true for env var ConfigMapKeyRef") + } +} + +func TestDeploymentWorkload_UsesConfigMap_InitContainer(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "init-config", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesConfigMap("init-config") { + t.Error("UsesConfigMap should return true for init container ConfigMap") + } +} + +func TestDeploymentWorkload_UsesSecret_Volume(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: 
corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "my-secret", + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("my-secret") { + t.Error("UsesSecret should return true for Secret volume") + } + if w.UsesSecret("other-secret") { + t.Error("UsesSecret should return false for non-existent Secret") + } +} + +func TestDeploymentWorkload_UsesSecret_ProjectedVolume(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "projected-vol", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "projected-secret", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("projected-secret") { + t.Error("UsesSecret should return true for projected Secret volume") + } +} + +func TestDeploymentWorkload_UsesSecret_EnvFrom(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "env-secret", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("env-secret") { + t.Error("UsesSecret should return true for envFrom Secret") + } +} + +func TestDeploymentWorkload_UsesSecret_EnvVar(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: 
corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Env: []corev1.EnvVar{ + { + Name: "SECRET_VALUE", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "var-secret", + }, + Key: "some-key", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("var-secret") { + t.Error("UsesSecret should return true for env var SecretKeyRef") + } +} + +func TestDeploymentWorkload_UsesSecret_InitContainer(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "init-secret", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + if !w.UsesSecret("init-secret") { + t.Error("UsesSecret should return true for init container Secret") + } +} + +func TestDeploymentWorkload_GetEnvFromSources(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + }, + }, + { + Name: "sidecar", + EnvFrom: []corev1.EnvFromSource{ + {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "init-cm"}}}, + }, + }, + 
}, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + sources := w.GetEnvFromSources() + if len(sources) != 3 { + t.Errorf("GetEnvFromSources() returned %d sources, want 3", len(sources)) + } +} + +func TestDeploymentWorkload_DeepCopy(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "main", Image: "nginx"}, + }, + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + copy := w.DeepCopy() + + // Modify original + w.SetPodTemplateAnnotation("modified", "true") + + // Copy should not be affected + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +func TestDeploymentWorkload_GetOwnerReferences(t *testing.T) { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "ReplicaSet", + Name: "test-rs", + }, + }, + }, + } + + w := NewDeploymentWorkload(deploy) + + refs := w.GetOwnerReferences() + if len(refs) != 1 || refs[0].Name != "test-rs" { + t.Errorf("GetOwnerReferences() = %v, want owner ref to test-rs", refs) + } +} + +// DaemonSet tests +func TestDaemonSetWorkload_BasicGetters(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + + w := NewDaemonSetWorkload(ds) + + if w.Kind() != KindDaemonSet { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindDaemonSet) + } + if w.GetName() != "test-ds" { + t.Errorf("GetName() = %v, want test-ds", w.GetName()) + } +} + +func TestDaemonSetWorkload_UsesConfigMap(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: 
corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "ds-config", + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + if !w.UsesConfigMap("ds-config") { + t.Error("DaemonSet UsesConfigMap should return true for ConfigMap volume") + } +} + +// StatefulSet tests +func TestStatefulSetWorkload_BasicGetters(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + Namespace: "test-ns", + }, + } + + w := NewStatefulSetWorkload(sts) + + if w.Kind() != KindStatefulSet { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindStatefulSet) + } + if w.GetName() != "test-sts" { + t.Errorf("GetName() = %v, want test-sts", w.GetName()) + } +} + +func TestStatefulSetWorkload_UsesSecret(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "sts-secret", + }, + }, + }, + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesSecret("sts-secret") { + t.Error("StatefulSet UsesSecret should return true for Secret volume") + } +} + +// Test that workloads implement the interface +func TestWorkloadInterface(t *testing.T) { + var _ WorkloadAccessor = (*DeploymentWorkload)(nil) + var _ WorkloadAccessor = (*DaemonSetWorkload)(nil) + var _ WorkloadAccessor = (*StatefulSetWorkload)(nil) +} From f70c4d2a43a721122b54c70dd0f4a35cf49b0dd8 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:53 +0100 Subject: [PATCH 08/35] feat: Argo rollouts workload + refactor of alerting --- 
internal/pkg/alerting/alerter.go | 51 +++ internal/pkg/alerting/alerter_test.go | 267 ++++++++++++++ internal/pkg/alerting/gchat.go | 90 +++++ internal/pkg/alerting/http.go | 57 +++ internal/pkg/alerting/raw.go | 57 +++ internal/pkg/alerting/slack.go | 57 +++ internal/pkg/alerting/teams.go | 81 +++++ internal/pkg/config/config.go | 17 +- .../pkg/controller/deployment_reconciler.go | 89 +++++ internal/pkg/controller/retry.go | 231 +++++++++++++ internal/pkg/reload/pause.go | 130 +++++++ internal/pkg/reload/pause_test.go | 327 ++++++++++++++++++ internal/pkg/workload/deployment.go | 5 + internal/pkg/workload/registry.go | 6 + internal/pkg/workload/rollout.go | 274 +++++++++++++++ internal/pkg/workload/workload_test.go | 223 ++++++++++++ 16 files changed, 1959 insertions(+), 3 deletions(-) create mode 100644 internal/pkg/alerting/alerter.go create mode 100644 internal/pkg/alerting/alerter_test.go create mode 100644 internal/pkg/alerting/gchat.go create mode 100644 internal/pkg/alerting/http.go create mode 100644 internal/pkg/alerting/raw.go create mode 100644 internal/pkg/alerting/slack.go create mode 100644 internal/pkg/alerting/teams.go create mode 100644 internal/pkg/controller/deployment_reconciler.go create mode 100644 internal/pkg/reload/pause.go create mode 100644 internal/pkg/reload/pause_test.go create mode 100644 internal/pkg/workload/rollout.go diff --git a/internal/pkg/alerting/alerter.go b/internal/pkg/alerting/alerter.go new file mode 100644 index 000000000..5213d382a --- /dev/null +++ b/internal/pkg/alerting/alerter.go @@ -0,0 +1,51 @@ +package alerting + +import ( + "context" + "time" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +// AlertMessage contains the details of a reload event to be sent as an alert. 
+type AlertMessage struct { + WorkloadKind string + WorkloadName string + WorkloadNamespace string + ResourceKind string + ResourceName string + ResourceNamespace string + Timestamp time.Time +} + +// Alerter is the interface for sending reload notifications. +type Alerter interface { + Send(ctx context.Context, message AlertMessage) error +} + +// NewAlerter creates an Alerter based on the configuration. +// Returns a NoOpAlerter if alerting is disabled. +func NewAlerter(cfg *config.Config) Alerter { + alertCfg := cfg.Alerting + if !alertCfg.Enabled || alertCfg.WebhookURL == "" { + return &NoOpAlerter{} + } + + switch alertCfg.Sink { + case "slack": + return NewSlackAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) + case "teams": + return NewTeamsAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) + case "gchat": + return NewGChatAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) + default: + return NewRawAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) + } +} + +// NoOpAlerter is an Alerter that does nothing. 
+type NoOpAlerter struct{} + +func (a *NoOpAlerter) Send(ctx context.Context, message AlertMessage) error { + return nil +} diff --git a/internal/pkg/alerting/alerter_test.go b/internal/pkg/alerting/alerter_test.go new file mode 100644 index 000000000..6e5724f4d --- /dev/null +++ b/internal/pkg/alerting/alerter_test.go @@ -0,0 +1,267 @@ +package alerting + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stakater/Reloader/internal/pkg/config" +) + +func TestNewAlerter_Disabled(t *testing.T) { + cfg := config.NewDefault() + cfg.Alerting.Enabled = false + + alerter := NewAlerter(cfg) + if _, ok := alerter.(*NoOpAlerter); !ok { + t.Error("Expected NoOpAlerter when alerting is disabled") + } +} + +func TestNewAlerter_NoWebhookURL(t *testing.T) { + cfg := config.NewDefault() + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "" + + alerter := NewAlerter(cfg) + if _, ok := alerter.(*NoOpAlerter); !ok { + t.Error("Expected NoOpAlerter when webhook URL is empty") + } +} + +func TestNewAlerter_Slack(t *testing.T) { + cfg := config.NewDefault() + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "slack" + + alerter := NewAlerter(cfg) + if _, ok := alerter.(*SlackAlerter); !ok { + t.Error("Expected SlackAlerter for sink=slack") + } +} + +func TestNewAlerter_Teams(t *testing.T) { + cfg := config.NewDefault() + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "teams" + + alerter := NewAlerter(cfg) + if _, ok := alerter.(*TeamsAlerter); !ok { + t.Error("Expected TeamsAlerter for sink=teams") + } +} + +func TestNewAlerter_GChat(t *testing.T) { + cfg := config.NewDefault() + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "gchat" + + alerter := NewAlerter(cfg) + if _, ok := alerter.(*GChatAlerter); !ok { + t.Error("Expected GChatAlerter for 
sink=gchat") + } +} + +func TestNewAlerter_Raw(t *testing.T) { + cfg := config.NewDefault() + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "raw" + + alerter := NewAlerter(cfg) + if _, ok := alerter.(*RawAlerter); !ok { + t.Error("Expected RawAlerter for sink=raw") + } +} + +func TestNewAlerter_DefaultIsRaw(t *testing.T) { + cfg := config.NewDefault() + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "" // Empty sink should default to raw + + alerter := NewAlerter(cfg) + if _, ok := alerter.(*RawAlerter); !ok { + t.Error("Expected RawAlerter for empty sink") + } +} + +func TestNoOpAlerter_Send(t *testing.T) { + alerter := &NoOpAlerter{} + err := alerter.Send(context.Background(), AlertMessage{}) + if err != nil { + t.Errorf("NoOpAlerter.Send() error = %v, want nil", err) + } +} + +func TestSlackAlerter_Send(t *testing.T) { + var receivedBody []byte + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Errorf("Expected POST request, got %s", r.Method) + } + if r.Header.Get("Content-Type") != "application/json" { + t.Errorf("Expected Content-Type application/json, got %s", r.Header.Get("Content-Type")) + } + receivedBody = make([]byte, r.ContentLength) + r.Body.Read(receivedBody) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + alerter := NewSlackAlerter(server.URL, "", "Test Cluster") + msg := AlertMessage{ + WorkloadKind: "Deployment", + WorkloadName: "nginx", + WorkloadNamespace: "default", + ResourceKind: "ConfigMap", + ResourceName: "nginx-config", + ResourceNamespace: "default", + Timestamp: time.Now(), + } + + err := alerter.Send(context.Background(), msg) + if err != nil { + t.Fatalf("SlackAlerter.Send() error = %v", err) + } + + var slackMsg slackMessage + if err := json.Unmarshal(receivedBody, &slackMsg); err != nil { + t.Fatalf("Failed to 
unmarshal slack message: %v", err) + } + + if slackMsg.Text == "" { + t.Error("Expected non-empty text in slack message") + } +} + +func TestTeamsAlerter_Send(t *testing.T) { + var receivedBody []byte + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedBody = make([]byte, r.ContentLength) + r.Body.Read(receivedBody) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + alerter := NewTeamsAlerter(server.URL, "", "") + msg := AlertMessage{ + WorkloadKind: "Deployment", + WorkloadName: "nginx", + WorkloadNamespace: "default", + ResourceKind: "ConfigMap", + ResourceName: "nginx-config", + ResourceNamespace: "default", + Timestamp: time.Now(), + } + + err := alerter.Send(context.Background(), msg) + if err != nil { + t.Fatalf("TeamsAlerter.Send() error = %v", err) + } + + var teamsMsg teamsMessage + if err := json.Unmarshal(receivedBody, &teamsMsg); err != nil { + t.Fatalf("Failed to unmarshal teams message: %v", err) + } + + if teamsMsg.Type != "MessageCard" { + t.Errorf("Expected @type=MessageCard, got %s", teamsMsg.Type) + } +} + +func TestGChatAlerter_Send(t *testing.T) { + var receivedBody []byte + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedBody = make([]byte, r.ContentLength) + r.Body.Read(receivedBody) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + alerter := NewGChatAlerter(server.URL, "", "") + msg := AlertMessage{ + WorkloadKind: "Deployment", + WorkloadName: "nginx", + WorkloadNamespace: "default", + ResourceKind: "ConfigMap", + ResourceName: "nginx-config", + ResourceNamespace: "default", + Timestamp: time.Now(), + } + + err := alerter.Send(context.Background(), msg) + if err != nil { + t.Fatalf("GChatAlerter.Send() error = %v", err) + } + + var gchatMsg gchatMessage + if err := json.Unmarshal(receivedBody, &gchatMsg); err != nil { + t.Fatalf("Failed to unmarshal gchat message: %v", err) + } + + if len(gchatMsg.Cards) != 
1 { + t.Errorf("Expected 1 card, got %d", len(gchatMsg.Cards)) + } +} + +func TestRawAlerter_Send(t *testing.T) { + var receivedBody []byte + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedBody = make([]byte, r.ContentLength) + r.Body.Read(receivedBody) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + alerter := NewRawAlerter(server.URL, "", "custom-info") + msg := AlertMessage{ + WorkloadKind: "Deployment", + WorkloadName: "nginx", + WorkloadNamespace: "default", + ResourceKind: "ConfigMap", + ResourceName: "nginx-config", + ResourceNamespace: "default", + Timestamp: time.Now(), + } + + err := alerter.Send(context.Background(), msg) + if err != nil { + t.Fatalf("RawAlerter.Send() error = %v", err) + } + + var rawMsg rawMessage + if err := json.Unmarshal(receivedBody, &rawMsg); err != nil { + t.Fatalf("Failed to unmarshal raw message: %v", err) + } + + if rawMsg.Event != "reload" { + t.Errorf("Expected event=reload, got %s", rawMsg.Event) + } + if rawMsg.WorkloadName != "nginx" { + t.Errorf("Expected workloadName=nginx, got %s", rawMsg.WorkloadName) + } + if rawMsg.Additional != "custom-info" { + t.Errorf("Expected additional=custom-info, got %s", rawMsg.Additional) + } +} + +func TestAlerter_WebhookError(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("Internal Server Error")) + })) + defer server.Close() + + alerter := NewRawAlerter(server.URL, "", "") + err := alerter.Send(context.Background(), AlertMessage{}) + if err == nil { + t.Error("Expected error for non-2xx response") + } +} diff --git a/internal/pkg/alerting/gchat.go b/internal/pkg/alerting/gchat.go new file mode 100644 index 000000000..73e2b303f --- /dev/null +++ b/internal/pkg/alerting/gchat.go @@ -0,0 +1,90 @@ +package alerting + +import ( + "context" + "encoding/json" + "fmt" +) + +// GChatAlerter sends 
alerts to Google Chat webhooks. +type GChatAlerter struct { + webhookURL string + additional string + client *httpClient +} + +// NewGChatAlerter creates a new GChatAlerter. +func NewGChatAlerter(webhookURL, proxyURL, additional string) *GChatAlerter { + return &GChatAlerter{ + webhookURL: webhookURL, + additional: additional, + client: newHTTPClient(proxyURL), + } +} + +// gchatMessage represents a Google Chat message. +type gchatMessage struct { + Text string `json:"text,omitempty"` + Cards []gchatCard `json:"cards,omitempty"` +} + +type gchatCard struct { + Header gchatHeader `json:"header"` + Sections []gchatSection `json:"sections"` +} + +type gchatHeader struct { + Title string `json:"title"` + Subtitle string `json:"subtitle,omitempty"` +} + +type gchatSection struct { + Widgets []gchatWidget `json:"widgets"` +} + +type gchatWidget struct { + KeyValue *gchatKeyValue `json:"keyValue,omitempty"` +} + +type gchatKeyValue struct { + TopLabel string `json:"topLabel"` + Content string `json:"content"` +} + +func (a *GChatAlerter) Send(ctx context.Context, message AlertMessage) error { + msg := a.buildMessage(message) + + body, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("marshaling gchat message: %w", err) + } + + return a.client.post(ctx, a.webhookURL, body) +} + +func (a *GChatAlerter) buildMessage(msg AlertMessage) gchatMessage { + widgets := []gchatWidget{ + {KeyValue: &gchatKeyValue{TopLabel: "Workload", Content: fmt.Sprintf("%s/%s (%s)", msg.WorkloadNamespace, msg.WorkloadName, msg.WorkloadKind)}}, + {KeyValue: &gchatKeyValue{TopLabel: "Resource", Content: fmt.Sprintf("%s/%s (%s)", msg.ResourceNamespace, msg.ResourceName, msg.ResourceKind)}}, + {KeyValue: &gchatKeyValue{TopLabel: "Time", Content: msg.Timestamp.Format("2006-01-02 15:04:05 UTC")}}, + } + + subtitle := "" + if a.additional != "" { + subtitle = a.additional + } + + return gchatMessage{ + Cards: []gchatCard{ + { + Header: gchatHeader{ + Title: "Reloader triggered reload", + 
Subtitle: subtitle, + }, + Sections: []gchatSection{ + {Widgets: widgets}, + }, + }, + }, + } +} diff --git a/internal/pkg/alerting/http.go b/internal/pkg/alerting/http.go new file mode 100644 index 000000000..827091e71 --- /dev/null +++ b/internal/pkg/alerting/http.go @@ -0,0 +1,57 @@ +package alerting + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "time" +) + +// httpClient wraps http.Client with common configuration. +type httpClient struct { + client *http.Client +} + +// newHTTPClient creates a new httpClient with optional proxy support. +func newHTTPClient(proxyURL string) *httpClient { + transport := &http.Transport{} + + if proxyURL != "" { + proxy, err := url.Parse(proxyURL) + if err == nil { + transport.Proxy = http.ProxyURL(proxy) + } + } + + return &httpClient{ + client: &http.Client{ + Transport: transport, + Timeout: 10 * time.Second, + }, + } +} + +// post sends a POST request with JSON body. +func (c *httpClient) post(ctx context.Context, url string, body []byte) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) + if err != nil { + return fmt.Errorf("creating request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := c.client.Do(req) + if err != nil { + return fmt.Errorf("sending request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status %d: %s", resp.StatusCode, string(body)) + } + + return nil +} diff --git a/internal/pkg/alerting/raw.go b/internal/pkg/alerting/raw.go new file mode 100644 index 000000000..ad0add08e --- /dev/null +++ b/internal/pkg/alerting/raw.go @@ -0,0 +1,57 @@ +package alerting + +import ( + "context" + "encoding/json" + "fmt" +) + +// RawAlerter sends alerts as raw JSON to a webhook. 
+type RawAlerter struct { + webhookURL string + additional string + client *httpClient +} + +// NewRawAlerter creates a new RawAlerter. +func NewRawAlerter(webhookURL, proxyURL, additional string) *RawAlerter { + return &RawAlerter{ + webhookURL: webhookURL, + additional: additional, + client: newHTTPClient(proxyURL), + } +} + +// rawMessage is the JSON payload for raw webhook alerts. +type rawMessage struct { + Event string `json:"event"` + WorkloadKind string `json:"workloadKind"` + WorkloadName string `json:"workloadName"` + WorkloadNamespace string `json:"workloadNamespace"` + ResourceKind string `json:"resourceKind"` + ResourceName string `json:"resourceName"` + ResourceNamespace string `json:"resourceNamespace"` + Timestamp string `json:"timestamp"` + Additional string `json:"additional,omitempty"` +} + +func (a *RawAlerter) Send(ctx context.Context, message AlertMessage) error { + msg := rawMessage{ + Event: "reload", + WorkloadKind: message.WorkloadKind, + WorkloadName: message.WorkloadName, + WorkloadNamespace: message.WorkloadNamespace, + ResourceKind: message.ResourceKind, + ResourceName: message.ResourceName, + ResourceNamespace: message.ResourceNamespace, + Timestamp: message.Timestamp.Format("2006-01-02T15:04:05Z07:00"), + Additional: a.additional, + } + + body, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("marshaling raw message: %w", err) + } + + return a.client.post(ctx, a.webhookURL, body) +} diff --git a/internal/pkg/alerting/slack.go b/internal/pkg/alerting/slack.go new file mode 100644 index 000000000..1b9171180 --- /dev/null +++ b/internal/pkg/alerting/slack.go @@ -0,0 +1,57 @@ +package alerting + +import ( + "context" + "encoding/json" + "fmt" +) + +// SlackAlerter sends alerts to Slack webhooks. +type SlackAlerter struct { + webhookURL string + additional string + client *httpClient +} + +// NewSlackAlerter creates a new SlackAlerter. 
+func NewSlackAlerter(webhookURL, proxyURL, additional string) *SlackAlerter { + return &SlackAlerter{ + webhookURL: webhookURL, + additional: additional, + client: newHTTPClient(proxyURL), + } +} + +type slackMessage struct { + Text string `json:"text"` +} + +func (a *SlackAlerter) Send(ctx context.Context, message AlertMessage) error { + text := a.formatMessage(message) + msg := slackMessage{Text: text} + + body, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("marshaling slack message: %w", err) + } + + return a.client.post(ctx, a.webhookURL, body) +} + +func (a *SlackAlerter) formatMessage(msg AlertMessage) string { + text := fmt.Sprintf( + "Reloader triggered reload\n"+ + "*Workload:* %s/%s (%s)\n"+ + "*Resource:* %s/%s (%s)\n"+ + "*Time:* %s", + msg.WorkloadNamespace, msg.WorkloadName, msg.WorkloadKind, + msg.ResourceNamespace, msg.ResourceName, msg.ResourceKind, + msg.Timestamp.Format("2006-01-02 15:04:05 UTC"), + ) + + if a.additional != "" { + text = a.additional + "\n" + text + } + + return text +} diff --git a/internal/pkg/alerting/teams.go b/internal/pkg/alerting/teams.go new file mode 100644 index 000000000..99b08d5c8 --- /dev/null +++ b/internal/pkg/alerting/teams.go @@ -0,0 +1,81 @@ +package alerting + +import ( + "context" + "encoding/json" + "fmt" +) + +// TeamsAlerter sends alerts to Microsoft Teams webhooks. +type TeamsAlerter struct { + webhookURL string + additional string + client *httpClient +} + +// NewTeamsAlerter creates a new TeamsAlerter. +func NewTeamsAlerter(webhookURL, proxyURL, additional string) *TeamsAlerter { + return &TeamsAlerter{ + webhookURL: webhookURL, + additional: additional, + client: newHTTPClient(proxyURL), + } +} + +// teamsMessage represents a Microsoft Teams message card. 
+type teamsMessage struct { + Type string `json:"@type"` + Context string `json:"@context"` + ThemeColor string `json:"themeColor"` + Summary string `json:"summary"` + Sections []teamsSection `json:"sections"` +} + +type teamsSection struct { + ActivityTitle string `json:"activityTitle"` + ActivitySubtitle string `json:"activitySubtitle,omitempty"` + Facts []teamsFact `json:"facts"` +} + +type teamsFact struct { + Name string `json:"name"` + Value string `json:"value"` +} + +func (a *TeamsAlerter) Send(ctx context.Context, message AlertMessage) error { + msg := a.buildMessage(message) + + body, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("marshaling teams message: %w", err) + } + + return a.client.post(ctx, a.webhookURL, body) +} + +func (a *TeamsAlerter) buildMessage(msg AlertMessage) teamsMessage { + facts := []teamsFact{ + {Name: "Workload", Value: fmt.Sprintf("%s/%s (%s)", msg.WorkloadNamespace, msg.WorkloadName, msg.WorkloadKind)}, + {Name: "Resource", Value: fmt.Sprintf("%s/%s (%s)", msg.ResourceNamespace, msg.ResourceName, msg.ResourceKind)}, + {Name: "Time", Value: msg.Timestamp.Format("2006-01-02 15:04:05 UTC")}, + } + + subtitle := "" + if a.additional != "" { + subtitle = a.additional + } + + return teamsMessage{ + Type: "MessageCard", + Context: "http://schema.org/extensions", + ThemeColor: "0076D7", + Summary: "Reloader triggered reload", + Sections: []teamsSection{ + { + ActivityTitle: "Reloader triggered reload", + ActivitySubtitle: subtitle, + Facts: facts, + }, + }, + } +} diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 6288d5f9a..9ce3e0cef 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -140,9 +140,20 @@ type AnnotationConfig struct { // AlertingConfig holds configuration for alerting integrations. type AlertingConfig struct { - SlackWebhookURL string - TeamsWebhookURL string - GChatWebhookURL string + // Enabled enables alerting notifications on reload events. 
+ Enabled bool + + // WebhookURL is the webhook URL to send alerts to. + WebhookURL string + + // Sink determines the alert format: "slack", "teams", "gchat", or "raw" (default). + Sink string + + // Proxy is an optional HTTP proxy for webhook requests. + Proxy string + + // Additional is optional context prepended to alert messages. + Additional string } // LeaderElectionConfig holds configuration for leader election. diff --git a/internal/pkg/controller/deployment_reconciler.go b/internal/pkg/controller/deployment_reconciler.go new file mode 100644 index 000000000..d28272c56 --- /dev/null +++ b/internal/pkg/controller/deployment_reconciler.go @@ -0,0 +1,89 @@ +package controller + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// DeploymentReconciler reconciles Deployment objects to handle pause expiration. +// This reconciler watches for deployments that were paused by Reloader and +// unpauses them when the pause period expires. +type DeploymentReconciler struct { + client.Client + Log logr.Logger + Config *config.Config + PauseHandler *reload.PauseHandler +} + +// Reconcile handles Deployment pause expiration. 
+func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("deployment", req.NamespacedName) + + var deploy appsv1.Deployment + if err := r.Get(ctx, req.NamespacedName, &deploy); err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Check if this deployment was paused by Reloader + if !r.PauseHandler.IsPausedByReloader(&deploy) { + return ctrl.Result{}, nil + } + + // Check if pause period has expired + expired, remainingTime, err := r.PauseHandler.CheckPauseExpired(&deploy) + if err != nil { + log.Error(err, "Failed to check pause expiration") + return ctrl.Result{}, err + } + + if !expired { + // Still within pause period - requeue to check again + log.V(1).Info("Deployment pause not yet expired", "remaining", remainingTime) + return ctrl.Result{RequeueAfter: remainingTime}, nil + } + + // Pause period has expired - unpause the deployment + log.Info("Unpausing deployment after pause period expired") + r.PauseHandler.ClearPause(&deploy) + + if err := r.Update(ctx, &deploy, client.FieldOwner(FieldManager)); err != nil { + log.Error(err, "Failed to unpause deployment") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the DeploymentReconciler with the manager. +func (r *DeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&appsv1.Deployment{}). + WithEventFilter(r.pausedByReloaderPredicate()). + Complete(r) +} + +// pausedByReloaderPredicate returns a predicate that only selects deployments +// that have been paused by Reloader (have the paused-at annotation). 
+func (r *DeploymentReconciler) pausedByReloaderPredicate() predicate.Predicate { + return predicate.NewPredicateFuncs(func(obj client.Object) bool { + annotations := obj.GetAnnotations() + if annotations == nil { + return false + } + + // Only process if deployment has our paused-at annotation + _, hasPausedAt := annotations[r.Config.Annotations.PausedAt] + return hasPausedAt + }) +} diff --git a/internal/pkg/controller/retry.go b/internal/pkg/controller/retry.go index f81e8e923..f8af3de0f 100644 --- a/internal/pkg/controller/retry.go +++ b/internal/pkg/controller/retry.go @@ -2,16 +2,23 @@ package controller import ( "context" + "maps" "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/workload" + batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" ) // UpdateWorkloadWithRetry updates a workload with exponential backoff on conflict. // On conflict, it re-fetches the object, re-applies the reload changes, and retries. +// For Jobs and CronJobs, special handling is applied: +// - Jobs are deleted and recreated with the same spec +// - CronJobs create a new Job from their template +// For Argo Rollouts, special handling is applied based on the rollout strategy annotation. 
func UpdateWorkloadWithRetry( ctx context.Context, c client.Client, @@ -22,6 +29,31 @@ func UpdateWorkloadWithRetry( namespace string, hash string, autoReload bool, +) (bool, error) { + // Handle special workload types + switch wl.Kind() { + case workload.KindJob: + return updateJobWithRecreate(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) + case workload.KindCronJob: + return updateCronJobWithNewJob(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) + case workload.KindArgoRollout: + return updateArgoRollout(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) + default: + return updateStandardWorkload(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) + } +} + +// updateStandardWorkload updates Deployments, DaemonSets, StatefulSets, etc. +func updateStandardWorkload( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.WorkloadAccessor, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, ) (bool, error) { var updated bool isFirstAttempt := true @@ -66,3 +98,202 @@ func UpdateWorkloadWithRetry( return updated, err } + +// updateJobWithRecreate deletes the Job and recreates it with the updated spec. +// Jobs are immutable after creation, so we must delete and recreate. 
+func updateJobWithRecreate( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.WorkloadAccessor, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + jobWl, ok := wl.(*workload.JobWorkload) + if !ok { + return false, nil + } + + // Apply reload changes to the workload + updated, err := reloadService.ApplyReload( + ctx, + wl, + resourceName, + resourceType, + namespace, + hash, + autoReload, + ) + if err != nil { + return false, err + } + + if !updated { + return false, nil + } + + oldJob := jobWl.GetJob() + newJob := oldJob.DeepCopy() + + // Delete the old job with background propagation + policy := metav1.DeletePropagationBackground + if err := c.Delete(ctx, oldJob, &client.DeleteOptions{ + PropagationPolicy: &policy, + }); err != nil { + if !errors.IsNotFound(err) { + return false, err + } + } + + // Clear fields that should not be specified when creating a new Job + newJob.ResourceVersion = "" + newJob.UID = "" + newJob.CreationTimestamp = metav1.Time{} + newJob.Status = batchv1.JobStatus{} + + // Remove problematic labels that are auto-generated + delete(newJob.Spec.Template.Labels, "controller-uid") + delete(newJob.Spec.Template.Labels, batchv1.ControllerUidLabel) + delete(newJob.Spec.Template.Labels, batchv1.JobNameLabel) + delete(newJob.Spec.Template.Labels, "job-name") + + // Remove the selector to allow it to be auto-generated + newJob.Spec.Selector = nil + + // Create the new job with same spec + if err := c.Create(ctx, newJob, client.FieldOwner(FieldManager)); err != nil { + return false, err + } + + return true, nil +} + +// updateCronJobWithNewJob creates a new Job from the CronJob's template. +// CronJobs don't get updated directly; instead, a new Job is triggered. 
+func updateCronJobWithNewJob( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.WorkloadAccessor, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + cronJobWl, ok := wl.(*workload.CronJobWorkload) + if !ok { + return false, nil + } + + // Apply reload changes to get the updated spec + updated, err := reloadService.ApplyReload( + ctx, + wl, + resourceName, + resourceType, + namespace, + hash, + autoReload, + ) + if err != nil { + return false, err + } + + if !updated { + return false, nil + } + + cronJob := cronJobWl.GetCronJob() + + // Build annotations for the new Job + annotations := make(map[string]string) + annotations["cronjob.kubernetes.io/instantiate"] = "manual" + maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations) + + // Create a new Job from the CronJob template + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: cronJob.Name + "-", + Namespace: cronJob.Namespace, + Annotations: annotations, + Labels: cronJob.Spec.JobTemplate.Labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob")), + }, + }, + Spec: cronJob.Spec.JobTemplate.Spec, + } + + if err := c.Create(ctx, job, client.FieldOwner(FieldManager)); err != nil { + return false, err + } + + return true, nil +} + +// updateArgoRollout updates an Argo Rollout using its custom Update method. +// This handles the rollout strategy annotation to determine whether to do +// a standard rollout or set the restartAt field. 
+func updateArgoRollout( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.WorkloadAccessor, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + rolloutWl, ok := wl.(*workload.RolloutWorkload) + if !ok { + return false, nil + } + + var updated bool + isFirstAttempt := true + + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + // On retry, re-fetch the object to get the latest ResourceVersion + if !isFirstAttempt { + obj := rolloutWl.GetObject() + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if errors.IsNotFound(err) { + // Object was deleted, nothing to update + return nil + } + return err + } + } + isFirstAttempt = false + + // Apply reload changes (this modifies the workload in-place) + var applyErr error + updated, applyErr = reloadService.ApplyReload( + ctx, + wl, + resourceName, + resourceType, + namespace, + hash, + autoReload, + ) + if applyErr != nil { + return applyErr + } + + if !updated { + return nil + } + + // Use the RolloutWorkload's Update method which handles the rollout strategy + return rolloutWl.Update(ctx, c) + }) + + return updated, err +} diff --git a/internal/pkg/reload/pause.go b/internal/pkg/reload/pause.go new file mode 100644 index 000000000..1d00d313d --- /dev/null +++ b/internal/pkg/reload/pause.go @@ -0,0 +1,130 @@ +package reload + +import ( + "fmt" + "time" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" +) + +// PauseHandler handles pause deployment logic. +type PauseHandler struct { + cfg *config.Config +} + +// NewPauseHandler creates a new PauseHandler. +func NewPauseHandler(cfg *config.Config) *PauseHandler { + return &PauseHandler{cfg: cfg} +} + +// ShouldPause checks if a deployment should be paused after reload. 
+func (h *PauseHandler) ShouldPause(wl workload.WorkloadAccessor) bool { + if wl.Kind() != workload.KindDeployment { + return false + } + + annotations := wl.GetAnnotations() + if annotations == nil { + return false + } + + pausePeriod := annotations[h.cfg.Annotations.PausePeriod] + return pausePeriod != "" +} + +// GetPausePeriod returns the configured pause period for a workload. +func (h *PauseHandler) GetPausePeriod(wl workload.WorkloadAccessor) (time.Duration, error) { + annotations := wl.GetAnnotations() + if annotations == nil { + return 0, fmt.Errorf("no annotations on workload") + } + + pausePeriodStr := annotations[h.cfg.Annotations.PausePeriod] + if pausePeriodStr == "" { + return 0, fmt.Errorf("no pause period annotation") + } + + return time.ParseDuration(pausePeriodStr) +} + +// ApplyPause pauses a deployment and sets the paused-at annotation. +func (h *PauseHandler) ApplyPause(wl workload.WorkloadAccessor) error { + deployWl, ok := wl.(*workload.DeploymentWorkload) + if !ok { + return fmt.Errorf("workload is not a deployment") + } + + deploy := deployWl.GetDeployment() + + // Set paused flag + deploy.Spec.Paused = true + + // Set paused-at annotation + if deploy.Annotations == nil { + deploy.Annotations = make(map[string]string) + } + deploy.Annotations[h.cfg.Annotations.PausedAt] = time.Now().UTC().Format(time.RFC3339) + + return nil +} + +// CheckPauseExpired checks if the pause period has expired for a deployment. 
+func (h *PauseHandler) CheckPauseExpired(deploy *appsv1.Deployment) (expired bool, remainingTime time.Duration, err error) { + annotations := deploy.GetAnnotations() + if annotations == nil { + return false, 0, fmt.Errorf("no annotations on deployment") + } + + pausePeriodStr := annotations[h.cfg.Annotations.PausePeriod] + if pausePeriodStr == "" { + return false, 0, fmt.Errorf("no pause period annotation") + } + + pausedAtStr := annotations[h.cfg.Annotations.PausedAt] + if pausedAtStr == "" { + return false, 0, fmt.Errorf("no paused-at annotation") + } + + pausePeriod, err := time.ParseDuration(pausePeriodStr) + if err != nil { + return false, 0, fmt.Errorf("invalid pause period %q: %w", pausePeriodStr, err) + } + + pausedAt, err := time.Parse(time.RFC3339, pausedAtStr) + if err != nil { + return false, 0, fmt.Errorf("invalid paused-at time %q: %w", pausedAtStr, err) + } + + elapsed := time.Since(pausedAt) + if elapsed >= pausePeriod { + return true, 0, nil + } + + return false, pausePeriod - elapsed, nil +} + +// ClearPause removes the pause from a deployment. +func (h *PauseHandler) ClearPause(deploy *appsv1.Deployment) { + deploy.Spec.Paused = false + delete(deploy.Annotations, h.cfg.Annotations.PausedAt) + // Keep pause-period annotation (user's config) +} + +// IsPausedByReloader checks if a deployment was paused by Reloader. 
+func (h *PauseHandler) IsPausedByReloader(deploy *appsv1.Deployment) bool { + if !deploy.Spec.Paused { + return false + } + + annotations := deploy.GetAnnotations() + if annotations == nil { + return false + } + + _, hasPausedAt := annotations[h.cfg.Annotations.PausedAt] + _, hasPausePeriod := annotations[h.cfg.Annotations.PausePeriod] + + return hasPausedAt && hasPausePeriod +} diff --git a/internal/pkg/reload/pause_test.go b/internal/pkg/reload/pause_test.go new file mode 100644 index 000000000..9c7992d0f --- /dev/null +++ b/internal/pkg/reload/pause_test.go @@ -0,0 +1,327 @@ +package reload + +import ( + "testing" + "time" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestPauseHandler_ShouldPause(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + tests := []struct { + name string + workload workload.WorkloadAccessor + want bool + }{ + { + name: "deployment with pause period", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + }), + want: true, + }, + { + name: "deployment without pause period", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }), + want: false, + }, + { + name: "daemonset with pause period (ignored)", + workload: workload.NewDaemonSetWorkload(&appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + }), + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := handler.ShouldPause(tt.workload) + if got != tt.want { + t.Errorf("ShouldPause() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPauseHandler_GetPausePeriod(t *testing.T) { + cfg := config.NewDefault() 
+ handler := NewPauseHandler(cfg) + + tests := []struct { + name string + workload workload.WorkloadAccessor + wantPeriod time.Duration + wantErr bool + }{ + { + name: "valid pause period", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + }), + wantPeriod: 5 * time.Minute, + wantErr: false, + }, + { + name: "invalid pause period", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "invalid", + }, + }, + }), + wantErr: true, + }, + { + name: "no pause period annotation", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }), + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := handler.GetPausePeriod(tt.workload) + if (err != nil) != tt.wantErr { + t.Errorf("GetPausePeriod() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && got != tt.wantPeriod { + t.Errorf("GetPausePeriod() = %v, want %v", got, tt.wantPeriod) + } + }) + } +} + +func TestPauseHandler_ApplyPause(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deploy", + }, + Spec: appsv1.DeploymentSpec{ + Paused: false, + }, + } + + wl := workload.NewDeploymentWorkload(deploy) + err := handler.ApplyPause(wl) + if err != nil { + t.Fatalf("ApplyPause() error = %v", err) + } + + if !deploy.Spec.Paused { + t.Error("Expected deployment to be paused") + } + + pausedAt := deploy.Annotations[cfg.Annotations.PausedAt] + if pausedAt == "" { + t.Error("Expected paused-at annotation to be set") + } + + // Verify the timestamp is valid + _, err = time.Parse(time.RFC3339, pausedAt) + if err != nil { + t.Errorf("Invalid paused-at timestamp: %v", err) + } +} + 
+func TestPauseHandler_CheckPauseExpired(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + tests := []struct { + name string + deploy *appsv1.Deployment + wantExpired bool + wantErr bool + }{ + { + name: "pause expired", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "1ms", + cfg.Annotations.PausedAt: time.Now().Add(-time.Second).UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + wantExpired: true, + wantErr: false, + }, + { + name: "pause not expired", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "1h", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + wantExpired: false, + wantErr: false, + }, + { + name: "no paused-at annotation", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid pause period", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "invalid", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expired, _, err := handler.CheckPauseExpired(tt.deploy) + if (err != nil) != tt.wantErr { + t.Errorf("CheckPauseExpired() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && expired != tt.wantExpired { + t.Errorf("CheckPauseExpired() expired = %v, want %v", expired, tt.wantExpired) + } + }) + } +} + +func TestPauseHandler_ClearPause(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: 
map[string]string{ + cfg.Annotations.PausePeriod: "5m", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{ + Paused: true, + }, + } + + handler.ClearPause(deploy) + + if deploy.Spec.Paused { + t.Error("Expected deployment to be unpaused") + } + + if _, exists := deploy.Annotations[cfg.Annotations.PausedAt]; exists { + t.Error("Expected paused-at annotation to be removed") + } + + // Pause period should be preserved (user's config) + if deploy.Annotations[cfg.Annotations.PausePeriod] != "5m" { + t.Error("Expected pause-period annotation to be preserved") + } +} + +func TestPauseHandler_IsPausedByReloader(t *testing.T) { + cfg := config.NewDefault() + handler := NewPauseHandler(cfg) + + tests := []struct { + name string + deploy *appsv1.Deployment + want bool + }{ + { + name: "paused by reloader", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + want: true, + }, + { + name: "paused but not by reloader (no paused-at)", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + }, + }, + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + want: false, + }, + { + name: "not paused", + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + cfg.Annotations.PausePeriod: "5m", + cfg.Annotations.PausedAt: time.Now().UTC().Format(time.RFC3339), + }, + }, + Spec: appsv1.DeploymentSpec{Paused: false}, + }, + want: false, + }, + { + name: "no annotations", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{Paused: true}, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := handler.IsPausedByReloader(tt.deploy) + if got != tt.want { + 
t.Errorf("IsPausedByReloader() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/pkg/workload/deployment.go b/internal/pkg/workload/deployment.go index 0ddb5b45f..1b5ab5f89 100644 --- a/internal/pkg/workload/deployment.go +++ b/internal/pkg/workload/deployment.go @@ -192,3 +192,8 @@ func (w *DeploymentWorkload) UsesSecret(name string) bool { func (w *DeploymentWorkload) GetOwnerReferences() []metav1.OwnerReference { return w.deployment.OwnerReferences } + +// GetDeployment returns the underlying Deployment for special handling. +func (w *DeploymentWorkload) GetDeployment() *appsv1.Deployment { + return w.deployment +} diff --git a/internal/pkg/workload/registry.go b/internal/pkg/workload/registry.go index 55f55b2d6..8525d5624 100644 --- a/internal/pkg/workload/registry.go +++ b/internal/pkg/workload/registry.go @@ -3,6 +3,7 @@ package workload import ( "fmt" + argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,6 +49,11 @@ func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { return NewJobWorkload(o), nil case *batchv1.CronJob: return NewCronJobWorkload(o), nil + case *argorolloutv1alpha1.Rollout: + if !r.argoRolloutsEnabled { + return nil, fmt.Errorf("Argo Rollouts support is not enabled") + } + return NewRolloutWorkload(o), nil default: return nil, fmt.Errorf("unsupported object type: %T", obj) } diff --git a/internal/pkg/workload/rollout.go b/internal/pkg/workload/rollout.go new file mode 100644 index 000000000..7ea9643d1 --- /dev/null +++ b/internal/pkg/workload/rollout.go @@ -0,0 +1,274 @@ +package workload + +import ( + "context" + "fmt" + "time" + + argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// RolloutStrategy 
defines how Argo Rollouts are updated. +type RolloutStrategy string + +const ( + // RolloutStrategyRollout performs a standard rollout update. + RolloutStrategyRollout RolloutStrategy = "rollout" + + // RolloutStrategyRestart sets the restartAt field to trigger a restart. + RolloutStrategyRestart RolloutStrategy = "restart" +) + +// RolloutStrategyAnnotation is the annotation key for specifying the rollout strategy. +const RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy" + +// RolloutWorkload wraps an Argo Rollout. +type RolloutWorkload struct { + rollout *argorolloutv1alpha1.Rollout +} + +// NewRolloutWorkload creates a new RolloutWorkload. +func NewRolloutWorkload(r *argorolloutv1alpha1.Rollout) *RolloutWorkload { + return &RolloutWorkload{rollout: r} +} + +// Ensure RolloutWorkload implements WorkloadAccessor. +var _ WorkloadAccessor = (*RolloutWorkload)(nil) + +func (w *RolloutWorkload) Kind() Kind { + return KindArgoRollout +} + +func (w *RolloutWorkload) GetObject() client.Object { + return w.rollout +} + +func (w *RolloutWorkload) GetName() string { + return w.rollout.Name +} + +func (w *RolloutWorkload) GetNamespace() string { + return w.rollout.Namespace +} + +func (w *RolloutWorkload) GetAnnotations() map[string]string { + return w.rollout.Annotations +} + +func (w *RolloutWorkload) GetPodTemplateAnnotations() map[string]string { + if w.rollout.Spec.Template.Annotations == nil { + w.rollout.Spec.Template.Annotations = make(map[string]string) + } + return w.rollout.Spec.Template.Annotations +} + +func (w *RolloutWorkload) SetPodTemplateAnnotation(key, value string) { + if w.rollout.Spec.Template.Annotations == nil { + w.rollout.Spec.Template.Annotations = make(map[string]string) + } + w.rollout.Spec.Template.Annotations[key] = value +} + +func (w *RolloutWorkload) GetContainers() []corev1.Container { + return w.rollout.Spec.Template.Spec.Containers +} + +func (w *RolloutWorkload) SetContainers(containers []corev1.Container) { + 
w.rollout.Spec.Template.Spec.Containers = containers +} + +func (w *RolloutWorkload) GetInitContainers() []corev1.Container { + return w.rollout.Spec.Template.Spec.InitContainers +} + +func (w *RolloutWorkload) SetInitContainers(containers []corev1.Container) { + w.rollout.Spec.Template.Spec.InitContainers = containers +} + +func (w *RolloutWorkload) GetVolumes() []corev1.Volume { + return w.rollout.Spec.Template.Spec.Volumes +} + +// Update updates the Rollout. It uses the rollout strategy annotation to determine +// whether to do a standard rollout or set the restartAt field. +func (w *RolloutWorkload) Update(ctx context.Context, c client.Client) error { + strategy := w.getStrategy() + switch strategy { + case RolloutStrategyRestart: + // Use merge patch to set restartAt field + restartAt := metav1.NewTime(time.Now()) + w.rollout.Spec.RestartAt = &restartAt + } + // For both strategies, we update the rollout (annotations have already been set) + return c.Update(ctx, w.rollout) +} + +// getStrategy returns the rollout strategy from the annotation. +func (w *RolloutWorkload) getStrategy() RolloutStrategy { + annotations := w.rollout.GetAnnotations() + if annotations == nil { + return RolloutStrategyRollout + } + strategy := annotations[RolloutStrategyAnnotation] + switch RolloutStrategy(strategy) { + case RolloutStrategyRestart: + return RolloutStrategyRestart + default: + return RolloutStrategyRollout + } +} + +func (w *RolloutWorkload) DeepCopy() Workload { + return &RolloutWorkload{rollout: w.rollout.DeepCopy()} +} + +func (w *RolloutWorkload) GetEnvFromSources() []corev1.EnvFromSource { + var sources []corev1.EnvFromSource + for _, container := range w.rollout.Spec.Template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range w.rollout.Spec.Template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) 
+ } + return sources +} + +func (w *RolloutWorkload) UsesConfigMap(name string) bool { + spec := &w.rollout.Spec.Template.Spec + + // Check volumes + for _, vol := range spec.Volumes { + if vol.ConfigMap != nil && vol.ConfigMap.Name == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.ConfigMap != nil && source.ConfigMap.Name == name { + return true + } + } + } + } + + // Check containers + for _, container := range spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + // Check init containers + for _, container := range spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *RolloutWorkload) UsesSecret(name string) bool { + spec := &w.rollout.Spec.Template.Spec + + // Check volumes + for _, vol := range spec.Volumes { + if vol.Secret != nil && vol.Secret.SecretName == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.Secret != nil && source.Secret.Name == name { + return true + } + } + } + } + + // Check containers + for _, container := range spec.Containers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true 
+ } + } + } + + // Check init containers + for _, container := range spec.InitContainers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + + return false +} + +func (w *RolloutWorkload) GetOwnerReferences() []metav1.OwnerReference { + return w.rollout.OwnerReferences +} + +// GetRollout returns the underlying Rollout for special handling. +func (w *RolloutWorkload) GetRollout() *argorolloutv1alpha1.Rollout { + return w.rollout +} + +// GetStrategy returns the configured rollout strategy. +func (w *RolloutWorkload) GetStrategy() RolloutStrategy { + return w.getStrategy() +} + +// String returns a string representation of the strategy. +func (s RolloutStrategy) String() string { + return string(s) +} + +// ToRolloutStrategy converts a string to RolloutStrategy. +func ToRolloutStrategy(s string) RolloutStrategy { + switch RolloutStrategy(s) { + case RolloutStrategyRestart: + return RolloutStrategyRestart + case RolloutStrategyRollout: + return RolloutStrategyRollout + default: + return RolloutStrategyRollout + } +} + +// Validate checks if the rollout strategy is valid. 
+func (s RolloutStrategy) Validate() error { + switch s { + case RolloutStrategyRollout, RolloutStrategyRestart: + return nil + default: + return fmt.Errorf("invalid rollout strategy: %s", s) + } +} diff --git a/internal/pkg/workload/workload_test.go b/internal/pkg/workload/workload_test.go index b7cb25104..b616e0c60 100644 --- a/internal/pkg/workload/workload_test.go +++ b/internal/pkg/workload/workload_test.go @@ -3,6 +3,7 @@ package workload import ( "testing" + argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -691,4 +692,226 @@ func TestWorkloadInterface(t *testing.T) { var _ WorkloadAccessor = (*DeploymentWorkload)(nil) var _ WorkloadAccessor = (*DaemonSetWorkload)(nil) var _ WorkloadAccessor = (*StatefulSetWorkload)(nil) + var _ WorkloadAccessor = (*RolloutWorkload)(nil) +} + +// RolloutWorkload tests +func TestRolloutWorkload_BasicGetters(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rollout", + Namespace: "test-ns", + Annotations: map[string]string{ + "key": "value", + }, + }, + } + + w := NewRolloutWorkload(rollout) + + if w.Kind() != KindArgoRollout { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindArgoRollout) + } + if w.GetName() != "test-rollout" { + t.Errorf("GetName() = %v, want test-rollout", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != rollout { + t.Error("GetObject() should return the underlying rollout") + } +} + +func TestRolloutWorkload_PodTemplateAnnotations(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: argorolloutv1alpha1.RolloutSpec{ + Template: 
corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "existing": "annotation", + }, + }, + }, + }, + } + + w := NewRolloutWorkload(rollout) + + // Test get + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + // Test set + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestRolloutWorkload_GetStrategy_Default(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + } + + w := NewRolloutWorkload(rollout) + + if w.GetStrategy() != RolloutStrategyRollout { + t.Errorf("GetStrategy() = %v, want %v (default)", w.GetStrategy(), RolloutStrategyRollout) + } +} + +func TestRolloutWorkload_GetStrategy_Restart(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + RolloutStrategyAnnotation: "restart", + }, + }, + } + + w := NewRolloutWorkload(rollout) + + if w.GetStrategy() != RolloutStrategyRestart { + t.Errorf("GetStrategy() = %v, want %v", w.GetStrategy(), RolloutStrategyRestart) + } +} + +func TestRolloutWorkload_UsesConfigMap_Volume(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: argorolloutv1alpha1.RolloutSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "rollout-config", + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewRolloutWorkload(rollout) + + if !w.UsesConfigMap("rollout-config") { + t.Error("Rollout UsesConfigMap should 
return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("Rollout UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestRolloutWorkload_UsesSecret_EnvFrom(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: argorolloutv1alpha1.RolloutSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "rollout-secret", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewRolloutWorkload(rollout) + + if !w.UsesSecret("rollout-secret") { + t.Error("Rollout UsesSecret should return true for Secret envFrom") + } + if w.UsesSecret("other-secret") { + t.Error("Rollout UsesSecret should return false for non-existent Secret") + } +} + +func TestRolloutWorkload_DeepCopy(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: argorolloutv1alpha1.RolloutSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "original": "value", + }, + }, + }, + }, + } + + w := NewRolloutWorkload(rollout) + copy := w.DeepCopy() + + // Verify copy is independent + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.(*RolloutWorkload).GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +func TestRolloutStrategy_Validate(t *testing.T) { + tests := []struct { + strategy RolloutStrategy + wantErr bool + }{ + {RolloutStrategyRollout, false}, + {RolloutStrategyRestart, false}, + {RolloutStrategy("invalid"), true}, + {RolloutStrategy(""), true}, + } + + for _, tt := range tests { + err := tt.strategy.Validate() + if (err != nil) != tt.wantErr { + t.Errorf("Validate(%s) 
error = %v, wantErr %v", tt.strategy, err, tt.wantErr) + } + } +} + +func TestToRolloutStrategy(t *testing.T) { + tests := []struct { + input string + expected RolloutStrategy + }{ + {"rollout", RolloutStrategyRollout}, + {"restart", RolloutStrategyRestart}, + {"invalid", RolloutStrategyRollout}, // defaults to rollout + {"", RolloutStrategyRollout}, // defaults to rollout + } + + for _, tt := range tests { + result := ToRolloutStrategy(tt.input) + if result != tt.expected { + t.Errorf("ToRolloutStrategy(%s) = %v, want %v", tt.input, result, tt.expected) + } + } } From 8b3ad893362f3afbe82cd783d9a81b8511330ac5 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:54 +0100 Subject: [PATCH 09/35] chore: Fix formatting issues --- internal/pkg/alerting/gchat.go | 4 ++-- internal/pkg/leadership/leadership_test.go | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/internal/pkg/alerting/gchat.go b/internal/pkg/alerting/gchat.go index 73e2b303f..8ad0c2f7f 100644 --- a/internal/pkg/alerting/gchat.go +++ b/internal/pkg/alerting/gchat.go @@ -24,8 +24,8 @@ func NewGChatAlerter(webhookURL, proxyURL, additional string) *GChatAlerter { // gchatMessage represents a Google Chat message. 
type gchatMessage struct { - Text string `json:"text,omitempty"` - Cards []gchatCard `json:"cards,omitempty"` + Text string `json:"text,omitempty"` + Cards []gchatCard `json:"cards,omitempty"` } type gchatCard struct { diff --git a/internal/pkg/leadership/leadership_test.go b/internal/pkg/leadership/leadership_test.go index eed070561..d850e7a9e 100644 --- a/internal/pkg/leadership/leadership_test.go +++ b/internal/pkg/leadership/leadership_test.go @@ -1,3 +1,6 @@ +//go:build integration +// +build integration + package leadership import ( From f48c5ac1b3f8c0bc9850533175f91b727d04ccb1 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:54 +0100 Subject: [PATCH 10/35] feat: Migration to new implementation --- go.mod | 17 +- go.sum | 43 +- internal/pkg/alerts/alert.go | 154 - internal/pkg/alerts/slack_alert.go | 61 - internal/pkg/app/app.go | 9 - internal/pkg/callbacks/rolling_upgrade.go | 579 --- .../pkg/callbacks/rolling_upgrade_test.go | 773 --- internal/pkg/cmd/reloader.go | 265 - internal/pkg/config/config.go | 171 +- internal/pkg/config/flags.go | 14 +- internal/pkg/constants/constants.go | 27 +- internal/pkg/constants/enums.go | 15 - .../pkg/controller/configmap_reconciler.go | 28 + internal/pkg/controller/controller.go | 282 -- internal/pkg/controller/controller_test.go | 2368 --------- internal/pkg/controller/manager.go | 65 +- internal/pkg/controller/secret_reconciler.go | 28 + internal/pkg/crypto/sha.go | 20 - internal/pkg/crypto/sha_test.go | 15 - internal/pkg/handler/create.go | 47 - internal/pkg/handler/delete.go | 100 - internal/pkg/handler/handler.go | 9 - internal/pkg/handler/pause_deployment.go | 242 - internal/pkg/handler/pause_deployment_test.go | 391 -- internal/pkg/handler/update.go | 53 - internal/pkg/handler/upgrade.go | 619 --- internal/pkg/handler/upgrade_test.go | 4288 ----------------- internal/pkg/leadership/leadership.go | 107 - 
internal/pkg/leadership/leadership_test.go | 216 - internal/pkg/metadata/metadata.go | 35 +- internal/pkg/metadata/metadata_test.go | 14 +- internal/pkg/options/flags.go | 92 - internal/pkg/reload/hasher.go | 11 - internal/pkg/reload/matcher.go | 63 +- internal/pkg/reload/matcher_test.go | 6 +- internal/pkg/reload/pause_test.go | 8 +- internal/pkg/reload/predicate_test.go | 48 +- internal/pkg/reload/service.go | 75 - internal/pkg/reload/strategy.go | 42 +- internal/pkg/testutil/kube.go | 1231 ----- internal/pkg/util/interface.go | 50 - internal/pkg/util/util.go | 128 - internal/pkg/util/util_test.go | 186 - main.go | 14 - pkg/common/common.go | 358 -- pkg/common/common_test.go | 224 - pkg/common/config.go | 48 - pkg/common/metainfo.go | 129 - pkg/common/reload_source.go | 39 - pkg/kube/client.go | 118 - pkg/kube/resourcemapper.go | 13 - 51 files changed, 269 insertions(+), 13669 deletions(-) delete mode 100644 internal/pkg/alerts/alert.go delete mode 100644 internal/pkg/alerts/slack_alert.go delete mode 100644 internal/pkg/app/app.go delete mode 100644 internal/pkg/callbacks/rolling_upgrade.go delete mode 100644 internal/pkg/callbacks/rolling_upgrade_test.go delete mode 100644 internal/pkg/cmd/reloader.go delete mode 100644 internal/pkg/constants/enums.go delete mode 100644 internal/pkg/controller/controller.go delete mode 100644 internal/pkg/controller/controller_test.go delete mode 100644 internal/pkg/crypto/sha.go delete mode 100644 internal/pkg/crypto/sha_test.go delete mode 100644 internal/pkg/handler/create.go delete mode 100644 internal/pkg/handler/delete.go delete mode 100644 internal/pkg/handler/handler.go delete mode 100644 internal/pkg/handler/pause_deployment.go delete mode 100644 internal/pkg/handler/pause_deployment_test.go delete mode 100644 internal/pkg/handler/update.go delete mode 100644 internal/pkg/handler/upgrade.go delete mode 100644 internal/pkg/handler/upgrade_test.go delete mode 100644 internal/pkg/leadership/leadership.go delete mode 
100644 internal/pkg/leadership/leadership_test.go delete mode 100644 internal/pkg/options/flags.go delete mode 100644 internal/pkg/testutil/kube.go delete mode 100644 internal/pkg/util/interface.go delete mode 100644 internal/pkg/util/util.go delete mode 100644 internal/pkg/util/util_test.go delete mode 100644 main.go delete mode 100644 pkg/common/common.go delete mode 100644 pkg/common/common_test.go delete mode 100644 pkg/common/config.go delete mode 100644 pkg/common/metainfo.go delete mode 100644 pkg/common/reload_source.go delete mode 100644 pkg/kube/client.go delete mode 100644 pkg/kube/resourcemapper.go diff --git a/go.mod b/go.mod index bff72edbf..206a9d1b2 100644 --- a/go.mod +++ b/go.mod @@ -5,19 +5,14 @@ go 1.25.5 require ( github.com/argoproj/argo-rollouts v1.8.3 github.com/go-logr/logr v1.4.2 - github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 - github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 - github.com/parnurzeal/gorequest v0.3.0 + github.com/go-logr/zerologr v1.2.3 github.com/prometheus/client_golang v1.22.0 - github.com/sirupsen/logrus v1.9.3 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.9 - github.com/stretchr/testify v1.10.0 k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 - k8s.io/kubectl v0.32.3 - k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 sigs.k8s.io/controller-runtime v0.19.4 ) @@ -25,7 +20,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -42,18 +36,16 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect 
github.com/json-iterator/go v1.1.12 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/moul/http2curl v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.63.0 // indirect github.com/prometheus/procfs v0.16.0 // indirect - github.com/smartystreets/goconvey v1.7.2 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect golang.org/x/net v0.39.0 // indirect @@ -70,6 +62,7 @@ require ( k8s.io/apiextensions-apiserver v0.31.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect diff --git a/go.sum b/go.sum index 945a27578..02ee8daab 100644 --- a/go.sum +++ b/go.sum @@ -4,13 +4,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod 
h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg0Kys0ZbaNmDDzZ2R/C7DTi+bbsJ0= -github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= @@ -25,6 +24,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-logr/zerologr v1.2.3 h1:up5N9vcH9Xck3jJkXzgyOxozT14R47IyDODz8LM1KSs= +github.com/go-logr/zerologr v1.2.3/go.mod h1:BxwGo7y5zgSHYR1BjbnHPyF/5ZjVKfKxAZANVu6E8Ho= github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= @@ -33,6 +34,7 @@ github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZ github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-task/slim-sprig/v3 
v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -49,16 +51,12 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= @@ -71,25 +69,23 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= -github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/onsi/ginkgo/v2 
v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 h1:8x3G8QOZqo2bRAL8JFlPz/odqQECI/XmlZeRwnFxJ8I= -github.com/openshift/api v0.0.0-20250411135543-10a8fa583797/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= -github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ= -github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo= -github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI= -github.com/parnurzeal/gorequest v0.3.0/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -105,20 +101,16 @@ github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2b github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.9.3 
h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= -github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= -github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -138,7 +130,6 @@ golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6R golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -153,7 +144,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= @@ -165,7 +158,6 @@ golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -186,7 +178,6 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= @@ -201,8 +192,6 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= -k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= diff --git a/internal/pkg/alerts/alert.go b/internal/pkg/alerts/alert.go deleted file mode 100644 index 6b9568ff0..000000000 --- a/internal/pkg/alerts/alert.go +++ /dev/null @@ -1,154 +0,0 @@ -package alert - -import ( - "fmt" - "os" - "strings" - - "github.com/parnurzeal/gorequest" - "github.com/sirupsen/logrus" -) - -type AlertSink string - -const ( - AlertSinkSlack AlertSink = "slack" - AlertSinkTeams AlertSink = "teams" - AlertSinkGoogleChat AlertSink 
= "gchat" - AlertSinkRaw AlertSink = "raw" -) - -// function to send alert msg to webhook service -func SendWebhookAlert(msg string) { - webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL") - if !ok { - logrus.Error("ALERT_WEBHOOK_URL env variable not provided") - return - } - webhook_url = strings.TrimSpace(webhook_url) - alert_sink := os.Getenv("ALERT_SINK") - alert_sink = strings.ToLower(strings.TrimSpace(alert_sink)) - - // Provision to add Proxy to reach webhook server if required - webhook_proxy := os.Getenv("ALERT_WEBHOOK_PROXY") - webhook_proxy = strings.TrimSpace(webhook_proxy) - - // Provision to add Additional information in the alert. e.g ClusterName - alert_additional_info, ok := os.LookupEnv("ALERT_ADDITIONAL_INFO") - if ok { - alert_additional_info = strings.TrimSpace(alert_additional_info) - msg = fmt.Sprintf("%s : %s", alert_additional_info, msg) - } - - switch AlertSink(alert_sink) { - case AlertSinkSlack: - sendSlackAlert(webhook_url, webhook_proxy, msg) - case AlertSinkTeams: - sendTeamsAlert(webhook_url, webhook_proxy, msg) - case AlertSinkGoogleChat: - sendGoogleChatAlert(webhook_url, webhook_proxy, msg) - default: - msg = strings.ReplaceAll(msg, "*", "") - sendRawWebhookAlert(webhook_url, webhook_proxy, msg) - } -} - -// function to handle server redirection -func redirectPolicy(req gorequest.Request, via []gorequest.Request) error { - return fmt.Errorf("incorrect token (redirection)") -} - -// function to send alert to slack -func sendSlackAlert(webhookUrl string, proxy string, msg string) []error { - attachment := Attachment{ - Text: msg, - Color: "good", - AuthorName: "Reloader", - } - - payload := WebhookMessage{ - Attachments: []Attachment{attachment}, - } - - request := gorequest.New().Proxy(proxy) - resp, _, err := request. - Post(webhookUrl). - RedirectPolicy(redirectPolicy). - Send(payload). - End() - - if err != nil { - return err - } - if resp.StatusCode >= 400 { - return []error{fmt.Errorf("error sending msg. 
status: %v", resp.Status)} - } - - return nil -} - -// function to send alert to Microsoft Teams webhook -func sendTeamsAlert(webhookUrl string, proxy string, msg string) []error { - attachment := Attachment{ - Text: msg, - } - - request := gorequest.New().Proxy(proxy) - resp, _, err := request. - Post(webhookUrl). - RedirectPolicy(redirectPolicy). - Send(attachment). - End() - - if err != nil { - return err - } - if resp.StatusCode != 200 { - return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)} - } - - return nil -} - -// function to send alert to Google Chat webhook -func sendGoogleChatAlert(webhookUrl string, proxy string, msg string) []error { - payload := map[string]interface{}{ - "text": msg, - } - - request := gorequest.New().Proxy(proxy) - resp, _, err := request. - Post(webhookUrl). - RedirectPolicy(redirectPolicy). - Send(payload). - End() - - if err != nil { - return err - } - if resp.StatusCode != 200 { - return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)} - } - - return nil -} - -// function to send alert to webhook service as text -func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error { - request := gorequest.New().Proxy(proxy) - resp, _, err := request. - Post(webhookUrl). - Type("text"). - RedirectPolicy(redirectPolicy). - Send(msg). - End() - - if err != nil { - return err - } - if resp.StatusCode >= 400 { - return []error{fmt.Errorf("error sending msg. 
status: %v", resp.Status)} - } - - return nil -} diff --git a/internal/pkg/alerts/slack_alert.go b/internal/pkg/alerts/slack_alert.go deleted file mode 100644 index a21727a25..000000000 --- a/internal/pkg/alerts/slack_alert.go +++ /dev/null @@ -1,61 +0,0 @@ -package alert - -type WebhookMessage struct { - Username string `json:"username,omitempty"` - IconEmoji string `json:"icon_emoji,omitempty"` - IconURL string `json:"icon_url,omitempty"` - Channel string `json:"channel,omitempty"` - ThreadTimestamp string `json:"thread_ts,omitempty"` - Text string `json:"text,omitempty"` - Attachments []Attachment `json:"attachments,omitempty"` - Parse string `json:"parse,omitempty"` - ResponseType string `json:"response_type,omitempty"` - ReplaceOriginal bool `json:"replace_original,omitempty"` - DeleteOriginal bool `json:"delete_original,omitempty"` - ReplyBroadcast bool `json:"reply_broadcast,omitempty"` -} - -type Attachment struct { - Color string `json:"color,omitempty"` - Fallback string `json:"fallback,omitempty"` - - CallbackID string `json:"callback_id,omitempty"` - ID int `json:"id,omitempty"` - - AuthorID string `json:"author_id,omitempty"` - AuthorName string `json:"author_name,omitempty"` - AuthorSubname string `json:"author_subname,omitempty"` - AuthorLink string `json:"author_link,omitempty"` - AuthorIcon string `json:"author_icon,omitempty"` - - Title string `json:"title,omitempty"` - TitleLink string `json:"title_link,omitempty"` - Pretext string `json:"pretext,omitempty"` - Text string `json:"text,omitempty"` - - ImageURL string `json:"image_url,omitempty"` - ThumbURL string `json:"thumb_url,omitempty"` - - ServiceName string `json:"service_name,omitempty"` - ServiceIcon string `json:"service_icon,omitempty"` - FromURL string `json:"from_url,omitempty"` - OriginalURL string `json:"original_url,omitempty"` - - MarkdownIn []string `json:"mrkdwn_in,omitempty"` - - Footer string `json:"footer,omitempty"` - FooterIcon string `json:"footer_icon,omitempty"` -} - 
-type Field struct { - Title string `json:"title"` - Value string `json:"value"` - Short bool `json:"short"` -} - -type Action struct { - Type string `json:"type"` - Text string `json:"text"` - Url string `json:"url"` - Style string `json:"style"` -} diff --git a/internal/pkg/app/app.go b/internal/pkg/app/app.go deleted file mode 100644 index 8d09188fc..000000000 --- a/internal/pkg/app/app.go +++ /dev/null @@ -1,9 +0,0 @@ -package app - -import "github.com/stakater/Reloader/internal/pkg/cmd" - -// Run runs the command -func Run() error { - cmd := cmd.NewReloaderCommand() - return cmd.Execute() -} diff --git a/internal/pkg/callbacks/rolling_upgrade.go b/internal/pkg/callbacks/rolling_upgrade.go deleted file mode 100644 index 13e5a63cd..000000000 --- a/internal/pkg/callbacks/rolling_upgrade.go +++ /dev/null @@ -1,579 +0,0 @@ -package callbacks - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/kube" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - - "maps" - - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" -) - -// ItemFunc is a generic function to return a specific resource in given namespace -type ItemFunc func(kube.Clients, string, string) (runtime.Object, error) - -// ItemsFunc is a generic function to return a specific resource array in given namespace -type ItemsFunc func(kube.Clients, string) []runtime.Object - -// ContainersFunc is a generic func to return containers -type ContainersFunc func(runtime.Object) []v1.Container - -// InitContainersFunc is a generic func to return containers -type InitContainersFunc func(runtime.Object) []v1.Container - -// VolumesFunc is a generic func to return volumes -type VolumesFunc 
func(runtime.Object) []v1.Volume - -// UpdateFunc performs the resource update -type UpdateFunc func(kube.Clients, string, runtime.Object) error - -// PatchFunc performs the resource patch -type PatchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error - -// PatchTemplateFunc is a generic func to return strategic merge JSON patch template -type PatchTemplatesFunc func() PatchTemplates - -// AnnotationsFunc is a generic func to return annotations -type AnnotationsFunc func(runtime.Object) map[string]string - -// PodAnnotationsFunc is a generic func to return annotations -type PodAnnotationsFunc func(runtime.Object) map[string]string - -// RollingUpgradeFuncs contains generic functions to perform rolling upgrade -type RollingUpgradeFuncs struct { - ItemFunc ItemFunc - ItemsFunc ItemsFunc - AnnotationsFunc AnnotationsFunc - PodAnnotationsFunc PodAnnotationsFunc - ContainersFunc ContainersFunc - ContainerPatchPathFunc ContainersFunc - InitContainersFunc InitContainersFunc - UpdateFunc UpdateFunc - PatchFunc PatchFunc - PatchTemplatesFunc PatchTemplatesFunc - VolumesFunc VolumesFunc - ResourceType string - SupportsPatch bool -} - -// PatchTemplates contains merge JSON patch templates -type PatchTemplates struct { - AnnotationTemplate string - EnvVarTemplate string - DeleteEnvVarTemplate string -} - -// GetDeploymentItem returns the deployment in given namespace -func GetDeploymentItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - deployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get deployment %v", err) - return nil, err - } - - if deployment.Spec.Template.Annotations == nil { - annotations := make(map[string]string) - deployment.Spec.Template.Annotations = annotations - } - - return deployment, nil -} - -// GetDeploymentItems returns the deployments in given namespace -func 
GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object { - deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list deployments %v", err) - } - - items := make([]runtime.Object, len(deployments.Items)) - // Ensure we always have pod annotations to add to - for i, v := range deployments.Items { - if v.Spec.Template.Annotations == nil { - annotations := make(map[string]string) - deployments.Items[i].Spec.Template.Annotations = annotations - } - items[i] = &deployments.Items[i] - } - - return items -} - -// GetCronJobItem returns the job in given namespace -func GetCronJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - cronjob, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get cronjob %v", err) - return nil, err - } - - return cronjob, nil -} - -// GetCronJobItems returns the jobs in given namespace -func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object { - cronjobs, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list cronjobs %v", err) - } - - items := make([]runtime.Object, len(cronjobs.Items)) - // Ensure we always have pod annotations to add to - for i, v := range cronjobs.Items { - if v.Spec.JobTemplate.Spec.Template.Annotations == nil { - annotations := make(map[string]string) - cronjobs.Items[i].Spec.JobTemplate.Spec.Template.Annotations = annotations - } - items[i] = &cronjobs.Items[i] - } - - return items -} - -// GetJobItem returns the job in given namespace -func GetJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - job, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Get(context.TODO(), name, 
meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get job %v", err) - return nil, err - } - - return job, nil -} - -// GetJobItems returns the jobs in given namespace -func GetJobItems(clients kube.Clients, namespace string) []runtime.Object { - jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list jobs %v", err) - } - - items := make([]runtime.Object, len(jobs.Items)) - // Ensure we always have pod annotations to add to - for i, v := range jobs.Items { - if v.Spec.Template.Annotations == nil { - annotations := make(map[string]string) - jobs.Items[i].Spec.Template.Annotations = annotations - } - items[i] = &jobs.Items[i] - } - - return items -} - -// GetDaemonSetItem returns the daemonSet in given namespace -func GetDaemonSetItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - daemonSet, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get daemonSet %v", err) - return nil, err - } - - return daemonSet, nil -} - -// GetDaemonSetItems returns the daemonSets in given namespace -func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object { - daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list daemonSets %v", err) - } - - items := make([]runtime.Object, len(daemonSets.Items)) - // Ensure we always have pod annotations to add to - for i, v := range daemonSets.Items { - if v.Spec.Template.Annotations == nil { - daemonSets.Items[i].Spec.Template.Annotations = make(map[string]string) - } - items[i] = &daemonSets.Items[i] - } - - return items -} - -// GetStatefulSetItem returns the statefulSet in given namespace -func GetStatefulSetItem(clients kube.Clients, name string, namespace 
string) (runtime.Object, error) { - statefulSet, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get statefulSet %v", err) - return nil, err - } - - return statefulSet, nil -} - -// GetStatefulSetItems returns the statefulSets in given namespace -func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object { - statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list statefulSets %v", err) - } - - items := make([]runtime.Object, len(statefulSets.Items)) - // Ensure we always have pod annotations to add to - for i, v := range statefulSets.Items { - if v.Spec.Template.Annotations == nil { - statefulSets.Items[i].Spec.Template.Annotations = make(map[string]string) - } - items[i] = &statefulSets.Items[i] - } - - return items -} - -// GetRolloutItem returns the rollout in given namespace -func GetRolloutItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) { - rollout, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), name, meta_v1.GetOptions{}) - if err != nil { - logrus.Errorf("Failed to get Rollout %v", err) - return nil, err - } - - return rollout, nil -} - -// GetRolloutItems returns the rollouts in given namespace -func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object { - rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{}) - if err != nil { - logrus.Errorf("Failed to list Rollouts %v", err) - } - - items := make([]runtime.Object, len(rollouts.Items)) - // Ensure we always have pod annotations to add to - for i, v := range rollouts.Items { - if v.Spec.Template.Annotations == nil { - rollouts.Items[i].Spec.Template.Annotations = make(map[string]string) - } - 
items[i] = &rollouts.Items[i] - } - - return items -} - -// GetDeploymentAnnotations returns the annotations of given deployment -func GetDeploymentAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.Deployment).Annotations == nil { - item.(*appsv1.Deployment).Annotations = make(map[string]string) - } - return item.(*appsv1.Deployment).Annotations -} - -// GetCronJobAnnotations returns the annotations of given cronjob -func GetCronJobAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.CronJob).Annotations == nil { - item.(*batchv1.CronJob).Annotations = make(map[string]string) - } - return item.(*batchv1.CronJob).Annotations -} - -// GetJobAnnotations returns the annotations of given job -func GetJobAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.Job).Annotations == nil { - item.(*batchv1.Job).Annotations = make(map[string]string) - } - return item.(*batchv1.Job).Annotations -} - -// GetDaemonSetAnnotations returns the annotations of given daemonSet -func GetDaemonSetAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.DaemonSet).Annotations == nil { - item.(*appsv1.DaemonSet).Annotations = make(map[string]string) - } - return item.(*appsv1.DaemonSet).Annotations -} - -// GetStatefulSetAnnotations returns the annotations of given statefulSet -func GetStatefulSetAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.StatefulSet).Annotations == nil { - item.(*appsv1.StatefulSet).Annotations = make(map[string]string) - } - return item.(*appsv1.StatefulSet).Annotations -} - -// GetRolloutAnnotations returns the annotations of given rollout -func GetRolloutAnnotations(item runtime.Object) map[string]string { - if item.(*argorolloutv1alpha1.Rollout).Annotations == nil { - item.(*argorolloutv1alpha1.Rollout).Annotations = make(map[string]string) - } - return item.(*argorolloutv1alpha1.Rollout).Annotations -} - -// GetDeploymentPodAnnotations returns the pod's annotations 
of given deployment -func GetDeploymentPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.Deployment).Spec.Template.Annotations == nil { - item.(*appsv1.Deployment).Spec.Template.Annotations = make(map[string]string) - } - return item.(*appsv1.Deployment).Spec.Template.Annotations -} - -// GetCronJobPodAnnotations returns the pod's annotations of given cronjob -func GetCronJobPodAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations == nil { - item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) - } - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations -} - -// GetJobPodAnnotations returns the pod's annotations of given job -func GetJobPodAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.Job).Spec.Template.Annotations == nil { - item.(*batchv1.Job).Spec.Template.Annotations = make(map[string]string) - } - return item.(*batchv1.Job).Spec.Template.Annotations -} - -// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet -func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.DaemonSet).Spec.Template.Annotations == nil { - item.(*appsv1.DaemonSet).Spec.Template.Annotations = make(map[string]string) - } - return item.(*appsv1.DaemonSet).Spec.Template.Annotations -} - -// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet -func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.StatefulSet).Spec.Template.Annotations == nil { - item.(*appsv1.StatefulSet).Spec.Template.Annotations = make(map[string]string) - } - return item.(*appsv1.StatefulSet).Spec.Template.Annotations -} - -// GetRolloutPodAnnotations returns the pod's annotations of given rollout -func GetRolloutPodAnnotations(item runtime.Object) map[string]string { - if 
item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations == nil { - item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations = make(map[string]string) - } - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations -} - -// GetDeploymentContainers returns the containers of given deployment -func GetDeploymentContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.Deployment).Spec.Template.Spec.Containers -} - -// GetCronJobContainers returns the containers of given cronjob -func GetCronJobContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Containers -} - -// GetJobContainers returns the containers of given job -func GetJobContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.Job).Spec.Template.Spec.Containers -} - -// GetDaemonSetContainers returns the containers of given daemonSet -func GetDaemonSetContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers -} - -// GetStatefulSetContainers returns the containers of given statefulSet -func GetStatefulSetContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers -} - -// GetRolloutContainers returns the containers of given rollout -func GetRolloutContainers(item runtime.Object) []v1.Container { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers -} - -// GetDeploymentInitContainers returns the containers of given deployment -func GetDeploymentInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.Deployment).Spec.Template.Spec.InitContainers -} - -// GetCronJobInitContainers returns the containers of given cronjob -func GetCronJobInitContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.InitContainers -} - -// GetJobInitContainers returns the containers of given job -func 
GetJobInitContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.Job).Spec.Template.Spec.InitContainers -} - -// GetDaemonSetInitContainers returns the containers of given daemonSet -func GetDaemonSetInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers -} - -// GetStatefulSetInitContainers returns the containers of given statefulSet -func GetStatefulSetInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers -} - -// GetRolloutInitContainers returns the containers of given rollout -func GetRolloutInitContainers(item runtime.Object) []v1.Container { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers -} - -// GetPatchTemplates returns patch templates -func GetPatchTemplates() PatchTemplates { - return PatchTemplates{ - AnnotationTemplate: `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, // strategic merge patch - EnvVarTemplate: `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`, // strategic merge patch - DeleteEnvVarTemplate: `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`, // JSON patch - } -} - -// UpdateDeployment performs rolling upgrade on deployment -func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error { - deployment := resource.(*appsv1.Deployment) - _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"}) - return err -} - -// PatchDeployment performs rolling upgrade on deployment -func PatchDeployment(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - deployment := resource.(*appsv1.Deployment) - _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Patch(context.TODO(), 
deployment.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) - return err -} - -// CreateJobFromCronjob performs rolling upgrade on cronjob -func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runtime.Object) error { - cronJob := resource.(*batchv1.CronJob) - - annotations := make(map[string]string) - annotations["cronjob.kubernetes.io/instantiate"] = "manual" - maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations) - - job := &batchv1.Job{ - ObjectMeta: meta_v1.ObjectMeta{ - GenerateName: cronJob.Name + "-", - Namespace: cronJob.Namespace, - Annotations: annotations, - Labels: cronJob.Spec.JobTemplate.Labels, - OwnerReferences: []meta_v1.OwnerReference{*meta_v1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob"))}, - }, - Spec: cronJob.Spec.JobTemplate.Spec, - } - _, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"}) - return err -} - -func PatchCronJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - return errors.New("not supported patching: CronJob") -} - -// ReCreateJobFromjob performs rolling upgrade on job -func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error { - oldJob := resource.(*batchv1.Job) - job := oldJob.DeepCopy() - - // Delete the old job - policy := meta_v1.DeletePropagationBackground - err := clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), job.Name, meta_v1.DeleteOptions{PropagationPolicy: &policy}) - if err != nil { - return err - } - - // Remove fields that should not be specified when creating a new Job - job.ResourceVersion = "" - job.UID = "" - job.CreationTimestamp = meta_v1.Time{} - job.Status = batchv1.JobStatus{} - - // Remove problematic labels - delete(job.Spec.Template.Labels, "controller-uid") - delete(job.Spec.Template.Labels, 
batchv1.ControllerUidLabel) - delete(job.Spec.Template.Labels, batchv1.JobNameLabel) - delete(job.Spec.Template.Labels, "job-name") - - // Remove the selector to allow it to be auto-generated - job.Spec.Selector = nil - - // Create the new job with same spec - _, err = clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"}) - return err -} - -func PatchJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - return errors.New("not supported patching: Job") -} - -// UpdateDaemonSet performs rolling upgrade on daemonSet -func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error { - daemonSet := resource.(*appsv1.DaemonSet) - _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"}) - return err -} - -func PatchDaemonSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - daemonSet := resource.(*appsv1.DaemonSet) - _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Patch(context.TODO(), daemonSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) - return err -} - -// UpdateStatefulSet performs rolling upgrade on statefulSet -func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error { - statefulSet := resource.(*appsv1.StatefulSet) - _, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"}) - return err -} - -func PatchStatefulSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - statefulSet := resource.(*appsv1.StatefulSet) - _, err := 
clients.KubernetesClient.AppsV1().StatefulSets(namespace).Patch(context.TODO(), statefulSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) - return err -} - -// UpdateRollout performs rolling upgrade on rollout -func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error { - rollout := resource.(*argorolloutv1alpha1.Rollout) - strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation] - var err error - switch options.ToArgoRolloutStrategy(strategy) { - case options.RestartStrategy: - _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"}) - case options.RolloutStrategy: - _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"}) - } - return err -} - -func PatchRollout(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - return errors.New("not supported patching: Rollout") -} - -// GetDeploymentVolumes returns the Volumes of given deployment -func GetDeploymentVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes -} - -// GetCronJobVolumes returns the Volumes of given cronjob -func GetCronJobVolumes(item runtime.Object) []v1.Volume { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Volumes -} - -// GetJobVolumes returns the Volumes of given job -func GetJobVolumes(item runtime.Object) []v1.Volume { - return item.(*batchv1.Job).Spec.Template.Spec.Volumes -} - -// GetDaemonSetVolumes returns the Volumes of given daemonSet -func GetDaemonSetVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes -} - -// 
GetStatefulSetVolumes returns the Volumes of given statefulSet -func GetStatefulSetVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes -} - -// GetRolloutVolumes returns the Volumes of given rollout -func GetRolloutVolumes(item runtime.Object) []v1.Volume { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes -} diff --git a/internal/pkg/callbacks/rolling_upgrade_test.go b/internal/pkg/callbacks/rolling_upgrade_test.go deleted file mode 100644 index 452867f47..000000000 --- a/internal/pkg/callbacks/rolling_upgrade_test.go +++ /dev/null @@ -1,773 +0,0 @@ -package callbacks_test - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes/fake" - - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" - patchtypes "k8s.io/apimachinery/pkg/types" - - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/pkg/kube" -) - -var ( - clients = setupTestClients() -) - -type testFixtures struct { - defaultContainers []v1.Container - defaultInitContainers []v1.Container - defaultVolumes []v1.Volume - namespace string -} - -func newTestFixtures() testFixtures { - return testFixtures{ - defaultContainers: []v1.Container{{Name: "container1"}, {Name: "container2"}}, - defaultInitContainers: []v1.Container{{Name: "init-container1"}, {Name: "init-container2"}}, - defaultVolumes: []v1.Volume{{Name: "volume1"}, {Name: "volume2"}}, - 
namespace: "default", - } -} - -func setupTestClients() kube.Clients { - return kube.Clients{ - KubernetesClient: fake.NewSimpleClientset(), - ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(), - } -} - -// TestUpdateRollout test update rollout strategy annotation -func TestUpdateRollout(t *testing.T) { - namespace := "test-ns" - - cases := map[string]struct { - name string - strategy string - isRestart bool - }{ - "test-without-strategy": { - name: "defaults to rollout strategy", - strategy: "", - isRestart: false, - }, - "test-with-restart-strategy": { - name: "triggers a restart strategy", - strategy: "restart", - isRestart: true, - }, - "test-with-rollout-strategy": { - name: "triggers a rollout strategy", - strategy: "rollout", - isRestart: false, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - rollout, err := testutil.CreateRollout( - clients.ArgoRolloutClient, name, namespace, - map[string]string{options.RolloutStrategyAnnotation: tc.strategy}, - ) - if err != nil { - t.Errorf("creating rollout: %v", err) - } - modifiedChan := watchRollout(rollout.Name, namespace) - - err = callbacks.UpdateRollout(clients, namespace, rollout) - if err != nil { - t.Errorf("updating rollout: %v", err) - } - rollout, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts( - namespace).Get(context.TODO(), rollout.Name, metav1.GetOptions{}) - - if err != nil { - t.Errorf("getting rollout: %v", err) - } - if isRestartStrategy(rollout) == tc.isRestart { - t.Errorf("Should not be a restart strategy") - } - select { - case <-modifiedChan: - // object has been modified - case <-time.After(1 * time.Second): - t.Errorf("Rollout has not been updated") - } - }) - } -} - -func TestPatchRollout(t *testing.T) { - namespace := "test-ns" - rollout := testutil.GetRollout(namespace, "test", map[string]string{options.RolloutStrategyAnnotation: ""}) - err := callbacks.PatchRollout(clients, namespace, rollout, patchtypes.StrategicMergePatchType, 
[]byte(`{"spec": {}}`)) - assert.EqualError(t, err, "not supported patching: Rollout") -} - -func TestResourceItem(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - createFunc func(kube.Clients, string, string) (runtime.Object, error) - getItemFunc func(kube.Clients, string, string) (runtime.Object, error) - deleteFunc func(kube.Clients, string, string) error - }{ - { - name: "Deployment", - createFunc: createTestDeploymentWithAnnotations, - getItemFunc: callbacks.GetDeploymentItem, - deleteFunc: deleteTestDeployment, - }, - { - name: "CronJob", - createFunc: createTestCronJobWithAnnotations, - getItemFunc: callbacks.GetCronJobItem, - deleteFunc: deleteTestCronJob, - }, - { - name: "Job", - createFunc: createTestJobWithAnnotations, - getItemFunc: callbacks.GetJobItem, - deleteFunc: deleteTestJob, - }, - { - name: "DaemonSet", - createFunc: createTestDaemonSetWithAnnotations, - getItemFunc: callbacks.GetDaemonSetItem, - deleteFunc: deleteTestDaemonSet, - }, - { - name: "StatefulSet", - createFunc: createTestStatefulSetWithAnnotations, - getItemFunc: callbacks.GetStatefulSetItem, - deleteFunc: deleteTestStatefulSet, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resource, err := tt.createFunc(clients, fixtures.namespace, "1") - assert.NoError(t, err) - - accessor, err := meta.Accessor(resource) - assert.NoError(t, err) - - _, err = tt.getItemFunc(clients, accessor.GetName(), fixtures.namespace) - assert.NoError(t, err) - - err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName()) - assert.NoError(t, err) - }) - } -} - -func TestResourceItems(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - createFunc func(kube.Clients, string) error - getItemsFunc func(kube.Clients, string) []runtime.Object - deleteFunc func(kube.Clients, string) error - expectedCount int - }{ - { - name: "Deployments", - createFunc: createTestDeployments, - getItemsFunc: 
callbacks.GetDeploymentItems, - deleteFunc: deleteTestDeployments, - expectedCount: 2, - }, - { - name: "CronJobs", - createFunc: createTestCronJobs, - getItemsFunc: callbacks.GetCronJobItems, - deleteFunc: deleteTestCronJobs, - expectedCount: 2, - }, - { - name: "Jobs", - createFunc: createTestJobs, - getItemsFunc: callbacks.GetJobItems, - deleteFunc: deleteTestJobs, - expectedCount: 2, - }, - { - name: "DaemonSets", - createFunc: createTestDaemonSets, - getItemsFunc: callbacks.GetDaemonSetItems, - deleteFunc: deleteTestDaemonSets, - expectedCount: 2, - }, - { - name: "StatefulSets", - createFunc: createTestStatefulSets, - getItemsFunc: callbacks.GetStatefulSetItems, - deleteFunc: deleteTestStatefulSets, - expectedCount: 2, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.createFunc(clients, fixtures.namespace) - assert.NoError(t, err) - - items := tt.getItemsFunc(clients, fixtures.namespace) - assert.Equal(t, tt.expectedCount, len(items)) - }) - } -} - -func TestGetAnnotations(t *testing.T) { - testAnnotations := map[string]string{"version": "1"} - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) map[string]string - }{ - {"Deployment", &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDeploymentAnnotations}, - {"CronJob", &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetCronJobAnnotations}, - {"Job", &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetJobAnnotations}, - {"DaemonSet", &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDaemonSetAnnotations}, - {"StatefulSet", &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetStatefulSetAnnotations}, - {"Rollout", &argorolloutv1alpha1.Rollout{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, 
callbacks.GetRolloutAnnotations}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, testAnnotations, tt.getFunc(tt.resource)) - }) - } -} - -func TestGetPodAnnotations(t *testing.T) { - testAnnotations := map[string]string{"version": "1"} - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) map[string]string - }{ - {"Deployment", createResourceWithPodAnnotations(&appsv1.Deployment{}, testAnnotations), callbacks.GetDeploymentPodAnnotations}, - {"CronJob", createResourceWithPodAnnotations(&batchv1.CronJob{}, testAnnotations), callbacks.GetCronJobPodAnnotations}, - {"Job", createResourceWithPodAnnotations(&batchv1.Job{}, testAnnotations), callbacks.GetJobPodAnnotations}, - {"DaemonSet", createResourceWithPodAnnotations(&appsv1.DaemonSet{}, testAnnotations), callbacks.GetDaemonSetPodAnnotations}, - {"StatefulSet", createResourceWithPodAnnotations(&appsv1.StatefulSet{}, testAnnotations), callbacks.GetStatefulSetPodAnnotations}, - {"Rollout", createResourceWithPodAnnotations(&argorolloutv1alpha1.Rollout{}, testAnnotations), callbacks.GetRolloutPodAnnotations}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, testAnnotations, tt.getFunc(tt.resource)) - }) - } -} - -func TestGetContainers(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) []v1.Container - }{ - {"Deployment", createResourceWithContainers(&appsv1.Deployment{}, fixtures.defaultContainers), callbacks.GetDeploymentContainers}, - {"DaemonSet", createResourceWithContainers(&appsv1.DaemonSet{}, fixtures.defaultContainers), callbacks.GetDaemonSetContainers}, - {"StatefulSet", createResourceWithContainers(&appsv1.StatefulSet{}, fixtures.defaultContainers), callbacks.GetStatefulSetContainers}, - {"CronJob", createResourceWithContainers(&batchv1.CronJob{}, fixtures.defaultContainers), 
callbacks.GetCronJobContainers}, - {"Job", createResourceWithContainers(&batchv1.Job{}, fixtures.defaultContainers), callbacks.GetJobContainers}, - {"Rollout", createResourceWithContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultContainers), callbacks.GetRolloutContainers}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, fixtures.defaultContainers, tt.getFunc(tt.resource)) - }) - } -} - -func TestGetInitContainers(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) []v1.Container - }{ - {"Deployment", createResourceWithInitContainers(&appsv1.Deployment{}, fixtures.defaultInitContainers), callbacks.GetDeploymentInitContainers}, - {"DaemonSet", createResourceWithInitContainers(&appsv1.DaemonSet{}, fixtures.defaultInitContainers), callbacks.GetDaemonSetInitContainers}, - {"StatefulSet", createResourceWithInitContainers(&appsv1.StatefulSet{}, fixtures.defaultInitContainers), callbacks.GetStatefulSetInitContainers}, - {"CronJob", createResourceWithInitContainers(&batchv1.CronJob{}, fixtures.defaultInitContainers), callbacks.GetCronJobInitContainers}, - {"Job", createResourceWithInitContainers(&batchv1.Job{}, fixtures.defaultInitContainers), callbacks.GetJobInitContainers}, - {"Rollout", createResourceWithInitContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultInitContainers), callbacks.GetRolloutInitContainers}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, fixtures.defaultInitContainers, tt.getFunc(tt.resource)) - }) - } -} - -func TestUpdateResources(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - createFunc func(kube.Clients, string, string) (runtime.Object, error) - updateFunc func(kube.Clients, string, runtime.Object) error - deleteFunc func(kube.Clients, string, string) error - }{ - {"Deployment", createTestDeploymentWithAnnotations, 
callbacks.UpdateDeployment, deleteTestDeployment}, - {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet, deleteTestDaemonSet}, - {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet, deleteTestStatefulSet}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resource, err := tt.createFunc(clients, fixtures.namespace, "1") - assert.NoError(t, err) - - err = tt.updateFunc(clients, fixtures.namespace, resource) - assert.NoError(t, err) - - accessor, err := meta.Accessor(resource) - assert.NoError(t, err) - - err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName()) - assert.NoError(t, err) - }) - } -} - -func TestPatchResources(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - createFunc func(kube.Clients, string, string) (runtime.Object, error) - patchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error - deleteFunc func(kube.Clients, string, string) error - assertFunc func(err error) - }{ - {"Deployment", createTestDeploymentWithAnnotations, callbacks.PatchDeployment, deleteTestDeployment, func(err error) { - assert.NoError(t, err) - patchedResource, err := callbacks.GetDeploymentItem(clients, "test-deployment", fixtures.namespace) - assert.NoError(t, err) - assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).Annotations["test"]) - }}, - {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.PatchDaemonSet, deleteTestDaemonSet, func(err error) { - assert.NoError(t, err) - patchedResource, err := callbacks.GetDaemonSetItem(clients, "test-daemonset", fixtures.namespace) - assert.NoError(t, err) - assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).Annotations["test"]) - }}, - {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.PatchStatefulSet, deleteTestStatefulSet, func(err error) { - assert.NoError(t, err) - patchedResource, err := callbacks.GetStatefulSetItem(clients, 
"test-statefulset", fixtures.namespace) - assert.NoError(t, err) - assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).Annotations["test"]) - }}, - {"CronJob", createTestCronJobWithAnnotations, callbacks.PatchCronJob, deleteTestCronJob, func(err error) { - assert.EqualError(t, err, "not supported patching: CronJob") - }}, - {"Job", createTestJobWithAnnotations, callbacks.PatchJob, deleteTestJob, func(err error) { - assert.EqualError(t, err, "not supported patching: Job") - }}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resource, err := tt.createFunc(clients, fixtures.namespace, "1") - assert.NoError(t, err) - - err = tt.patchFunc(clients, fixtures.namespace, resource, patchtypes.StrategicMergePatchType, []byte(`{"metadata":{"annotations":{"test":"test"}}}`)) - tt.assertFunc(err) - - accessor, err := meta.Accessor(resource) - assert.NoError(t, err) - - err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName()) - assert.NoError(t, err) - }) - } -} - -func TestCreateJobFromCronjob(t *testing.T) { - fixtures := newTestFixtures() - - runtimeObj, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1") - assert.NoError(t, err) - - cronJob := runtimeObj.(*batchv1.CronJob) - err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob) - assert.NoError(t, err) - - jobList, err := clients.KubernetesClient.BatchV1().Jobs(fixtures.namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - - ownerFound := false - for _, job := range jobList.Items { - if isControllerOwner("CronJob", cronJob.Name, job.OwnerReferences) { - ownerFound = true - break - } - } - assert.Truef(t, ownerFound, "Missing CronJob owner reference") - - err = deleteTestCronJob(clients, fixtures.namespace, cronJob.Name) - assert.NoError(t, err) -} - -func TestReCreateJobFromJob(t *testing.T) { - fixtures := newTestFixtures() - - job, err := createTestJobWithAnnotations(clients, fixtures.namespace, "1") - 
assert.NoError(t, err) - - err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job)) - assert.NoError(t, err) - - err = deleteTestJob(clients, fixtures.namespace, "test-job") - assert.NoError(t, err) -} - -func TestGetVolumes(t *testing.T) { - fixtures := newTestFixtures() - - tests := []struct { - name string - resource runtime.Object - getFunc func(runtime.Object) []v1.Volume - }{ - {"Deployment", createResourceWithVolumes(&appsv1.Deployment{}, fixtures.defaultVolumes), callbacks.GetDeploymentVolumes}, - {"CronJob", createResourceWithVolumes(&batchv1.CronJob{}, fixtures.defaultVolumes), callbacks.GetCronJobVolumes}, - {"Job", createResourceWithVolumes(&batchv1.Job{}, fixtures.defaultVolumes), callbacks.GetJobVolumes}, - {"DaemonSet", createResourceWithVolumes(&appsv1.DaemonSet{}, fixtures.defaultVolumes), callbacks.GetDaemonSetVolumes}, - {"StatefulSet", createResourceWithVolumes(&appsv1.StatefulSet{}, fixtures.defaultVolumes), callbacks.GetStatefulSetVolumes}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, fixtures.defaultVolumes, tt.getFunc(tt.resource)) - }) - } -} - -func TesGetPatchTemplateAnnotation(t *testing.T) { - templates := callbacks.GetPatchTemplates() - assert.NotEmpty(t, templates.AnnotationTemplate) - assert.Equal(t, 2, strings.Count(templates.AnnotationTemplate, "%s")) -} - -func TestGetPatchTemplateEnvVar(t *testing.T) { - templates := callbacks.GetPatchTemplates() - assert.NotEmpty(t, templates.EnvVarTemplate) - assert.Equal(t, 3, strings.Count(templates.EnvVarTemplate, "%s")) -} - -func TestGetPatchDeleteTemplateEnvVar(t *testing.T) { - templates := callbacks.GetPatchTemplates() - assert.NotEmpty(t, templates.DeleteEnvVarTemplate) - assert.Equal(t, 2, strings.Count(templates.DeleteEnvVarTemplate, "%d")) -} - -// Helper functions - -func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool { - return rollout.Spec.RestartAt == nil -} - -func watchRollout(name, 
namespace string) chan interface{} { - timeOut := int64(1) - modifiedChan := make(chan interface{}) - watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), metav1.ListOptions{TimeoutSeconds: &timeOut}) - go watchModified(watcher, name, modifiedChan) - return modifiedChan -} - -func watchModified(watcher watch.Interface, name string, modifiedChan chan interface{}) { - for event := range watcher.ResultChan() { - item := event.Object.(*argorolloutv1alpha1.Rollout) - if item.Name == name { - switch event.Type { - case watch.Modified: - modifiedChan <- nil - } - return - } - } -} - -func createTestDeployments(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateDeployment(clients.KubernetesClient, fmt.Sprintf("test-deployment-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func deleteTestDeployments(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteDeployment(clients.KubernetesClient, namespace, fmt.Sprintf("test-deployment-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createTestCronJobs(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func deleteTestCronJobs(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteCronJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-cron-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createTestJobs(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func 
deleteTestJobs(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-job-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createTestDaemonSets(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func deleteTestDaemonSets(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-daemonset-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createTestStatefulSets(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - _, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false) - if err != nil { - return err - } - } - return nil -} - -func deleteTestStatefulSets(clients kube.Clients, namespace string) error { - for i := 1; i <= 2; i++ { - err := testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-statefulset-%d", i)) - if err != nil { - return err - } - } - return nil -} - -func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object { - switch v := obj.(type) { - case *appsv1.Deployment: - v.Spec.Template.Annotations = annotations - case *appsv1.DaemonSet: - v.Spec.Template.Annotations = annotations - case *appsv1.StatefulSet: - v.Spec.Template.Annotations = annotations - case *batchv1.CronJob: - v.Spec.JobTemplate.Spec.Template.Annotations = annotations - case *batchv1.Job: - v.Spec.Template.Annotations = annotations - case *argorolloutv1alpha1.Rollout: - v.Spec.Template.Annotations = annotations - } - return obj -} - -func createResourceWithContainers(obj 
runtime.Object, containers []v1.Container) runtime.Object { - switch v := obj.(type) { - case *appsv1.Deployment: - v.Spec.Template.Spec.Containers = containers - case *appsv1.DaemonSet: - v.Spec.Template.Spec.Containers = containers - case *appsv1.StatefulSet: - v.Spec.Template.Spec.Containers = containers - case *batchv1.CronJob: - v.Spec.JobTemplate.Spec.Template.Spec.Containers = containers - case *batchv1.Job: - v.Spec.Template.Spec.Containers = containers - case *argorolloutv1alpha1.Rollout: - v.Spec.Template.Spec.Containers = containers - } - return obj -} - -func createResourceWithInitContainers(obj runtime.Object, initContainers []v1.Container) runtime.Object { - switch v := obj.(type) { - case *appsv1.Deployment: - v.Spec.Template.Spec.InitContainers = initContainers - case *appsv1.DaemonSet: - v.Spec.Template.Spec.InitContainers = initContainers - case *appsv1.StatefulSet: - v.Spec.Template.Spec.InitContainers = initContainers - case *batchv1.CronJob: - v.Spec.JobTemplate.Spec.Template.Spec.InitContainers = initContainers - case *batchv1.Job: - v.Spec.Template.Spec.InitContainers = initContainers - case *argorolloutv1alpha1.Rollout: - v.Spec.Template.Spec.InitContainers = initContainers - } - return obj -} - -func createResourceWithVolumes(obj runtime.Object, volumes []v1.Volume) runtime.Object { - switch v := obj.(type) { - case *appsv1.Deployment: - v.Spec.Template.Spec.Volumes = volumes - case *batchv1.CronJob: - v.Spec.JobTemplate.Spec.Template.Spec.Volumes = volumes - case *batchv1.Job: - v.Spec.Template.Spec.Volumes = volumes - case *appsv1.DaemonSet: - v.Spec.Template.Spec.Volumes = volumes - case *appsv1.StatefulSet: - v.Spec.Template.Spec.Volumes = volumes - } - return obj -} - -func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: namespace, - Annotations: 
map[string]string{"version": version}, - }, - } - return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) -} - -func deleteTestDeployment(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - daemonSet := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-daemonset", - Namespace: namespace, - Annotations: map[string]string{"version": version}, - }, - } - return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{}) -} - -func deleteTestDaemonSet(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - statefulSet := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-statefulset", - Namespace: namespace, - Annotations: map[string]string{"version": version}, - }, - } - return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{}) -} - -func deleteTestStatefulSet(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - cronJob := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cronjob", - Namespace: namespace, - Annotations: map[string]string{"version": version}, - }, - } - return 
clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{}) -} - -func deleteTestCronJob(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.BatchV1().CronJobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-job", - Namespace: namespace, - Annotations: map[string]string{"version": version}, - }, - } - return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{}) -} - -func deleteTestJob(clients kube.Clients, namespace, name string) error { - return clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} - -func isControllerOwner(kind, name string, ownerRefs []metav1.OwnerReference) bool { - for _, ownerRef := range ownerRefs { - if *ownerRef.Controller && ownerRef.Kind == kind && ownerRef.Name == name { - return true - } - } - return false -} diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go deleted file mode 100644 index 3b86bd20b..000000000 --- a/internal/pkg/cmd/reloader.go +++ /dev/null @@ -1,265 +0,0 @@ -package cmd - -import ( - "context" - "fmt" - "net/http" - _ "net/http/pprof" - "os" - - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/leadership" - - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" -) - 
-// cfg holds the configuration for this reloader instance. -// It is populated by flag parsing and used throughout the application. -var cfg *config.Config - -// NewReloaderCommand starts the reloader controller -func NewReloaderCommand() *cobra.Command { - // Create config with defaults - cfg = config.NewDefault() - - cmd := &cobra.Command{ - Use: "reloader", - Short: "A watcher for your Kubernetes cluster", - PreRunE: validateFlags, - Run: startReloader, - } - - // Bind flags to the new config package - config.BindFlags(cmd.PersistentFlags(), cfg) - - return cmd -} - -func validateFlags(*cobra.Command, []string) error { - // Apply post-parse flag processing (converts string flags to proper types) - if err := config.ApplyFlags(cfg); err != nil { - return fmt.Errorf("applying flags: %w", err) - } - - // Validate the configuration - if err := cfg.Validate(); err != nil { - return fmt.Errorf("validating config: %w", err) - } - - // Sync new config to old options package for backward compatibility - // This bridge allows existing code to keep working during migration - syncConfigToOptions(cfg) - - // Validate that HA options are correct - if cfg.EnableHA { - if err := validateHAEnvs(); err != nil { - return err - } - } - - return nil -} - -// syncConfigToOptions bridges the new Config struct to the old options package. -// This allows existing code to continue working during the migration period. -// TODO: Remove this once all code is migrated to use Config directly. 
-func syncConfigToOptions(cfg *config.Config) { - options.AutoReloadAll = cfg.AutoReloadAll - options.ConfigmapUpdateOnChangeAnnotation = cfg.Annotations.ConfigmapReload - options.SecretUpdateOnChangeAnnotation = cfg.Annotations.SecretReload - options.ReloaderAutoAnnotation = cfg.Annotations.Auto - options.ConfigmapReloaderAutoAnnotation = cfg.Annotations.ConfigmapAuto - options.SecretReloaderAutoAnnotation = cfg.Annotations.SecretAuto - options.IgnoreResourceAnnotation = cfg.Annotations.Ignore - options.ConfigmapExcludeReloaderAnnotation = cfg.Annotations.ConfigmapExclude - options.SecretExcludeReloaderAnnotation = cfg.Annotations.SecretExclude - options.AutoSearchAnnotation = cfg.Annotations.Search - options.SearchMatchAnnotation = cfg.Annotations.Match - options.RolloutStrategyAnnotation = cfg.Annotations.RolloutStrategy - options.PauseDeploymentAnnotation = cfg.Annotations.PausePeriod - options.PauseDeploymentTimeAnnotation = cfg.Annotations.PausedAt - options.LogFormat = cfg.LogFormat - options.LogLevel = cfg.LogLevel - options.WebhookUrl = cfg.WebhookURL - options.ResourcesToIgnore = cfg.IgnoredResources - options.WorkloadTypesToIgnore = cfg.IgnoredWorkloads - options.NamespacesToIgnore = cfg.IgnoredNamespaces - options.NamespaceSelectors = cfg.NamespaceSelectorStrings - options.ResourceSelectors = cfg.ResourceSelectorStrings - options.EnableHA = cfg.EnableHA - options.SyncAfterRestart = cfg.SyncAfterRestart - options.EnablePProf = cfg.EnablePProf - options.PProfAddr = cfg.PProfAddr - - // Convert ReloadStrategy to string for old options - options.ReloadStrategy = string(cfg.ReloadStrategy) - - // Convert bool flags to string for old options (IsArgoRollouts, ReloadOnCreate, ReloadOnDelete) - if cfg.ArgoRolloutsEnabled { - options.IsArgoRollouts = "true" - } else { - options.IsArgoRollouts = "false" - } - if cfg.ReloadOnCreate { - options.ReloadOnCreate = "true" - } else { - options.ReloadOnCreate = "false" - } - if cfg.ReloadOnDelete { - 
options.ReloadOnDelete = "true" - } else { - options.ReloadOnDelete = "false" - } -} - -func configureLogging(logFormat, logLevel string) error { - switch logFormat { - case "json": - logrus.SetFormatter(&logrus.JSONFormatter{}) - default: - // just let the library use default on empty string. - if logFormat != "" { - return fmt.Errorf("unsupported logging formatter: %q", logFormat) - } - } - // set log level - level, err := logrus.ParseLevel(logLevel) - if err != nil { - return err - } - logrus.SetLevel(level) - return nil -} - -func validateHAEnvs() error { - podName, podNamespace := getHAEnvs() - - if podName == "" { - return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv) - } - if podNamespace == "" { - return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv) - } - return nil -} - -func getHAEnvs() (string, string) { - podName := os.Getenv(constants.PodNameEnv) - podNamespace := os.Getenv(constants.PodNamespaceEnv) - - return podName, podNamespace -} - -func startReloader(cmd *cobra.Command, args []string) { - common.GetCommandLineOptions() - err := configureLogging(cfg.LogFormat, cfg.LogLevel) - if err != nil { - logrus.Warn(err) - } - - logrus.Info("Starting Reloader") - isGlobal := false - currentNamespace := os.Getenv("KUBERNETES_NAMESPACE") - if len(currentNamespace) == 0 { - currentNamespace = v1.NamespaceAll - isGlobal = true - logrus.Warnf("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces.") - } - - // create the clientset - clientset, err := kube.GetKubernetesClient() - if err != nil { - logrus.Fatal(err) - } - - // Use config's IgnoredResources (already validated and normalized to lowercase) - ignoredResourcesList := util.List(cfg.IgnoredResources) - - ignoredNamespacesList := cfg.IgnoredNamespaces - namespaceLabelSelector := "" - - if isGlobal { - namespaceLabelSelector, err = 
common.GetNamespaceLabelSelector(options.NamespaceSelectors) - if err != nil { - logrus.Fatal(err) - } - } - - resourceLabelSelector, err := common.GetResourceLabelSelector(options.ResourceSelectors) - if err != nil { - logrus.Fatal(err) - } - - if len(namespaceLabelSelector) > 0 { - logrus.Warnf("namespace-selector is set, will only detect changes in namespaces with these labels: %s.", namespaceLabelSelector) - } - - if len(resourceLabelSelector) > 0 { - logrus.Warnf("resource-label-selector is set, will only detect changes on resources with these labels: %s.", resourceLabelSelector) - } - - if cfg.WebhookURL != "" { - logrus.Warnf("webhook-url is set, will only send webhook, no resources will be reloaded") - } - - collectors := metrics.SetupPrometheusEndpoint() - - var controllers []*controller.Controller - for k := range kube.ResourceMap { - if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") { - continue - } - - c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, namespaceLabelSelector, resourceLabelSelector, collectors) - if err != nil { - logrus.Fatalf("%s", err) - } - - controllers = append(controllers, c) - - // If HA is enabled we only run the controller when we're the leader - if cfg.EnableHA { - continue - } - // Now let's start the controller - stop := make(chan struct{}) - defer close(stop) - logrus.Infof("Starting Controller to watch resource type: %s", k) - go c.Run(1, stop) - } - - // Run leadership election - if cfg.EnableHA { - podName, podNamespace := getHAEnvs() - lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers) - } - - common.PublishMetaInfoConfigmap(clientset) - - if cfg.EnablePProf { - go startPProfServer() - } - - leadership.SetupLivenessEndpoint() - 
logrus.Fatal(http.ListenAndServe(cfg.MetricsAddr, nil)) -} - -func startPProfServer() { - logrus.Infof("Starting pprof server on %s", cfg.PProfAddr) - if err := http.ListenAndServe(cfg.PProfAddr, nil); err != nil { - logrus.Errorf("Failed to start pprof server: %v", err) - } -} diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 9ce3e0cef..e7ec77f7a 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -1,5 +1,4 @@ // Package config provides configuration management for Reloader. -// It replaces the old global variables pattern with an immutable Config struct. package config import ( @@ -12,11 +11,7 @@ import ( type ReloadStrategy string const ( - // ReloadStrategyEnvVars adds/updates environment variables to trigger restart. - // This is the default and recommended strategy for GitOps compatibility. - ReloadStrategyEnvVars ReloadStrategy = "env-vars" - - // ReloadStrategyAnnotations adds/updates pod template annotations to trigger restart. + ReloadStrategyEnvVars ReloadStrategy = "env-vars" ReloadStrategyAnnotations ReloadStrategy = "annotations" ) @@ -24,143 +19,81 @@ const ( type ArgoRolloutStrategy string const ( - // ArgoRolloutStrategyRestart uses the restart mechanism for Argo Rollouts. ArgoRolloutStrategyRestart ArgoRolloutStrategy = "restart" - - // ArgoRolloutStrategyRollout uses the rollout mechanism for Argo Rollouts. ArgoRolloutStrategyRollout ArgoRolloutStrategy = "rollout" ) // Config holds all configuration for Reloader. -// This struct is immutable after creation - all fields should be set during initialization. type Config struct { - // Annotations holds customizable annotation keys. - Annotations AnnotationConfig - - // AutoReloadAll enables automatic reload for all resources without requiring annotations. - AutoReloadAll bool - - // ReloadStrategy determines how workload restarts are triggered. 
- ReloadStrategy ReloadStrategy - - // ArgoRolloutsEnabled enables support for Argo Rollouts workload type. + Annotations AnnotationConfig + AutoReloadAll bool + ReloadStrategy ReloadStrategy ArgoRolloutsEnabled bool - - // ArgoRolloutStrategy determines how Argo Rollouts are updated. ArgoRolloutStrategy ArgoRolloutStrategy - - // ReloadOnCreate enables watching for resource creation events. - ReloadOnCreate bool - - // ReloadOnDelete enables watching for resource deletion events. - ReloadOnDelete bool - - // SyncAfterRestart triggers a sync operation after a restart is performed. - SyncAfterRestart bool - - // EnableHA enables high-availability mode with leader election. - EnableHA bool - - // WebhookURL is an optional URL to send notifications to instead of triggering reload. - WebhookURL string - - // Filtering configuration - IgnoredResources []string // ConfigMaps/Secrets to ignore (case-insensitive) - IgnoredWorkloads []string // Workload types to ignore - IgnoredNamespaces []string // Namespaces to ignore - NamespaceSelectors []labels.Selector - ResourceSelectors []labels.Selector - - // Raw selector strings (for backward compatibility with old code) + ReloadOnCreate bool + ReloadOnDelete bool + SyncAfterRestart bool + EnableHA bool + WebhookURL string + + IgnoredResources []string + IgnoredWorkloads []string + IgnoredNamespaces []string + NamespaceSelectors []labels.Selector + ResourceSelectors []labels.Selector NamespaceSelectorStrings []string ResourceSelectorStrings []string - // Logging configuration - LogFormat string // "json" or "" for default - LogLevel string // trace, debug, info, warning, error, fatal, panic - - // Metrics configuration - MetricsAddr string // Address to serve metrics on (default :9090) - - // Health probe configuration - HealthAddr string // Address to serve health probes on (default :8081) - - // Profiling configuration + LogFormat string + LogLevel string + MetricsAddr string + HealthAddr string EnablePProf bool PProfAddr 
string - // Alerting configuration - Alerting AlertingConfig - - // Leader election configuration - LeaderElection LeaderElectionConfig - - // WatchedNamespace limits watching to a specific namespace (empty = all namespaces) + Alerting AlertingConfig + LeaderElection LeaderElectionConfig WatchedNamespace string - - // SyncPeriod is the period for re-syncing watched resources - SyncPeriod time.Duration + SyncPeriod time.Duration } -// AnnotationConfig holds all customizable annotation keys. +// AnnotationConfig holds customizable annotation keys. type AnnotationConfig struct { - // Prefix is the base prefix for all annotations (default: reloader.stakater.com) - Prefix string - - // Auto annotations - Auto string // reloader.stakater.com/auto - ConfigmapAuto string // configmap.reloader.stakater.com/auto - SecretAuto string // secret.reloader.stakater.com/auto - - // Reload annotations (explicit resource names) - ConfigmapReload string // configmap.reloader.stakater.com/reload - SecretReload string // secret.reloader.stakater.com/reload - - // Exclude annotations - ConfigmapExclude string // configmaps.exclude.reloader.stakater.com/reload - SecretExclude string // secrets.exclude.reloader.stakater.com/reload - - // Ignore annotation - Ignore string // reloader.stakater.com/ignore - - // Search/Match annotations - Search string // reloader.stakater.com/search - Match string // reloader.stakater.com/match - - // Rollout strategy annotation - RolloutStrategy string // reloader.stakater.com/rollout-strategy - - // Pause annotations - PausePeriod string // deployment.reloader.stakater.com/pause-period - PausedAt string // deployment.reloader.stakater.com/paused-at - - // Last reloaded from annotation (set by Reloader) - LastReloadedFrom string // reloader.stakater.com/last-reloaded-from + Prefix string + Auto string + ConfigmapAuto string + SecretAuto string + ConfigmapReload string + SecretReload string + ConfigmapExclude string + SecretExclude string + Ignore string + 
Search string + Match string + RolloutStrategy string + PausePeriod string + PausedAt string + LastReloadedFrom string } // AlertingConfig holds configuration for alerting integrations. type AlertingConfig struct { - // Enabled enables alerting notifications on reload events. - Enabled bool - - // WebhookURL is the webhook URL to send alerts to. + Enabled bool WebhookURL string - - // Sink determines the alert format: "slack", "teams", "gchat", or "raw" (default). - Sink string - - // Proxy is an optional HTTP proxy for webhook requests. - Proxy string - - // Additional is optional context prepended to alert messages. + Sink string + Proxy string Additional string } // LeaderElectionConfig holds configuration for leader election. type LeaderElectionConfig struct { - LockName string - Namespace string - Identity string + LockName string + Namespace string + Identity string + LeaseDuration time.Duration + RenewDeadline time.Duration + RetryPeriod time.Duration + ReleaseOnCancel bool } // NewDefault creates a Config with default values. @@ -189,7 +122,11 @@ func NewDefault() *Config { PProfAddr: ":6060", Alerting: AlertingConfig{}, LeaderElection: LeaderElectionConfig{ - LockName: "stakater-reloader-lock", + LockName: "reloader-leader-election", + LeaseDuration: 15 * time.Second, + RenewDeadline: 10 * time.Second, + RetryPeriod: 2 * time.Second, + ReleaseOnCancel: true, }, WatchedNamespace: "", SyncPeriod: 0, @@ -247,7 +184,6 @@ func (c *Config) IsNamespaceIgnored(namespace string) bool { return false } -// equalFold is a simple case-insensitive string comparison. 
func equalFold(s, t string) bool { if len(s) != len(t) { return false @@ -255,7 +191,6 @@ func equalFold(s, t string) bool { for i := 0; i < len(s); i++ { c1, c2 := s[i], t[i] if c1 != c2 { - // Convert to lowercase for comparison if 'A' <= c1 && c1 <= 'Z' { c1 += 'a' - 'A' } diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index ef423082c..f3fe49552 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -46,9 +46,21 @@ func BindFlags(fs *pflag.FlagSet, cfg *Config) { fs.BoolVar(&cfg.SyncAfterRestart, "sync-after-restart", cfg.SyncAfterRestart, "Trigger sync operation after restart") - // High availability + // High availability / Leader election fs.BoolVar(&cfg.EnableHA, "enable-ha", cfg.EnableHA, "Enable high-availability mode with leader election") + fs.StringVar(&cfg.LeaderElection.LockName, "leader-election-id", cfg.LeaderElection.LockName, + "Name of the lease resource for leader election") + fs.StringVar(&cfg.LeaderElection.Namespace, "leader-election-namespace", cfg.LeaderElection.Namespace, + "Namespace for the leader election lease (defaults to pod namespace)") + fs.DurationVar(&cfg.LeaderElection.LeaseDuration, "leader-election-lease-duration", cfg.LeaderElection.LeaseDuration, + "Duration that non-leader candidates will wait before attempting to acquire leadership") + fs.DurationVar(&cfg.LeaderElection.RenewDeadline, "leader-election-renew-deadline", cfg.LeaderElection.RenewDeadline, + "Duration that the acting leader will retry refreshing leadership before giving up") + fs.DurationVar(&cfg.LeaderElection.RetryPeriod, "leader-election-retry-period", cfg.LeaderElection.RetryPeriod, + "Duration between leader election retries") + fs.BoolVar(&cfg.LeaderElection.ReleaseOnCancel, "leader-election-release-on-cancel", cfg.LeaderElection.ReleaseOnCancel, + "Release the leader lock when the manager is stopped") // Webhook fs.StringVar(&cfg.WebhookURL, "webhook-url", cfg.WebhookURL, diff --git 
a/internal/pkg/constants/constants.go b/internal/pkg/constants/constants.go index 18d1cc759..c1dc81a03 100644 --- a/internal/pkg/constants/constants.go +++ b/internal/pkg/constants/constants.go @@ -1,32 +1,7 @@ package constants +// Environment variable names for pod identity in HA mode. const ( - // DefaultHttpListenAddr is the default listening address for global http server - DefaultHttpListenAddr = ":9090" - - // ConfigmapEnvVarPostfix is a postfix for configmap envVar - ConfigmapEnvVarPostfix = "CONFIGMAP" - // SecretEnvVarPostfix is a postfix for secret envVar - SecretEnvVarPostfix = "SECRET" - // EnvVarPrefix is a Prefix for environment variable - EnvVarPrefix = "STAKATER_" - - // ReloaderAnnotationPrefix is a Prefix for all reloader annotations - ReloaderAnnotationPrefix = "reloader.stakater.com" - // LastReloadedFromAnnotation is an annotation used to describe the last resource that triggered a reload - LastReloadedFromAnnotation = "last-reloaded-from" - - // ReloadStrategyFlag The reload strategy flag name - ReloadStrategyFlag = "reload-strategy" - // EnvVarsReloadStrategy instructs Reloader to add container environment variables to facilitate a restart - EnvVarsReloadStrategy = "env-vars" - // AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart - AnnotationsReloadStrategy = "annotations" -) - -// Leadership election related consts -const ( - LockName string = "stakater-reloader-lock" PodNameEnv string = "POD_NAME" PodNamespaceEnv string = "POD_NAMESPACE" ) diff --git a/internal/pkg/constants/enums.go b/internal/pkg/constants/enums.go deleted file mode 100644 index 43fc60352..000000000 --- a/internal/pkg/constants/enums.go +++ /dev/null @@ -1,15 +0,0 @@ -package constants - -// Result is a status for deployment update -type Result int - -const ( - // Updated is returned when environment variable is created/updated - Updated Result = 1 + iota - // NotUpdated is returned when environment variable is found but 
had value equals to the new value - NotUpdated - // NoEnvVarFound is returned when no environment variable is found - NoEnvVarFound - // NoContainerFound is returned when no environment variable is found - NoContainerFound -) diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index cb78a7ba9..447348c00 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -6,6 +6,7 @@ import ( "time" "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/events" "github.com/stakater/Reloader/internal/pkg/metrics" @@ -33,6 +34,7 @@ type ConfigMapReconciler struct { Collectors *metrics.Collectors EventRecorder *events.Recorder WebhookClient *webhook.Client + Alerter alerting.Alerter initialized bool initOnce sync.Once @@ -131,6 +133,19 @@ func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) + + // Send alert notification + if err := r.Alerter.Send(ctx, alerting.AlertMessage{ + WorkloadKind: string(decision.Workload.Kind()), + WorkloadName: decision.Workload.GetName(), + WorkloadNamespace: decision.Workload.GetNamespace(), + ResourceKind: "ConfigMap", + ResourceName: cm.Name, + ResourceNamespace: cm.Namespace, + Timestamp: time.Now(), + }); err != nil { + log.Error(err, "failed to send alert") + } } } @@ -202,6 +217,19 @@ func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request if updated { r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "ConfigMap", req.Name) r.recordMetrics(true, req.Namespace) + + // Send alert notification + if err := r.Alerter.Send(ctx, alerting.AlertMessage{ + WorkloadKind: string(decision.Workload.Kind()), + WorkloadName: decision.Workload.GetName(), + WorkloadNamespace: 
decision.Workload.GetNamespace(), + ResourceKind: "ConfigMap", + ResourceName: req.Name, + ResourceNamespace: req.Namespace, + Timestamp: time.Now(), + }); err != nil { + log.Error(err, "failed to send alert") + } } } diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go deleted file mode 100644 index 15b2e0f17..000000000 --- a/internal/pkg/controller/controller.go +++ /dev/null @@ -1,282 +0,0 @@ -package controller - -import ( - "fmt" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/handler" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/kube" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - "k8s.io/kubectl/pkg/scheme" - "k8s.io/utils/strings/slices" -) - -// Controller for checking events -type Controller struct { - client kubernetes.Interface - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace string - resource string - ignoredNamespaces util.List - collectors metrics.Collectors - recorder record.EventRecorder - namespaceSelector string - resourceSelector string -} - -// controllerInitialized flag determines whether controlled is being initialized -var secretControllerInitialized bool = false -var configmapControllerInitialized bool = false -var selectedNamespacesCache []string - -// NewController for initializing a Controller -func NewController( - client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, 
namespaceLabelSelector string, resourceLabelSelector string, collectors metrics.Collectors) (*Controller, error) { - - if options.SyncAfterRestart { - secretControllerInitialized = true - configmapControllerInitialized = true - } - - c := Controller{ - client: client, - namespace: namespace, - ignoredNamespaces: ignoredNamespaces, - namespaceSelector: namespaceLabelSelector, - resourceSelector: resourceLabelSelector, - resource: resource, - } - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{ - Interface: client.CoreV1().Events(""), - }) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)}) - - queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any]()) - - optionsModifier := func(options *metav1.ListOptions) { - if resource == "namespaces" { - options.LabelSelector = c.namespaceSelector - } else if len(c.resourceSelector) > 0 { - options.LabelSelector = c.resourceSelector - } else { - options.FieldSelector = fields.Everything().String() - } - } - - listWatcher := cache.NewFilteredListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, optionsModifier) - - _, informer := cache.NewInformerWithOptions(cache.InformerOptions{ - ListerWatcher: listWatcher, - ObjectType: kube.ResourceMap[resource], - ResyncPeriod: 0, - Handler: cache.ResourceEventHandlerFuncs{ - AddFunc: c.Add, - UpdateFunc: c.Update, - DeleteFunc: c.Delete, - }, - Indexers: cache.Indexers{}, - }) - c.informer = informer - c.queue = queue - c.collectors = collectors - c.recorder = recorder - - logrus.Infof("created controller for: %s", resource) - return &c, nil -} - -// Add function to add a new object to the queue in case of creating a resource -func (c *Controller) Add(obj interface{}) { - - switch object := obj.(type) { - case *v1.Namespace: - c.addSelectedNamespaceToCache(*object) - return - } - - if 
options.ReloadOnCreate == "true" { - if !c.resourceInIgnoredNamespace(obj) && c.resourceInSelectedNamespaces(obj) && secretControllerInitialized && configmapControllerInitialized { - c.queue.Add(handler.ResourceCreatedHandler{ - Resource: obj, - Collectors: c.collectors, - Recorder: c.recorder, - }) - } - } -} - -func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool { - switch object := raw.(type) { - case *v1.ConfigMap: - return c.ignoredNamespaces.Contains(object.Namespace) - case *v1.Secret: - return c.ignoredNamespaces.Contains(object.Namespace) - } - return false -} - -func (c *Controller) resourceInSelectedNamespaces(raw interface{}) bool { - if len(c.namespaceSelector) == 0 { - return true - } - - switch object := raw.(type) { - case *v1.ConfigMap: - if slices.Contains(selectedNamespacesCache, object.GetNamespace()) { - return true - } - case *v1.Secret: - if slices.Contains(selectedNamespacesCache, object.GetNamespace()) { - return true - } - } - return false -} - -func (c *Controller) addSelectedNamespaceToCache(namespace v1.Namespace) { - selectedNamespacesCache = append(selectedNamespacesCache, namespace.GetName()) - logrus.Infof("added namespace to be watched: %s", namespace.GetName()) -} - -func (c *Controller) removeSelectedNamespaceFromCache(namespace v1.Namespace) { - for i, v := range selectedNamespacesCache { - if v == namespace.GetName() { - selectedNamespacesCache = append(selectedNamespacesCache[:i], selectedNamespacesCache[i+1:]...) 
- logrus.Infof("removed namespace from watch: %s", namespace.GetName()) - return - } - } -} - -// Update function to add an old object and a new object to the queue in case of updating a resource -func (c *Controller) Update(old interface{}, new interface{}) { - switch new.(type) { - case *v1.Namespace: - return - } - - if !c.resourceInIgnoredNamespace(new) && c.resourceInSelectedNamespaces(new) { - c.queue.Add(handler.ResourceUpdatedHandler{ - Resource: new, - OldResource: old, - Collectors: c.collectors, - Recorder: c.recorder, - }) - } -} - -// Delete function to add an object to the queue in case of deleting a resource -func (c *Controller) Delete(old interface{}) { - - if options.ReloadOnDelete == "true" { - if !c.resourceInIgnoredNamespace(old) && c.resourceInSelectedNamespaces(old) && secretControllerInitialized && configmapControllerInitialized { - c.queue.Add(handler.ResourceDeleteHandler{ - Resource: old, - Collectors: c.collectors, - Recorder: c.recorder, - }) - } - } - - switch object := old.(type) { - case *v1.Namespace: - c.removeSelectedNamespaceFromCache(*object) - return - } -} - -// Run function for controller which handles the queue -func (c *Controller) Run(threadiness int, stopCh chan struct{}) { - defer runtime.HandleCrash() - - // Let the workers stop when we are done - defer c.queue.ShutDown() - - go c.informer.Run(stopCh) - - // Wait for all involved caches to be synced, before processing items from the queue is started - if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) { - runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) - return - } - - for i := 0; i < threadiness; i++ { - go wait.Until(c.runWorker, time.Second, stopCh) - } - - <-stopCh - logrus.Infof("Stopping Controller") -} - -func (c *Controller) runWorker() { - // At this point the controller is fully initialized and we can start processing the resources - if c.resource == string(v1.ResourceSecrets) { - secretControllerInitialized = true - } else if 
c.resource == string(v1.ResourceConfigMaps) { - configmapControllerInitialized = true - } - - for c.processNextItem() { - } -} - -func (c *Controller) processNextItem() bool { - // Wait until there is a new item in the working queue - resourceHandler, quit := c.queue.Get() - if quit { - return false - } - // Tell the queue that we are done with processing this key. This unblocks the key for other workers - // This allows safe parallel processing because two events with the same key are never processed in - // parallel. - defer c.queue.Done(resourceHandler) - - // Invoke the method containing the business logic - err := resourceHandler.(handler.ResourceHandler).Handle() - // Handle the error if something went wrong during the execution of the business logic - c.handleErr(err, resourceHandler) - return true -} - -// handleErr checks if an error happened and makes sure we will retry later. -func (c *Controller) handleErr(err error, key interface{}) { - if err == nil { - // Forget about the #AddRateLimited history of the key on every successful synchronization. - // This ensures that future processing of updates for this key is not delayed because of - // an outdated error history. - c.queue.Forget(key) - return - } - - // This controller retries 5 times if something goes wrong. After that, it stops trying. - if c.queue.NumRequeues(key) < 5 { - logrus.Errorf("Error syncing events: %v", err) - - // Re-enqueue the key rate limited. Based on the rate limiter on the - // queue and the re-enqueue history, the key will be processed later again. 
- c.queue.AddRateLimited(key) - return - } - - c.queue.Forget(key) - // Report to an external entity that, even after several retries, we could not successfully process this key - runtime.HandleError(err) - logrus.Errorf("Dropping key out of the queue: %v", err) - logrus.Debugf("Dropping the key %q out of the queue: %v", key, err) -} diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go deleted file mode 100644 index 58580c795..000000000 --- a/internal/pkg/controller/controller_test.go +++ /dev/null @@ -1,2368 +0,0 @@ -//go:build integration -// +build integration - -package controller - -import ( - "context" - "os" - "testing" - "time" - - "github.com/stakater/Reloader/internal/pkg/constants" - - "github.com/stakater/Reloader/internal/pkg/metrics" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/handler" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" -) - -var ( - clients = kube.GetClients() - namespace = "test-reloader-" + testutil.RandSeq(5) - configmapNamePrefix = "testconfigmap-reloader" - secretNamePrefix = "testsecret-reloader" - data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - newData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" - collectors = metrics.NewCollectors() -) - -const ( - sleepDuration = 3 * time.Second -) - -func TestMain(m *testing.M) { - - testutil.CreateNamespace(namespace, clients.KubernetesClient) - - logrus.Infof("Creating controller") - for k := range kube.ResourceMap { - if k == 
"namespaces" { - continue - } - c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, "", "", collectors) - if err != nil { - logrus.Fatalf("%s", err) - } - - // Now let's start the controller - stop := make(chan struct{}) - defer close(stop) - go c.Run(1, stop) - } - time.Sleep(sleepDuration) - - logrus.Infof("Running Testcases") - retCode := m.Run() - - testutil.DeleteNamespace(namespace, clients.KubernetesClient) - - os.Exit(retCode) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - 
t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, false) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) 
- if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon creating the configmap -func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment test case") - - // Creating configmap - configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Deleting configmap for first time - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - time.Sleep(sleepDuration) - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com") - if err != nil { - t.Errorf("Error while creating the configmap second time %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := 
testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, 
namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on deployment and create pod annotation var upon updating the labels configmap -func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - 
updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "test", "www.google.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon creating the secret -func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment test case") - - // Creating secret - secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil 
{ - t.Errorf("Error in deployment creation: %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) - - _, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - time.Sleep(sleepDuration) - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = 
testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - 
} - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a pod annotation upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err 
!= nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secret") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first 
time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying DaemonSet update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - 
updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Verifying DaemonSet update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // 
Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet 
creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - time.Sleep(sleepDuration) - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a pod annotation upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = 
testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if updated { - t.Errorf("DaemonSet should not be updated by changing label in secret") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - 
t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - 
if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - 
if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = 
testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating 
deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, false) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon creating the configmap -func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case") - - // Creating configmap - configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, 
configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Deleting configmap for first time - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - time.Sleep(sleepDuration) - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com") - if err != nil { - t.Errorf("Error while creating the configmap second time %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the configmap -func 
TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, 
namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on deployment and create env var upon updating the labels configmap -func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "test", "www.google.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting 
configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon creating the secret -func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case") - - // Creating secret - secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) - - _, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - time.Sleep(sleepDuration) - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not 
updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - 
logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, 
secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secret") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - 
//Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying DaemonSet update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting 
configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Verifying DaemonSet update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs) - if !updated { - 
t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, 
namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - time.Sleep(sleepDuration) - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = 
testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if updated { - t.Errorf("DaemonSet should not be updated by changing label in secret") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while 
deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - 
logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, 
constants.ConfigmapEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not 
updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs) - if 
!updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := 
testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -func TestController_resourceInIgnoredNamespace(t *testing.T) { - type fields struct { - client kubernetes.Interface - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace string - ignoredNamespaces util.List - } - type args struct { - raw interface{} - } - tests := []struct { - name string - fields fields - args args - want bool - }{ - { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnTrue", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetConfigmap("system", "testcm", "test"), - }, - want: true, - }, - { - name: "TestSecretResourceInIgnoredNamespaceShouldReturnTrue", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetSecret("system", "testsecret", "test"), - }, - want: true, - }, - { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetConfigmap("some-other-namespace", "testcm", "test"), - }, - want: false, - }, - { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetSecret("some-other-namespace", "testsecret", "test"), - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t 
*testing.T) { - c := &Controller{ - client: tt.fields.client, - indexer: tt.fields.indexer, - queue: tt.fields.queue, - informer: tt.fields.informer, - namespace: tt.fields.namespace, - ignoredNamespaces: tt.fields.ignoredNamespaces, - } - if got := c.resourceInIgnoredNamespace(tt.args.raw); got != tt.want { - t.Errorf("Controller.resourceInIgnoredNamespace() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestController_resourceInNamespaceSelector(t *testing.T) { - type fields struct { - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace v1.Namespace - namespaceSelector string - } - type args struct { - raw interface{} - } - tests := []struct { - name string - fields fields - args args - want bool - }{ - { - name: "TestConfigMapResourceInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - }, - }, - }, - }, - args: args{ - raw: testutil.GetConfigmap("selected-namespace", "testcm", "test"), - }, - want: true, - }, { - name: "TestConfigMapResourceNotInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-selected-namespace", - Labels: map[string]string{}, - }, - }, - }, - args: args{ - raw: testutil.GetConfigmap("not-selected-namespace", "testcm", "test"), - }, - want: false, - }, - { - name: "TestSecretResourceInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "testsecret", "test"), - }, - want: true, - }, { - name: 
"TestSecretResourceNotInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-selected-namespace", - Labels: map[string]string{}, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("not-selected-namespace", "secret", "test"), - }, - want: false, - }, { - name: "TestSecretResourceInNamespaceSelectorKeyExists", - fields: fields{ - namespaceSelector: "select", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorValueIn", - fields: fields{ - namespaceSelector: "select in (select1, select2, select3)", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "select2", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorKeyDoesNotExist", - fields: fields{ - namespaceSelector: "!select2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorMultipleConditions", - fields: fields{ - namespaceSelector: "select,select2=this2,select3!=this4", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - "select3": "this3", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, - } - - for _, tt := 
range tests { - t.Run(tt.name, func(t *testing.T) { - fakeClient := fake.NewSimpleClientset() - namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{}) - logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name) - - c := &Controller{ - client: fakeClient, - indexer: tt.fields.indexer, - queue: tt.fields.queue, - informer: tt.fields.informer, - namespace: tt.fields.namespace.Name, - namespaceSelector: tt.fields.namespaceSelector, - } - - listOptions := metav1.ListOptions{} - listOptions.LabelSelector = tt.fields.namespaceSelector - namespaces, _ := fakeClient.CoreV1().Namespaces().List(context.Background(), listOptions) - - for _, ns := range namespaces.Items { - c.addSelectedNamespaceToCache(ns) - } - - if got := c.resourceInSelectedNamespaces(tt.args.raw); got != tt.want { - t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want) - } - - for _, ns := range namespaces.Items { - c.removeSelectedNamespaceFromCache(ns) - } - }) - } -} diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go index d4914cdd3..2c887340a 100644 --- a/internal/pkg/controller/manager.go +++ b/internal/pkg/controller/manager.go @@ -3,10 +3,10 @@ package controller import ( "context" "fmt" - "time" argorolloutsv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/events" "github.com/stakater/Reloader/internal/pkg/metrics" @@ -36,12 +36,10 @@ type ManagerOptions struct { } // NewManager creates a new controller-runtime manager with the given options. +// This follows controller-runtime and operator-sdk conventions for leader election. 
func NewManager(opts ManagerOptions) (ctrl.Manager, error) { cfg := opts.Config - - leaseDuration := 15 * time.Second - renewDeadline := 10 * time.Second - retryPeriod := 2 * time.Second + le := cfg.LeaderElection mgrOpts := ctrl.Options{ Scheme: runtimeScheme, @@ -49,11 +47,19 @@ func NewManager(opts ManagerOptions) (ctrl.Manager, error) { BindAddress: cfg.MetricsAddr, }, HealthProbeBindAddress: cfg.HealthAddr, - LeaderElection: cfg.EnableHA, - LeaderElectionID: "reloader-leader-election", - LeaseDuration: &leaseDuration, - RenewDeadline: &renewDeadline, - RetryPeriod: &retryPeriod, + + // Leader election configuration following operator-sdk best practices: + // - LeaderElection enables/disables leader election + // - LeaderElectionID is the name of the lease resource + // - LeaderElectionNamespace where the lease is created (defaults to pod namespace) + // - LeaderElectionReleaseOnCancel allows faster failover by releasing the lock on shutdown + LeaderElection: cfg.EnableHA, + LeaderElectionID: le.LockName, + LeaderElectionNamespace: le.Namespace, + LeaderElectionReleaseOnCancel: le.ReleaseOnCancel, + LeaseDuration: &le.LeaseDuration, + RenewDeadline: &le.RenewDeadline, + RetryPeriod: &le.RetryPeriod, } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), mgrOpts) @@ -61,6 +67,10 @@ func NewManager(opts ManagerOptions) (ctrl.Manager, error) { return nil, fmt.Errorf("creating manager: %w", err) } + // Add health and readiness probes. + // The healthz probe reports whether the manager is running. + // The readyz probe reports whether the manager is ready to serve requests. + // When leader election is enabled, readyz will fail until this instance becomes leader. 
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { return nil, fmt.Errorf("setting up health check: %w", err) } @@ -76,6 +86,13 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col registry := workload.NewRegistry(cfg.ArgoRolloutsEnabled) reloadService := reload.NewService(cfg) eventRecorder := events.NewRecorder(mgr.GetEventRecorderFor("reloader")) + pauseHandler := reload.NewPauseHandler(cfg) + + // Create alerter based on configuration + alerter := alerting.NewAlerter(cfg) + if cfg.Alerting.Enabled { + log.Info("alerting enabled", "sink", cfg.Alerting.Sink) + } // Create webhook client if URL is configured var webhookClient *webhook.Client @@ -84,6 +101,7 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col log.Info("webhook mode enabled", "url", cfg.WebhookURL) } + // Setup ConfigMap reconciler if !cfg.IsResourceIgnored("configmaps") { if err := (&ConfigMapReconciler{ Client: mgr.GetClient(), @@ -94,11 +112,13 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col Collectors: collectors, EventRecorder: eventRecorder, WebhookClient: webhookClient, + Alerter: alerter, }).SetupWithManager(mgr); err != nil { return fmt.Errorf("setting up configmap reconciler: %w", err) } } + // Setup Secret reconciler if !cfg.IsResourceIgnored("secrets") { if err := (&SecretReconciler{ Client: mgr.GetClient(), @@ -109,11 +129,36 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col Collectors: collectors, EventRecorder: eventRecorder, WebhookClient: webhookClient, + Alerter: alerter, }).SetupWithManager(mgr); err != nil { return fmt.Errorf("setting up secret reconciler: %w", err) } } + // Setup Namespace reconciler if namespace selectors are configured + if len(cfg.NamespaceSelectors) > 0 { + nsCache := NewNamespaceCache(true) + if err := (&NamespaceReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("namespace-reconciler"), + Config: 
cfg, + Cache: nsCache, + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("setting up namespace reconciler: %w", err) + } + log.Info("namespace reconciler enabled for label selector filtering") + } + + // Setup Deployment reconciler for pause handling + if err := (&DeploymentReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("deployment-reconciler"), + Config: cfg, + PauseHandler: pauseHandler, + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("setting up deployment reconciler: %w", err) + } + return nil } diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index 0d68ae131..8c5dbd2b2 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ b/internal/pkg/controller/secret_reconciler.go @@ -6,6 +6,7 @@ import ( "time" "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/events" "github.com/stakater/Reloader/internal/pkg/metrics" @@ -33,6 +34,7 @@ type SecretReconciler struct { Collectors *metrics.Collectors EventRecorder *events.Recorder WebhookClient *webhook.Client + Alerter alerting.Alerter initialized bool initOnce sync.Once @@ -131,6 +133,19 @@ func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) + + // Send alert notification + if err := r.Alerter.Send(ctx, alerting.AlertMessage{ + WorkloadKind: string(decision.Workload.Kind()), + WorkloadName: decision.Workload.GetName(), + WorkloadNamespace: decision.Workload.GetNamespace(), + ResourceKind: "Secret", + ResourceName: secret.Name, + ResourceNamespace: secret.Namespace, + Timestamp: time.Now(), + }); err != nil { + log.Error(err, "failed to send alert") + } } } @@ -199,6 +214,19 @@ func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, l if updated { 
r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "Secret", req.Name) r.recordMetrics(true, req.Namespace) + + // Send alert notification + if err := r.Alerter.Send(ctx, alerting.AlertMessage{ + WorkloadKind: string(decision.Workload.Kind()), + WorkloadName: decision.Workload.GetName(), + WorkloadNamespace: decision.Workload.GetNamespace(), + ResourceKind: "Secret", + ResourceName: req.Name, + ResourceNamespace: req.Namespace, + Timestamp: time.Now(), + }); err != nil { + log.Error(err, "failed to send alert") + } } } diff --git a/internal/pkg/crypto/sha.go b/internal/pkg/crypto/sha.go deleted file mode 100644 index 043fc2273..000000000 --- a/internal/pkg/crypto/sha.go +++ /dev/null @@ -1,20 +0,0 @@ -package crypto - -import ( - "crypto/sha1" - "fmt" - "io" - - "github.com/sirupsen/logrus" -) - -// GenerateSHA generates SHA from string -func GenerateSHA(data string) string { - hasher := sha1.New() - _, err := io.WriteString(hasher, data) - if err != nil { - logrus.Errorf("Unable to write data in hash writer %v", err) - } - sha := hasher.Sum(nil) - return fmt.Sprintf("%x", sha) -} diff --git a/internal/pkg/crypto/sha_test.go b/internal/pkg/crypto/sha_test.go deleted file mode 100644 index 60d5af635..000000000 --- a/internal/pkg/crypto/sha_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package crypto - -import ( - "testing" -) - -// TestGenerateSHA generates the sha from given data and verifies whether it is correct or not -func TestGenerateSHA(t *testing.T) { - data := "www.stakater.com" - sha := "abd4ed82fb04548388a6cf3c339fd9dc84d275df" - result := GenerateSHA(data) - if result != sha { - t.Errorf("Failed to generate SHA") - } -} diff --git a/internal/pkg/handler/create.go b/internal/pkg/handler/create.go deleted file mode 100644 index fab737888..000000000 --- a/internal/pkg/handler/create.go +++ /dev/null @@ -1,47 +0,0 @@ -package handler - -import ( - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/metrics" - 
"github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/common" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" -) - -// ResourceCreatedHandler contains new objects -type ResourceCreatedHandler struct { - Resource interface{} - Collectors metrics.Collectors - Recorder record.EventRecorder -} - -// Handle processes the newly created resource -func (r ResourceCreatedHandler) Handle() error { - if r.Resource == nil { - logrus.Errorf("Resource creation handler received nil resource") - } else { - config, _ := r.GetConfig() - // Send webhook - if options.WebhookUrl != "" { - return sendUpgradeWebhook(config, options.WebhookUrl) - } - // process resource based on its type - return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy) - } - return nil -} - -// GetConfig gets configurations containing SHA, annotations, namespace and resource name -func (r ResourceCreatedHandler) GetConfig() (common.Config, string) { - var oldSHAData string - var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := r.Resource.(*v1.Secret); ok { - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) - } else { - logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) - } - return config, oldSHAData -} diff --git a/internal/pkg/handler/delete.go b/internal/pkg/handler/delete.go deleted file mode 100644 index 65c671e89..000000000 --- a/internal/pkg/handler/delete.go +++ /dev/null @@ -1,100 +0,0 @@ -package handler - -import ( - "fmt" - "slices" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - 
"github.com/stakater/Reloader/pkg/common" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" -) - -// ResourceDeleteHandler contains new objects -type ResourceDeleteHandler struct { - Resource interface{} - Collectors metrics.Collectors - Recorder record.EventRecorder -} - -// Handle processes resources being deleted -func (r ResourceDeleteHandler) Handle() error { - if r.Resource == nil { - logrus.Errorf("Resource delete handler received nil resource") - } else { - config, _ := r.GetConfig() - // Send webhook - if options.WebhookUrl != "" { - return sendUpgradeWebhook(config, options.WebhookUrl) - } - // process resource based on its type - return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeDeleteStrategy) - } - return nil -} - -// GetConfig gets configurations containing SHA, annotations, namespace and resource name -func (r ResourceDeleteHandler) GetConfig() (common.Config, string) { - var oldSHAData string - var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := r.Resource.(*v1.Secret); ok { - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) - } else { - logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) - } - return config, oldSHAData -} - -func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - if options.ReloadStrategy == constants.AnnotationsReloadStrategy { - return removePodAnnotations(upgradeFuncs, item, config, autoReload) - } - - return removeContainerEnvVars(upgradeFuncs, item, config, autoReload) -} - -func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - config.SHAValue = 
testutil.GetSHAfromEmptyData() - return updatePodAnnotations(upgradeFuncs, item, config, autoReload) -} - -func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - envVar := getEnvVarName(config.ResourceName, config.Type) - container := getContainerUsingResource(upgradeFuncs, item, config, autoReload) - - if container == nil { - return InvokeStrategyResult{constants.NoContainerFound, nil} - } - - //remove if env var exists - if len(container.Env) > 0 { - index := slices.IndexFunc(container.Env, func(envVariable v1.EnvVar) bool { - return envVariable.Name == envVar - }) - if index != -1 { - var patch []byte - if upgradeFuncs.SupportsPatch { - containers := upgradeFuncs.ContainersFunc(item) - containerIndex := slices.IndexFunc(containers, func(c v1.Container) bool { - return c.Name == container.Name - }) - patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate, containerIndex, index) - } - - container.Env = append(container.Env[:index], container.Env[index+1:]...) 
- return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.JSONPatchType, Bytes: patch}} - } - } - - return InvokeStrategyResult{constants.NotUpdated, nil} -} diff --git a/internal/pkg/handler/handler.go b/internal/pkg/handler/handler.go deleted file mode 100644 index 1f5858e58..000000000 --- a/internal/pkg/handler/handler.go +++ /dev/null @@ -1,9 +0,0 @@ -package handler - -import "github.com/stakater/Reloader/pkg/common" - -// ResourceHandler handles the creation and update of resources -type ResourceHandler interface { - Handle() error - GetConfig() (common.Config, string) -} diff --git a/internal/pkg/handler/pause_deployment.go b/internal/pkg/handler/pause_deployment.go deleted file mode 100644 index 28d1b9efd..000000000 --- a/internal/pkg/handler/pause_deployment.go +++ /dev/null @@ -1,242 +0,0 @@ -package handler - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/kube" - app "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - patchtypes "k8s.io/apimachinery/pkg/types" -) - -// Keeps track of currently active timers -var activeTimers = make(map[string]*time.Timer) - -// Returns unique key for the activeTimers map -func getTimerKey(namespace, deploymentName string) string { - return fmt.Sprintf("%s/%s", namespace, deploymentName) -} - -// Checks if a deployment is currently paused -func IsPaused(deployment *app.Deployment) bool { - return deployment.Spec.Paused -} - -// Deployment paused by reloader ? 
-func IsPausedByReloader(deployment *app.Deployment) bool { - if IsPaused(deployment) { - pausedAtAnnotationValue := deployment.Annotations[options.PauseDeploymentTimeAnnotation] - return pausedAtAnnotationValue != "" - } - return false -} - -// Returns the time, the deployment was paused by reloader, nil otherwise -func GetPauseStartTime(deployment *app.Deployment) (*time.Time, error) { - if !IsPausedByReloader(deployment) { - return nil, nil - } - - pausedAtStr := deployment.Annotations[options.PauseDeploymentTimeAnnotation] - parsedTime, err := time.Parse(time.RFC3339, pausedAtStr) - if err != nil { - return nil, err - } - - return &parsedTime, nil -} - -// ParsePauseDuration parses the pause interval value and returns a time.Duration -func ParsePauseDuration(pauseIntervalValue string) (time.Duration, error) { - pauseDuration, err := time.ParseDuration(pauseIntervalValue) - if err != nil { - logrus.Warnf("Failed to parse pause interval value '%s': %v", pauseIntervalValue, err) - return 0, err - } - return pauseDuration, nil -} - -// Pauses a deployment for a specified duration and creates a timer to resume it -// after the specified duration -func PauseDeployment(deployment *app.Deployment, clients kube.Clients, namespace, pauseIntervalValue string) (*app.Deployment, error) { - deploymentName := deployment.Name - pauseDuration, err := ParsePauseDuration(pauseIntervalValue) - - if err != nil { - return nil, err - } - - if !IsPaused(deployment) { - logrus.Infof("Pausing Deployment '%s' in namespace '%s' for %s", deploymentName, namespace, pauseDuration) - - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - pausePatch, err := CreatePausePatch() - if err != nil { - logrus.Errorf("Failed to create pause patch for deployment '%s': %v", deploymentName, err) - return deployment, err - } - - err = deploymentFuncs.PatchFunc(clients, namespace, deployment, patchtypes.StrategicMergePatchType, pausePatch) - - if err != nil { - logrus.Errorf("Failed to patch 
deployment '%s' in namespace '%s': %v", deploymentName, namespace, err) - return deployment, err - } - - updatedDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - - CreateResumeTimer(deployment, clients, namespace, pauseDuration) - return updatedDeployment, err - } - - if !IsPausedByReloader(deployment) { - logrus.Infof("Deployment '%s' in namespace '%s' already paused", deploymentName, namespace) - return deployment, nil - } - - // Deployment has already been paused by reloader, check for timer - logrus.Debugf("Deployment '%s' in namespace '%s' is already paused by reloader", deploymentName, namespace) - - timerKey := getTimerKey(namespace, deploymentName) - _, timerExists := activeTimers[timerKey] - - if !timerExists { - logrus.Warnf("Timer does not exist for already paused deployment '%s' in namespace '%s', creating new one", - deploymentName, namespace) - HandleMissingTimer(deployment, pauseDuration, clients, namespace) - } - return deployment, nil -} - -// Handles the case where missing timers for deployments that have been paused by reloader. -// Could occur after new leader election or reloader restart -func HandleMissingTimer(deployment *app.Deployment, pauseDuration time.Duration, clients kube.Clients, namespace string) { - deploymentName := deployment.Name - pauseStartTime, err := GetPauseStartTime(deployment) - if err != nil { - logrus.Errorf("Error parsing pause start time for deployment '%s' in namespace '%s': %v. Resuming deployment immediately", - deploymentName, namespace, err) - ResumeDeployment(deployment, namespace, clients) - return - } - - if pauseStartTime == nil { - return - } - - elapsedPauseTime := time.Since(*pauseStartTime) - remainingPauseTime := pauseDuration - elapsedPauseTime - - if remainingPauseTime <= 0 { - logrus.Infof("Pause period for deployment '%s' in namespace '%s' has expired. 
Resuming immediately", - deploymentName, namespace) - ResumeDeployment(deployment, namespace, clients) - return - } - - logrus.Infof("Creating missing timer for already paused deployment '%s' in namespace '%s' with remaining time %s", - deploymentName, namespace, remainingPauseTime) - CreateResumeTimer(deployment, clients, namespace, remainingPauseTime) -} - -// CreateResumeTimer creates a timer to resume the deployment after the specified duration -func CreateResumeTimer(deployment *app.Deployment, clients kube.Clients, namespace string, pauseDuration time.Duration) { - deploymentName := deployment.Name - timerKey := getTimerKey(namespace, deployment.Name) - - // Check if there's an existing timer for this deployment - if _, exists := activeTimers[timerKey]; exists { - logrus.Debugf("Timer already exists for deployment '%s' in namespace '%s', Skipping creation", - deploymentName, namespace) - return - } - - // Create and store the new timer - timer := time.AfterFunc(pauseDuration, func() { - ResumeDeployment(deployment, namespace, clients) - }) - - // Add the new timer to the map - activeTimers[timerKey] = timer - - logrus.Debugf("Created pause timer for deployment '%s' in namespace '%s' with duration %s", - deploymentName, namespace, pauseDuration) -} - -// ResumeDeployment resumes a deployment that has been paused by reloader -func ResumeDeployment(deployment *app.Deployment, namespace string, clients kube.Clients) { - deploymentName := deployment.Name - - currentDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - - if err != nil { - logrus.Errorf("Failed to get deployment '%s' in namespace '%s': %v", deploymentName, namespace, err) - return - } - - if !IsPausedByReloader(currentDeployment) { - logrus.Infof("Deployment '%s' in namespace '%s' not paused by Reloader. 
Skipping resume", deploymentName, namespace) - return - } - - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - resumePatch, err := CreateResumePatch() - if err != nil { - logrus.Errorf("Failed to create resume patch for deployment '%s': %v", deploymentName, err) - return - } - - // Remove the timer - timerKey := getTimerKey(namespace, deploymentName) - if timer, exists := activeTimers[timerKey]; exists { - timer.Stop() - delete(activeTimers, timerKey) - logrus.Debugf("Removed pause timer for deployment '%s' in namespace '%s'", deploymentName, namespace) - } - - err = deploymentFuncs.PatchFunc(clients, namespace, currentDeployment, patchtypes.StrategicMergePatchType, resumePatch) - - if err != nil { - logrus.Errorf("Failed to resume deployment '%s' in namespace '%s': %v", deploymentName, namespace, err) - return - } - - logrus.Infof("Successfully resumed deployment '%s' in namespace '%s'", deploymentName, namespace) -} - -func CreatePausePatch() ([]byte, error) { - patchData := map[string]interface{}{ - "spec": map[string]interface{}{ - "paused": true, - }, - "metadata": map[string]interface{}{ - "annotations": map[string]string{ - options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339), - }, - }, - } - - return json.Marshal(patchData) -} - -func CreateResumePatch() ([]byte, error) { - patchData := map[string]interface{}{ - "spec": map[string]interface{}{ - "paused": false, - }, - "metadata": map[string]interface{}{ - "annotations": map[string]interface{}{ - options.PauseDeploymentTimeAnnotation: nil, - }, - }, - } - - return json.Marshal(patchData) -} diff --git a/internal/pkg/handler/pause_deployment_test.go b/internal/pkg/handler/pause_deployment_test.go deleted file mode 100644 index c14cbfcbe..000000000 --- a/internal/pkg/handler/pause_deployment_test.go +++ /dev/null @@ -1,391 +0,0 @@ -package handler - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stakater/Reloader/internal/pkg/options" - 
"github.com/stakater/Reloader/pkg/kube" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - testclient "k8s.io/client-go/kubernetes/fake" -) - -func TestIsPaused(t *testing.T) { - tests := []struct { - name string - deployment *appsv1.Deployment - paused bool - }{ - { - name: "paused deployment", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - }, - paused: true, - }, - { - name: "unpaused deployment", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - paused: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - result := IsPaused(test.deployment) - assert.Equal(t, test.paused, result) - }) - } -} - -func TestIsPausedByReloader(t *testing.T) { - tests := []struct { - name string - deployment *appsv1.Deployment - pausedByReloader bool - }{ - { - name: "paused by reloader", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339), - }, - }, - }, - pausedByReloader: true, - }, - { - name: "not paused by reloader", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{}, - }, - }, - pausedByReloader: false, - }, - { - name: "not paused", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - pausedByReloader: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pausedByReloader := IsPausedByReloader(test.deployment) - assert.Equal(t, test.pausedByReloader, pausedByReloader) - }) - } -} - -func TestGetPauseStartTime(t *testing.T) { - now := time.Now() - nowStr := 
now.Format(time.RFC3339) - - tests := []struct { - name string - deployment *appsv1.Deployment - pausedByReloader bool - expectedStartTime time.Time - }{ - { - name: "valid pause time", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - options.PauseDeploymentTimeAnnotation: nowStr, - }, - }, - }, - pausedByReloader: true, - expectedStartTime: now, - }, - { - name: "not paused by reloader", - deployment: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - pausedByReloader: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actualStartTime, err := GetPauseStartTime(test.deployment) - - assert.NoError(t, err) - - if !test.pausedByReloader { - assert.Nil(t, actualStartTime) - } else { - assert.NotNil(t, actualStartTime) - assert.WithinDuration(t, test.expectedStartTime, *actualStartTime, time.Second) - } - }) - } -} - -func TestParsePauseDuration(t *testing.T) { - tests := []struct { - name string - pauseIntervalValue string - expectedDuration time.Duration - invalidDuration bool - }{ - { - name: "valid duration", - pauseIntervalValue: "10s", - expectedDuration: 10 * time.Second, - invalidDuration: false, - }, - { - name: "valid minute duration", - pauseIntervalValue: "2m", - expectedDuration: 2 * time.Minute, - invalidDuration: false, - }, - { - name: "invalid duration", - pauseIntervalValue: "invalid", - expectedDuration: 0, - invalidDuration: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actualDuration, err := ParsePauseDuration(test.pauseIntervalValue) - - if test.invalidDuration { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, test.expectedDuration, actualDuration) - } - }) - } -} - -func TestHandleMissingTimerSimple(t *testing.T) { - tests := []struct { - name string - deployment *appsv1.Deployment - shouldBePaused bool 
// Should be unpaused after HandleMissingTimer ? - }{ - { - name: "deployment paused by reloader, pause period has expired and no timer", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment-1", - Annotations: map[string]string{ - options.PauseDeploymentTimeAnnotation: time.Now().Add(-6 * time.Minute).Format(time.RFC3339), - options.PauseDeploymentAnnotation: "5m", - }, - }, - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - }, - shouldBePaused: false, - }, - { - name: "deployment paused by reloader, pause period expires in the future and no timer", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment-2", - Annotations: map[string]string{ - options.PauseDeploymentTimeAnnotation: time.Now().Add(1 * time.Minute).Format(time.RFC3339), - options.PauseDeploymentAnnotation: "5m", - }, - }, - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - }, - shouldBePaused: true, - }, - } - - for _, test := range tests { - // Clean up any timers at the end of the test - defer func() { - for key, timer := range activeTimers { - timer.Stop() - delete(activeTimers, key) - } - }() - - t.Run(test.name, func(t *testing.T) { - fakeClient := testclient.NewSimpleClientset() - clients := kube.Clients{ - KubernetesClient: fakeClient, - } - - _, err := fakeClient.AppsV1().Deployments("default").Create( - context.TODO(), - test.deployment, - metav1.CreateOptions{}) - assert.NoError(t, err, "Expected no error when creating deployment") - - pauseDuration, _ := ParsePauseDuration(test.deployment.Annotations[options.PauseDeploymentAnnotation]) - HandleMissingTimer(test.deployment, pauseDuration, clients, "default") - - updatedDeployment, _ := fakeClient.AppsV1().Deployments("default").Get(context.TODO(), test.deployment.Name, metav1.GetOptions{}) - - assert.Equal(t, test.shouldBePaused, updatedDeployment.Spec.Paused, - "Deployment should have correct paused state after timer expiration") - - if test.shouldBePaused { - 
pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation] - assert.NotEmpty(t, pausedAtAnnotationValue, - "Pause annotation should be present and contain a value when deployment is paused") - } - }) - } -} - -func TestPauseDeployment(t *testing.T) { - tests := []struct { - name string - deployment *appsv1.Deployment - expectedError bool - expectedPaused bool - expectedAnnotation bool // Should have pause time annotation - pauseInterval string - }{ - { - name: "deployment without pause annotation", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Annotations: map[string]string{}, - }, - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - expectedError: true, - expectedPaused: false, - expectedAnnotation: false, - pauseInterval: "", - }, - { - name: "deployment already paused but not by reloader", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Annotations: map[string]string{ - options.PauseDeploymentAnnotation: "5m", - }, - }, - Spec: appsv1.DeploymentSpec{ - Paused: true, - }, - }, - expectedError: false, - expectedPaused: true, - expectedAnnotation: false, - pauseInterval: "5m", - }, - { - name: "deployment unpaused that needs to be paused by reloader", - deployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment-3", - Annotations: map[string]string{ - options.PauseDeploymentAnnotation: "5m", - }, - }, - Spec: appsv1.DeploymentSpec{ - Paused: false, - }, - }, - expectedError: false, - expectedPaused: true, - expectedAnnotation: true, - pauseInterval: "5m", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - fakeClient := testclient.NewSimpleClientset() - clients := kube.Clients{ - KubernetesClient: fakeClient, - } - - _, err := fakeClient.AppsV1().Deployments("default").Create( - context.TODO(), - test.deployment, - metav1.CreateOptions{}) - assert.NoError(t, err, 
"Expected no error when creating deployment") - - updatedDeployment, err := PauseDeployment(test.deployment, clients, "default", test.pauseInterval) - if test.expectedError { - assert.Error(t, err, "Expected an error pausing the deployment") - return - } else { - assert.NoError(t, err, "Expected no error pausing the deployment") - } - - assert.Equal(t, test.expectedPaused, updatedDeployment.Spec.Paused, - "Deployment should have correct paused state after pause") - - if test.expectedAnnotation { - pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation] - assert.NotEmpty(t, pausedAtAnnotationValue, - "Pause annotation should be present and contain a value when deployment is paused") - } else { - pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation] - assert.Empty(t, pausedAtAnnotationValue, - "Pause annotation should not be present when deployment has not been paused by reloader") - } - }) - } -} - -// Simple helper function for test cases -func FindDeploymentByName(deployments []runtime.Object, deploymentName string) (*appsv1.Deployment, error) { - for _, deployment := range deployments { - accessor, err := meta.Accessor(deployment) - if err != nil { - return nil, fmt.Errorf("error getting accessor for item: %v", err) - } - if accessor.GetName() == deploymentName { - deploymentObj, ok := deployment.(*appsv1.Deployment) - if !ok { - return nil, fmt.Errorf("failed to cast to Deployment") - } - return deploymentObj, nil - } - } - return nil, fmt.Errorf("deployment '%s' not found", deploymentName) -} diff --git a/internal/pkg/handler/update.go b/internal/pkg/handler/update.go deleted file mode 100644 index ae0bb1e21..000000000 --- a/internal/pkg/handler/update.go +++ /dev/null @@ -1,53 +0,0 @@ -package handler - -import ( - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - 
"github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" -) - -// ResourceUpdatedHandler contains updated objects -type ResourceUpdatedHandler struct { - Resource interface{} - OldResource interface{} - Collectors metrics.Collectors - Recorder record.EventRecorder -} - -// Handle processes the updated resource -func (r ResourceUpdatedHandler) Handle() error { - if r.Resource == nil || r.OldResource == nil { - logrus.Errorf("Resource update handler received nil resource") - } else { - config, oldSHAData := r.GetConfig() - if config.SHAValue != oldSHAData { - // Send a webhook if update - if options.WebhookUrl != "" { - return sendUpgradeWebhook(config, options.WebhookUrl) - } - // process resource based on its type - return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy) - } - } - return nil -} - -// GetConfig gets configurations containing SHA, annotations, namespace and resource name -func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) { - var oldSHAData string - var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { - oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap)) - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := r.Resource.(*v1.Secret); ok { - oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data) - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) - } else { - logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) - } - return config, oldSHAData -} diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go deleted file mode 100644 index 6f185f1e8..000000000 --- a/internal/pkg/handler/upgrade.go +++ /dev/null @@ -1,619 +0,0 @@ -package handler - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "os" - - "github.com/parnurzeal/gorequest" - 
"github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" - alert "github.com/stakater/Reloader/internal/pkg/alerts" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - app "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" -) - -// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment -func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetDeploymentItem, - ItemsFunc: callbacks.GetDeploymentItems, - AnnotationsFunc: callbacks.GetDeploymentAnnotations, - PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations, - ContainersFunc: callbacks.GetDeploymentContainers, - InitContainersFunc: callbacks.GetDeploymentInitContainers, - UpdateFunc: callbacks.UpdateDeployment, - PatchFunc: callbacks.PatchDeployment, - PatchTemplatesFunc: callbacks.GetPatchTemplates, - VolumesFunc: callbacks.GetDeploymentVolumes, - ResourceType: "Deployment", - SupportsPatch: true, - } -} - -// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a cronjob -func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetCronJobItem, - ItemsFunc: callbacks.GetCronJobItems, - AnnotationsFunc: callbacks.GetCronJobAnnotations, - PodAnnotationsFunc: callbacks.GetCronJobPodAnnotations, - ContainersFunc: callbacks.GetCronJobContainers, - 
InitContainersFunc: callbacks.GetCronJobInitContainers, - UpdateFunc: callbacks.CreateJobFromCronjob, - PatchFunc: callbacks.PatchCronJob, - PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} }, - VolumesFunc: callbacks.GetCronJobVolumes, - ResourceType: "CronJob", - SupportsPatch: false, - } -} - -// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a cronjob -func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetJobItem, - ItemsFunc: callbacks.GetJobItems, - AnnotationsFunc: callbacks.GetJobAnnotations, - PodAnnotationsFunc: callbacks.GetJobPodAnnotations, - ContainersFunc: callbacks.GetJobContainers, - InitContainersFunc: callbacks.GetJobInitContainers, - UpdateFunc: callbacks.ReCreateJobFromjob, - PatchFunc: callbacks.PatchJob, - PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} }, - VolumesFunc: callbacks.GetJobVolumes, - ResourceType: "Job", - SupportsPatch: false, - } -} - -// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset -func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetDaemonSetItem, - ItemsFunc: callbacks.GetDaemonSetItems, - AnnotationsFunc: callbacks.GetDaemonSetAnnotations, - PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations, - ContainersFunc: callbacks.GetDaemonSetContainers, - InitContainersFunc: callbacks.GetDaemonSetInitContainers, - UpdateFunc: callbacks.UpdateDaemonSet, - PatchFunc: callbacks.PatchDaemonSet, - PatchTemplatesFunc: callbacks.GetPatchTemplates, - VolumesFunc: callbacks.GetDaemonSetVolumes, - ResourceType: "DaemonSet", - SupportsPatch: true, - } -} - -// GetStatefulSetRollingUpgradeFuncs returns all callback funcs for a statefulSet -func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: 
callbacks.GetStatefulSetItem, - ItemsFunc: callbacks.GetStatefulSetItems, - AnnotationsFunc: callbacks.GetStatefulSetAnnotations, - PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations, - ContainersFunc: callbacks.GetStatefulSetContainers, - InitContainersFunc: callbacks.GetStatefulSetInitContainers, - UpdateFunc: callbacks.UpdateStatefulSet, - PatchFunc: callbacks.PatchStatefulSet, - PatchTemplatesFunc: callbacks.GetPatchTemplates, - VolumesFunc: callbacks.GetStatefulSetVolumes, - ResourceType: "StatefulSet", - SupportsPatch: true, - } -} - -// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout -func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs { - return callbacks.RollingUpgradeFuncs{ - ItemFunc: callbacks.GetRolloutItem, - ItemsFunc: callbacks.GetRolloutItems, - AnnotationsFunc: callbacks.GetRolloutAnnotations, - PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations, - ContainersFunc: callbacks.GetRolloutContainers, - InitContainersFunc: callbacks.GetRolloutInitContainers, - UpdateFunc: callbacks.UpdateRollout, - PatchFunc: callbacks.PatchRollout, - PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} }, - VolumesFunc: callbacks.GetRolloutVolumes, - ResourceType: "Rollout", - SupportsPatch: false, - } -} - -func sendUpgradeWebhook(config common.Config, webhookUrl string) error { - logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s', Sending webhook to '%s'", - config.ResourceName, config.Type, config.Namespace, webhookUrl) - - body, errs := sendWebhook(webhookUrl) - if errs != nil { - // return the first error - return errs[0] - } else { - logrus.Info(body) - } - - return nil -} - -func sendWebhook(url string) (string, []error) { - request := gorequest.New() - resp, _, err := request.Post(url).Send(`{"webhook":"update successful"}`).End() - if err != nil { - // the reloader seems to retry automatically so no retry logic added - return "", err - } - defer 
func() { - closeErr := resp.Body.Close() - if closeErr != nil { - logrus.Error(closeErr) - } - }() - var buffer bytes.Buffer - _, bufferErr := io.Copy(&buffer, resp.Body) - if bufferErr != nil { - logrus.Error(bufferErr) - } - return buffer.String(), nil -} - -func doRollingUpgrade(config common.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error { - clients := kube.GetClients() - - // Get ignored workload types to avoid listing resources without RBAC permissions - ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList() - if err != nil { - logrus.Errorf("Failed to parse ignored workload types: %v", err) - ignoredWorkloadTypes = util.List{} // Continue with empty list if parsing fails - } - - err = rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - - // Only process CronJobs if they are not ignored - if !ignoredWorkloadTypes.Contains("cronjobs") { - err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - } - - // Only process Jobs if they are not ignored - if !ignoredWorkloadTypes.Contains("jobs") { - err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - } - - err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - - if options.IsArgoRollouts == "true" { - err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder, invoke) - if err != nil { - return err - } - } - - return nil -} - -func rollingUpgrade(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, 
recorder record.EventRecorder, strategy invokeStrategy) error { - err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy) - if err != nil { - logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err) - } - return err -} - -// PerformAction invokes the deployment if there is any change in configmap or secret data -func PerformAction(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - - for _, item := range items { - err := retryOnConflict(retry.DefaultRetry, func(fetchResource bool) error { - return upgradeResource(clients, config, upgradeFuncs, collectors, recorder, strategy, item, fetchResource) - }) - if err != nil { - return err - } - } - - return nil -} - -func retryOnConflict(backoff wait.Backoff, fn func(_ bool) error) error { - var lastError error - fetchResource := false // do not fetch resource on first attempt, already done by ItemsFunc - err := wait.ExponentialBackoff(backoff, func() (bool, error) { - err := fn(fetchResource) - fetchResource = true - switch { - case err == nil: - return true, nil - case apierrors.IsConflict(err): - lastError = err - return false, nil - default: - return false, err - } - }) - if wait.Interrupted(err) { - err = lastError - } - return err -} - -func upgradeResource(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error { - accessor, err := meta.Accessor(resource) - if err != nil { - return err - } - - resourceName := accessor.GetName() - if fetchResource { - resource, err = upgradeFuncs.ItemFunc(clients, resourceName, config.Namespace) - if err != nil { - return err - } - } - annotations := 
upgradeFuncs.AnnotationsFunc(resource) - podAnnotations := upgradeFuncs.PodAnnotationsFunc(resource) - result := common.ShouldReload(config, upgradeFuncs.ResourceType, annotations, podAnnotations, common.GetCommandLineOptions()) - - if !result.ShouldReload { - logrus.Debugf("No changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace) - return nil - } - - strategyResult := strategy(upgradeFuncs, resource, config, result.AutoReload) - - if strategyResult.Result != constants.Updated { - return nil - } - - // find correct annotation and update the resource - pauseInterval, foundPauseInterval := annotations[options.PauseDeploymentAnnotation] - - if foundPauseInterval { - deployment, ok := resource.(*app.Deployment) - if !ok { - logrus.Warnf("Annotation '%s' only applicable for deployments", options.PauseDeploymentAnnotation) - } else { - _, err = PauseDeployment(deployment, clients, config.Namespace, pauseInterval) - if err != nil { - logrus.Errorf("Failed to pause deployment '%s' in namespace '%s': %v", resourceName, config.Namespace, err) - return err - } - } - } - - if upgradeFuncs.SupportsPatch && strategyResult.Patch != nil { - err = upgradeFuncs.PatchFunc(clients, config.Namespace, resource, strategyResult.Patch.Type, strategyResult.Patch.Bytes) - } else { - err = upgradeFuncs.UpdateFunc(clients, config.Namespace, resource) - } - - if err != nil { - message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err) - logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err) - - collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc() - collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc() - if recorder != nil { - recorder.Event(resource, v1.EventTypeWarning, 
"ReloadFail", message) - } - return err - } else { - message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace) - message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace) - - logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace) - - collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc() - collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc() - alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD") - if recorder != nil { - recorder.Event(resource, v1.EventTypeNormal, "Reloaded", message) - } - if ok && alert_on_reload == "true" { - msg := fmt.Sprintf( - "Reloader detected changes in *%s* of type *%s* in namespace *%s*. 
Hence reloaded *%s* of type *%s* in namespace *%s*", - config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace) - alert.SendWebhookAlert(msg) - } - } - - return nil -} - -func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string { - for i := range volumes { - switch mountType { - case constants.ConfigmapEnvVarPostfix: - if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName { - return volumes[i].Name - } - - if volumes[i].Projected != nil { - for j := range volumes[i].Projected.Sources { - if volumes[i].Projected.Sources[j].ConfigMap != nil && volumes[i].Projected.Sources[j].ConfigMap.Name == volumeName { - return volumes[i].Name - } - } - } - case constants.SecretEnvVarPostfix: - if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName { - return volumes[i].Name - } - - if volumes[i].Projected != nil { - for j := range volumes[i].Projected.Sources { - if volumes[i].Projected.Sources[j].Secret != nil && volumes[i].Projected.Sources[j].Secret.Name == volumeName { - return volumes[i].Name - } - } - } - } - } - - return "" -} - -func getContainerWithVolumeMount(containers []v1.Container, volumeMountName string) *v1.Container { - for i := range containers { - volumeMounts := containers[i].VolumeMounts - for j := range volumeMounts { - if volumeMounts[j].Name == volumeMountName { - return &containers[i] - } - } - } - - return nil -} - -func getContainerWithEnvReference(containers []v1.Container, resourceName string, resourceType string) *v1.Container { - for i := range containers { - envs := containers[i].Env - for j := range envs { - envVarSource := envs[j].ValueFrom - if envVarSource != nil { - if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.Name == resourceName { - return &containers[i] - } else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != 
nil && envVarSource.ConfigMapKeyRef.Name == resourceName { - return &containers[i] - } - } - } - - envsFrom := containers[i].EnvFrom - for j := range envsFrom { - if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.Name == resourceName { - return &containers[i] - } else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.Name == resourceName { - return &containers[i] - } - } - } - return nil -} - -func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) *v1.Container { - volumes := upgradeFuncs.VolumesFunc(item) - containers := upgradeFuncs.ContainersFunc(item) - initContainers := upgradeFuncs.InitContainersFunc(item) - var container *v1.Container - // Get the volumeMountName to find volumeMount in container - volumeMountName := getVolumeMountName(volumes, config.Type, config.ResourceName) - // Get the container with mounted configmap/secret - if volumeMountName != "" { - container = getContainerWithVolumeMount(containers, volumeMountName) - if container == nil && len(initContainers) > 0 { - container = getContainerWithVolumeMount(initContainers, volumeMountName) - if container != nil { - // if configmap/secret is being used in init container then return the first Pod container to save reloader env - if len(containers) > 0 { - return &containers[0] - } - // No containers available, return nil to avoid crash - return nil - } - } else if container != nil { - return container - } - } - - // Get the container with referenced secret or configmap as env var - container = getContainerWithEnvReference(containers, config.ResourceName, config.Type) - if container == nil && len(initContainers) > 0 { - container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type) - if container != nil { - // if configmap/secret is being used in init container then return 
the first Pod container to save reloader env - if len(containers) > 0 { - return &containers[0] - } - // No containers available, return nil to avoid crash - return nil - } - } - - // Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload - if container == nil && !autoReload { - if len(containers) > 0 { - return &containers[0] - } - // No containers available, return nil to avoid crash - return nil - } - - return container -} - -type Patch struct { - Type patchtypes.PatchType - Bytes []byte -} - -type InvokeStrategyResult struct { - Result constants.Result - Patch *Patch -} - -type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult - -func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - if options.ReloadStrategy == constants.AnnotationsReloadStrategy { - return updatePodAnnotations(upgradeFuncs, item, config, autoReload) - } - return updateContainerEnvVars(upgradeFuncs, item, config, autoReload) -} - -func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - container := getContainerUsingResource(upgradeFuncs, item, config, autoReload) - if container == nil { - return InvokeStrategyResult{constants.NoContainerFound, nil} - } - - // Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout - // Note: the data on this struct is purely informational and is not used for future updates - reloadSource := common.NewReloadSourceFromConfig(config, []string{container.Name}) - annotations, patch, err := createReloadedAnnotations(&reloadSource, upgradeFuncs) - if err != nil { - logrus.Errorf("Failed to create reloaded annotations for %s! 
error = %v", config.ResourceName, err) - return InvokeStrategyResult{constants.NotUpdated, nil} - } - - // Copy the all annotations to the item's annotations - pa := upgradeFuncs.PodAnnotationsFunc(item) - if pa == nil { - return InvokeStrategyResult{constants.NotUpdated, nil} - } - - for k, v := range annotations { - pa[k] = v - } - - return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}} -} - -func getReloaderAnnotationKey() string { - return fmt.Sprintf("%s/%s", - constants.ReloaderAnnotationPrefix, - constants.LastReloadedFromAnnotation, - ) -} - -func createReloadedAnnotations(target *common.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) { - if target == nil { - return nil, nil, errors.New("target is required") - } - - // Create a single "last-invokeReloadStrategy-from" annotation that stores metadata about the - // resource that caused the last invokeReloadStrategy. - // Intentionally only storing the last item in order to keep - // the generated annotations as small as possible. 
- annotations := make(map[string]string) - lastReloadedResourceName := getReloaderAnnotationKey() - - lastReloadedResource, err := json.Marshal(target) - if err != nil { - return nil, nil, err - } - - annotations[lastReloadedResourceName] = string(lastReloadedResource) - - var patch []byte - if upgradeFuncs.SupportsPatch { - escapedValue, err := jsonEscape(annotations[lastReloadedResourceName]) - if err != nil { - return nil, nil, err - } - patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().AnnotationTemplate, lastReloadedResourceName, escapedValue) - } - - return annotations, patch, nil -} - -func getEnvVarName(resourceName string, typeName string) string { - return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName -} - -func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult { - envVar := getEnvVarName(config.ResourceName, config.Type) - container := getContainerUsingResource(upgradeFuncs, item, config, autoReload) - - if container == nil { - return InvokeStrategyResult{constants.NoContainerFound, nil} - } - - //update if env var exists - updateResult := updateEnvVar(container, envVar, config.SHAValue) - - // if no existing env var exists lets create one - if updateResult == constants.NoEnvVarFound { - e := v1.EnvVar{ - Name: envVar, - Value: config.SHAValue, - } - container.Env = append(container.Env, e) - updateResult = constants.Updated - } - - var patch []byte - if upgradeFuncs.SupportsPatch { - patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().EnvVarTemplate, container.Name, envVar, config.SHAValue) - } - - return InvokeStrategyResult{updateResult, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}} -} - -func updateEnvVar(container *v1.Container, envVar string, shaData string) constants.Result { - envs := container.Env - for j := range envs { - if envs[j].Name == envVar { - if envs[j].Value != shaData { 
- envs[j].Value = shaData - return constants.Updated - } - return constants.NotUpdated - } - } - - return constants.NoEnvVarFound -} - -func jsonEscape(toEscape string) (string, error) { - bytes, err := json.Marshal(toEscape) - if err != nil { - return "", err - } - escaped := string(bytes) - return escaped[1 : len(escaped)-1], nil -} diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go deleted file mode 100644 index 9a0e94587..000000000 --- a/internal/pkg/handler/upgrade_test.go +++ /dev/null @@ -1,4288 +0,0 @@ -package handler - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/prometheus/client_golang/prometheus" - promtestutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - testclient "k8s.io/client-go/kubernetes/fake" -) - -var ( - clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()} - - arsNamespace = "test-handler-" + testutil.RandSeq(5) - arsConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - arsSecretName = "testsecret-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - arsProjectedSecretName = 
"testprojectedsecret-handler-" + testutil.RandSeq(5) - arsConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - arsSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - arsProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - arsConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - arsSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - arsSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - arsSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - arsConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - arsConfigMapWithNonAnnotatedDeployment = "testconfigmapNonAnnotatedDeployment-handler-" + testutil.RandSeq(5) - arsSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - arsSecretWithIgnoreAnnotation = 
"testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - arsConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5) - - ersNamespace = "test-handler-" + testutil.RandSeq(5) - ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - ersSecretName = "testsecret-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - ersProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) - ersConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - ersSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - ersProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - ersConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - ersSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - ersSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - ersSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - ersConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - ersSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersSecretWithSecretExcludeAnnotation = 
"testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - ersSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - ersConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5) -) - -func TestMain(m *testing.M) { - - // Creating namespaces - testutil.CreateNamespace(arsNamespace, clients.KubernetesClient) - testutil.CreateNamespace(ersNamespace, clients.KubernetesClient) - - logrus.Infof("Setting up the annotation reload strategy test resources") - setupArs() - logrus.Infof("Setting up the env-var reload strategy test resources") - setupErs() - - logrus.Infof("Running Testcases") - retCode := m.Run() - - logrus.Infof("tearing down the annotation reload strategy test resources") - teardownArs() - logrus.Infof("tearing down the env-var reload strategy test resources") - teardownErs() - - os.Exit(retCode) -} - -func setupArs() { - // Creating configmap - _, err := testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume - _, err = 
testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume in init containers - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume in init containers - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, 
arsNamespace, arsConfigmapWithInitEnv, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap for testing pausing deployments - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.google.com") - if 
err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap with ignore annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - // Patch with ignore annotation - cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(arsNamespace) - patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`) - _, _ = cmClient.Patch(context.TODO(), arsConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating secret with ignore annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithIgnoreAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - secretClient := clients.KubernetesClient.CoreV1().Secrets(arsNamespace) - _, _ = secretClient.Patch(context.TODO(), arsSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating Deployment referencing configmap with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithIgnoreAnnotation, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err) - } - // Creating Deployment referencing secret with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithIgnoreAnnotation, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err) - } - - // Creating Deployment with configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap mounted in init container - _, err = 
testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsProjectedConfigMapWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsProjectedSecretWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with configmap mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitEnv, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret mounted 
as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretWithInitEnv, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with env var source as configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err) - } - - // Creating Deployment with env var source as secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, arsConfigmapWithEnvFromName, arsNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, arsSecretWithEnvFromName, arsNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - arsConfigmapAnnotated, - arsNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: 
%v", err) - } - - // Creating Deployment with configmap and without annotations - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(clients.KubernetesClient, arsConfigMapWithNonAnnotatedDeployment, arsNamespace, map[string]string{}) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and without annotation creation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretWithSecretAutoAnnotation, arsNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsConfigmapWithConfigMapAutoAnnotation, arsNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap auto annotation: %v", err) - } - - // Creating Deployment with secret and exclude secret annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretWithExcludeSecretAnnotation, arsNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) - } - - // Creating Deployment with secret and exclude configmap annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsConfigmapWithExcludeConfigMapAnnotation, arsNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err) - } - - // Creating DaemonSet with configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with 
configmap creation: %v", err) - } - - // Creating DaemonSet with secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with configmap in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with env var source as configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err) - } - - // Creating DaemonSet with env var source as secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret creation: %v", err) - } - - // Creating StatefulSet with configmap in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - 
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with env var source as configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with env var source as secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with pod annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, arsConfigmapWithPodAnnotations, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with pod annotations: %v", err) - } - - // Creating Deployment with both annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, arsConfigmapWithBothAnnotations, arsNamespace, true) - - if err != nil { - logrus.Errorf("Error in Deployment with both annotations: %v", err) - } - - // Creating Deployment with pause annotation - _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, arsConfigmapWithPausedDeployment, arsNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } -} - -func teardownArs() { - // Deleting Deployment with configmap - deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if deploymentError != 
nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap as env var source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = 
testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError) - } - - // Deleting Deployment with configmap mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) - } - - // Deleting Deployment with configmap mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with configmap as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError) - } - - // Deleting Deployment with secret as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, 
arsNamespace, arsSecretWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError) - } - - // Deleting Deployment with pod annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError) - } - - // Deleting Deployment with both annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithBothAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError) - } - - // Deleting Deployment with search annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapAnnotated) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting Deployment with secret and exclude secret annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto 
annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and exclude configmap annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting DaemonSet with configmap - daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting DaemonSet with configmap in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting Deployment with configmap as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError) - } - - // Deleting Deployment with secret as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting 
daemonSet with secret as env var source %v", daemonSetError) - } - - // Deleting StatefulSet with configmap - statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError) - } - - // Deleting Deployment with secret as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError) - } - - // Deleting Deployment with pause annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment) - if 
deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Configmap - err := testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret used in projected volume - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume in init containers - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting secret used in projected volume in init containers - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting 
Configmap used in init container - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap used in init container %v", err) - } - - // Deleting Secret used in init container - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret used in init container %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err) - } - - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err) - } - - // Deleting Secret used with secret auto annotation - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto 
annotations: %v", err) - } - - // Deleting ConfigMap used with configmap auto annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err) - } - - // Deleting Secret used with exclude secret annotation - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotations: %v", err) - } - - // Deleting ConfigMap used with exclude configmap annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err) - } - - // Deleting configmap for testing pausing deployments - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment) - if err != nil { - logrus.Errorf("Error while deleting the configmap: %v", err) - } - - // Deleting namespace - testutil.DeleteNamespace(arsNamespace, clients.KubernetesClient) - -} - -func setupErs() { - // Creating configmap - _, err := testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } 
- - // Creating secret will be used in projected volume - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume in init containers - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume in init containers - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap for testing pausing deployments - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer, "www.google.com") - if err != nil { 
- logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret exclude annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap exclude annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap with ignore annotation - _, err = 
testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(ersNamespace) - patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`) - _, _ = cmClient.Patch(context.TODO(), ersConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating secret with ignore annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithIgnoreAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - secretClient := clients.KubernetesClient.CoreV1().Secrets(ersNamespace) - _, _ = secretClient.Patch(context.TODO(), ersSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating Deployment referencing configmap with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithIgnoreAnnotation, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err) - } - // Creating Deployment referencing secret with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithIgnoreAnnotation, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err) - } - - // Creating Deployment with configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in 
Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersProjectedConfigMapWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersProjectedSecretWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with configmap mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitEnv, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretWithInitEnv, ersNamespace, false) - if err != nil { 
- logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with env var source as configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err) - } - - // Creating Deployment with env var source as secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, ersConfigmapWithEnvFromName, ersNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, ersSecretWithEnvFromName, ersNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - ersConfigmapAnnotated, - ersNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = 
testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretWithSecretAutoAnnotation, ersNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersConfigmapWithConfigMapAutoAnnotation, ersNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersSecretWithSecretExcludeAnnotation, ersNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) - } - - // Creating Deployment with secret and with secret exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersConfigmapWithConfigMapExcludeAnnotation, ersNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err) - } - - // Creating Deployment with pause annotation - _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, ersConfigmapWithPausedDeployment, ersNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating DaemonSet with configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret - _, err = 
testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with configmap in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with env var source as configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err) - } - - // Creating DaemonSet with env var source as secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret creation: %v", err) - } - - // Creating StatefulSet with configmap in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // 
Creating StatefulSet with secret in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with env var source as configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with env var source as secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with pod annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, ersConfigmapWithPodAnnotations, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with pod annotations: %v", err) - } - - // Creating Deployment with both annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, ersConfigmapWithBothAnnotations, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with both annotations: %v", err) - } -} - -func teardownErs() { - // Deleting Deployment with configmap - deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume - 
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap as env var source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError) - } - - // Deleting Deployment with configmap mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer) - if deploymentError != nil { - 
logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) - } - - // Deleting Deployment with configmap mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with configmap as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError) - } - - // Deleting Deployment with secret as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError) - } - - // Deleting Deployment with pod annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with pod annotations %v", 
deploymentError) - } - - // Deleting Deployment with both annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithBothAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError) - } - - // Deleting Deployment with search annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapAnnotated) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret exclude annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret exclude annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap exclude annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap exclude annotation %v", deploymentError) - } - - // Deleting DaemonSet with configmap - daemonSetError := 
testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting DaemonSet with configmap in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting Deployment with configmap as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError) - } - - // Deleting Deployment with secret as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret as env var source %v", daemonSetError) - } - - // Deleting StatefulSet with configmap - statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret - statefulSetError = 
testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError) - } - - // Deleting Deployment with secret as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError) - } - - // Deleting Deployment for testing pausing deployments - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Configmap - err := testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, 
ersNamespace, ersSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret used in projected volume - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume in init containers - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting secret used in projected volume in init containers - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used in init container - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap used in init container %v", err) - } - - // Deleting Secret used in init container - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer) - if err 
!= nil { - logrus.Errorf("Error while deleting the secret used in init container %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err) - } - - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err) - } - - // Deleting Secret used with secret auto annotation - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotation: %v", err) - } - - // Deleting ConfigMap used with configmap auto annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotation: %v", err) - } - - // Deleting Secret used with secret exclude annotation - err = 
testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret exclude annotation: %v", err) - } - - // Deleting ConfigMap used with configmap exclude annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", err) - } - - // Deleting ConfigMap for testing pausing deployments - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment) - if err != nil { - logrus.Errorf("Error while deleting the configmap: %v", err) - } - - // Deleting namespace - testutil.DeleteNamespace(ersNamespace, clients.KubernetesClient) - -} - -func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) common.Config { - ns := ersNamespace - if options.ReloadStrategy == constants.AnnotationsReloadStrategy { - ns = arsNamespace - } - - return common.Config{ - Namespace: ns, - ResourceName: name, - SHAValue: shaData, - Annotation: annotation, - TypedAutoAnnotation: typedAutoAnnotation, - Type: resourceType, - } -} - -func getCollectors() metrics.Collectors { - return metrics.NewCollectors() -} - -var labelSucceeded = prometheus.Labels{"success": "true"} -var labelFailed = prometheus.Labels{"success": "false"} - -func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } - - config.SHAValue = 
testutil.GetSHAfromEmptyData() - removed := testutil.VerifyResourceAnnotationUpdate(clients, config, upgradeFuncs) - if !removed { - t.Errorf("%s was not updated", upgradeFuncs.ResourceType) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } -} - -func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - return nil - } - upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDeploymentItem(client, namespace, name) - } - deploymentFuncs.ItemsFunc = func(client 
kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDeploymentItems(client, namespace) - } - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - assert.True(t, deploymentFuncs.SupportsPatch) - assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().AnnotationTemplate) - - itemCalled := 0 - itemsCalled := 0 - - deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDeploymentItem(client, 
namespace, name) - } - deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDeploymentItems(client, namespace) - } - - patchCalled := 0 - deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`) - return nil - } - - deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - deploymentFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationAndWithoutAutoReloadAllNoTriggersUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, 
"www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigMapWithNonAnnotatedDeployment, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationButWithAutoReloadAllUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - options.AutoReloadAll = true - defer func() { options.AutoReloadAll = false }() - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigMapWithNonAnnotatedDeployment, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment 
update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, 
deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - 
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - time.Sleep(5 * time.Second) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - arsConfigmapAnnotated+"-different", - arsNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - t.Errorf("Failed to create deployment with search annotation.") - } - defer func() { - _ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) - }() - // defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{}) - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - 
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithInitEnv, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := 
GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvFromName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := 
PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { 
- t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithEnvName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - 
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != 
nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithExcludeSecretAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithExcludeSecretAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, 
arsNamespace, arsSecretWithSecretAutoAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithSecretAutoAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithExcludeConfigMapAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithExcludeConfigMapAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude ConfigMap") - } - - logrus.Infof("Verifying 
deployment did update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithConfigMapAutoAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with ConfigMap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com") - config := 
getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDaemonSetItem(client, namespace, name) - } - daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDaemonSetItems(client, namespace) - } - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, 
arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - - itemCalled := 0 - itemsCalled := 0 - - daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDaemonSetItem(client, namespace, name) - } - daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDaemonSetItems(client, namespace) - } - - assert.True(t, daemonSetFuncs.SupportsPatch) - assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().AnnotationTemplate) - - patchCalled := 0 - daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`) - return nil - } - - daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - daemonSetFuncs = GetDeploymentRollingUpgradeFuncs() - 
testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvName, 
shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetStatefulSetItem(client, namespace, name) - } - statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetStatefulSetItems(client, namespace) - } - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - - itemCalled := 0 - itemsCalled := 0 - - statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetStatefulSetItem(client, namespace, name) - } - statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetStatefulSetItems(client, namespace) - } - - assert.True(t, statefulSetFuncs.SupportsPatch) - assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().AnnotationTemplate) - - patchCalled := 0 - statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`) - return nil - } - - statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with 
configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - statefulSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - 
time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithPodAnnotations, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with pod annotations") - } - - logrus.Infof("Verifying deployment update") - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - var foundPod, foundBoth bool - for _, i := range items { - accessor, err := meta.Accessor(i) - if err != nil { - t.Errorf("Error getting accessor for item: %v", err) - } - name := accessor.GetName() - if name == arsConfigmapWithPodAnnotations { - annotations := 
deploymentFuncs.PodAnnotationsFunc(i) - updated := testutil.GetResourceSHAFromAnnotation(annotations) - if updated != config.SHAValue { - t.Errorf("Deployment was not updated") - } - foundPod = true - } - if name == arsConfigmapWithBothAnnotations { - annotations := deploymentFuncs.PodAnnotationsFunc(i) - updated := testutil.GetResourceSHAFromAnnotation(annotations) - if updated == config.SHAValue { - t.Errorf("Deployment was updated") - } - foundBoth = true - } - } - if !foundPod { - t.Errorf("Deployment with pod annotations was not found") - } - if !foundBoth { - t.Errorf("Deployment with both annotations was not found") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestFailedRollingUpgradeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "fail.stakater.com") - config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error { - return fmt.Errorf("error") - } - deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error { - return fmt.Errorf("error") - } - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 { - t.Errorf("Counter was not increased") - } - - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestIgnoreAnnotationNoReloadUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ARS") - } - - // Ensure deployment is NOT updated - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated but should not have been") - } - - // Ensure counters remain zero - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 { - t.Errorf("Reload counter should not have increased") - } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 0 { - t.Errorf("Reload counter by namespace should not have increased") - } -} -func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.stakater.com") - 
config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ERS") - } - - // Ensure deployment is NOT updated - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated but should not have been (ERS)") - } - - // Ensure counters remain zero - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 { - t.Errorf("Reload counter should not have increased (ERS)") - } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 0 { - t.Errorf("Reload counter by namespace should not have increased (ERS)") - } -} - -func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } - - removed := testutil.VerifyResourceEnvVarRemoved(clients, config, envVarPostfix, upgradeFuncs) - if !removed { - t.Errorf("%s was not updated", upgradeFuncs.ResourceType) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } -} - -func 
testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate) - - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - assert.Equal(t, patchtypes.JSONPatchType, patchType) - assert.NotEmpty(t, bytes) - return nil - } - upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - 
t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - assert.True(t, deploymentFuncs.SupportsPatch) - assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`) - return nil - } - - deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", 
deploymentFuncs.ResourceType, envVarPostfix) - } - - assert.Equal(t, 2, patchCalled) - - deploymentFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, 
ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - 
t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - time.Sleep(5 * time.Second) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - ersConfigmapAnnotated+"-different", - ersNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - t.Errorf("Failed to create deployment with search annotation.") - } - defer func() { - _ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) - }() - // defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{}) - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, 
invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": 
ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, 
ersConfigmapWithEnvName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithInitEnv, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - 
logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvFromName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - 
testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, 
options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, 
envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, 
config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithEnvName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithEnvFromName, shaData, options.ReloaderAutoAnnotation, 
options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not 
updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithSecretExcludeAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithSecretExcludeAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude Secret") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment that had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithSecretAutoAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithSecretAutoAnnotation, shaData, "", 
options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigMapExcludeAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithConfigMapExcludeAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude ConfigMap") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment which 
had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithConfigMapAutoAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with ConfigMap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := 
GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - - assert.True(t, daemonSetFuncs.SupportsPatch) - assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - 
assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`) - return nil - } - - daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - assert.Equal(t, 2, patchCalled) - - daemonSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, 
ersNamespace, ersSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume") - } - - logrus.Infof("Verifying 
daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, 
statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - - assert.True(t, statefulSetFuncs.SupportsPatch) - assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`) - return nil - } - - statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - assert.Equal(t, 2, patchCalled) - - statefulSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := 
GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithPodAnnotations, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with pod annotations") - } - - logrus.Infof("Verifying deployment update") - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - var foundPod, foundBoth bool - for _, i := range items { - accessor, err := meta.Accessor(i) - if err != nil { - t.Errorf("Error getting accessor for item: %v", err) - } - name := accessor.GetName() - if name == ersConfigmapWithPodAnnotations { - containers := deploymentFuncs.ContainersFunc(i) - updated := testutil.GetResourceSHAFromEnvVar(containers, envName) - if updated != config.SHAValue { - t.Errorf("Deployment was not updated") - } - foundPod = true - } - if name == ersConfigmapWithBothAnnotations { - containers := 
deploymentFuncs.ContainersFunc(i) - updated := testutil.GetResourceSHAFromEnvVar(containers, envName) - if updated == config.SHAValue { - t.Errorf("Deployment was updated") - } - foundBoth = true - } - } - if !foundPod { - t.Errorf("Deployment with pod annotations was not found") - } - if !foundBoth { - t.Errorf("Deployment with both annotations was not found") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestFailedRollingUpgradeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "fail.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error { - return fmt.Errorf("error") - } - deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error { - return fmt.Errorf("error") - } - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestPausingDeploymentUsingErs(t *testing.T) { - options.ReloadStrategy = 
constants.EnvVarsReloadStrategy - testPausingDeployment(t, options.ReloadStrategy, ersConfigmapWithPausedDeployment, ersNamespace) -} - -func TestPausingDeploymentUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - testPausingDeployment(t, options.ReloadStrategy, arsConfigmapWithPausedDeployment, arsNamespace) -} - -func testPausingDeployment(t *testing.T, reloadStrategy string, testName string, namespace string) { - options.ReloadStrategy = reloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - // Wait for deployment to have paused-at annotation - logrus.Infof("Waiting for deployment %s to have paused-at annotation", testName) - err := waitForDeploymentPausedAtAnnotation(clients, deploymentFuncs, config.Namespace, testName, 30*time.Second) - if err != nil { - t.Errorf("Failed to wait for deployment paused-at annotation: %v", err) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - logrus.Infof("Verifying deployment has been paused") - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err := isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if !deploymentPaused { - t.Errorf("Deployment has not been paused") - } - - shaData = 
testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause-changed.stakater.com") - config = getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 2 { - t.Errorf("Counter by namespace was not increased") - } - - logrus.Infof("Verifying deployment is still paused") - items = deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err = isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if !deploymentPaused { - t.Errorf("Deployment should still be paused") - } - - logrus.Infof("Verifying deployment has been resumed after pause interval") - time.Sleep(11 * time.Second) - items = deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err = isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if deploymentPaused { - t.Errorf("Deployment should have been resumed after pause interval") - } -} - -func isDeploymentPaused(deployments []runtime.Object, deploymentName string) (bool, error) { - deployment, err := FindDeploymentByName(deployments, deploymentName) - if err != nil { - return false, err - } - return IsPaused(deployment), nil -} - -// waitForDeploymentPausedAtAnnotation waits for a deployment to have the pause-period annotation -func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs callbacks.RollingUpgradeFuncs, namespace, deploymentName string, timeout time.Duration) error { - start := time.Now() - - for time.Since(start) < timeout { - items := 
deploymentFuncs.ItemsFunc(clients, namespace) - deployment, err := FindDeploymentByName(items, deploymentName) - if err == nil { - annotations := deployment.GetAnnotations() - if annotations != nil { - if _, exists := annotations[options.PauseDeploymentTimeAnnotation]; exists { - return nil - } - } - } - - time.Sleep(100 * time.Millisecond) - } - - return fmt.Errorf("timeout waiting for deployment %s to have pause-period annotation", deploymentName) -} - -// MockArgoRolloutWithEmptyContainers creates a mock Argo Rollout with no containers -// This simulates the scenario where Argo Rollouts with workloadRef return empty containers -func MockArgoRolloutWithEmptyContainers(namespace, name string) *runtime.Object { - rollout := &argorolloutv1alpha1.Rollout{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: argorolloutv1alpha1.RolloutSpec{ - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{}, // Empty containers slice - InitContainers: []v1.Container{}, // Empty init containers slice - Volumes: []v1.Volume{}, // Empty volumes slice - }, - }, - }, - } - var obj runtime.Object = rollout - return &obj -} - -// TestGetContainerUsingResourceWithArgoRolloutEmptyContainers tests with real Argo Rollout functions -func TestGetContainerUsingResourceWithArgoRolloutEmptyContainers(t *testing.T) { - namespace := "test-namespace" - resourceName := "test-configmap" - - // Use real Argo Rollout functions but mock the containers function - rolloutFuncs := GetArgoRolloutRollingUpgradeFuncs() - originalContainersFunc := rolloutFuncs.ContainersFunc - originalInitContainersFunc := rolloutFuncs.InitContainersFunc - - // Override to return empty containers (simulating workloadRef scenario) - rolloutFuncs.ContainersFunc = func(item runtime.Object) []v1.Container { - return []v1.Container{} // Empty like workloadRef rollouts - } - rolloutFuncs.InitContainersFunc = func(item runtime.Object) []v1.Container { - return []v1.Container{} 
// Empty like workloadRef rollouts - } - - // Restore original functions after test - defer func() { - rolloutFuncs.ContainersFunc = originalContainersFunc - rolloutFuncs.InitContainersFunc = originalInitContainersFunc - }() - - // Use proper Argo Rollout object instead of Pod - mockRollout := MockArgoRolloutWithEmptyContainers(namespace, "test-rollout") - - config := common.Config{ - Namespace: namespace, - ResourceName: resourceName, - Type: constants.ConfigmapEnvVarPostfix, - SHAValue: "test-sha", - } - - // Test both autoReload scenarios using subtests as suggested by Felix - for _, autoReload := range []bool{true, false} { - t.Run(fmt.Sprintf("autoReload_%t", autoReload), func(t *testing.T) { - // This tests the actual fix in the context of Argo Rollouts - result := getContainerUsingResource(rolloutFuncs, *mockRollout, config, autoReload) - - if result != nil { - t.Errorf("Expected nil when using real Argo Rollout functions with empty containers (workloadRef scenario), got %v", result) - } - }) - } -} diff --git a/internal/pkg/leadership/leadership.go b/internal/pkg/leadership/leadership.go deleted file mode 100644 index f8c85bc15..000000000 --- a/internal/pkg/leadership/leadership.go +++ /dev/null @@ -1,107 +0,0 @@ -package leadership - -import ( - "context" - "net/http" - "sync" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/controller" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/leaderelection" - "k8s.io/client-go/tools/leaderelection/resourcelock" - - coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" -) - -var ( - // Used for liveness probe - m sync.Mutex - healthy bool = true -) - -func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock { - return &resourcelock.LeaseLock{ - LeaseMeta: v1.ObjectMeta{ - Name: lockName, - Namespace: namespace, - }, - Client: client, - LockConfig: 
resourcelock.ResourceLockConfig{ - Identity: podname, - }, - } -} - -// runLeaderElection runs leadership election. If an instance of the controller is the leader and stops leading it will shutdown. -func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) { - // Construct channels for the controllers to use - var stopChannels []chan struct{} - for i := 0; i < len(controllers); i++ { - stop := make(chan struct{}) - stopChannels = append(stopChannels, stop) - } - - leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ - Lock: lock, - ReleaseOnCancel: true, - LeaseDuration: 15 * time.Second, - RenewDeadline: 10 * time.Second, - RetryPeriod: 2 * time.Second, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(c context.Context) { - logrus.Info("became leader, starting controllers") - runControllers(controllers, stopChannels) - }, - OnStoppedLeading: func() { - logrus.Info("no longer leader, shutting down") - stopControllers(stopChannels) - cancel() - m.Lock() - defer m.Unlock() - healthy = false - }, - OnNewLeader: func(current_id string) { - if current_id == id { - logrus.Info("still the leader!") - return - } - logrus.Infof("new leader is %s", current_id) - }, - }, - }) -} - -func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) { - for i, c := range controllers { - c := c - go c.Run(1, stopChannels[i]) - } -} - -func stopControllers(stopChannels []chan struct{}) { - for _, c := range stopChannels { - close(c) - } -} - -// Healthz sets up the liveness probe endpoint. If leadership election is -// enabled and a replica stops leading the liveness probe will fail and the -// kubelet will restart the container. 
-func SetupLivenessEndpoint() { - http.HandleFunc("/live", healthz) -} - -func healthz(w http.ResponseWriter, req *http.Request) { - m.Lock() - defer m.Unlock() - if healthy { - if i, err := w.Write([]byte("alive")); err != nil { - logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err) - } - return - } - - w.WriteHeader(http.StatusInternalServerError) -} diff --git a/internal/pkg/leadership/leadership_test.go b/internal/pkg/leadership/leadership_test.go deleted file mode 100644 index d850e7a9e..000000000 --- a/internal/pkg/leadership/leadership_test.go +++ /dev/null @@ -1,216 +0,0 @@ -//go:build integration -// +build integration - -package leadership - -import ( - "context" - "fmt" - "net/http" - "net/http/httptest" - "os" - "testing" - "time" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/handler" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" -) - -func TestMain(m *testing.M) { - - testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient) - - logrus.Infof("Running Testcases") - retCode := m.Run() - - testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient) - - os.Exit(retCode) -} - -func TestHealthz(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "/live", nil) - if err != nil { - t.Fatalf(("failed to create request")) - } - - response := httptest.NewRecorder() - - healthz(response, request) - got := response.Code - want := 200 - - if got != want { - t.Fatalf("got: %q, want: %q", got, want) - } - - // Have the liveness probe serve a 500 - healthy = false - - request, err = http.NewRequest(http.MethodGet, "/live", nil) - if err != 
nil { - t.Fatalf(("failed to create request")) - } - - response = httptest.NewRecorder() - - healthz(response, request) - got = response.Code - want = 500 - - if got != want { - t.Fatalf("got: %q, want: %q", got, want) - } -} - -// TestRunLeaderElection validates that the liveness endpoint serves 500 when -// leadership election fails -func TestRunLeaderElection(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - - lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace) - - go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{}) - - // Liveness probe should be serving OK - request, err := http.NewRequest(http.MethodGet, "/live", nil) - if err != nil { - t.Fatalf(("failed to create request")) - } - - response := httptest.NewRecorder() - - healthz(response, request) - got := response.Code - want := 500 - - if got != want { - t.Fatalf("got: %q, want: %q", got, want) - } - - // Cancel the leader election context, so leadership is released and - // live endpoint serves 500 - cancel() - - request, err = http.NewRequest(http.MethodGet, "/live", nil) - if err != nil { - t.Fatalf(("failed to create request")) - } - - response = httptest.NewRecorder() - - healthz(response, request) - got = response.Code - want = 500 - - if got != want { - t.Fatalf("got: %q, want: %q", got, want) - } -} - -// TestRunLeaderElectionWithControllers tests that leadership election works -// with real controllers and that on context cancellation the controllers stop -// running. 
-func TestRunLeaderElectionWithControllers(t *testing.T) { - t.Logf("Creating controller") - var controllers []*controller.Controller - for k := range kube.ResourceMap { - c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, "", "", metrics.NewCollectors()) - if err != nil { - logrus.Fatalf("%s", err) - } - - controllers = append(controllers, c) - } - time.Sleep(3 * time.Second) - - lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace) - - ctx, cancel := context.WithCancel(context.TODO()) - - // Start running leadership election, this also starts the controllers - go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers) - time.Sleep(3 * time.Second) - - // Create some stuff and do a thing - configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com") - if err != nil { - t.Fatalf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true) - if err != nil { - t.Fatalf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Fatalf("Configmap was not updated") - } - time.Sleep(3 * time.Second) - - // Verifying deployment update - logrus.Infof("Verifying pod envvars has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: testutil.Namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: 
options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Fatalf("Deployment was not updated") - } - time.Sleep(testutil.SleepDuration) - - // Cancel the leader election context, so leadership is released - logrus.Info("shutting down controller from test") - cancel() - time.Sleep(5 * time.Second) - - // Updating configmap again - updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new") - if updateErr != nil { - t.Fatalf("Configmap was not updated") - } - - // Verifying that the deployment was not updated as leadership has been lost - logrus.Infof("Verifying pod envvars has not been updated") - shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new") - config = common.Config{ - Namespace: testutil.Namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs() - updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if updated { - t.Fatalf("Deployment was updated") - } - - // Deleting deployment - err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(testutil.SleepDuration) -} diff --git a/internal/pkg/metadata/metadata.go b/internal/pkg/metadata/metadata.go index 09db4e8e9..f0e22f126 100644 --- 
a/internal/pkg/metadata/metadata.go +++ b/internal/pkg/metadata/metadata.go @@ -10,7 +10,7 @@ import ( "runtime" "time" - "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -199,30 +199,29 @@ func (m *MetaInfo) ToConfigMap() *corev1.ConfigMap { type Publisher struct { client client.Client cfg *config.Config + log logr.Logger } // NewPublisher creates a new Publisher. -func NewPublisher(c client.Client, cfg *config.Config) *Publisher { +func NewPublisher(c client.Client, cfg *config.Config, log logr.Logger) *Publisher { return &Publisher{ client: c, cfg: cfg, + log: log, } } // Publish creates or updates the metadata ConfigMap. -// Returns an error if the operation fails, or nil on success. -// If RELOADER_NAMESPACE is not set, this is a no-op. func (p *Publisher) Publish(ctx context.Context) error { namespace := os.Getenv(EnvReloaderNamespace) if namespace == "" { - logrus.Warn("RELOADER_NAMESPACE is not set, skipping meta info configmap creation") + p.log.Info("RELOADER_NAMESPACE is not set, skipping meta info configmap creation") return nil } metaInfo := NewMetaInfo(p.cfg) configMap := metaInfo.ToConfigMap() - // Try to get existing ConfigMap existing := &corev1.ConfigMap{} err := p.client.Get(ctx, client.ObjectKey{ Name: ConfigMapName, @@ -233,34 +232,36 @@ func (p *Publisher) Publish(ctx context.Context) error { if !errors.IsNotFound(err) { return fmt.Errorf("failed to get existing meta info configmap: %w", err) } - // ConfigMap doesn't exist, create it - logrus.Info("Creating meta info configmap") + p.log.Info("Creating meta info configmap") if err := p.client.Create(ctx, configMap, client.FieldOwner(FieldManager)); err != nil { return fmt.Errorf("failed to create meta info configmap: %w", err) } - logrus.Info("Meta info configmap created successfully") + p.log.Info("Meta info configmap created successfully") return nil } - // ConfigMap 
exists, update it - logrus.Info("Meta info configmap already exists, updating it") + p.log.Info("Meta info configmap already exists, updating it") existing.Data = configMap.Data existing.Labels = configMap.Labels if err := p.client.Update(ctx, existing, client.FieldOwner(FieldManager)); err != nil { return fmt.Errorf("failed to update meta info configmap: %w", err) } - logrus.Info("Meta info configmap updated successfully") + p.log.Info("Meta info configmap updated successfully") return nil } // PublishMetaInfoConfigMap is a convenience function that creates a Publisher and calls Publish. -// This provides a simple API similar to the v1 PublishMetaInfoConfigmap function. -func PublishMetaInfoConfigMap(ctx context.Context, c client.Client, cfg *config.Config) error { - publisher := NewPublisher(c, cfg) +func PublishMetaInfoConfigMap(ctx context.Context, c client.Client, cfg *config.Config, log logr.Logger) error { + publisher := NewPublisher(c, cfg, log) return publisher.Publish(ctx) } -// toJSON marshals data to JSON string. Returns empty string on error. +// CreateOrUpdate creates or updates the metadata ConfigMap using the provided client. +func CreateOrUpdate(c client.Client, cfg *config.Config, log logr.Logger) error { + ctx := context.Background() + return PublishMetaInfoConfigMap(ctx, c, cfg, log) +} + func toJSON(data interface{}) string { jsonData, err := json.Marshal(data) if err != nil { @@ -269,8 +270,6 @@ func toJSON(data interface{}) string { return string(jsonData) } -// parseUTCTime parses a time string in RFC3339 format. -// Returns zero time if value is empty or parsing fails. 
func parseUTCTime(value string) time.Time { if value == "" { return time.Time{} diff --git a/internal/pkg/metadata/metadata_test.go b/internal/pkg/metadata/metadata_test.go index 94fce1838..b001da6ad 100644 --- a/internal/pkg/metadata/metadata_test.go +++ b/internal/pkg/metadata/metadata_test.go @@ -6,6 +6,7 @@ import ( "os" "testing" + "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -13,6 +14,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" ) +// testLogger returns a no-op logger for testing. +func testLogger() logr.Logger { + return logr.Discard() +} + func TestNewBuildInfo(t *testing.T) { // Set build variables for testing oldVersion := Version @@ -166,7 +172,7 @@ func TestPublisher_Publish_NoNamespace(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() cfg := config.NewDefault() - publisher := NewPublisher(fakeClient, cfg) + publisher := NewPublisher(fakeClient, cfg, testLogger()) err := publisher.Publish(context.Background()) if err != nil { @@ -188,7 +194,7 @@ func TestPublisher_Publish_CreateNew(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() cfg := config.NewDefault() - publisher := NewPublisher(fakeClient, cfg) + publisher := NewPublisher(fakeClient, cfg, testLogger()) ctx := context.Background() err := publisher.Publish(ctx) @@ -233,7 +239,7 @@ func TestPublisher_Publish_UpdateExisting(t *testing.T) { Build() cfg := config.NewDefault() - publisher := NewPublisher(fakeClient, cfg) + publisher := NewPublisher(fakeClient, cfg, testLogger()) ctx := context.Background() err := publisher.Publish(ctx) @@ -277,7 +283,7 @@ func TestPublishMetaInfoConfigMap(t *testing.T) { cfg := config.NewDefault() ctx := context.Background() - err := PublishMetaInfoConfigMap(ctx, fakeClient, cfg) + err := PublishMetaInfoConfigMap(ctx, fakeClient, cfg, testLogger()) if err != nil { 
t.Errorf("PublishMetaInfoConfigMap() error = %v", err) } diff --git a/internal/pkg/options/flags.go b/internal/pkg/options/flags.go deleted file mode 100644 index 0f99be8af..000000000 --- a/internal/pkg/options/flags.go +++ /dev/null @@ -1,92 +0,0 @@ -package options - -import "github.com/stakater/Reloader/internal/pkg/constants" - -type ArgoRolloutStrategy int - -const ( - // RestartStrategy is the annotation value for restart strategy for rollouts - RestartStrategy ArgoRolloutStrategy = iota - // RolloutStrategy is the annotation value for rollout strategy for rollouts - RolloutStrategy -) - -var ( - // Auto reload all resources when their corresponding configmaps/secrets are updated - AutoReloadAll = false - // ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in - // configmaps specified by name - ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload" - // SecretUpdateOnChangeAnnotation is an annotation to detect changes in - // secrets specified by name - SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload" - // ReloaderAutoAnnotation is an annotation to detect changes in secrets/configmaps - ReloaderAutoAnnotation = "reloader.stakater.com/auto" - // IgnoreResourceAnnotation is an annotation to ignore changes in secrets/configmaps - IgnoreResourceAnnotation = "reloader.stakater.com/ignore" - // ConfigmapReloaderAutoAnnotation is an annotation to detect changes in configmaps - ConfigmapReloaderAutoAnnotation = "configmap.reloader.stakater.com/auto" - // SecretReloaderAutoAnnotation is an annotation to detect changes in secrets - SecretReloaderAutoAnnotation = "secret.reloader.stakater.com/auto" - // ConfigmapReloaderAutoAnnotation is a comma separated list of configmaps that excludes detecting changes on cms - ConfigmapExcludeReloaderAnnotation = "configmaps.exclude.reloader.stakater.com/reload" - // SecretExcludeReloaderAnnotation is a comma separated list of secrets that excludes detecting changes on 
secrets - SecretExcludeReloaderAnnotation = "secrets.exclude.reloader.stakater.com/reload" - // AutoSearchAnnotation is an annotation to detect changes in - // configmaps or triggers with the SearchMatchAnnotation - AutoSearchAnnotation = "reloader.stakater.com/search" - // SearchMatchAnnotation is an annotation to tag secrets to be found with - // AutoSearchAnnotation - SearchMatchAnnotation = "reloader.stakater.com/match" - // RolloutStrategyAnnotation is an annotation to define rollout update strategy - RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy" - // PauseDeploymentAnnotation is an annotation to define the time period to pause a deployment after - // a configmap/secret change has been detected. Valid values are described here: https://pkg.go.dev/time#ParseDuration - // only positive values are allowed - PauseDeploymentAnnotation = "deployment.reloader.stakater.com/pause-period" - // Annotation set by reloader to indicate that the deployment has been paused - PauseDeploymentTimeAnnotation = "deployment.reloader.stakater.com/paused-at" - // LogFormat is the log format to use (json, or empty string for default) - LogFormat = "" - // LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic) - LogLevel = "" - // IsArgoRollouts Adds support for argo rollouts - IsArgoRollouts = "false" - // ReloadStrategy Specify the update strategy - ReloadStrategy = constants.EnvVarsReloadStrategy - // ReloadOnCreate Adds support to watch create events - ReloadOnCreate = "false" - // ReloadOnDelete Adds support to watch delete events - ReloadOnDelete = "false" - SyncAfterRestart = false - // EnableHA adds support for running multiple replicas via leadership election - EnableHA = false - // Url to send a request to instead of triggering a reload - WebhookUrl = "" - // ResourcesToIgnore is a list of resources to ignore when watching for changes - ResourcesToIgnore = []string{} - // WorkloadTypesToIgnore is a list of workload types 
to ignore when watching for changes - WorkloadTypesToIgnore = []string{} - // NamespacesToIgnore is a list of namespace names to ignore when watching for changes - NamespacesToIgnore = []string{} - // NamespaceSelectors is a list of namespace selectors to watch for changes - NamespaceSelectors = []string{} - // ResourceSelectors is a list of resource selectors to watch for changes - ResourceSelectors = []string{} - // EnablePProf enables pprof for profiling - EnablePProf = false - // PProfAddr is the address to start pprof server on - // Default is :6060 - PProfAddr = ":6060" -) - -func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy { - switch s { - case "restart": - return RestartStrategy - case "rollout": - fallthrough - default: - return RolloutStrategy - } -} diff --git a/internal/pkg/reload/hasher.go b/internal/pkg/reload/hasher.go index 3839fd1a9..0d259ac3a 100644 --- a/internal/pkg/reload/hasher.go +++ b/internal/pkg/reload/hasher.go @@ -13,7 +13,6 @@ import ( ) // Hasher computes content hashes for ConfigMaps and Secrets. -// The hash is used to detect changes and trigger workload reloads. type Hasher struct{} // NewHasher creates a new Hasher instance. @@ -22,7 +21,6 @@ func NewHasher() *Hasher { } // HashConfigMap computes a SHA1 hash of the ConfigMap's data and binaryData. -// The hash is deterministic - same content always produces the same hash. func (h *Hasher) HashConfigMap(cm *corev1.ConfigMap) string { if cm == nil { return h.computeSHA("") @@ -31,7 +29,6 @@ func (h *Hasher) HashConfigMap(cm *corev1.ConfigMap) string { } // HashSecret computes a SHA1 hash of the Secret's data. -// The hash is deterministic - same content always produces the same hash. func (h *Hasher) HashSecret(secret *corev1.Secret) string { if secret == nil { return h.computeSHA("") @@ -39,8 +36,6 @@ func (h *Hasher) HashSecret(secret *corev1.Secret) string { return h.hashSecretData(secret.Data) } -// hashConfigMapData computes a hash from ConfigMap data and binary data. 
-// Keys are sorted to ensure deterministic output. func (h *Hasher) hashConfigMapData(data map[string]string, binaryData map[string][]byte) string { values := make([]string, 0, len(data)+len(binaryData)) @@ -49,7 +44,6 @@ func (h *Hasher) hashConfigMapData(data map[string]string, binaryData map[string } for k, v := range binaryData { - // Binary data is base64 encoded for consistent hashing values = append(values, k+"="+base64.StdEncoding.EncodeToString(v)) } @@ -57,13 +51,10 @@ func (h *Hasher) hashConfigMapData(data map[string]string, binaryData map[string return h.computeSHA(strings.Join(values, ";")) } -// hashSecretData computes a hash from Secret data. -// Keys are sorted to ensure deterministic output. func (h *Hasher) hashSecretData(data map[string][]byte) string { values := make([]string, 0, len(data)) for k, v := range data { - // Secret data is stored as raw bytes, not base64 encoded values = append(values, k+"="+string(v)) } @@ -71,7 +62,6 @@ func (h *Hasher) hashSecretData(data map[string][]byte) string { return h.computeSHA(strings.Join(values, ";")) } -// computeSHA generates a SHA1 hash from a string. func (h *Hasher) computeSHA(data string) string { hasher := sha1.New() _, _ = io.WriteString(hasher, data) @@ -79,7 +69,6 @@ func (h *Hasher) computeSHA(data string) string { } // EmptyHash returns an empty string to signal resource deletion. -// This triggers env var removal when using the env-vars strategy. func (h *Hasher) EmptyHash() string { return "" } diff --git a/internal/pkg/reload/matcher.go b/internal/pkg/reload/matcher.go index 51bd7d6c5..6f56d8ef2 100644 --- a/internal/pkg/reload/matcher.go +++ b/internal/pkg/reload/matcher.go @@ -19,13 +19,9 @@ const ( // MatchResult contains the result of checking if a workload should be reloaded. type MatchResult struct { - // ShouldReload indicates whether the workload should be reloaded. ShouldReload bool - // AutoReload indicates if this is an auto-reload (vs explicit annotation). 
- // This affects which container to target for env var injection. - AutoReload bool - // Reason provides a human-readable explanation of the decision. - Reason string + AutoReload bool + Reason string } // Matcher determines whether a workload should be reloaded based on annotations. @@ -40,32 +36,16 @@ func NewMatcher(cfg *config.Config) *Matcher { // MatchInput contains all the information needed to determine if a reload should occur. type MatchInput struct { - // ResourceName is the name of the ConfigMap or Secret that changed. - ResourceName string - // ResourceNamespace is the namespace of the ConfigMap or Secret. - ResourceNamespace string - // ResourceType is whether this is a ConfigMap or Secret. - ResourceType ResourceType - // ResourceAnnotations are the annotations on the ConfigMap or Secret. + ResourceName string + ResourceNamespace string + ResourceType ResourceType ResourceAnnotations map[string]string - // WorkloadAnnotations are the annotations on the workload (Deployment, etc.). WorkloadAnnotations map[string]string - // PodAnnotations are the annotations on the pod template. - PodAnnotations map[string]string + PodAnnotations map[string]string } // ShouldReload determines if a workload should be reloaded based on its annotations. -// -// The matching logic follows this precedence (BUG FIX: explicit annotations checked first): -// 1. If the resource has the ignore annotation, skip it -// 2. If the resource is in the exclude list for this workload, skip it -// 3. If explicit reload annotation matches the resource name, reload (not auto) -// 4. If search annotation is enabled and resource has match annotation, reload (auto) -// 5. If auto annotation is "true", reload (auto) -// 6. If typed auto annotation is "true", reload (auto) -// 7. 
If AutoReloadAll is enabled and no explicit "false" annotations, reload (auto) func (m *Matcher) ShouldReload(input MatchInput) MatchResult { - // Check resource-level ignore annotation if m.isResourceIgnored(input.ResourceAnnotations) { return MatchResult{ ShouldReload: false, @@ -73,10 +53,8 @@ func (m *Matcher) ShouldReload(input MatchInput) MatchResult { } } - // Determine which annotations to use (workload or pod template) annotations := m.selectAnnotations(input) - // Check if resource is excluded if m.isResourceExcluded(input.ResourceName, input.ResourceType, annotations) { return MatchResult{ ShouldReload: false, @@ -84,8 +62,6 @@ func (m *Matcher) ShouldReload(input MatchInput) MatchResult { } } - // Check explicit reload annotation (e.g., configmap.reloader.stakater.com/reload: "my-config") - // BUG FIX: Check this BEFORE auto annotations to ensure explicit references take precedence if m.matchesExplicitAnnotation(input.ResourceName, input.ResourceType, annotations) { return MatchResult{ ShouldReload: true, @@ -94,7 +70,6 @@ func (m *Matcher) ShouldReload(input MatchInput) MatchResult { } } - // Check search/match pattern if m.matchesSearchPattern(input.ResourceAnnotations, annotations) { return MatchResult{ ShouldReload: true, @@ -103,7 +78,6 @@ func (m *Matcher) ShouldReload(input MatchInput) MatchResult { } } - // Check auto annotations if m.matchesAutoAnnotation(input.ResourceType, annotations) { return MatchResult{ ShouldReload: true, @@ -112,7 +86,6 @@ func (m *Matcher) ShouldReload(input MatchInput) MatchResult { } } - // Check global auto-reload-all setting if m.matchesAutoReloadAll(input.ResourceType, annotations) { return MatchResult{ ShouldReload: true, @@ -127,7 +100,6 @@ func (m *Matcher) ShouldReload(input MatchInput) MatchResult { } } -// isResourceIgnored checks if the resource has the ignore annotation set to true. 
func (m *Matcher) isResourceIgnored(resourceAnnotations map[string]string) bool { if resourceAnnotations == nil { return false @@ -135,44 +107,34 @@ func (m *Matcher) isResourceIgnored(resourceAnnotations map[string]string) bool return resourceAnnotations[m.cfg.Annotations.Ignore] == "true" } -// selectAnnotations determines which set of annotations to use for matching. -// If workload annotations don't have relevant annotations, fall back to pod annotations. func (m *Matcher) selectAnnotations(input MatchInput) map[string]string { - // Check if any relevant annotation exists on workload annotations if m.hasRelevantAnnotations(input.WorkloadAnnotations, input.ResourceType) { return input.WorkloadAnnotations } - // Fall back to pod annotations if m.hasRelevantAnnotations(input.PodAnnotations, input.ResourceType) { return input.PodAnnotations } - // Default to workload annotations even if empty return input.WorkloadAnnotations } -// hasRelevantAnnotations checks if the annotations contain any reload-related annotation. func (m *Matcher) hasRelevantAnnotations(annotations map[string]string, resourceType ResourceType) bool { if annotations == nil { return false } - // Check for explicit annotation explicitAnn := m.getExplicitAnnotation(resourceType) if _, ok := annotations[explicitAnn]; ok { return true } - // Check for search annotation if _, ok := annotations[m.cfg.Annotations.Search]; ok { return true } - // Check for auto annotation if _, ok := annotations[m.cfg.Annotations.Auto]; ok { return true } - // Check for typed auto annotation typedAutoAnn := m.getTypedAutoAnnotation(resourceType) if _, ok := annotations[typedAutoAnn]; ok { return true @@ -181,7 +143,6 @@ func (m *Matcher) hasRelevantAnnotations(annotations map[string]string, resource return false } -// isResourceExcluded checks if the resource is in the exclude list. 
func (m *Matcher) isResourceExcluded(resourceName string, resourceType ResourceType, annotations map[string]string) bool { if annotations == nil { return false @@ -209,7 +170,6 @@ func (m *Matcher) isResourceExcluded(resourceName string, resourceType ResourceT return false } -// matchesExplicitAnnotation checks if the resource name matches the explicit reload annotation. func (m *Matcher) matchesExplicitAnnotation(resourceName string, resourceType ResourceType, annotations map[string]string) bool { if annotations == nil { return false @@ -221,16 +181,13 @@ func (m *Matcher) matchesExplicitAnnotation(resourceName string, resourceType Re return false } - // Support comma-separated list of resource names with regex matching for _, value := range strings.Split(annotationValue, ",") { value = strings.TrimSpace(value) if value == "" { continue } - // Support regex patterns re, err := regexp.Compile("^" + value + "$") if err != nil { - // If regex is invalid, fall back to exact match if value == resourceName { return true } @@ -244,7 +201,6 @@ func (m *Matcher) matchesExplicitAnnotation(resourceName string, resourceType Re return false } -// matchesSearchPattern checks if the search/match pattern is enabled. func (m *Matcher) matchesSearchPattern(resourceAnnotations, workloadAnnotations map[string]string) bool { if workloadAnnotations == nil || resourceAnnotations == nil { return false @@ -259,29 +215,24 @@ func (m *Matcher) matchesSearchPattern(resourceAnnotations, workloadAnnotations return ok && matchValue == "true" } -// matchesAutoAnnotation checks if auto reload is enabled via annotations. 
func (m *Matcher) matchesAutoAnnotation(resourceType ResourceType, annotations map[string]string) bool { if annotations == nil { return false } - // Check generic auto annotation if annotations[m.cfg.Annotations.Auto] == "true" { return true } - // Check typed auto annotation typedAutoAnn := m.getTypedAutoAnnotation(resourceType) return annotations[typedAutoAnn] == "true" } -// matchesAutoReloadAll checks if global auto-reload-all is enabled. func (m *Matcher) matchesAutoReloadAll(resourceType ResourceType, annotations map[string]string) bool { if !m.cfg.AutoReloadAll { return false } - // If auto annotation is explicitly set to false, don't auto-reload if annotations != nil { if annotations[m.cfg.Annotations.Auto] == "false" { return false @@ -295,7 +246,6 @@ func (m *Matcher) matchesAutoReloadAll(resourceType ResourceType, annotations ma return true } -// getExplicitAnnotation returns the explicit reload annotation for the resource type. func (m *Matcher) getExplicitAnnotation(resourceType ResourceType) string { switch resourceType { case ResourceTypeConfigMap: @@ -307,7 +257,6 @@ func (m *Matcher) getExplicitAnnotation(resourceType ResourceType) string { } } -// getTypedAutoAnnotation returns the typed auto annotation for the resource type. 
func (m *Matcher) getTypedAutoAnnotation(resourceType ResourceType) string { switch resourceType { case ResourceTypeConfigMap: diff --git a/internal/pkg/reload/matcher_test.go b/internal/pkg/reload/matcher_test.go index 5e4f8b8f7..8595d0abf 100644 --- a/internal/pkg/reload/matcher_test.go +++ b/internal/pkg/reload/matcher_test.go @@ -92,7 +92,7 @@ func TestMatcher_ShouldReload(t *testing.T) { ResourceType: ResourceTypeConfigMap, ResourceAnnotations: nil, WorkloadAnnotations: map[string]string{ - "reloader.stakater.com/auto": "true", + "reloader.stakater.com/auto": "true", "configmap.reloader.stakater.com/reload": "external-config", }, PodAnnotations: nil, @@ -277,7 +277,7 @@ func TestMatcher_ShouldReload(t *testing.T) { ResourceType: ResourceTypeSecret, ResourceAnnotations: nil, WorkloadAnnotations: map[string]string{ - "reloader.stakater.com/auto": "true", + "reloader.stakater.com/auto": "true", "secrets.exclude.reloader.stakater.com/reload": "my-secret", }, PodAnnotations: nil, @@ -403,7 +403,7 @@ func TestMatcher_BugFix_AutoDoesNotIgnoreExplicit(t *testing.T) { ResourceType: ResourceTypeConfigMap, ResourceAnnotations: nil, WorkloadAnnotations: map[string]string{ - "reloader.stakater.com/auto": "true", // Enables auto-reload + "reloader.stakater.com/auto": "true", // Enables auto-reload "configmap.reloader.stakater.com/reload": "external-config", // Explicit list }, PodAnnotations: nil, diff --git a/internal/pkg/reload/pause_test.go b/internal/pkg/reload/pause_test.go index 9c7992d0f..74e8162b2 100644 --- a/internal/pkg/reload/pause_test.go +++ b/internal/pkg/reload/pause_test.go @@ -65,10 +65,10 @@ func TestPauseHandler_GetPausePeriod(t *testing.T) { handler := NewPauseHandler(cfg) tests := []struct { - name string - workload workload.WorkloadAccessor - wantPeriod time.Duration - wantErr bool + name string + workload workload.WorkloadAccessor + wantPeriod time.Duration + wantErr bool }{ { name: "valid pause period", diff --git 
a/internal/pkg/reload/predicate_test.go b/internal/pkg/reload/predicate_test.go index 450590027..b2cce7017 100644 --- a/internal/pkg/reload/predicate_test.go +++ b/internal/pkg/reload/predicate_test.go @@ -128,10 +128,10 @@ func TestNamespaceFilterPredicate_Generic(t *testing.T) { func TestLabelSelectorPredicate_Create(t *testing.T) { tests := []struct { - name string - selector string - objectLabels map[string]string - wantAllow bool + name string + selector string + objectLabels map[string]string + wantAllow bool }{ { name: "match single label", @@ -355,38 +355,38 @@ func TestCombinedFiltering(t *testing.T) { labelPredicate := LabelSelectorPredicate(cfg) tests := []struct { - name string - namespace string - labels map[string]string - wantNSAllow bool + name string + namespace string + labels map[string]string + wantNSAllow bool wantLabelAllow bool }{ { - name: "allowed namespace and matching labels", - namespace: "default", - labels: map[string]string{"managed": "true"}, - wantNSAllow: true, + name: "allowed namespace and matching labels", + namespace: "default", + labels: map[string]string{"managed": "true"}, + wantNSAllow: true, wantLabelAllow: true, }, { - name: "allowed namespace but non-matching labels", - namespace: "default", - labels: map[string]string{"managed": "false"}, - wantNSAllow: true, + name: "allowed namespace but non-matching labels", + namespace: "default", + labels: map[string]string{"managed": "false"}, + wantNSAllow: true, wantLabelAllow: false, }, { - name: "ignored namespace with matching labels", - namespace: "kube-system", - labels: map[string]string{"managed": "true"}, - wantNSAllow: false, + name: "ignored namespace with matching labels", + namespace: "kube-system", + labels: map[string]string{"managed": "true"}, + wantNSAllow: false, wantLabelAllow: true, }, { - name: "ignored namespace and non-matching labels", - namespace: "kube-system", - labels: map[string]string{"managed": "false"}, - wantNSAllow: false, + name: "ignored 
namespace and non-matching labels", + namespace: "kube-system", + labels: map[string]string{"managed": "false"}, + wantNSAllow: false, wantLabelAllow: false, }, } diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go index d0f681d08..6d7825fe1 100644 --- a/internal/pkg/reload/service.go +++ b/internal/pkg/reload/service.go @@ -3,13 +3,11 @@ package reload import ( "context" "encoding/json" - "fmt" "time" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/workload" corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) // Service orchestrates the reload logic for ConfigMaps and Secrets. @@ -69,18 +67,15 @@ type ReloadDecision struct { } // ProcessConfigMap evaluates all workloads to determine which should be reloaded. -// This method does not modify any workloads - it only returns decisions. func (s *Service) ProcessConfigMap(change ConfigMapChange, workloads []workload.WorkloadAccessor) []ReloadDecision { if change.ConfigMap == nil { return nil } - // Check if we should process this event type if !s.shouldProcessEvent(change.EventType) { return nil } - // Compute hash hash := s.hasher.HashConfigMap(change.ConfigMap) if change.EventType == EventTypeDelete { hash = s.hasher.EmptyHash() @@ -97,18 +92,15 @@ func (s *Service) ProcessConfigMap(change ConfigMapChange, workloads []workload. } // ProcessSecret evaluates all workloads to determine which should be reloaded. -// This method does not modify any workloads - it only returns decisions. 
func (s *Service) ProcessSecret(change SecretChange, workloads []workload.WorkloadAccessor) []ReloadDecision { if change.Secret == nil { return nil } - // Check if we should process this event type if !s.shouldProcessEvent(change.EventType) { return nil } - // Compute hash hash := s.hasher.HashSecret(change.Secret) if change.EventType == EventTypeDelete { hash = s.hasher.EmptyHash() @@ -124,7 +116,6 @@ func (s *Service) ProcessSecret(change SecretChange, workloads []workload.Worklo ) } -// processResource processes a resource change against all workloads. func (s *Service) processResource( resourceName string, resourceNamespace string, @@ -136,17 +127,14 @@ func (s *Service) processResource( var decisions []ReloadDecision for _, wl := range workloads { - // Skip workloads in different namespaces if wl.GetNamespace() != resourceNamespace { continue } - // Check if workload should be ignored based on type if s.cfg.IsWorkloadIgnored(string(wl.Kind())) { continue } - // Check if workload uses this resource (via volumes or env) var usesResource bool switch resourceType { case ResourceTypeConfigMap: @@ -155,7 +143,6 @@ func (s *Service) processResource( usesResource = wl.UsesSecret(resourceName) } - // Build match input input := MatchInput{ ResourceName: resourceName, ResourceNamespace: resourceNamespace, @@ -165,11 +152,8 @@ func (s *Service) processResource( PodAnnotations: wl.GetPodTemplateAnnotations(), } - // Check if we should reload matchResult := s.matcher.ShouldReload(input) - // For auto-reload, the workload must actually use the resource - // For explicit annotation, the user explicitly requested it shouldReload := matchResult.ShouldReload if matchResult.AutoReload && !usesResource { shouldReload = false @@ -187,7 +171,6 @@ func (s *Service) processResource( return decisions } -// shouldProcessEvent checks if the event type should be processed. 
func (s *Service) shouldProcessEvent(eventType EventType) bool { switch eventType { case EventTypeCreate: @@ -202,8 +185,6 @@ func (s *Service) shouldProcessEvent(eventType EventType) bool { } // ApplyReload applies the reload strategy to a workload. -// This modifies the workload in-place but does not persist the changes. -// Returns true if changes were made, false otherwise. func (s *Service) ApplyReload( ctx context.Context, wl workload.WorkloadAccessor, @@ -213,7 +194,6 @@ func (s *Service) ApplyReload( hash string, autoReload bool, ) (bool, error) { - // Find the target container container := s.findTargetContainer(wl, resourceName, resourceType, autoReload) input := StrategyInput{ @@ -226,13 +206,11 @@ func (s *Service) ApplyReload( AutoReload: autoReload, } - // Apply the strategy-specific changes updated, err := s.strategy.Apply(input) if err != nil { return false, err } - // Always set the attribution annotation regardless of strategy if updated { s.setAttributionAnnotation(wl, resourceName, resourceType, namespace, hash, container) } @@ -240,8 +218,6 @@ func (s *Service) ApplyReload( return updated, nil } -// setAttributionAnnotation sets the last-reloaded-from annotation on the pod template. -// This is always set regardless of the reload strategy for audit purposes. func (s *Service) setAttributionAnnotation( wl workload.WorkloadAccessor, resourceName string, @@ -266,16 +242,12 @@ func (s *Service) setAttributionAnnotation( sourceJSON, err := json.Marshal(source) if err != nil { - // Non-fatal: skip annotation if marshaling fails return } wl.SetPodTemplateAnnotation(s.cfg.Annotations.LastReloadedFrom, string(sourceJSON)) } -// findTargetContainer finds the container to target for the reload. -// For auto-reload, it finds the container that uses the resource. -// For explicit annotation, it returns the first container. 
func (s *Service) findTargetContainer( wl workload.WorkloadAccessor, resourceName string, @@ -287,7 +259,6 @@ func (s *Service) findTargetContainer( return nil } - // For explicit annotation, return the first container if !autoReload { return &containers[0] } @@ -295,40 +266,31 @@ func (s *Service) findTargetContainer( volumes := wl.GetVolumes() initContainers := wl.GetInitContainers() - // For auto-reload, find the container that uses the resource - // Check volumes first volumeName := s.findVolumeUsingResource(volumes, resourceName, resourceType) if volumeName != "" { container := s.findContainerWithVolumeMount(containers, volumeName) if container != nil { return container } - // Check init containers container = s.findContainerWithVolumeMount(initContainers, volumeName) if container != nil { - // Return the first regular container for init container refs return &containers[0] } } - // Check env references container := s.findContainerWithEnvRef(containers, resourceName, resourceType) if container != nil { return container } - // Check init container env references container = s.findContainerWithEnvRef(initContainers, resourceName, resourceType) if container != nil { - // Return the first regular container for init container refs return &containers[0] } - // Default to first container return &containers[0] } -// findVolumeUsingResource finds a volume that uses the given resource. func (s *Service) findVolumeUsingResource(volumes []corev1.Volume, resourceName string, resourceType ResourceType) string { for _, vol := range volumes { switch resourceType { @@ -359,7 +321,6 @@ func (s *Service) findVolumeUsingResource(volumes []corev1.Volume, resourceName return "" } -// findContainerWithVolumeMount finds a container that mounts the given volume. 
func (s *Service) findContainerWithVolumeMount(containers []corev1.Container, volumeName string) *corev1.Container { for i := range containers { for _, mount := range containers[i].VolumeMounts { @@ -371,10 +332,8 @@ func (s *Service) findContainerWithVolumeMount(containers []corev1.Container, vo return nil } -// findContainerWithEnvRef finds a container that references the resource via env. func (s *Service) findContainerWithEnvRef(containers []corev1.Container, resourceName string, resourceType ResourceType) *corev1.Container { for i := range containers { - // Check env vars for _, env := range containers[i].Env { if env.ValueFrom == nil { continue @@ -391,7 +350,6 @@ func (s *Service) findContainerWithEnvRef(containers []corev1.Container, resourc } } - // Check envFrom for _, envFrom := range containers[i].EnvFrom { switch resourceType { case ResourceTypeConfigMap: @@ -412,36 +370,3 @@ func (s *Service) findContainerWithEnvRef(containers []corev1.Container, resourc func (s *Service) Hasher() *Hasher { return s.hasher } - -// Matcher returns the matcher used by this service. -func (s *Service) Matcher() *Matcher { - return s.matcher -} - -// Strategy returns the strategy used by this service. -func (s *Service) Strategy() Strategy { - return s.strategy -} - -// ListWorkloads lists all workloads in the given namespace. -// If namespace is empty, lists workloads in all namespaces. -func ListWorkloads(ctx context.Context, c client.Client, namespace string, registry *workload.Registry) ([]workload.WorkloadAccessor, error) { - var workloads []workload.WorkloadAccessor - - for _, kind := range registry.SupportedKinds() { - list, err := listWorkloadsByKind(ctx, c, namespace, kind) - if err != nil { - return nil, fmt.Errorf("listing %s: %w", kind, err) - } - workloads = append(workloads, list...) - } - - return workloads, nil -} - -// listWorkloadsByKind lists workloads of a specific kind. 
-func listWorkloadsByKind(ctx context.Context, c client.Client, namespace string, kind workload.Kind) ([]workload.WorkloadAccessor, error) { - // This will be implemented by the controller using the appropriate list functions - // For now, return empty slice as the controller will handle this - return nil, nil -} diff --git a/internal/pkg/reload/strategy.go b/internal/pkg/reload/strategy.go index 9a147fe2e..8912e1cb7 100644 --- a/internal/pkg/reload/strategy.go +++ b/internal/pkg/reload/strategy.go @@ -22,35 +22,22 @@ const ( // Strategy defines how workload restarts are triggered. type Strategy interface { - // Apply applies the reload strategy to the pod spec. - // Returns true if changes were made, false otherwise. Apply(input StrategyInput) (bool, error) - - // Name returns the strategy name for logging purposes. Name() string } // StrategyInput contains the information needed to apply a reload strategy. type StrategyInput struct { - // ResourceName is the name of the ConfigMap or Secret that changed. - ResourceName string - // ResourceType is the type of resource (configmap or secret). - ResourceType ResourceType - // Namespace is the namespace of the resource. - Namespace string - // Hash is the SHA hash of the resource content. - Hash string - // Container is the container to target for env var injection. - // If nil, the first container is used. - Container *corev1.Container - // PodAnnotations is the pod template annotations map (for annotation strategy). + ResourceName string + ResourceType ResourceType + Namespace string + Hash string + Container *corev1.Container PodAnnotations map[string]string - // AutoReload indicates if this is an auto-reload (affects container selection). - AutoReload bool + AutoReload bool } // ReloadSource contains metadata about what triggered a reload. -// This is stored in the annotation when using annotation strategy. 
type ReloadSource struct { Kind string `json:"kind"` Name string `json:"name"` @@ -61,7 +48,6 @@ type ReloadSource struct { } // EnvVarStrategy triggers reloads by adding/updating environment variables. -// This is the default strategy and is GitOps-friendly. type EnvVarStrategy struct{} // NewEnvVarStrategy creates a new EnvVarStrategy. @@ -69,13 +55,11 @@ func NewEnvVarStrategy() *EnvVarStrategy { return &EnvVarStrategy{} } -// Name returns the strategy name. func (s *EnvVarStrategy) Name() string { return string(config.ReloadStrategyEnvVars) } // Apply adds, updates, or removes an environment variable to trigger a restart. -// When hash is empty (resource deleted), the env var is removed. func (s *EnvVarStrategy) Apply(input StrategyInput) (bool, error) { if input.Container == nil { return false, fmt.Errorf("container is required for env-var strategy") @@ -83,25 +67,20 @@ func (s *EnvVarStrategy) Apply(input StrategyInput) (bool, error) { envVarName := s.envVarName(input.ResourceName, input.ResourceType) - // Handle deletion: remove the env var when hash is empty if input.Hash == "" { return s.removeEnvVar(input.Container, envVarName), nil } - // Check if env var already exists for i := range input.Container.Env { if input.Container.Env[i].Name == envVarName { if input.Container.Env[i].Value == input.Hash { - // Already up to date return false, nil } - // Update existing input.Container.Env[i].Value = input.Hash return true, nil } } - // Add new env var input.Container.Env = append(input.Container.Env, corev1.EnvVar{ Name: envVarName, Value: input.Hash, @@ -110,12 +89,9 @@ func (s *EnvVarStrategy) Apply(input StrategyInput) (bool, error) { return true, nil } -// removeEnvVar removes an environment variable from a container. -// Returns true if a variable was removed. 
func (s *EnvVarStrategy) removeEnvVar(container *corev1.Container, name string) bool { for i := range container.Env { if container.Env[i].Name == name { - // Remove by replacing with last element and truncating container.Env[i] = container.Env[len(container.Env)-1] container.Env = container.Env[:len(container.Env)-1] return true @@ -124,7 +100,6 @@ func (s *EnvVarStrategy) removeEnvVar(container *corev1.Container, name string) return false } -// envVarName generates the environment variable name for a resource. func (s *EnvVarStrategy) envVarName(resourceName string, resourceType ResourceType) string { var postfix string switch resourceType { @@ -136,8 +111,6 @@ func (s *EnvVarStrategy) envVarName(resourceName string, resourceType ResourceTy return EnvVarPrefix + convertToEnvVarName(resourceName) + "_" + postfix } -// convertToEnvVarName converts a string to a valid environment variable name. -// Invalid characters are replaced with underscores, and the result is uppercased. func convertToEnvVarName(text string) string { var buffer bytes.Buffer upper := strings.ToUpper(text) @@ -169,7 +142,6 @@ func NewAnnotationStrategy(cfg *config.Config) *AnnotationStrategy { return &AnnotationStrategy{cfg: cfg} } -// Name returns the strategy name. 
func (s *AnnotationStrategy) Name() string { return string(config.ReloadStrategyAnnotations) } @@ -185,7 +157,6 @@ func (s *AnnotationStrategy) Apply(input StrategyInput) (bool, error) { containerName = input.Container.Name } - // Create reload source metadata source := ReloadSource{ Kind: string(input.ResourceType), Name: input.ResourceName, @@ -204,7 +175,6 @@ func (s *AnnotationStrategy) Apply(input StrategyInput) (bool, error) { existingValue := input.PodAnnotations[annotationKey] if existingValue == string(sourceJSON) { - // Already up to date return false, nil } diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go deleted file mode 100644 index 1ad43e18d..000000000 --- a/internal/pkg/testutil/kube.go +++ /dev/null @@ -1,1231 +0,0 @@ -package testutil - -import ( - "context" - "encoding/json" - "fmt" - "math/rand" - "sort" - "strconv" - "strings" - "time" - - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" - openshiftv1 "github.com/openshift/api/apps/v1" - appsclient "github.com/openshift/client-go/apps/clientset/versioned" - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/crypto" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - core_v1 "k8s.io/client-go/kubernetes/typed/core/v1" -) - -var ( - letters = []rune("abcdefghijklmnopqrstuvwxyz") - // ConfigmapResourceType is a resource type which 
controller watches for changes - ConfigmapResourceType = "configMaps" - // SecretResourceType is a resource type which controller watches for changes - SecretResourceType = "secrets" -) - -var ( - Clients = kube.GetClients() - Pod = "test-reloader-" + RandSeq(5) - Namespace = "test-reloader-" + RandSeq(5) - ConfigmapNamePrefix = "testconfigmap-reloader" - SecretNamePrefix = "testsecret-reloader" - Data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - NewData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - UpdatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" - Collectors = metrics.NewCollectors() - SleepDuration = 3 * time.Second -) - -// CreateNamespace creates namespace for testing -func CreateNamespace(namespace string, client kubernetes.Interface) { - _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) - if err != nil { - logrus.Fatalf("Failed to create namespace for testing %v", err) - } else { - logrus.Infof("Creating namespace for testing = %s", namespace) - } -} - -// DeleteNamespace deletes namespace for testing -func DeleteNamespace(namespace string, client kubernetes.Interface) { - err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}) - if err != nil { - logrus.Fatalf("Failed to delete namespace that was created for testing %v", err) - } else { - logrus.Infof("Deleting namespace for testing = %s", namespace) - } -} - -func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) metav1.ObjectMeta { - return metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, extraAnnotations), - } -} - -func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload 
bool, extraAnnotations map[string]string) map[string]string { - annotations := make(map[string]string) - if autoReload { - annotations[options.ReloaderAutoAnnotation] = "true" - } - if secretAutoReload { - annotations[options.SecretReloaderAutoAnnotation] = "true" - } - if configmapAutoReload { - annotations[options.ConfigmapReloaderAutoAnnotation] = "true" - } - - if len(annotations) == 0 { - annotations = map[string]string{ - options.ConfigmapUpdateOnChangeAnnotation: name, - options.SecretUpdateOnChangeAnnotation: name} - } - for k, v := range extraAnnotations { - annotations[k] = v - } - return annotations -} - -func getEnvVarSources(name string) []v1.EnvFromSource { - return []v1.EnvFromSource{ - { - ConfigMapRef: &v1.ConfigMapEnvSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - { - SecretRef: &v1.SecretEnvSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - } -} - -func getVolumes(name string) []v1.Volume { - return []v1.Volume{ - { - Name: "projectedconfigmap", - VolumeSource: v1.VolumeSource{ - Projected: &v1.ProjectedVolumeSource{ - Sources: []v1.VolumeProjection{ - { - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - }, - }, - }, - }, - { - Name: "projectedsecret", - VolumeSource: v1.VolumeSource{ - Projected: &v1.ProjectedVolumeSource{ - Sources: []v1.VolumeProjection{ - { - Secret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - }, - }, - }, - }, - { - Name: "configmap", - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - }, - { - Name: "secret", - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: name, - }, - }, - }, - } -} - -func getVolumeMounts() []v1.VolumeMount { - return []v1.VolumeMount{ - { - MountPath: "etc/config", - Name: 
"configmap", - }, - { - MountPath: "etc/sec", - Name: "secret", - }, - { - MountPath: "etc/projectedconfig", - Name: "projectedconfigmap", - }, - { - MountPath: "etc/projectedsec", - Name: "projectedsecret", - }, - } -} - -func getPodTemplateSpecWithEnvVars(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - { - Name: "CONFIGMAP_" + util.ConvertToEnvVarName(name), - ValueFrom: &v1.EnvVarSource{ - ConfigMapKeyRef: &v1.ConfigMapKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - Key: "test.url", - }, - }, - }, - { - Name: "SECRET_" + util.ConvertToEnvVarName(name), - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - Key: "test.url", - }, - }, - }, - }, - }, - }, - }, - } -} - -func getPodTemplateSpecWithEnvVarSources(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - EnvFrom: getEnvVarSources(name), - }, - }, - }, - } -} - -func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - }, - VolumeMounts: getVolumeMounts(), - }, - }, - Volumes: getVolumes(name), - }, - } -} - -func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: 
metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - { - Image: "busybox", - Name: "busyBox", - VolumeMounts: getVolumeMounts(), - }, - }, - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - }, - }, - }, - Volumes: getVolumes(name), - }, - } -} - -func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - { - Image: "busybox", - Name: "busyBox", - EnvFrom: getEnvVarSources(name), - }, - }, - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - }, - }, - }, - }, - } -} - -// GetDeployment provides deployment for testing -func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(deploymentName), - }, - } -} - -// GetDeploymentConfig provides deployment for testing -func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig { - replicaset := int32(1) - podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName) - return &openshiftv1.DeploymentConfig{ - ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}), - Spec: 
openshiftv1.DeploymentConfigSpec{ - Replicas: replicaset, - Strategy: openshiftv1.DeploymentStrategy{ - Type: openshiftv1.DeploymentStrategyTypeRolling, - }, - Template: &podTemplateSpecWithVolume, - }, - } -} - -// GetDeploymentWithInitContainer provides deployment with init container and volumeMounts -func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithInitContainer(deploymentName), - }, - } -} - -// GetDeploymentWithInitContainerAndEnv provides deployment with init container and EnvSource -func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithInitContainerAndEnv(deploymentName), - }, - } -} - -func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: 
appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithEnvVars(deploymentName), - }, - } -} - -func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig { - replicaset := int32(1) - podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName) - return &openshiftv1.DeploymentConfig{ - ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}), - Spec: openshiftv1.DeploymentConfigSpec{ - Replicas: replicaset, - Strategy: openshiftv1.DeploymentStrategy{ - Type: openshiftv1.DeploymentStrategyTypeRolling, - }, - Template: &podTemplateSpecWithEnvVars, - }, - } -} - -func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithEnvVarSources(deploymentName), - }, - } -} - -func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment { - replicaset := int32(1) - deployment := &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithEnvVarSources(deploymentName), - }, - } - if !both { - deployment.Annotations = nil - } - 
deployment.Spec.Template.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{}) - return deployment -} - -func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment { - replicaset := int32(1) - var objectMeta metav1.ObjectMeta - switch resourceType { - case SecretResourceType: - objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{}) - case ConfigmapResourceType: - objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{}) - } - - return &appsv1.Deployment{ - ObjectMeta: objectMeta, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(deploymentName), - }, - } -} - -func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment { - replicaset := int32(1) - - annotation := map[string]string{} - - switch resourceType { - case SecretResourceType: - annotation[options.SecretExcludeReloaderAnnotation] = deploymentName - case ConfigmapResourceType: - annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName - } - - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - Annotations: annotation, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(deploymentName), - }, - } -} - -// GetDaemonSet provides daemonset for testing -func GetDaemonSet(namespace 
string, daemonsetName string) *appsv1.DaemonSet { - return &appsv1.DaemonSet{ - ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false, map[string]string{}), - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ - Type: appsv1.RollingUpdateDaemonSetStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(daemonsetName), - }, - } -} - -func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet { - return &appsv1.DaemonSet{ - ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, map[string]string{}), - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ - Type: appsv1.RollingUpdateDaemonSetStrategyType, - }, - Template: getPodTemplateSpecWithEnvVars(daemonSetName), - }, - } -} - -// GetStatefulSet provides statefulset for testing -func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet { - return &appsv1.StatefulSet{ - ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, map[string]string{}), - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ - Type: appsv1.RollingUpdateStatefulSetStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(statefulsetName), - }, - } -} - -// GetStatefulSet provides statefulset for testing -func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet { - return &appsv1.StatefulSet{ - ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, map[string]string{}), - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - UpdateStrategy: 
appsv1.StatefulSetUpdateStrategy{ - Type: appsv1.RollingUpdateStatefulSetStrategyType, - }, - Template: getPodTemplateSpecWithEnvVars(statefulsetName), - }, - } -} - -// GetConfigmap provides configmap for testing -func GetConfigmap(namespace string, configmapName string, testData string) *v1.ConfigMap { - return &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configmapName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - }, - Data: map[string]string{"test.url": testData}, - } -} - -// GetConfigmapWithUpdatedLabel provides configmap for testing -func GetConfigmapWithUpdatedLabel(namespace string, configmapName string, testLabel string, testData string) *v1.ConfigMap { - return &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configmapName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": testLabel}, - }, - Data: map[string]string{"test.url": testData}, - } -} - -// GetSecret provides secret for testing -func GetSecret(namespace string, secretName string, data string) *v1.Secret { - return &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - }, - Data: map[string][]byte{"test.url": []byte(data)}, - } -} - -func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob { - return &batchv1.CronJob{ - ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}), - Spec: batchv1.CronJobSpec{ - Schedule: "*/5 * * * *", // Run every 5 minutes - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Template: getPodTemplateSpecWithVolumes(cronJobName), - }, - }, - }, - } -} - -func GetJob(namespace string, jobName string) *batchv1.Job { - return &batchv1.Job{ - ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, map[string]string{}), - Spec: batchv1.JobSpec{ - 
Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Template: getPodTemplateSpecWithVolumes(jobName), - }, - } -} - -func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob { - return &batchv1.CronJob{ - ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, map[string]string{}), - Spec: batchv1.CronJobSpec{ - Schedule: "*/5 * * * *", // Run every 5 minutes - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Template: getPodTemplateSpecWithEnvVars(cronJobName), - }, - }, - }, - } -} - -func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job { - return &batchv1.Job{ - ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, map[string]string{}), - Spec: batchv1.JobSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Template: getPodTemplateSpecWithEnvVars(jobName), - }, - } -} - -// GetSecretWithUpdatedLabel provides secret for testing -func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret { - return &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": label}, - }, - Data: map[string][]byte{"test.url": []byte(data)}, - } -} - -// GetResourceSHAFromEnvVar returns the SHA value of given environment variable -func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string { - for i := range containers { - envs := containers[i].Env - for j := range envs { - if envs[j].Name == envVar { - return envs[j].Value - } - } - } - return "" -} - -// GetResourceSHAFromAnnotation returns the SHA value of given environment variable -func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string { - lastReloadedResourceName := fmt.Sprintf("%s/%s", - 
constants.ReloaderAnnotationPrefix, - constants.LastReloadedFromAnnotation, - ) - - annotationJson, ok := podAnnotations[lastReloadedResourceName] - if !ok { - return "" - } - - var last common.ReloadSource - bytes := []byte(annotationJson) - err := json.Unmarshal(bytes, &last) - if err != nil { - return "" - } - - return last.Hash -} - -// ConvertResourceToSHA generates SHA from secret or configmap data -func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string { - values := []string{} - switch resourceType { - case SecretResourceType: - secret := GetSecret(namespace, resourceName, data) - for k, v := range secret.Data { - values = append(values, k+"="+string(v[:])) - } - case ConfigmapResourceType: - configmap := GetConfigmap(namespace, resourceName, data) - for k, v := range configmap.Data { - values = append(values, k+"="+v) - } - } - sort.Strings(values) - return crypto.GenerateSHA(strings.Join(values, ";")) -} - -// CreateConfigMap creates a configmap in given namespace and returns the ConfigMapInterface -func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) { - logrus.Infof("Creating configmap") - configmapClient := client.CoreV1().ConfigMaps(namespace) - _, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return configmapClient, err -} - -// CreateSecret creates a secret in given namespace and returns the SecretInterface -func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) { - logrus.Infof("Creating secret") - secretClient := client.CoreV1().Secrets(namespace) - _, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return secretClient, err -} - -// CreateDeployment 
creates a deployment in given namespace and returns the Deployment -func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - var deploymentObj *appsv1.Deployment - if volumeMount { - deploymentObj = GetDeployment(namespace, deploymentName) - } else { - deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName) - } - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeployment creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithAnnotations(client kubernetes.Interface, deploymentName string, namespace string, additionalAnnotations map[string]string, volumeMount bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - var deploymentObj *appsv1.Deployment - if volumeMount { - deploymentObj = GetDeployment(namespace, deploymentName) - } else { - deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName) - } - - for annotationKey, annotationValue := range additionalAnnotations { - deploymentObj.Annotations[annotationKey] = annotationValue - } - - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig -func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) { - logrus.Infof("Creating DeploymentConfig") - deploymentConfigsClient := client.AppsV1().DeploymentConfigs(namespace) - var deploymentConfigObj *openshiftv1.DeploymentConfig - if 
volumeMount { - deploymentConfigObj = GetDeploymentConfig(namespace, deploymentName) - } else { - deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName) - } - deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{}) - time.Sleep(5 * time.Second) - return deploymentConfig, err -} - -// CreateDeploymentWithInitContainer creates a deployment in given namespace with init container and returns the Deployment -func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - var deploymentObj *appsv1.Deployment - if volumeMount { - deploymentObj = GetDeploymentWithInitContainer(namespace, deploymentName) - } else { - deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName) - } - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithEnvVarSource creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err - -} - -// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) { - 
logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithEnvVarSourceAndAnnotations returns a deployment in given -// namespace with given annotations. -func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName) - deploymentObj.Annotations = annotations - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithTypedAutoAnnotation creates a deployment in given namespace and returns the Deployment with typed auto annotation -func CreateDeploymentWithTypedAutoAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithTypedAutoAnnotation(namespace, deploymentName, resourceType) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithExcludeAnnotation creates a deployment in given namespace and returns the Deployment with typed auto annotation -func CreateDeploymentWithExcludeAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) { - 
logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithExcludeAnnotation(namespace, deploymentName, resourceType) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - return deployment, err -} - -// CreateDaemonSet creates a deployment in given namespace and returns the DaemonSet -func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*appsv1.DaemonSet, error) { - logrus.Infof("Creating DaemonSet") - daemonsetClient := client.AppsV1().DaemonSets(namespace) - var daemonsetObj *appsv1.DaemonSet - if volumeMount { - daemonsetObj = GetDaemonSet(namespace, daemonsetName) - } else { - daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName) - } - daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return daemonset, err -} - -// CreateStatefulSet creates a deployment in given namespace and returns the StatefulSet -func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string, volumeMount bool) (*appsv1.StatefulSet, error) { - logrus.Infof("Creating StatefulSet") - statefulsetClient := client.AppsV1().StatefulSets(namespace) - var statefulsetObj *appsv1.StatefulSet - if volumeMount { - statefulsetObj = GetStatefulSet(namespace, statefulsetName) - } else { - statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName) - } - statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return statefulset, err -} - -// CreateCronJob creates a cronjob in given namespace and returns the CronJob -func CreateCronJob(client kubernetes.Interface, cronJobName string, namespace string, volumeMount bool) (*batchv1.CronJob, error) { - logrus.Infof("Creating CronJob") - cronJobClient := 
client.BatchV1().CronJobs(namespace) - var cronJobObj *batchv1.CronJob - if volumeMount { - cronJobObj = GetCronJob(namespace, cronJobName) - } else { - cronJobObj = GetCronJobWithEnvVar(namespace, cronJobName) - } - cronJob, err := cronJobClient.Create(context.TODO(), cronJobObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return cronJob, err -} - -// CreateJob creates a job in given namespace and returns the Job -func CreateJob(client kubernetes.Interface, jobName string, namespace string, volumeMount bool) (*batchv1.Job, error) { - logrus.Infof("Creating Job") - jobClient := client.BatchV1().Jobs(namespace) - var jobObj *batchv1.Job - if volumeMount { - jobObj = GetJob(namespace, jobName) - } else { - jobObj = GetJobWithEnvVar(namespace, jobName) - } - job, err := jobClient.Create(context.TODO(), jobObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return job, err -} - -// DeleteDeployment creates a deployment in given namespace and returns the error if any -func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error { - logrus.Infof("Deleting Deployment") - deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return deploymentError -} - -// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any -func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error { - logrus.Infof("Deleting DeploymentConfig") - deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return deploymentConfigError -} - -// DeleteDaemonSet creates a daemonset in given namespace and returns the error if any -func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error { - logrus.Infof("Deleting 
DaemonSet %s", daemonsetName) - daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return daemonsetError -} - -// DeleteStatefulSet creates a statefulset in given namespace and returns the error if any -func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error { - logrus.Infof("Deleting StatefulSet %s", statefulsetName) - statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return statefulsetError -} - -// DeleteCronJob deletes a cronJob in given namespace and returns the error if any -func DeleteCronJob(client kubernetes.Interface, namespace string, cronJobName string) error { - logrus.Infof("Deleting CronJob %s", cronJobName) - cronJobError := client.BatchV1().CronJobs(namespace).Delete(context.TODO(), cronJobName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return cronJobError -} - -// Deleteob deletes a job in given namespace and returns the error if any -func DeleteJob(client kubernetes.Interface, namespace string, jobName string) error { - logrus.Infof("Deleting Job %s", jobName) - jobError := client.BatchV1().Jobs(namespace).Delete(context.TODO(), jobName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return jobError -} - -// UpdateConfigMap updates a configmap in given namespace and returns the error if any -func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace string, configmapName string, label string, data string) error { - logrus.Infof("Updating configmap %q.\n", configmapName) - var configmap *v1.ConfigMap - if label != "" { - configmap = GetConfigmapWithUpdatedLabel(namespace, configmapName, label, data) - } else { - configmap = GetConfigmap(namespace, configmapName, data) - } - _, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{}) - 
time.Sleep(3 * time.Second) - return updateErr -} - -// UpdateSecret updates a secret in given namespace and returns the error if any -func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secretName string, label string, data string) error { - logrus.Infof("Updating secret %q.\n", secretName) - var secret *v1.Secret - if label != "" { - secret = GetSecretWithUpdatedLabel(namespace, secretName, label, data) - } else { - secret = GetSecret(namespace, secretName, data) - } - _, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{}) - time.Sleep(3 * time.Second) - return updateErr -} - -// DeleteConfigMap deletes a configmap in given namespace and returns the error if any -func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error { - logrus.Infof("Deleting configmap %q.\n", configmapName) - err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return err -} - -// DeleteSecret deletes a secret in given namespace and returns the error if any -func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error { - logrus.Infof("Deleting secret %q.\n", secretName) - err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return err -} - -// RandSeq generates a random sequence -func RandSeq(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] - } - return string(b) -} - -// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not -func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - for _, i := range items { - containers := upgradeFuncs.ContainersFunc(i) - accessor, err := 
meta.Accessor(i) - if err != nil { - return false - } - annotations := accessor.GetAnnotations() - // match statefulsets with the correct annotation - annotationValue := annotations[config.Annotation] - searchAnnotationValue := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation] - reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue) - matches := false - if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled { - matches = true - } else if annotationValue != "" { - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.Trim(value, " ") - if value == config.ResourceName { - matches = true - break - } - } - } else if searchAnnotationValue == "true" { - if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" { - matches = true - } - } - - if matches { - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - updated := GetResourceSHAFromEnvVar(containers, envName) - if updated == config.SHAValue { - return true - } - } - } - return false -} - -// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened or not and all Envvars SKAKATER_name_CONFIGMAP/SECRET are removed -func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - for _, i := range items { - containers := upgradeFuncs.ContainersFunc(i) - accessor, err := meta.Accessor(i) - if err != nil { - return false - } - - annotations := accessor.GetAnnotations() - // match statefulsets with the correct annotation - - annotationValue := annotations[config.Annotation] - 
searchAnnotationValue := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation] - reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue) - - matches := false - if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled { - matches = true - } else if annotationValue != "" { - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.Trim(value, " ") - if value == config.ResourceName { - matches = true - break - } - } - } else if searchAnnotationValue == "true" { - if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" { - matches = true - } - } - - if matches { - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - value := GetResourceSHAFromEnvVar(containers, envName) - if value == "" { - return true - } - } - } - return false -} - -// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not -func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - for _, i := range items { - podAnnotations := upgradeFuncs.PodAnnotationsFunc(i) - accessor, err := meta.Accessor(i) - if err != nil { - return false - } - annotations := accessor.GetAnnotations() - // match statefulsets with the correct annotation - annotationValue := annotations[config.Annotation] - searchAnnotationValue := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation] - reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue) - 
typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue) - matches := false - if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll { - matches = true - } else if annotationValue != "" { - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.Trim(value, " ") - if value == config.ResourceName { - matches = true - break - } - } - } else if searchAnnotationValue == "true" { - if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" { - matches = true - } - } - - if matches { - updated := GetResourceSHAFromAnnotation(podAnnotations) - if updated == config.SHAValue { - return true - } - } - } - return false -} - -func GetSHAfromEmptyData() string { - return crypto.GenerateSHA("") -} - -// GetRollout provides rollout for testing -func GetRollout(namespace string, rolloutName string, annotations map[string]string) *argorolloutv1alpha1.Rollout { - replicaset := int32(1) - return &argorolloutv1alpha1.Rollout{ - ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, annotations), - Spec: argorolloutv1alpha1.RolloutSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Template: getPodTemplateSpecWithVolumes(rolloutName), - }, - } -} - -// CreateRollout creates a rolout in given namespace and returns the Rollout -func CreateRollout(client argorollout.Interface, rolloutName string, namespace string, annotations map[string]string) (*argorolloutv1alpha1.Rollout, error) { - logrus.Infof("Creating Rollout") - rolloutClient := client.ArgoprojV1alpha1().Rollouts(namespace) - rolloutObj := GetRollout(namespace, rolloutName, annotations) - rollout, err := rolloutClient.Create(context.TODO(), rolloutObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return rollout, err -} diff --git 
a/internal/pkg/util/interface.go b/internal/pkg/util/interface.go deleted file mode 100644 index ff261ab00..000000000 --- a/internal/pkg/util/interface.go +++ /dev/null @@ -1,50 +0,0 @@ -package util - -import ( - "reflect" - "strconv" - - "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// InterfaceSlice converts an interface to an interface array -func InterfaceSlice(slice interface{}) []interface{} { - s := reflect.ValueOf(slice) - if s.Kind() != reflect.Slice { - logrus.Errorf("InterfaceSlice() given a non-slice type") - } - - ret := make([]interface{}, s.Len()) - - for i := 0; i < s.Len(); i++ { - ret[i] = s.Index(i).Interface() - } - - return ret -} - -type ObjectMeta struct { - metav1.ObjectMeta -} - -func ToObjectMeta(kubernetesObject interface{}) ObjectMeta { - objectValue := reflect.ValueOf(kubernetesObject) - fieldName := reflect.TypeOf((*metav1.ObjectMeta)(nil)).Elem().Name() - field := objectValue.FieldByName(fieldName).Interface().(metav1.ObjectMeta) - - return ObjectMeta{ - ObjectMeta: field, - } -} - -// ParseBool returns result in bool format after parsing -func ParseBool(value interface{}) bool { - if reflect.Bool == reflect.TypeOf(value).Kind() { - return value.(bool) - } else if reflect.String == reflect.TypeOf(value).Kind() { - result, _ := strconv.ParseBool(value.(string)) - return result - } - return false -} diff --git a/internal/pkg/util/util.go b/internal/pkg/util/util.go deleted file mode 100644 index ec86d1c9c..000000000 --- a/internal/pkg/util/util.go +++ /dev/null @@ -1,128 +0,0 @@ -package util - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "sort" - "strings" - - "github.com/spf13/cobra" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/crypto" - "github.com/stakater/Reloader/internal/pkg/options" - v1 "k8s.io/api/core/v1" -) - -// ConvertToEnvVarName converts the given text into a usable env var -// removing any special chars with '_' 
and transforming text to upper case -func ConvertToEnvVarName(text string) string { - var buffer bytes.Buffer - upper := strings.ToUpper(text) - lastCharValid := false - for i := 0; i < len(upper); i++ { - ch := upper[i] - if (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') { - buffer.WriteString(string(ch)) - lastCharValid = true - } else { - if lastCharValid { - buffer.WriteString("_") - } - lastCharValid = false - } - } - return buffer.String() -} - -func GetSHAfromConfigmap(configmap *v1.ConfigMap) string { - values := []string{} - for k, v := range configmap.Data { - values = append(values, k+"="+v) - } - for k, v := range configmap.BinaryData { - values = append(values, k+"="+base64.StdEncoding.EncodeToString(v)) - } - sort.Strings(values) - return crypto.GenerateSHA(strings.Join(values, ";")) -} - -func GetSHAfromSecret(data map[string][]byte) string { - values := []string{} - for k, v := range data { - values = append(values, k+"="+string(v[:])) - } - sort.Strings(values) - return crypto.GenerateSHA(strings.Join(values, ";")) -} - -type List []string - -func (l *List) Contains(s string) bool { - for _, v := range *l { - if v == s { - return true - } - } - return false -} - -func ConfigureReloaderFlags(cmd *cobra.Command) { - cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources") - cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name") - cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name") - cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps") - 
cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps") - cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets") - cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation") - cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search") - cmd.PersistentFlags().StringVar(&options.PauseDeploymentAnnotation, "pause-deployment-annotation", "deployment.reloader.stakater.com/pause-period", "annotation to define the time period to pause a deployment after a configmap/secret change has been detected") - cmd.PersistentFlags().StringVar(&options.PauseDeploymentTimeAnnotation, "pause-deployment-time-annotation", "deployment.reloader.stakater.com/paused-at", "annotation to indicate when a deployment was paused by Reloader") - cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)") - cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)") - cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload") - cmd.PersistentFlags().StringSliceVar(&options.ResourcesToIgnore, "resources-to-ignore", options.ResourcesToIgnore, "list of resources to ignore (valid options 'configMaps' or 'secrets')") - cmd.PersistentFlags().StringSliceVar(&options.WorkloadTypesToIgnore, "ignored-workload-types", options.WorkloadTypesToIgnore, "list of workload 
types to ignore (valid options: 'jobs', 'cronjobs', or both)") - cmd.PersistentFlags().StringSliceVar(&options.NamespacesToIgnore, "namespaces-to-ignore", options.NamespacesToIgnore, "list of namespaces to ignore") - cmd.PersistentFlags().StringSliceVar(&options.NamespaceSelectors, "namespace-selector", options.NamespaceSelectors, "list of key:value labels to filter on for namespaces") - cmd.PersistentFlags().StringSliceVar(&options.ResourceSelectors, "resource-label-selector", options.ResourceSelectors, "list of key:value labels to filter on for configmaps and secrets") - cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts") - cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy") - cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events") - cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events") - cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election") - cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts") - cmd.PersistentFlags().BoolVar(&options.EnablePProf, "enable-pprof", false, "Enable pprof for profiling") - cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. 
Default is :6060") -} - -func GetIgnoredResourcesList() (List, error) { - - ignoredResourcesList := options.ResourcesToIgnore // getStringSliceFromFlags(cmd, "resources-to-ignore") - - for _, v := range ignoredResourcesList { - if v != "configMaps" && v != "secrets" { - return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v) - } - } - - if len(ignoredResourcesList) > 1 { - return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both") - } - - return ignoredResourcesList, nil -} - -func GetIgnoredWorkloadTypesList() (List, error) { - - ignoredWorkloadTypesList := options.WorkloadTypesToIgnore - - for _, v := range ignoredWorkloadTypesList { - if v != "jobs" && v != "cronjobs" { - return nil, fmt.Errorf("'ignored-workload-types' accepts 'jobs', 'cronjobs', or both, not '%s'", v) - } - } - - return ignoredWorkloadTypesList, nil -} diff --git a/internal/pkg/util/util_test.go b/internal/pkg/util/util_test.go deleted file mode 100644 index 338f329f3..000000000 --- a/internal/pkg/util/util_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package util - -import ( - "testing" - - "github.com/stakater/Reloader/internal/pkg/options" - v1 "k8s.io/api/core/v1" -) - -func TestConvertToEnvVarName(t *testing.T) { - data := "www.stakater.com" - envVar := ConvertToEnvVarName(data) - if envVar != "WWW_STAKATER_COM" { - t.Errorf("Failed to convert data into environment variable") - } -} - -func TestGetHashFromConfigMap(t *testing.T) { - data := map[*v1.ConfigMap]string{ - { - Data: map[string]string{"test": "test"}, - }: "Only Data", - { - Data: map[string]string{"test": "test"}, - BinaryData: map[string][]byte{"bintest": []byte("test")}, - }: "Both Data and BinaryData", - { - BinaryData: map[string][]byte{"bintest": []byte("test")}, - }: "Only BinaryData", - } - converted := map[string]string{} - for cm, cmName := range data { - converted[cmName] = GetSHAfromConfigmap(cm) - } - - // Test that the has for each 
configmap is really unique - for cmName, cmHash := range converted { - count := 0 - for _, cmHash2 := range converted { - if cmHash == cmHash2 { - count++ - } - } - if count > 1 { - t.Errorf("Found duplicate hashes for %v", cmName) - } - } -} - -func TestGetIgnoredWorkloadTypesList(t *testing.T) { - // Save original state - originalWorkloadTypes := options.WorkloadTypesToIgnore - defer func() { - options.WorkloadTypesToIgnore = originalWorkloadTypes - }() - - tests := []struct { - name string - workloadTypes []string - expectError bool - expected []string - }{ - { - name: "Both jobs and cronjobs", - workloadTypes: []string{"jobs", "cronjobs"}, - expectError: false, - expected: []string{"jobs", "cronjobs"}, - }, - { - name: "Only jobs", - workloadTypes: []string{"jobs"}, - expectError: false, - expected: []string{"jobs"}, - }, - { - name: "Only cronjobs", - workloadTypes: []string{"cronjobs"}, - expectError: false, - expected: []string{"cronjobs"}, - }, - { - name: "Empty list", - workloadTypes: []string{}, - expectError: false, - expected: []string{}, - }, - { - name: "Invalid workload type", - workloadTypes: []string{"invalid"}, - expectError: true, - expected: nil, - }, - { - name: "Mixed valid and invalid", - workloadTypes: []string{"jobs", "invalid"}, - expectError: true, - expected: nil, - }, - { - name: "Duplicate values", - workloadTypes: []string{"jobs", "jobs"}, - expectError: false, - expected: []string{"jobs", "jobs"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set the global option - options.WorkloadTypesToIgnore = tt.workloadTypes - - result, err := GetIgnoredWorkloadTypesList() - - if tt.expectError && err == nil { - t.Errorf("Expected error but got none") - } - - if !tt.expectError && err != nil { - t.Errorf("Expected no error but got: %v", err) - } - - if !tt.expectError { - if len(result) != len(tt.expected) { - t.Errorf("Expected %v, got %v", tt.expected, result) - return - } - - for i, expected := range 
tt.expected { - if i >= len(result) || result[i] != expected { - t.Errorf("Expected %v, got %v", tt.expected, result) - break - } - } - } - }) - } -} - -func TestListContains(t *testing.T) { - tests := []struct { - name string - list List - item string - expected bool - }{ - { - name: "List contains item", - list: List{"jobs", "cronjobs"}, - item: "jobs", - expected: true, - }, - { - name: "List does not contain item", - list: List{"jobs"}, - item: "cronjobs", - expected: false, - }, - { - name: "Empty list", - list: List{}, - item: "jobs", - expected: false, - }, - { - name: "Case sensitive matching", - list: List{"jobs", "cronjobs"}, - item: "Jobs", - expected: false, - }, - { - name: "Multiple occurrences", - list: List{"jobs", "jobs", "cronjobs"}, - item: "jobs", - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := tt.list.Contains(tt.item) - if result != tt.expected { - t.Errorf("Expected %v, got %v", tt.expected, result) - } - }) - } -} diff --git a/main.go b/main.go deleted file mode 100644 index 1c429710c..000000000 --- a/main.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import ( - "os" - - "github.com/stakater/Reloader/internal/pkg/app" -) - -func main() { - if err := app.Run(); err != nil { - os.Exit(1) - } - os.Exit(0) -} diff --git a/pkg/common/common.go b/pkg/common/common.go deleted file mode 100644 index 84d982748..000000000 --- a/pkg/common/common.go +++ /dev/null @@ -1,358 +0,0 @@ -package common - -import ( - "context" - "os" - "regexp" - "strconv" - "strings" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" -) - -type Map map[string]string - -type ReloadCheckResult struct { - ShouldReload bool - AutoReload bool -} - -// 
ReloaderOptions contains all configurable options for the Reloader controller. -// These options control how Reloader behaves when watching for changes in ConfigMaps and Secrets. -type ReloaderOptions struct { - // AutoReloadAll enables automatic reloading of all resources when their corresponding ConfigMaps/Secrets are updated - AutoReloadAll bool `json:"autoReloadAll"` - // ConfigmapUpdateOnChangeAnnotation is the annotation key used to detect changes in ConfigMaps specified by name - ConfigmapUpdateOnChangeAnnotation string `json:"configmapUpdateOnChangeAnnotation"` - // SecretUpdateOnChangeAnnotation is the annotation key used to detect changes in Secrets specified by name - SecretUpdateOnChangeAnnotation string `json:"secretUpdateOnChangeAnnotation"` - // ReloaderAutoAnnotation is the annotation key used to detect changes in any referenced ConfigMaps or Secrets - ReloaderAutoAnnotation string `json:"reloaderAutoAnnotation"` - // IgnoreResourceAnnotation is the annotation key used to ignore resources from being watched - IgnoreResourceAnnotation string `json:"ignoreResourceAnnotation"` - // ConfigmapReloaderAutoAnnotation is the annotation key used to detect changes in ConfigMaps only - ConfigmapReloaderAutoAnnotation string `json:"configmapReloaderAutoAnnotation"` - // SecretReloaderAutoAnnotation is the annotation key used to detect changes in Secrets only - SecretReloaderAutoAnnotation string `json:"secretReloaderAutoAnnotation"` - // ConfigmapExcludeReloaderAnnotation is the annotation key containing comma-separated list of ConfigMaps to exclude from watching - ConfigmapExcludeReloaderAnnotation string `json:"configmapExcludeReloaderAnnotation"` - // SecretExcludeReloaderAnnotation is the annotation key containing comma-separated list of Secrets to exclude from watching - SecretExcludeReloaderAnnotation string `json:"secretExcludeReloaderAnnotation"` - // AutoSearchAnnotation is the annotation key used to detect changes in ConfigMaps/Secrets tagged with 
SearchMatchAnnotation - AutoSearchAnnotation string `json:"autoSearchAnnotation"` - // SearchMatchAnnotation is the annotation key used to tag ConfigMaps/Secrets to be found by AutoSearchAnnotation - SearchMatchAnnotation string `json:"searchMatchAnnotation"` - // RolloutStrategyAnnotation is the annotation key used to define the rollout update strategy for workloads - RolloutStrategyAnnotation string `json:"rolloutStrategyAnnotation"` - // PauseDeploymentAnnotation is the annotation key used to define the time period to pause a deployment after - PauseDeploymentAnnotation string `json:"pauseDeploymentAnnotation"` - // PauseDeploymentTimeAnnotation is the annotation key used to indicate when a deployment was paused by Reloader - PauseDeploymentTimeAnnotation string `json:"pauseDeploymentTimeAnnotation"` - - // LogFormat specifies the log format to use (json, or empty string for default text format) - LogFormat string `json:"logFormat"` - // LogLevel specifies the log level to use (trace, debug, info, warning, error, fatal, panic) - LogLevel string `json:"logLevel"` - // IsArgoRollouts indicates whether support for Argo Rollouts is enabled - IsArgoRollouts bool `json:"isArgoRollouts"` - // ReloadStrategy specifies the strategy used to trigger resource reloads (env-vars or annotations) - ReloadStrategy string `json:"reloadStrategy"` - // ReloadOnCreate indicates whether to trigger reloads when ConfigMaps/Secrets are created - ReloadOnCreate bool `json:"reloadOnCreate"` - // ReloadOnDelete indicates whether to trigger reloads when ConfigMaps/Secrets are deleted - ReloadOnDelete bool `json:"reloadOnDelete"` - // SyncAfterRestart indicates whether to sync add events after Reloader restarts (only works when ReloadOnCreate is true) - SyncAfterRestart bool `json:"syncAfterRestart"` - // EnableHA indicates whether High Availability mode is enabled with leader election - EnableHA bool `json:"enableHA"` - // WebhookUrl is the URL to send webhook notifications to instead of 
performing reloads - WebhookUrl string `json:"webhookUrl"` - // ResourcesToIgnore is a list of resource types to ignore (e.g., "configmaps" or "secrets") - ResourcesToIgnore []string `json:"resourcesToIgnore"` - // WorkloadTypesToIgnore is a list of workload types to ignore (e.g., "jobs" or "cronjobs") - WorkloadTypesToIgnore []string `json:"workloadTypesToIgnore"` - // NamespaceSelectors is a list of label selectors to filter namespaces to watch - NamespaceSelectors []string `json:"namespaceSelectors"` - // ResourceSelectors is a list of label selectors to filter ConfigMaps and Secrets to watch - ResourceSelectors []string `json:"resourceSelectors"` - // NamespacesToIgnore is a list of namespace names to ignore when watching for changes - NamespacesToIgnore []string `json:"namespacesToIgnore"` - // EnablePProf enables pprof for profiling - EnablePProf bool `json:"enablePProf"` - // PProfAddr is the address to start pprof server on - PProfAddr string `json:"pprofAddr"` -} - -var CommandLineOptions *ReloaderOptions - -func PublishMetaInfoConfigmap(clientset kubernetes.Interface) { - namespace := os.Getenv("RELOADER_NAMESPACE") - if namespace == "" { - logrus.Warn("RELOADER_NAMESPACE is not set, skipping meta info configmap creation") - return - } - - metaInfo := &MetaInfo{ - BuildInfo: *NewBuildInfo(), - ReloaderOptions: *GetCommandLineOptions(), - DeploymentInfo: metav1.ObjectMeta{ - Name: os.Getenv("RELOADER_DEPLOYMENT_NAME"), - Namespace: namespace, - }, - } - - configMap := metaInfo.ToConfigMap() - - if _, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.Background(), configMap.Name, metav1.GetOptions{}); err == nil { - logrus.Info("Meta info configmap already exists, updating it") - _, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.Background(), configMap, metav1.UpdateOptions{}) - if err != nil { - logrus.Warn("Failed to update existing meta info configmap: ", err) - } - return - } - - _, err := 
clientset.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMap, metav1.CreateOptions{}) - if err != nil { - logrus.Warn("Failed to create meta info configmap: ", err) - } -} - -func GetNamespaceLabelSelector(slice []string) (string, error) { - for i, kv := range slice { - // Legacy support for ":" as a delimiter and "*" for wildcard. - if strings.Contains(kv, ":") { - split := strings.Split(kv, ":") - if split[1] == "*" { - slice[i] = split[0] - } else { - slice[i] = split[0] + "=" + split[1] - } - } - // Convert wildcard to valid apimachinery operator - if strings.Contains(kv, "=") { - split := strings.Split(kv, "=") - if split[1] == "*" { - slice[i] = split[0] - } - } - } - - namespaceLabelSelector := strings.Join(slice[:], ",") - _, err := labels.Parse(namespaceLabelSelector) - if err != nil { - logrus.Fatal(err) - } - - return namespaceLabelSelector, nil -} - -func GetResourceLabelSelector(slice []string) (string, error) { - for i, kv := range slice { - // Legacy support for ":" as a delimiter and "*" for wildcard. - if strings.Contains(kv, ":") { - split := strings.Split(kv, ":") - if split[1] == "*" { - slice[i] = split[0] - } else { - slice[i] = split[0] + "=" + split[1] - } - } - // Convert wildcard to valid apimachinery operator - if strings.Contains(kv, "=") { - split := strings.Split(kv, "=") - if split[1] == "*" { - slice[i] = split[0] - } - } - } - - resourceLabelSelector := strings.Join(slice[:], ",") - _, err := labels.Parse(resourceLabelSelector) - if err != nil { - logrus.Fatal(err) - } - - return resourceLabelSelector, nil -} - -// ShouldReload checks if a resource should be reloaded based on its annotations and the provided options. 
-func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult { - - // Check if this workload type should be ignored - if len(options.WorkloadTypesToIgnore) > 0 { - ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList() - if err != nil { - logrus.Errorf("Failed to parse ignored workload types: %v", err) - } else { - // Map Kubernetes resource types to CLI-friendly names for comparison - var resourceToCheck string - switch resourceType { - case "Job": - resourceToCheck = "jobs" - case "CronJob": - resourceToCheck = "cronjobs" - default: - resourceToCheck = resourceType // For other types, use as-is - } - - // Check if current resource type should be ignored - if ignoredWorkloadTypes.Contains(resourceToCheck) { - return ReloadCheckResult{ - ShouldReload: false, - } - } - } - } - - ignoreResourceAnnotatonValue := config.ResourceAnnotations[options.IgnoreResourceAnnotation] - if ignoreResourceAnnotatonValue == "true" { - return ReloadCheckResult{ - ShouldReload: false, - } - } - - annotationValue, found := annotations[config.Annotation] - searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation] - excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation] - excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation] - - if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn { - annotations = podAnnotations - annotationValue = annotations[config.Annotation] - searchAnnotationValue = annotations[options.AutoSearchAnnotation] - reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation] - } - - 
isResourceExcluded := false - - switch config.Type { - case constants.ConfigmapEnvVarPostfix: - if foundExcludeConfigmap { - isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue) - } - case constants.SecretEnvVarPostfix: - if foundExcludeSecret { - isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue) - } - } - - if isResourceExcluded { - return ReloadCheckResult{ - ShouldReload: false, - } - } - - reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue) - if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll { - return ReloadCheckResult{ - ShouldReload: true, - AutoReload: true, - } - } - - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.TrimSpace(value) - re := regexp.MustCompile("^" + value + "$") - if re.Match([]byte(config.ResourceName)) { - return ReloadCheckResult{ - ShouldReload: true, - AutoReload: false, - } - } - } - - if searchAnnotationValue == "true" { - matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation] - if matchAnnotationValue == "true" { - return ReloadCheckResult{ - ShouldReload: true, - AutoReload: true, - } - } - } - - return ReloadCheckResult{ - ShouldReload: false, - } -} - -func checkIfResourceIsExcluded(resourceName, excludedResources string) bool { - if excludedResources == "" { - return false - } - - excludedResourcesList := strings.Split(excludedResources, ",") - for _, excludedResource := range excludedResourcesList { - if strings.TrimSpace(excludedResource) == resourceName { - return true - } - } - - return false -} - -func init() { - GetCommandLineOptions() -} - -func GetCommandLineOptions() *ReloaderOptions { - if CommandLineOptions == nil { - CommandLineOptions = &ReloaderOptions{} - } 
- - CommandLineOptions.AutoReloadAll = options.AutoReloadAll - CommandLineOptions.ConfigmapUpdateOnChangeAnnotation = options.ConfigmapUpdateOnChangeAnnotation - CommandLineOptions.SecretUpdateOnChangeAnnotation = options.SecretUpdateOnChangeAnnotation - CommandLineOptions.ReloaderAutoAnnotation = options.ReloaderAutoAnnotation - CommandLineOptions.IgnoreResourceAnnotation = options.IgnoreResourceAnnotation - CommandLineOptions.ConfigmapReloaderAutoAnnotation = options.ConfigmapReloaderAutoAnnotation - CommandLineOptions.SecretReloaderAutoAnnotation = options.SecretReloaderAutoAnnotation - CommandLineOptions.ConfigmapExcludeReloaderAnnotation = options.ConfigmapExcludeReloaderAnnotation - CommandLineOptions.SecretExcludeReloaderAnnotation = options.SecretExcludeReloaderAnnotation - CommandLineOptions.AutoSearchAnnotation = options.AutoSearchAnnotation - CommandLineOptions.SearchMatchAnnotation = options.SearchMatchAnnotation - CommandLineOptions.RolloutStrategyAnnotation = options.RolloutStrategyAnnotation - CommandLineOptions.PauseDeploymentAnnotation = options.PauseDeploymentAnnotation - CommandLineOptions.PauseDeploymentTimeAnnotation = options.PauseDeploymentTimeAnnotation - CommandLineOptions.LogFormat = options.LogFormat - CommandLineOptions.LogLevel = options.LogLevel - CommandLineOptions.ReloadStrategy = options.ReloadStrategy - CommandLineOptions.SyncAfterRestart = options.SyncAfterRestart - CommandLineOptions.EnableHA = options.EnableHA - CommandLineOptions.WebhookUrl = options.WebhookUrl - CommandLineOptions.ResourcesToIgnore = options.ResourcesToIgnore - CommandLineOptions.WorkloadTypesToIgnore = options.WorkloadTypesToIgnore - CommandLineOptions.NamespaceSelectors = options.NamespaceSelectors - CommandLineOptions.ResourceSelectors = options.ResourceSelectors - CommandLineOptions.NamespacesToIgnore = options.NamespacesToIgnore - CommandLineOptions.IsArgoRollouts = parseBool(options.IsArgoRollouts) - CommandLineOptions.ReloadOnCreate = 
parseBool(options.ReloadOnCreate) - CommandLineOptions.ReloadOnDelete = parseBool(options.ReloadOnDelete) - CommandLineOptions.EnablePProf = options.EnablePProf - CommandLineOptions.PProfAddr = options.PProfAddr - - return CommandLineOptions -} - -func parseBool(value string) bool { - if value == "" { - return false - } - result, err := strconv.ParseBool(value) - if err != nil { - return false // Default to false if parsing fails - } - return result -} diff --git a/pkg/common/common_test.go b/pkg/common/common_test.go deleted file mode 100644 index 532d3adfa..000000000 --- a/pkg/common/common_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stakater/Reloader/internal/pkg/options" -) - -func TestShouldReload_IgnoredWorkloadTypes(t *testing.T) { - // Save original state - originalWorkloadTypes := options.WorkloadTypesToIgnore - defer func() { - options.WorkloadTypesToIgnore = originalWorkloadTypes - }() - - tests := []struct { - name string - ignoredWorkloadTypes []string - resourceType string - shouldReload bool - description string - }{ - { - name: "Jobs ignored - Job should not reload", - ignoredWorkloadTypes: []string{"jobs"}, - resourceType: "Job", - shouldReload: false, - description: "When jobs are ignored, Job resources should not be reloaded", - }, - { - name: "Jobs ignored - CronJob should reload", - ignoredWorkloadTypes: []string{"jobs"}, - resourceType: "CronJob", - shouldReload: true, - description: "When jobs are ignored, CronJob resources should still be processed", - }, - { - name: "CronJobs ignored - CronJob should not reload", - ignoredWorkloadTypes: []string{"cronjobs"}, - resourceType: "CronJob", - shouldReload: false, - description: "When cronjobs are ignored, CronJob resources should not be reloaded", - }, - { - name: "CronJobs ignored - Job should reload", - ignoredWorkloadTypes: []string{"cronjobs"}, - resourceType: "Job", - shouldReload: true, - description: "When cronjobs are ignored, Job 
resources should still be processed", - }, - { - name: "Both ignored - Job should not reload", - ignoredWorkloadTypes: []string{"jobs", "cronjobs"}, - resourceType: "Job", - shouldReload: false, - description: "When both are ignored, Job resources should not be reloaded", - }, - { - name: "Both ignored - CronJob should not reload", - ignoredWorkloadTypes: []string{"jobs", "cronjobs"}, - resourceType: "CronJob", - shouldReload: false, - description: "When both are ignored, CronJob resources should not be reloaded", - }, - { - name: "Both ignored - Deployment should reload", - ignoredWorkloadTypes: []string{"jobs", "cronjobs"}, - resourceType: "Deployment", - shouldReload: true, - description: "When both are ignored, other workload types should still be processed", - }, - { - name: "None ignored - Job should reload", - ignoredWorkloadTypes: []string{}, - resourceType: "Job", - shouldReload: true, - description: "When nothing is ignored, all workload types should be processed", - }, - { - name: "None ignored - CronJob should reload", - ignoredWorkloadTypes: []string{}, - resourceType: "CronJob", - shouldReload: true, - description: "When nothing is ignored, all workload types should be processed", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set the ignored workload types - options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes - - // Create minimal test config and options - config := Config{ - ResourceName: "test-resource", - Annotation: "configmap.reloader.stakater.com/reload", - } - - annotations := Map{ - "configmap.reloader.stakater.com/reload": "test-config", - } - - // Create ReloaderOptions with the ignored workload types - opts := &ReloaderOptions{ - WorkloadTypesToIgnore: tt.ignoredWorkloadTypes, - AutoReloadAll: true, // Enable auto-reload to simplify test - ReloaderAutoAnnotation: "reloader.stakater.com/auto", - } - - // Call ShouldReload - result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts) - - 
// Check the result - if result.ShouldReload != tt.shouldReload { - t.Errorf("For resource type %s with ignored types %v, expected ShouldReload=%v, got=%v", - tt.resourceType, tt.ignoredWorkloadTypes, tt.shouldReload, result.ShouldReload) - } - - t.Logf("✓ %s", tt.description) - }) - } -} - -func TestShouldReload_IgnoredWorkloadTypes_ValidationError(t *testing.T) { - // Save original state - originalWorkloadTypes := options.WorkloadTypesToIgnore - defer func() { - options.WorkloadTypesToIgnore = originalWorkloadTypes - }() - - // Test with invalid workload type - should still continue processing - options.WorkloadTypesToIgnore = []string{"invalid"} - - config := Config{ - ResourceName: "test-resource", - Annotation: "configmap.reloader.stakater.com/reload", - } - - annotations := Map{ - "configmap.reloader.stakater.com/reload": "test-config", - } - - opts := &ReloaderOptions{ - WorkloadTypesToIgnore: []string{"invalid"}, - AutoReloadAll: true, // Enable auto-reload to simplify test - ReloaderAutoAnnotation: "reloader.stakater.com/auto", - } - - // Should not panic and should continue with normal processing - result := ShouldReload(config, "Job", annotations, Map{}, opts) - - // Since validation failed, it should continue with normal processing (should reload) - if !result.ShouldReload { - t.Errorf("Expected ShouldReload=true when validation fails, got=%v", result.ShouldReload) - } -} - -// Test that validates the fix for issue #996 -func TestShouldReload_IssueRBACPermissionFixed(t *testing.T) { - // Save original state - originalWorkloadTypes := options.WorkloadTypesToIgnore - defer func() { - options.WorkloadTypesToIgnore = originalWorkloadTypes - }() - - tests := []struct { - name string - ignoredWorkloadTypes []string - resourceType string - description string - }{ - { - name: "Issue #996 - ignoreJobs prevents Job processing", - ignoredWorkloadTypes: []string{"jobs"}, - resourceType: "Job", - description: "Job resources are skipped entirely, preventing RBAC 
permission errors", - }, - { - name: "Issue #996 - ignoreCronJobs prevents CronJob processing", - ignoredWorkloadTypes: []string{"cronjobs"}, - resourceType: "CronJob", - description: "CronJob resources are skipped entirely, preventing RBAC permission errors", - }, - { - name: "Issue #996 - both ignored prevent both types", - ignoredWorkloadTypes: []string{"jobs", "cronjobs"}, - resourceType: "Job", - description: "Job resources are skipped entirely when both types are ignored", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set the ignored workload types - options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes - - config := Config{ - ResourceName: "test-resource", - Annotation: "configmap.reloader.stakater.com/reload", - } - - annotations := Map{ - "configmap.reloader.stakater.com/reload": "test-config", - } - - opts := &ReloaderOptions{ - WorkloadTypesToIgnore: tt.ignoredWorkloadTypes, - AutoReloadAll: true, // Enable auto-reload to simplify test - ReloaderAutoAnnotation: "reloader.stakater.com/auto", - } - - // Call ShouldReload - result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts) - - // Should not reload when workload type is ignored - if result.ShouldReload { - t.Errorf("Expected ShouldReload=false for ignored workload type %s, got=%v", - tt.resourceType, result.ShouldReload) - } - - t.Logf("✓ %s", tt.description) - }) - } -} diff --git a/pkg/common/config.go b/pkg/common/config.go deleted file mode 100644 index 4227c2bc3..000000000 --- a/pkg/common/config.go +++ /dev/null @@ -1,48 +0,0 @@ -package common - -import ( - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - v1 "k8s.io/api/core/v1" -) - -// Config contains rolling upgrade configuration parameters -type Config struct { - Namespace string - ResourceName string - ResourceAnnotations map[string]string - Annotation string - 
TypedAutoAnnotation string - SHAValue string - Type string - Labels map[string]string -} - -// GetConfigmapConfig provides utility config for configmap -func GetConfigmapConfig(configmap *v1.ConfigMap) Config { - return Config{ - Namespace: configmap.Namespace, - ResourceName: configmap.Name, - ResourceAnnotations: configmap.Annotations, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation, - SHAValue: util.GetSHAfromConfigmap(configmap), - Type: constants.ConfigmapEnvVarPostfix, - Labels: configmap.Labels, - } -} - -// GetSecretConfig provides utility config for secret -func GetSecretConfig(secret *v1.Secret) Config { - return Config{ - Namespace: secret.Namespace, - ResourceName: secret.Name, - ResourceAnnotations: secret.Annotations, - Annotation: options.SecretUpdateOnChangeAnnotation, - TypedAutoAnnotation: options.SecretReloaderAutoAnnotation, - SHAValue: util.GetSHAfromSecret(secret.Data), - Type: constants.SecretEnvVarPostfix, - Labels: secret.Labels, - } -} diff --git a/pkg/common/metainfo.go b/pkg/common/metainfo.go deleted file mode 100644 index b792c5297..000000000 --- a/pkg/common/metainfo.go +++ /dev/null @@ -1,129 +0,0 @@ -package common - -import ( - "encoding/json" - "fmt" - "runtime" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Version, Commit, and BuildDate are set during the build process -// using the -X linker flag to inject these values into the binary. -// They provide metadata about the build version, commit hash, build date, and whether there are -// uncommitted changes in the source code at the time of build. -// This information is useful for debugging and tracking the specific build of the Reloader binary. 
-var Version = "dev" -var Commit = "unknown" -var BuildDate = "unknown" - -const ( - MetaInfoConfigmapName = "reloader-meta-info" - MetaInfoConfigmapLabelKey = "reloader.stakater.com/meta-info" - MetaInfoConfigmapLabelValue = "reloader-oss" -) - -// MetaInfo contains comprehensive metadata about the Reloader instance. -// This includes build information, configuration options, and deployment details. -type MetaInfo struct { - // BuildInfo contains information about the build version, commit, and compilation details - BuildInfo BuildInfo `json:"buildInfo"` - // ReloaderOptions contains all the configuration options and flags used by this Reloader instance - ReloaderOptions ReloaderOptions `json:"reloaderOptions"` - // DeploymentInfo contains metadata about the Kubernetes deployment of this Reloader instance - DeploymentInfo metav1.ObjectMeta `json:"deploymentInfo"` -} - -// BuildInfo contains information about the build and version of the Reloader binary. -// This includes Go version, release version, commit details, and build timestamp. 
-type BuildInfo struct { - // GoVersion is the version of Go used to compile the binary - GoVersion string `json:"goVersion"` - // ReleaseVersion is the version tag or branch of the Reloader release - ReleaseVersion string `json:"releaseVersion"` - // CommitHash is the Git commit hash of the source code used to build this binary - CommitHash string `json:"commitHash"` - // CommitTime is the timestamp of the Git commit used to build this binary - CommitTime time.Time `json:"commitTime"` -} - -func NewBuildInfo() *BuildInfo { - metaInfo := &BuildInfo{ - GoVersion: runtime.Version(), - ReleaseVersion: Version, - CommitHash: Commit, - CommitTime: ParseUTCTime(BuildDate), - } - - return metaInfo -} - -func (m *MetaInfo) ToConfigMap() *v1.ConfigMap { - return &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: MetaInfoConfigmapName, - Namespace: m.DeploymentInfo.Namespace, - Labels: map[string]string{ - MetaInfoConfigmapLabelKey: MetaInfoConfigmapLabelValue, - }, - }, - Data: map[string]string{ - "buildInfo": toJson(m.BuildInfo), - "reloaderOptions": toJson(m.ReloaderOptions), - "deploymentInfo": toJson(m.DeploymentInfo), - }, - } -} - -func NewMetaInfo(configmap *v1.ConfigMap) (*MetaInfo, error) { - var buildInfo BuildInfo - if val, ok := configmap.Data["buildInfo"]; ok { - err := json.Unmarshal([]byte(val), &buildInfo) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal buildInfo: %w", err) - } - } - - var reloaderOptions ReloaderOptions - if val, ok := configmap.Data["reloaderOptions"]; ok { - err := json.Unmarshal([]byte(val), &reloaderOptions) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal reloaderOptions: %w", err) - } - } - - var deploymentInfo metav1.ObjectMeta - if val, ok := configmap.Data["deploymentInfo"]; ok { - err := json.Unmarshal([]byte(val), &deploymentInfo) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal deploymentInfo: %w", err) - } - } - - return &MetaInfo{ - BuildInfo: buildInfo, - ReloaderOptions: 
reloaderOptions, - DeploymentInfo: deploymentInfo, - }, nil -} - -func toJson(data interface{}) string { - jsonData, err := json.Marshal(data) - if err != nil { - return "" - } - return string(jsonData) -} - -func ParseUTCTime(value string) time.Time { - if value == "" { - return time.Time{} // Return zero time if value is empty - } - t, err := time.Parse(time.RFC3339, value) - if err != nil { - return time.Time{} // Return zero time if parsing fails - } - return t -} diff --git a/pkg/common/reload_source.go b/pkg/common/reload_source.go deleted file mode 100644 index 093826132..000000000 --- a/pkg/common/reload_source.go +++ /dev/null @@ -1,39 +0,0 @@ -package common - -import "time" - -type ReloadSource struct { - Type string `json:"type"` - Name string `json:"name"` - Namespace string `json:"namespace"` - Hash string `json:"hash"` - ContainerRefs []string `json:"containerRefs"` - ObservedAt int64 `json:"observedAt"` -} - -func NewReloadSource( - resourceName string, - resourceNamespace string, - resourceType string, - resourceHash string, - containerRefs []string, -) ReloadSource { - return ReloadSource{ - ObservedAt: time.Now().Unix(), - Name: resourceName, - Namespace: resourceNamespace, - Type: resourceType, - Hash: resourceHash, - ContainerRefs: containerRefs, - } -} - -func NewReloadSourceFromConfig(config Config, containerRefs []string) ReloadSource { - return NewReloadSource( - config.ResourceName, - config.Namespace, - config.Type, - config.SHAValue, - containerRefs, - ) -} diff --git a/pkg/kube/client.go b/pkg/kube/client.go deleted file mode 100644 index 423006392..000000000 --- a/pkg/kube/client.go +++ /dev/null @@ -1,118 +0,0 @@ -package kube - -import ( - "context" - "os" - - "k8s.io/client-go/tools/clientcmd" - - argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" - appsclient "github.com/openshift/client-go/apps/clientset/versioned" - "github.com/sirupsen/logrus" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" 
-) - -// Clients struct exposes interfaces for kubernetes as well as openshift if available -type Clients struct { - KubernetesClient kubernetes.Interface - OpenshiftAppsClient appsclient.Interface - ArgoRolloutClient argorollout.Interface -} - -var ( - // IsOpenshift is true if environment is Openshift, it is false if environment is Kubernetes - IsOpenshift = isOpenshift() -) - -// GetClients returns a `Clients` object containing both openshift and kubernetes clients with an openshift identifier -func GetClients() Clients { - client, err := GetKubernetesClient() - if err != nil { - logrus.Fatalf("Unable to create Kubernetes client error = %v", err) - } - - var appsClient *appsclient.Clientset - - if IsOpenshift { - appsClient, err = GetOpenshiftAppsClient() - if err != nil { - logrus.Warnf("Unable to create Openshift Apps client error = %v", err) - } - } - - var rolloutClient *argorollout.Clientset - - rolloutClient, err = GetArgoRolloutClient() - if err != nil { - logrus.Warnf("Unable to create ArgoRollout client error = %v", err) - } - - return Clients{ - KubernetesClient: client, - OpenshiftAppsClient: appsClient, - ArgoRolloutClient: rolloutClient, - } -} - -func GetArgoRolloutClient() (*argorollout.Clientset, error) { - config, err := getConfig() - if err != nil { - return nil, err - } - return argorollout.NewForConfig(config) -} - -func isOpenshift() bool { - client, err := GetKubernetesClient() - if err != nil { - logrus.Fatalf("Unable to create Kubernetes client error = %v", err) - } - _, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw() - if err == nil { - logrus.Info("Environment: Openshift") - return true - } - logrus.Info("Environment: Kubernetes") - return false -} - -// GetOpenshiftAppsClient returns an Openshift Client that can query on Apps -func GetOpenshiftAppsClient() (*appsclient.Clientset, error) { - config, err := getConfig() - if err != nil { - return nil, err - } - return 
appsclient.NewForConfig(config) -} - -// GetKubernetesClient gets the client for k8s, if ~/.kube/config exists so get that config else incluster config -func GetKubernetesClient() (*kubernetes.Clientset, error) { - config, err := getConfig() - if err != nil { - return nil, err - } - return kubernetes.NewForConfig(config) -} - -func getConfig() (*rest.Config, error) { - var config *rest.Config - kubeconfigPath := os.Getenv("KUBECONFIG") - if kubeconfigPath == "" { - kubeconfigPath = os.Getenv("HOME") + "/.kube/config" - } - //If file exists so use that config settings - if _, err := os.Stat(kubeconfigPath); err == nil { - config, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - return nil, err - } - } else { //Use Incluster Configuration - config, err = rest.InClusterConfig() - if err != nil { - return nil, err - } - } - - return config, nil -} diff --git a/pkg/kube/resourcemapper.go b/pkg/kube/resourcemapper.go deleted file mode 100644 index 89ac2afc4..000000000 --- a/pkg/kube/resourcemapper.go +++ /dev/null @@ -1,13 +0,0 @@ -package kube - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// ResourceMap are resources from where changes are going to be detected -var ResourceMap = map[string]runtime.Object{ - "configmaps": &v1.ConfigMap{}, - "secrets": &v1.Secret{}, - "namespaces": &v1.Namespace{}, -} From 9d588cadb2455605a9e54487e6e54d53340052bc Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:55 +0100 Subject: [PATCH 11/35] feat: Add reconciler test cases and config test cases --- internal/pkg/config/config_test.go | 237 ++++ internal/pkg/config/flags_test.go | 330 ++++++ internal/pkg/config/validation_test.go | 320 ++++++ .../controller/configmap_reconciler_test.go | 844 ++++++++++++++ .../pkg/controller/secret_reconciler_test.go | 1017 +++++++++++++++++ 5 files changed, 2748 insertions(+) create mode 100644 
internal/pkg/config/config_test.go create mode 100644 internal/pkg/config/flags_test.go create mode 100644 internal/pkg/config/validation_test.go create mode 100644 internal/pkg/controller/configmap_reconciler_test.go create mode 100644 internal/pkg/controller/secret_reconciler_test.go diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go new file mode 100644 index 000000000..d9f740a7a --- /dev/null +++ b/internal/pkg/config/config_test.go @@ -0,0 +1,237 @@ +package config + +import ( + "testing" + "time" +) + +func TestNewDefault(t *testing.T) { + cfg := NewDefault() + + if cfg == nil { + t.Fatal("NewDefault() returned nil") + } + + // Test default values + if cfg.ReloadStrategy != ReloadStrategyEnvVars { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyEnvVars) + } + + if cfg.ArgoRolloutStrategy != ArgoRolloutStrategyRollout { + t.Errorf("ArgoRolloutStrategy = %v, want %v", cfg.ArgoRolloutStrategy, ArgoRolloutStrategyRollout) + } + + if cfg.AutoReloadAll { + t.Error("AutoReloadAll should be false by default") + } + + if cfg.ArgoRolloutsEnabled { + t.Error("ArgoRolloutsEnabled should be false by default") + } + + if cfg.ReloadOnCreate { + t.Error("ReloadOnCreate should be false by default") + } + + if cfg.ReloadOnDelete { + t.Error("ReloadOnDelete should be false by default") + } + + if cfg.EnableHA { + t.Error("EnableHA should be false by default") + } + + if cfg.LogLevel != "info" { + t.Errorf("LogLevel = %q, want %q", cfg.LogLevel, "info") + } + + if cfg.MetricsAddr != ":9090" { + t.Errorf("MetricsAddr = %q, want %q", cfg.MetricsAddr, ":9090") + } + + if cfg.HealthAddr != ":8081" { + t.Errorf("HealthAddr = %q, want %q", cfg.HealthAddr, ":8081") + } + + if cfg.PProfAddr != ":6060" { + t.Errorf("PProfAddr = %q, want %q", cfg.PProfAddr, ":6060") + } +} + +func TestDefaultAnnotations(t *testing.T) { + ann := DefaultAnnotations() + + tests := []struct { + name string + got string + want string + }{ + 
{"Prefix", ann.Prefix, "reloader.stakater.com"}, + {"Auto", ann.Auto, "reloader.stakater.com/auto"}, + {"ConfigmapAuto", ann.ConfigmapAuto, "configmap.reloader.stakater.com/auto"}, + {"SecretAuto", ann.SecretAuto, "secret.reloader.stakater.com/auto"}, + {"ConfigmapReload", ann.ConfigmapReload, "configmap.reloader.stakater.com/reload"}, + {"SecretReload", ann.SecretReload, "secret.reloader.stakater.com/reload"}, + {"ConfigmapExclude", ann.ConfigmapExclude, "configmaps.exclude.reloader.stakater.com/reload"}, + {"SecretExclude", ann.SecretExclude, "secrets.exclude.reloader.stakater.com/reload"}, + {"Ignore", ann.Ignore, "reloader.stakater.com/ignore"}, + {"Search", ann.Search, "reloader.stakater.com/search"}, + {"Match", ann.Match, "reloader.stakater.com/match"}, + {"RolloutStrategy", ann.RolloutStrategy, "reloader.stakater.com/rollout-strategy"}, + {"PausePeriod", ann.PausePeriod, "deployment.reloader.stakater.com/pause-period"}, + {"PausedAt", ann.PausedAt, "deployment.reloader.stakater.com/paused-at"}, + {"LastReloadedFrom", ann.LastReloadedFrom, "reloader.stakater.com/last-reloaded-from"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.got != tt.want { + t.Errorf("%s = %q, want %q", tt.name, tt.got, tt.want) + } + }) + } +} + +func TestDefaultLeaderElection(t *testing.T) { + cfg := NewDefault() + + if cfg.LeaderElection.LockName != "reloader-leader-election" { + t.Errorf("LockName = %q, want %q", cfg.LeaderElection.LockName, "reloader-leader-election") + } + + if cfg.LeaderElection.LeaseDuration != 15*time.Second { + t.Errorf("LeaseDuration = %v, want %v", cfg.LeaderElection.LeaseDuration, 15*time.Second) + } + + if cfg.LeaderElection.RenewDeadline != 10*time.Second { + t.Errorf("RenewDeadline = %v, want %v", cfg.LeaderElection.RenewDeadline, 10*time.Second) + } + + if cfg.LeaderElection.RetryPeriod != 2*time.Second { + t.Errorf("RetryPeriod = %v, want %v", cfg.LeaderElection.RetryPeriod, 2*time.Second) + } + + if 
!cfg.LeaderElection.ReleaseOnCancel { + t.Error("ReleaseOnCancel should be true by default") + } +} + +func TestConfig_IsResourceIgnored(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredResources = []string{"configmaps", "secrets"} + + tests := []struct { + name string + resource string + want bool + }{ + {"exact match lowercase", "configmaps", true}, + {"exact match uppercase", "CONFIGMAPS", true}, + {"exact match mixed case", "ConfigMaps", true}, + {"not ignored", "deployments", false}, + {"partial match (not ignored)", "config", false}, + {"empty string", "", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := cfg.IsResourceIgnored(tt.resource) + if got != tt.want { + t.Errorf("IsResourceIgnored(%q) = %v, want %v", tt.resource, got, tt.want) + } + }) + } +} + +func TestConfig_IsWorkloadIgnored(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredWorkloads = []string{"jobs", "cronjobs"} + + tests := []struct { + name string + workload string + want bool + }{ + {"exact match", "jobs", true}, + {"case insensitive", "JOBS", true}, + {"not ignored", "deployments", false}, + {"empty string", "", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := cfg.IsWorkloadIgnored(tt.workload) + if got != tt.want { + t.Errorf("IsWorkloadIgnored(%q) = %v, want %v", tt.workload, got, tt.want) + } + }) + } +} + +func TestConfig_IsNamespaceIgnored(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system", "kube-public"} + + tests := []struct { + name string + namespace string + want bool + }{ + {"exact match", "kube-system", true}, + {"case sensitive no match", "Kube-System", false}, + {"not ignored", "default", false}, + {"empty string", "", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := cfg.IsNamespaceIgnored(tt.namespace) + if got != tt.want { + t.Errorf("IsNamespaceIgnored(%q) = %v, want %v", tt.namespace, got, tt.want) + } + }) + } +} 
+ +func TestEqualFold(t *testing.T) { + tests := []struct { + s, t string + want bool + }{ + {"abc", "abc", true}, + {"ABC", "abc", true}, + {"abc", "ABC", true}, + {"aBc", "AbC", true}, + {"abc", "abcd", false}, + {"", "", true}, + {"a", "", false}, + {"", "a", false}, + } + + for _, tt := range tests { + t.Run(tt.s+"_"+tt.t, func(t *testing.T) { + got := equalFold(tt.s, tt.t) + if got != tt.want { + t.Errorf("equalFold(%q, %q) = %v, want %v", tt.s, tt.t, got, tt.want) + } + }) + } +} + +func TestReloadStrategy_String(t *testing.T) { + if string(ReloadStrategyEnvVars) != "env-vars" { + t.Errorf("ReloadStrategyEnvVars = %q, want %q", ReloadStrategyEnvVars, "env-vars") + } + if string(ReloadStrategyAnnotations) != "annotations" { + t.Errorf("ReloadStrategyAnnotations = %q, want %q", ReloadStrategyAnnotations, "annotations") + } +} + +func TestArgoRolloutStrategy_String(t *testing.T) { + if string(ArgoRolloutStrategyRestart) != "restart" { + t.Errorf("ArgoRolloutStrategyRestart = %q, want %q", ArgoRolloutStrategyRestart, "restart") + } + if string(ArgoRolloutStrategyRollout) != "rollout" { + t.Errorf("ArgoRolloutStrategyRollout = %q, want %q", ArgoRolloutStrategyRollout, "rollout") + } +} diff --git a/internal/pkg/config/flags_test.go b/internal/pkg/config/flags_test.go new file mode 100644 index 000000000..4ddcbaeb9 --- /dev/null +++ b/internal/pkg/config/flags_test.go @@ -0,0 +1,330 @@ +package config + +import ( + "testing" + + "github.com/spf13/pflag" +) + +func TestBindFlags(t *testing.T) { + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + BindFlags(fs, cfg) + + // Verify flags are registered + expectedFlags := []string{ + "auto-reload-all", + "reload-strategy", + "is-Argo-Rollouts", + "reload-on-create", + "reload-on-delete", + "sync-after-restart", + "enable-ha", + "leader-election-id", + "leader-election-namespace", + "leader-election-lease-duration", + "leader-election-renew-deadline", + "leader-election-retry-period", + 
"leader-election-release-on-cancel", + "webhook-url", + "resources-to-ignore", + "ignored-workload-types", + "namespaces-to-ignore", + "namespace-selector", + "resource-label-selector", + "log-format", + "log-level", + "metrics-addr", + "health-addr", + "enable-pprof", + "pprof-addr", + "auto-annotation", + "configmap-auto-annotation", + "secret-auto-annotation", + "configmap-annotation", + "secret-annotation", + "auto-search-annotation", + "search-match-annotation", + "pause-deployment-annotation", + "pause-deployment-time-annotation", + "watch-namespace", + } + + for _, flagName := range expectedFlags { + if fs.Lookup(flagName) == nil { + t.Errorf("Expected flag %q to be registered", flagName) + } + } +} + +func TestBindFlags_DefaultValues(t *testing.T) { + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + BindFlags(fs, cfg) + + // Parse empty args to use defaults + if err := fs.Parse([]string{}); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + // Check default values are preserved + if cfg.ReloadStrategy != ReloadStrategyEnvVars { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyEnvVars) + } + + if cfg.LogLevel != "info" { + t.Errorf("LogLevel = %q, want %q", cfg.LogLevel, "info") + } +} + +func TestBindFlags_CustomValues(t *testing.T) { + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + BindFlags(fs, cfg) + + args := []string{ + "--auto-reload-all=true", + "--reload-strategy=annotations", + "--log-level=debug", + "--log-format=json", + "--webhook-url=https://example.com/hook", + "--enable-ha=true", + "--enable-pprof=true", + } + + if err := fs.Parse(args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if !cfg.AutoReloadAll { + t.Error("AutoReloadAll should be true") + } + + if cfg.ReloadStrategy != ReloadStrategyAnnotations { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyAnnotations) + } + + if cfg.LogLevel != "debug" { 
+ t.Errorf("LogLevel = %q, want %q", cfg.LogLevel, "debug") + } + + if cfg.LogFormat != "json" { + t.Errorf("LogFormat = %q, want %q", cfg.LogFormat, "json") + } + + if cfg.WebhookURL != "https://example.com/hook" { + t.Errorf("WebhookURL = %q, want %q", cfg.WebhookURL, "https://example.com/hook") + } + + if !cfg.EnableHA { + t.Error("EnableHA should be true") + } + + if !cfg.EnablePProf { + t.Error("EnablePProf should be true") + } +} + +func TestApplyFlags_BooleanStrings(t *testing.T) { + tests := []struct { + name string + args []string + want bool + wantErr bool + }{ + {"true lowercase", []string{"--is-Argo-Rollouts=true"}, true, false}, + {"TRUE uppercase", []string{"--is-Argo-Rollouts=TRUE"}, true, false}, + {"1", []string{"--is-Argo-Rollouts=1"}, true, false}, + {"yes", []string{"--is-Argo-Rollouts=yes"}, true, false}, + {"false", []string{"--is-Argo-Rollouts=false"}, false, false}, + {"no", []string{"--is-Argo-Rollouts=no"}, false, false}, + {"0", []string{"--is-Argo-Rollouts=0"}, false, false}, + {"empty", []string{}, false, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Reset flag values + fv = flagValues{} + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse(tt.args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + err := ApplyFlags(cfg) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyFlags() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if cfg.ArgoRolloutsEnabled != tt.want { + t.Errorf("ArgoRolloutsEnabled = %v, want %v", cfg.ArgoRolloutsEnabled, tt.want) + } + }) + } +} + +func TestApplyFlags_CommaSeparatedLists(t *testing.T) { + // Reset flag values + fv = flagValues{} + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + args := []string{ + "--resources-to-ignore=configMaps,secrets", + "--ignored-workload-types=jobs,cronjobs", + 
"--namespaces-to-ignore=kube-system,kube-public", + } + + if err := fs.Parse(args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + // Check ignored resources + if len(cfg.IgnoredResources) != 2 { + t.Errorf("IgnoredResources length = %d, want 2", len(cfg.IgnoredResources)) + } + if cfg.IgnoredResources[0] != "configMaps" || cfg.IgnoredResources[1] != "secrets" { + t.Errorf("IgnoredResources = %v", cfg.IgnoredResources) + } + + // Check ignored workloads + if len(cfg.IgnoredWorkloads) != 2 { + t.Errorf("IgnoredWorkloads length = %d, want 2", len(cfg.IgnoredWorkloads)) + } + + // Check ignored namespaces + if len(cfg.IgnoredNamespaces) != 2 { + t.Errorf("IgnoredNamespaces length = %d, want 2", len(cfg.IgnoredNamespaces)) + } +} + +func TestApplyFlags_Selectors(t *testing.T) { + // Reset flag values + fv = flagValues{} + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + args := []string{ + "--namespace-selector=env=production,team=platform", + "--resource-label-selector=app=myapp", + } + + if err := fs.Parse(args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if len(cfg.NamespaceSelectors) != 2 { + t.Errorf("NamespaceSelectors length = %d, want 2", len(cfg.NamespaceSelectors)) + } + + if len(cfg.ResourceSelectors) != 1 { + t.Errorf("ResourceSelectors length = %d, want 1", len(cfg.ResourceSelectors)) + } + + // Check string versions are preserved + if len(cfg.NamespaceSelectorStrings) != 2 { + t.Errorf("NamespaceSelectorStrings length = %d, want 2", len(cfg.NamespaceSelectorStrings)) + } +} + +func TestApplyFlags_InvalidSelector(t *testing.T) { + // Reset flag values + fv = flagValues{} + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + args := []string{ + 
"--namespace-selector=env in (prod,staging", // missing closing paren + } + + if err := fs.Parse(args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + err := ApplyFlags(cfg) + if err == nil { + t.Error("ApplyFlags() should return error for invalid selector") + } +} + +func TestParseBoolString(t *testing.T) { + tests := []struct { + input string + want bool + }{ + {"true", true}, + {"TRUE", true}, + {"True", true}, + {" true ", true}, + {"1", true}, + {"yes", true}, + {"YES", true}, + {"false", false}, + {"FALSE", false}, + {"0", false}, + {"no", false}, + {"", false}, + {"invalid", false}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := parseBoolString(tt.input) + if got != tt.want { + t.Errorf("parseBoolString(%q) = %v, want %v", tt.input, got, tt.want) + } + }) + } +} + +func TestSplitAndTrim(t *testing.T) { + tests := []struct { + name string + input string + want []string + }{ + {"empty string", "", nil}, + {"single value", "abc", []string{"abc"}}, + {"multiple values", "a,b,c", []string{"a", "b", "c"}}, + {"with spaces", " a , b , c ", []string{"a", "b", "c"}}, + {"empty elements", "a,,b", []string{"a", "b"}}, + {"only commas", ",,,", []string{}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := splitAndTrim(tt.input) + if len(got) != len(tt.want) { + t.Errorf("splitAndTrim(%q) length = %d, want %d", tt.input, len(got), len(tt.want)) + return + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("splitAndTrim(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i]) + } + } + }) + } +} diff --git a/internal/pkg/config/validation_test.go b/internal/pkg/config/validation_test.go new file mode 100644 index 000000000..2972333c1 --- /dev/null +++ b/internal/pkg/config/validation_test.go @@ -0,0 +1,320 @@ +package config + +import ( + "strings" + "testing" +) + +func TestConfig_Validate_ReloadStrategy(t *testing.T) { + tests := []struct { + name string + strategy 
ReloadStrategy + wantErr bool + wantVal ReloadStrategy + }{ + {"valid env-vars", ReloadStrategyEnvVars, false, ReloadStrategyEnvVars}, + {"valid annotations", ReloadStrategyAnnotations, false, ReloadStrategyAnnotations}, + {"empty defaults to env-vars", "", false, ReloadStrategyEnvVars}, + {"invalid strategy", "invalid", true, ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.ReloadStrategy = tt.strategy + + err := cfg.Validate() + + if tt.wantErr { + if err == nil { + t.Error("Validate() should return error for invalid strategy") + } + return + } + + if err != nil { + t.Errorf("Validate() error = %v", err) + return + } + + if cfg.ReloadStrategy != tt.wantVal { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, tt.wantVal) + } + }) + } +} + +func TestConfig_Validate_ArgoRolloutStrategy(t *testing.T) { + tests := []struct { + name string + strategy ArgoRolloutStrategy + wantErr bool + wantVal ArgoRolloutStrategy + }{ + {"valid restart", ArgoRolloutStrategyRestart, false, ArgoRolloutStrategyRestart}, + {"valid rollout", ArgoRolloutStrategyRollout, false, ArgoRolloutStrategyRollout}, + {"empty defaults to rollout", "", false, ArgoRolloutStrategyRollout}, + {"invalid strategy", "invalid", true, ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.ArgoRolloutStrategy = tt.strategy + + err := cfg.Validate() + + if tt.wantErr { + if err == nil { + t.Error("Validate() should return error for invalid strategy") + } + return + } + + if err != nil { + t.Errorf("Validate() error = %v", err) + return + } + + if cfg.ArgoRolloutStrategy != tt.wantVal { + t.Errorf("ArgoRolloutStrategy = %v, want %v", cfg.ArgoRolloutStrategy, tt.wantVal) + } + }) + } +} + +func TestConfig_Validate_LogLevel(t *testing.T) { + validLevels := []string{"trace", "debug", "info", "warn", "warning", "error", "fatal", "panic", ""} + for _, level := range validLevels { + 
t.Run("valid_"+level, func(t *testing.T) { + cfg := NewDefault() + cfg.LogLevel = level + if err := cfg.Validate(); err != nil { + t.Errorf("Validate() error for level %q: %v", level, err) + } + }) + } + + t.Run("invalid level", func(t *testing.T) { + cfg := NewDefault() + cfg.LogLevel = "invalid" + err := cfg.Validate() + if err == nil { + t.Error("Validate() should return error for invalid log level") + } + }) +} + +func TestConfig_Validate_LogFormat(t *testing.T) { + tests := []struct { + name string + format string + wantErr bool + }{ + {"json format", "json", false}, + {"empty format", "", false}, + {"invalid format", "xml", true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.LogFormat = tt.format + err := cfg.Validate() + if (err != nil) != tt.wantErr { + t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestConfig_Validate_NormalizesIgnoredResources(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredResources = []string{"ConfigMaps", "SECRETS", " spaces "} + + if err := cfg.Validate(); err != nil { + t.Fatalf("Validate() error = %v", err) + } + + expected := []string{"configmaps", "secrets", "spaces"} + if len(cfg.IgnoredResources) != len(expected) { + t.Fatalf("IgnoredResources length = %d, want %d", len(cfg.IgnoredResources), len(expected)) + } + + for i, got := range cfg.IgnoredResources { + if got != expected[i] { + t.Errorf("IgnoredResources[%d] = %q, want %q", i, got, expected[i]) + } + } +} + +func TestConfig_Validate_NormalizesIgnoredWorkloads(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredWorkloads = []string{"Jobs", "CRONJOBS", ""} + + if err := cfg.Validate(); err != nil { + t.Fatalf("Validate() error = %v", err) + } + + expected := []string{"jobs", "cronjobs"} + if len(cfg.IgnoredWorkloads) != len(expected) { + t.Fatalf("IgnoredWorkloads length = %d, want %d", len(cfg.IgnoredWorkloads), len(expected)) + } + + for i, got := range cfg.IgnoredWorkloads { 
+ if got != expected[i] { + t.Errorf("IgnoredWorkloads[%d] = %q, want %q", i, got, expected[i]) + } + } +} + +func TestConfig_Validate_MultipleErrors(t *testing.T) { + cfg := NewDefault() + cfg.ReloadStrategy = "invalid" + cfg.ArgoRolloutStrategy = "invalid" + cfg.LogLevel = "invalid" + cfg.LogFormat = "invalid" + + err := cfg.Validate() + if err == nil { + t.Fatal("Validate() should return error for multiple invalid values") + } + + errs, ok := err.(ValidationErrors) + if !ok { + t.Fatalf("Expected ValidationErrors, got %T", err) + } + + if len(errs) != 4 { + t.Errorf("Expected 4 errors, got %d: %v", len(errs), errs) + } +} + +func TestValidationError_Error(t *testing.T) { + err := ValidationError{ + Field: "TestField", + Message: "test message", + } + + expected := "config.TestField: test message" + if err.Error() != expected { + t.Errorf("Error() = %q, want %q", err.Error(), expected) + } +} + +func TestValidationErrors_Error(t *testing.T) { + t.Run("empty", func(t *testing.T) { + var errs ValidationErrors + if errs.Error() != "" { + t.Errorf("Empty errors should return empty string, got %q", errs.Error()) + } + }) + + t.Run("single error", func(t *testing.T) { + errs := ValidationErrors{ + {Field: "Field1", Message: "error1"}, + } + if !strings.Contains(errs.Error(), "Field1") { + t.Errorf("Error() should contain field name, got %q", errs.Error()) + } + }) + + t.Run("multiple errors", func(t *testing.T) { + errs := ValidationErrors{ + {Field: "Field1", Message: "error1"}, + {Field: "Field2", Message: "error2"}, + } + errStr := errs.Error() + if !strings.Contains(errStr, "multiple configuration errors") { + t.Errorf("Error() should mention multiple errors, got %q", errStr) + } + if !strings.Contains(errStr, "Field1") || !strings.Contains(errStr, "Field2") { + t.Errorf("Error() should contain all field names, got %q", errStr) + } + }) +} + +func TestParseSelectors(t *testing.T) { + tests := []struct { + name string + selectors []string + wantLen int + wantErr 
bool + }{ + {"nil input", nil, 0, false}, + {"empty input", []string{}, 0, false}, + {"single valid selector", []string{"env=production"}, 1, false}, + {"multiple valid selectors", []string{"env=production", "team=platform"}, 2, false}, + {"selector with whitespace", []string{" env=production "}, 1, false}, + {"empty string in list", []string{"env=production", "", "team=platform"}, 2, false}, + {"invalid selector syntax", []string{"env in (prod,staging"}, 0, true}, // missing closing paren + {"set-based selector", []string{"env in (prod,staging)"}, 1, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + selectors, err := ParseSelectors(tt.selectors) + if (err != nil) != tt.wantErr { + t.Errorf("ParseSelectors() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && len(selectors) != tt.wantLen { + t.Errorf("ParseSelectors() returned %d selectors, want %d", len(selectors), tt.wantLen) + } + }) + } +} + +func TestMustParseSelectors(t *testing.T) { + t.Run("valid selectors", func(t *testing.T) { + selectors := MustParseSelectors([]string{"env=production"}) + if len(selectors) != 1 { + t.Errorf("MustParseSelectors() returned %d selectors, want 1", len(selectors)) + } + }) + + t.Run("panics on invalid", func(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Error("MustParseSelectors() should panic on invalid selector") + } + }() + MustParseSelectors([]string{"env in (prod,staging"}) // missing closing paren + }) +} + +func TestNormalizeToLower(t *testing.T) { + tests := []struct { + name string + input []string + want []string + }{ + {"nil input", nil, nil}, + {"empty input", []string{}, []string{}}, + {"lowercase", []string{"abc"}, []string{"abc"}}, + {"uppercase", []string{"ABC"}, []string{"abc"}}, + {"mixed case", []string{"AbC"}, []string{"abc"}}, + {"with whitespace", []string{" abc "}, []string{"abc"}}, + {"removes empty", []string{"abc", "", "def"}, []string{"abc", "def"}}, + {"only whitespace", 
[]string{" "}, []string{}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := normalizeToLower(tt.input) + if tt.want == nil && got != nil { + t.Errorf("normalizeToLower() = %v, want nil", got) + return + } + if len(got) != len(tt.want) { + t.Errorf("normalizeToLower() length = %d, want %d", len(got), len(tt.want)) + return + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("normalizeToLower()[%d] = %q, want %q", i, got[i], tt.want[i]) + } + } + }) + } +} diff --git a/internal/pkg/controller/configmap_reconciler_test.go b/internal/pkg/controller/configmap_reconciler_test.go new file mode 100644 index 000000000..cd4e8d35c --- /dev/null +++ b/internal/pkg/controller/configmap_reconciler_test.go @@ -0,0 +1,844 @@ +package controller_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr/testr" + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func newTestScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = batchv1.AddToScheme(scheme) + return scheme +} + +func newTestConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.ConfigMapReconciler { + scheme := newTestScheme() + + fakeClient := fake.NewClientBuilder(). 
+ WithScheme(scheme). + WithRuntimeObjects(objects...). + Build() + + collectors := metrics.NewCollectors() + + return &controller.ConfigMapReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + ReloadService: reload.NewService(cfg), + Registry: workload.NewRegistry(cfg.ArgoRolloutsEnabled), + Collectors: &collectors, + EventRecorder: events.NewRecorder(nil), + WebhookClient: webhook.NewClient("", testr.New(t)), + Alerter: &alerting.NoOpAlerter{}, + } +} + +func TestConfigMapReconciler_NotFound(t *testing.T) { + cfg := config.NewDefault() + reconciler := newTestConfigMapReconciler(t, cfg) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "nonexistent-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue for NotFound") + } +} + +func TestConfigMapReconciler_NotFound_ReloadOnDelete(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnDelete = true + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "deleted-cm", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "deleted-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not 
requeue") + } +} + +func TestConfigMapReconciler_IgnoredNamespace(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", + }, + Data: map[string]string{"key": "value"}, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "kube-system", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue for ignored namespace") + } +} + +func TestConfigMapReconciler_NoMatchingWorkloads(t *testing.T) { + cfg := config.NewDefault() + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + cm := &corev1.ConfigMap{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + }, + }}, + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { + cfg := config.NewDefault() + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + 
reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_WorkloadInDifferentNamespace(t *testing.T) { + cfg := config.NewDefault() + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "namespace-a", + }, + Data: map[string]string{"key": "value"}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "namespace-b", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "namespace-a", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_IgnoredWorkloadType(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredWorkloads = []string{"deployment"} + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + 
Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_DaemonSet(t *testing.T) { + cfg := config.NewDefault() + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + daemonset := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-daemonset", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, daemonset) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + 
t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_StatefulSet(t *testing.T) { + cfg := config.NewDefault() + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + statefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulset", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, statefulset) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_MultipleWorkloads(t *testing.T) { + cfg := config.NewDefault() + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shared-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + } + + deployment1 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-1", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test1"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test1"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + 
Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + deployment2 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-2", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test2"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test2"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + daemonset := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "daemonset-1", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "daemon"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "daemon"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment1, deployment2, daemonset) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "shared-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_VolumeMount(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "volume-cm", + Namespace: "default", + }, + Data: map[string]string{"config.yaml": "key: value"}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: 
"default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + VolumeMounts: []corev1.VolumeMount{{ + Name: "config", + MountPath: "/etc/config", + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "volume-cm", + }, + }, + }, + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "volume-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_ProjectedVolume(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "projected-cm", + Namespace: "default", + }, + Data: map[string]string{"config.yaml": "key: value"}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + VolumeMounts: []corev1.VolumeMount{{ + Name: "config", + MountPath: "/etc/config", + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "config", + VolumeSource: corev1.VolumeSource{ + 
Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "projected-cm", + }, + }, + }}, + }, + }, + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "projected-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestConfigMapReconciler_SearchAnnotation(t *testing.T) { + cfg := config.NewDefault() + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.Match: "true", + }, + }, + Data: map[string]string{"key": "value"}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.Search: "true", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-cm", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} diff --git a/internal/pkg/controller/secret_reconciler_test.go 
b/internal/pkg/controller/secret_reconciler_test.go new file mode 100644 index 000000000..155324aa2 --- /dev/null +++ b/internal/pkg/controller/secret_reconciler_test.go @@ -0,0 +1,1017 @@ +package controller_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr/testr" + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func newTestSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.SecretReconciler { + scheme := newTestScheme() + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(objects...). 
+ Build() + + collectors := metrics.NewCollectors() + + return &controller.SecretReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + ReloadService: reload.NewService(cfg), + Registry: workload.NewRegistry(cfg.ArgoRolloutsEnabled), + Collectors: &collectors, + EventRecorder: events.NewRecorder(nil), + WebhookClient: webhook.NewClient("", testr.New(t)), + Alerter: &alerting.NoOpAlerter{}, + } +} + +func TestSecretReconciler_NotFound(t *testing.T) { + cfg := config.NewDefault() + reconciler := newTestSecretReconciler(t, cfg) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "nonexistent-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue for NotFound") + } +} + +func TestSecretReconciler_NotFound_ReloadOnDelete(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnDelete = true + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "deleted-secret", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "deleted-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func 
TestSecretReconciler_IgnoredNamespace(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "kube-system", + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + reconciler := newTestSecretReconciler(t, cfg, secret) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "kube-system", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue for ignored namespace") + } +} + +func TestSecretReconciler_NoMatchingWorkloads(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + secret := &corev1.Secret{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{"password": []byte("secret123")}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + EnvFrom: []corev1.EnvFromSource{{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-secret", + }, + }, + }}, + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{"password": []byte("secret123")}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + 
Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_WorkloadInDifferentNamespace(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "namespace-a", + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "namespace-b", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "namespace-a", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_IgnoredWorkloadType(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredWorkloads = []string{"deployment"} + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + deployment := 
&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_DaemonSet(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + daemonset := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-daemonset", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, daemonset) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "default", + }, + } + + result, err := 
reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_StatefulSet(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + statefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulset", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, statefulset) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_VolumeMount(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "volume-secret", + Namespace: "default", + }, + Data: map[string][]byte{"credentials": []byte("supersecret")}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + 
Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + VolumeMounts: []corev1.VolumeMount{{ + Name: "secrets", + MountPath: "/etc/secrets", + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "secrets", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "volume-secret", + }, + }, + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "volume-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_ProjectedVolume(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "projected-secret", + Namespace: "default", + }, + Data: map[string][]byte{"credentials": []byte("supersecret")}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + VolumeMounts: []corev1.VolumeMount{{ + Name: "secrets", + MountPath: "/etc/secrets", + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "secrets", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "projected-secret", + }, + }, + }}, + }, + }, + }}, + }, + 
}, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "projected-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_EnvKeyRef(t *testing.T) { + cfg := config.NewDefault() + cfg.AutoReloadAll = true + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "key-ref-secret", + Namespace: "default", + }, + Data: map[string][]byte{"password": []byte("secret123")}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + Env: []corev1.EnvVar{{ + Name: "DB_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "key-ref-secret", + }, + Key: "password", + }, + }, + }}, + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "key-ref-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_MultipleWorkloads(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shared-secret", + Namespace: 
"default", + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + deployment1 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-1", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test1"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test1"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + deployment2 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-2", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test2"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test2"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + statefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "statefulset-1", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "stateful"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "stateful"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment1, deployment2, statefulset) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "shared-secret", + Namespace: 
"default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_SearchAnnotation(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.Match: "true", + }, + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.Search: "true", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_TLSSecret(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-secret", + Namespace: "default", + }, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + "tls.crt": []byte("-----BEGIN CERTIFICATE-----\ntest\n-----END CERTIFICATE-----"), + "tls.key": []byte("-----BEGIN RSA PRIVATE KEY-----\ntest\n-----END RSA PRIVATE KEY-----"), + }, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "tls-secret", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "tls-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +func TestSecretReconciler_ImagePullSecret(t *testing.T) { + cfg := config.NewDefault() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "registry-secret", + Namespace: "default", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{ + ".dockerconfigjson": []byte(`{"auths":{}}`), + }, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + Annotations: map[string]string{ + cfg.Annotations.SecretReload: "registry-secret", + }, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "registry-secret", + }}, + }, + }, + }, + } + + reconciler := newTestSecretReconciler(t, cfg, secret, deployment) + + req := ctrl.Request{ + NamespacedName: 
types.NamespacedName{ + Name: "registry-secret", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} From 3defc8bb297d7fa7255e56b22868c6cdb435f836 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:55 +0100 Subject: [PATCH 12/35] refactor: Move all common reconcile logic to lister and reload handler --- internal/pkg/alerting/alerter_test.go | 400 ++++++++---------- internal/pkg/config/config_test.go | 86 ++-- internal/pkg/config/validation.go | 52 +-- internal/pkg/config/validation_test.go | 266 ++++++------ .../pkg/controller/configmap_reconciler.go | 374 ++-------------- internal/pkg/controller/handler.go | 187 ++++++++ internal/pkg/controller/secret_reconciler.go | 374 ++-------------- internal/pkg/reload/service.go | 11 + internal/pkg/workload/lister.go | 111 +++++ 9 files changed, 761 insertions(+), 1100 deletions(-) create mode 100644 internal/pkg/controller/handler.go create mode 100644 internal/pkg/workload/lister.go diff --git a/internal/pkg/alerting/alerter_test.go b/internal/pkg/alerting/alerter_test.go index 6e5724f4d..74cc95e61 100644 --- a/internal/pkg/alerting/alerter_test.go +++ b/internal/pkg/alerting/alerter_test.go @@ -3,6 +3,7 @@ package alerting import ( "context" "encoding/json" + "io" "net/http" "net/http/httptest" "testing" @@ -11,97 +12,11 @@ import ( "github.com/stakater/Reloader/internal/pkg/config" ) -func TestNewAlerter_Disabled(t *testing.T) { - cfg := config.NewDefault() - cfg.Alerting.Enabled = false - - alerter := NewAlerter(cfg) - if _, ok := alerter.(*NoOpAlerter); !ok { - t.Error("Expected NoOpAlerter when alerting is disabled") - } -} - -func TestNewAlerter_NoWebhookURL(t *testing.T) { - cfg := config.NewDefault() - cfg.Alerting.Enabled = true - cfg.Alerting.WebhookURL = "" - - 
alerter := NewAlerter(cfg) - if _, ok := alerter.(*NoOpAlerter); !ok { - t.Error("Expected NoOpAlerter when webhook URL is empty") - } -} - -func TestNewAlerter_Slack(t *testing.T) { - cfg := config.NewDefault() - cfg.Alerting.Enabled = true - cfg.Alerting.WebhookURL = "http://example.com/webhook" - cfg.Alerting.Sink = "slack" - - alerter := NewAlerter(cfg) - if _, ok := alerter.(*SlackAlerter); !ok { - t.Error("Expected SlackAlerter for sink=slack") - } -} - -func TestNewAlerter_Teams(t *testing.T) { - cfg := config.NewDefault() - cfg.Alerting.Enabled = true - cfg.Alerting.WebhookURL = "http://example.com/webhook" - cfg.Alerting.Sink = "teams" - - alerter := NewAlerter(cfg) - if _, ok := alerter.(*TeamsAlerter); !ok { - t.Error("Expected TeamsAlerter for sink=teams") - } -} - -func TestNewAlerter_GChat(t *testing.T) { - cfg := config.NewDefault() - cfg.Alerting.Enabled = true - cfg.Alerting.WebhookURL = "http://example.com/webhook" - cfg.Alerting.Sink = "gchat" - - alerter := NewAlerter(cfg) - if _, ok := alerter.(*GChatAlerter); !ok { - t.Error("Expected GChatAlerter for sink=gchat") - } -} - -func TestNewAlerter_Raw(t *testing.T) { - cfg := config.NewDefault() - cfg.Alerting.Enabled = true - cfg.Alerting.WebhookURL = "http://example.com/webhook" - cfg.Alerting.Sink = "raw" - - alerter := NewAlerter(cfg) - if _, ok := alerter.(*RawAlerter); !ok { - t.Error("Expected RawAlerter for sink=raw") - } -} - -func TestNewAlerter_DefaultIsRaw(t *testing.T) { - cfg := config.NewDefault() - cfg.Alerting.Enabled = true - cfg.Alerting.WebhookURL = "http://example.com/webhook" - cfg.Alerting.Sink = "" // Empty sink should default to raw - - alerter := NewAlerter(cfg) - if _, ok := alerter.(*RawAlerter); !ok { - t.Error("Expected RawAlerter for empty sink") - } -} - -func TestNoOpAlerter_Send(t *testing.T) { - alerter := &NoOpAlerter{} - err := alerter.Send(context.Background(), AlertMessage{}) - if err != nil { - t.Errorf("NoOpAlerter.Send() error = %v, want nil", err) - } -} 
- -func TestSlackAlerter_Send(t *testing.T) { - var receivedBody []byte +// testServer creates a test HTTP server that captures the request body. +// Returns the server and a function to retrieve the captured body. +func testServer(t *testing.T) (*httptest.Server, func() []byte) { + t.Helper() + var body []byte server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Expected POST request, got %s", r.Method) @@ -109,49 +24,15 @@ func TestSlackAlerter_Send(t *testing.T) { if r.Header.Get("Content-Type") != "application/json" { t.Errorf("Expected Content-Type application/json, got %s", r.Header.Get("Content-Type")) } - receivedBody = make([]byte, r.ContentLength) - r.Body.Read(receivedBody) + body, _ = io.ReadAll(r.Body) w.WriteHeader(http.StatusOK) })) - defer server.Close() - - alerter := NewSlackAlerter(server.URL, "", "Test Cluster") - msg := AlertMessage{ - WorkloadKind: "Deployment", - WorkloadName: "nginx", - WorkloadNamespace: "default", - ResourceKind: "ConfigMap", - ResourceName: "nginx-config", - ResourceNamespace: "default", - Timestamp: time.Now(), - } - - err := alerter.Send(context.Background(), msg) - if err != nil { - t.Fatalf("SlackAlerter.Send() error = %v", err) - } - - var slackMsg slackMessage - if err := json.Unmarshal(receivedBody, &slackMsg); err != nil { - t.Fatalf("Failed to unmarshal slack message: %v", err) - } - - if slackMsg.Text == "" { - t.Error("Expected non-empty text in slack message") - } + return server, func() []byte { return body } } -func TestTeamsAlerter_Send(t *testing.T) { - var receivedBody []byte - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - receivedBody = make([]byte, r.ContentLength) - r.Body.Read(receivedBody) - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - alerter := NewTeamsAlerter(server.URL, "", "") - msg := AlertMessage{ +// testAlertMessage returns a standard 
AlertMessage for testing. +func testAlertMessage() AlertMessage { + return AlertMessage{ WorkloadKind: "Deployment", WorkloadName: "nginx", WorkloadNamespace: "default", @@ -160,108 +41,201 @@ func TestTeamsAlerter_Send(t *testing.T) { ResourceNamespace: "default", Timestamp: time.Now(), } - - err := alerter.Send(context.Background(), msg) - if err != nil { - t.Fatalf("TeamsAlerter.Send() error = %v", err) - } - - var teamsMsg teamsMessage - if err := json.Unmarshal(receivedBody, &teamsMsg); err != nil { - t.Fatalf("Failed to unmarshal teams message: %v", err) - } - - if teamsMsg.Type != "MessageCard" { - t.Errorf("Expected @type=MessageCard, got %s", teamsMsg.Type) - } } -func TestGChatAlerter_Send(t *testing.T) { - var receivedBody []byte - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - receivedBody = make([]byte, r.ContentLength) - r.Body.Read(receivedBody) - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - alerter := NewGChatAlerter(server.URL, "", "") - msg := AlertMessage{ - WorkloadKind: "Deployment", - WorkloadName: "nginx", - WorkloadNamespace: "default", - ResourceKind: "ConfigMap", - ResourceName: "nginx-config", - ResourceNamespace: "default", - Timestamp: time.Now(), - } - - err := alerter.Send(context.Background(), msg) - if err != nil { - t.Fatalf("GChatAlerter.Send() error = %v", err) - } - - var gchatMsg gchatMessage - if err := json.Unmarshal(receivedBody, &gchatMsg); err != nil { - t.Fatalf("Failed to unmarshal gchat message: %v", err) - } - - if len(gchatMsg.Cards) != 1 { - t.Errorf("Expected 1 card, got %d", len(gchatMsg.Cards)) +func TestNewAlerter(t *testing.T) { + tests := []struct { + name string + setup func(*config.Config) + wantType string + }{ + { + name: "disabled", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = false + }, + wantType: "*alerting.NoOpAlerter", + }, + { + name: "no webhook URL", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + 
cfg.Alerting.WebhookURL = "" + }, + wantType: "*alerting.NoOpAlerter", + }, + { + name: "slack", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "slack" + }, + wantType: "*alerting.SlackAlerter", + }, + { + name: "teams", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "teams" + }, + wantType: "*alerting.TeamsAlerter", + }, + { + name: "gchat", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "gchat" + }, + wantType: "*alerting.GChatAlerter", + }, + { + name: "raw", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "raw" + }, + wantType: "*alerting.RawAlerter", + }, + { + name: "empty sink defaults to raw", + setup: func(cfg *config.Config) { + cfg.Alerting.Enabled = true + cfg.Alerting.WebhookURL = "http://example.com/webhook" + cfg.Alerting.Sink = "" + }, + wantType: "*alerting.RawAlerter", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := config.NewDefault() + tt.setup(cfg) + alerter := NewAlerter(cfg) + gotType := getTypeName(alerter) + if gotType != tt.wantType { + t.Errorf("NewAlerter() type = %s, want %s", gotType, tt.wantType) + } + }) } } -func TestRawAlerter_Send(t *testing.T) { - var receivedBody []byte - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - receivedBody = make([]byte, r.ContentLength) - r.Body.Read(receivedBody) - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - alerter := NewRawAlerter(server.URL, "", "custom-info") - msg := AlertMessage{ - WorkloadKind: "Deployment", - WorkloadName: "nginx", - WorkloadNamespace: "default", - ResourceKind: "ConfigMap", - ResourceName: 
"nginx-config", - ResourceNamespace: "default", - Timestamp: time.Now(), - } - - err := alerter.Send(context.Background(), msg) - if err != nil { - t.Fatalf("RawAlerter.Send() error = %v", err) +func getTypeName(a Alerter) string { + switch a.(type) { + case *NoOpAlerter: + return "*alerting.NoOpAlerter" + case *SlackAlerter: + return "*alerting.SlackAlerter" + case *TeamsAlerter: + return "*alerting.TeamsAlerter" + case *GChatAlerter: + return "*alerting.GChatAlerter" + case *RawAlerter: + return "*alerting.RawAlerter" + default: + return "unknown" } +} - var rawMsg rawMessage - if err := json.Unmarshal(receivedBody, &rawMsg); err != nil { - t.Fatalf("Failed to unmarshal raw message: %v", err) +func TestNoOpAlerter_Send(t *testing.T) { + alerter := &NoOpAlerter{} + if err := alerter.Send(context.Background(), AlertMessage{}); err != nil { + t.Errorf("NoOpAlerter.Send() error = %v, want nil", err) } +} - if rawMsg.Event != "reload" { - t.Errorf("Expected event=reload, got %s", rawMsg.Event) - } - if rawMsg.WorkloadName != "nginx" { - t.Errorf("Expected workloadName=nginx, got %s", rawMsg.WorkloadName) - } - if rawMsg.Additional != "custom-info" { - t.Errorf("Expected additional=custom-info, got %s", rawMsg.Additional) +func TestAlerter_Send(t *testing.T) { + tests := []struct { + name string + newAlert func(url string) Alerter + validate func(t *testing.T, body []byte) + }{ + { + name: "slack", + newAlert: func(url string) Alerter { return NewSlackAlerter(url, "", "Test Cluster") }, + validate: func(t *testing.T, body []byte) { + var msg slackMessage + if err := json.Unmarshal(body, &msg); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + if msg.Text == "" { + t.Error("Expected non-empty text") + } + }, + }, + { + name: "teams", + newAlert: func(url string) Alerter { return NewTeamsAlerter(url, "", "") }, + validate: func(t *testing.T, body []byte) { + var msg teamsMessage + if err := json.Unmarshal(body, &msg); err != nil { + t.Fatalf("Failed to 
unmarshal: %v", err) + } + if msg.Type != "MessageCard" { + t.Errorf("@type = %s, want MessageCard", msg.Type) + } + }, + }, + { + name: "gchat", + newAlert: func(url string) Alerter { return NewGChatAlerter(url, "", "") }, + validate: func(t *testing.T, body []byte) { + var msg gchatMessage + if err := json.Unmarshal(body, &msg); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + if len(msg.Cards) != 1 { + t.Errorf("cards = %d, want 1", len(msg.Cards)) + } + }, + }, + { + name: "raw", + newAlert: func(url string) Alerter { return NewRawAlerter(url, "", "custom-info") }, + validate: func(t *testing.T, body []byte) { + var msg rawMessage + if err := json.Unmarshal(body, &msg); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + if msg.Event != "reload" { + t.Errorf("event = %s, want reload", msg.Event) + } + if msg.WorkloadName != "nginx" { + t.Errorf("workloadName = %s, want nginx", msg.WorkloadName) + } + if msg.Additional != "custom-info" { + t.Errorf("additional = %s, want custom-info", msg.Additional) + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server, getBody := testServer(t) + defer server.Close() + + alerter := tt.newAlert(server.URL) + if err := alerter.Send(context.Background(), testAlertMessage()); err != nil { + t.Fatalf("Send() error = %v", err) + } + tt.validate(t, getBody()) + }) } } func TestAlerter_WebhookError(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte("Internal Server Error")) })) defer server.Close() alerter := NewRawAlerter(server.URL, "", "") - err := alerter.Send(context.Background(), AlertMessage{}) - if err == nil { + if err := alerter.Send(context.Background(), AlertMessage{}); err == nil { t.Error("Expected error for non-2xx response") } } diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 
d9f740a7a..38b7ab538 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -84,11 +84,13 @@ func TestDefaultAnnotations(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.got != tt.want { - t.Errorf("%s = %q, want %q", tt.name, tt.got, tt.want) - } - }) + t.Run( + tt.name, func(t *testing.T) { + if tt.got != tt.want { + t.Errorf("%s = %q, want %q", tt.name, tt.got, tt.want) + } + }, + ) } } @@ -134,12 +136,14 @@ func TestConfig_IsResourceIgnored(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := cfg.IsResourceIgnored(tt.resource) - if got != tt.want { - t.Errorf("IsResourceIgnored(%q) = %v, want %v", tt.resource, got, tt.want) - } - }) + t.Run( + tt.name, func(t *testing.T) { + got := cfg.IsResourceIgnored(tt.resource) + if got != tt.want { + t.Errorf("IsResourceIgnored(%q) = %v, want %v", tt.resource, got, tt.want) + } + }, + ) } } @@ -159,12 +163,14 @@ func TestConfig_IsWorkloadIgnored(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := cfg.IsWorkloadIgnored(tt.workload) - if got != tt.want { - t.Errorf("IsWorkloadIgnored(%q) = %v, want %v", tt.workload, got, tt.want) - } - }) + t.Run( + tt.name, func(t *testing.T) { + got := cfg.IsWorkloadIgnored(tt.workload) + if got != tt.want { + t.Errorf("IsWorkloadIgnored(%q) = %v, want %v", tt.workload, got, tt.want) + } + }, + ) } } @@ -184,12 +190,14 @@ func TestConfig_IsNamespaceIgnored(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := cfg.IsNamespaceIgnored(tt.namespace) - if got != tt.want { - t.Errorf("IsNamespaceIgnored(%q) = %v, want %v", tt.namespace, got, tt.want) - } - }) + t.Run( + tt.name, func(t *testing.T) { + got := cfg.IsNamespaceIgnored(tt.namespace) + if got != tt.want { + t.Errorf("IsNamespaceIgnored(%q) = %v, want %v", tt.namespace, got, tt.want) + } + }, + ) } } @@ -209,29 +217,13 @@ func TestEqualFold(t 
*testing.T) { } for _, tt := range tests { - t.Run(tt.s+"_"+tt.t, func(t *testing.T) { - got := equalFold(tt.s, tt.t) - if got != tt.want { - t.Errorf("equalFold(%q, %q) = %v, want %v", tt.s, tt.t, got, tt.want) - } - }) - } -} - -func TestReloadStrategy_String(t *testing.T) { - if string(ReloadStrategyEnvVars) != "env-vars" { - t.Errorf("ReloadStrategyEnvVars = %q, want %q", ReloadStrategyEnvVars, "env-vars") - } - if string(ReloadStrategyAnnotations) != "annotations" { - t.Errorf("ReloadStrategyAnnotations = %q, want %q", ReloadStrategyAnnotations, "annotations") - } -} - -func TestArgoRolloutStrategy_String(t *testing.T) { - if string(ArgoRolloutStrategyRestart) != "restart" { - t.Errorf("ArgoRolloutStrategyRestart = %q, want %q", ArgoRolloutStrategyRestart, "restart") - } - if string(ArgoRolloutStrategyRollout) != "rollout" { - t.Errorf("ArgoRolloutStrategyRollout = %q, want %q", ArgoRolloutStrategyRollout, "rollout") + t.Run( + tt.s+"_"+tt.t, func(t *testing.T) { + got := equalFold(tt.s, tt.t) + if got != tt.want { + t.Errorf("equalFold(%q, %q) = %v, want %v", tt.s, tt.t, got, tt.want) + } + }, + ) } } diff --git a/internal/pkg/config/validation.go b/internal/pkg/config/validation.go index 8a3bbfe54..0ebce4004 100644 --- a/internal/pkg/config/validation.go +++ b/internal/pkg/config/validation.go @@ -48,10 +48,12 @@ func (c *Config) Validate() error { case "": c.ReloadStrategy = ReloadStrategyEnvVars default: - errs = append(errs, ValidationError{ - Field: "ReloadStrategy", - Message: fmt.Sprintf("invalid value %q, must be %q or %q", c.ReloadStrategy, ReloadStrategyEnvVars, ReloadStrategyAnnotations), - }) + errs = append( + errs, ValidationError{ + Field: "ReloadStrategy", + Message: fmt.Sprintf("invalid value %q, must be %q or %q", c.ReloadStrategy, ReloadStrategyEnvVars, ReloadStrategyAnnotations), + }, + ) } // Validate ArgoRolloutStrategy @@ -61,10 +63,14 @@ func (c *Config) Validate() error { case "": c.ArgoRolloutStrategy = ArgoRolloutStrategyRollout 
default: - errs = append(errs, ValidationError{ - Field: "ArgoRolloutStrategy", - Message: fmt.Sprintf("invalid value %q, must be %q or %q", c.ArgoRolloutStrategy, ArgoRolloutStrategyRestart, ArgoRolloutStrategyRollout), - }) + errs = append( + errs, ValidationError{ + Field: "ArgoRolloutStrategy", + Message: fmt.Sprintf( + "invalid value %q, must be %q or %q", c.ArgoRolloutStrategy, ArgoRolloutStrategyRestart, ArgoRolloutStrategyRollout, + ), + }, + ) } // Validate LogLevel @@ -72,10 +78,12 @@ func (c *Config) Validate() error { case "trace", "debug", "info", "warn", "warning", "error", "fatal", "panic", "": // valid default: - errs = append(errs, ValidationError{ - Field: "LogLevel", - Message: fmt.Sprintf("invalid log level %q", c.LogLevel), - }) + errs = append( + errs, ValidationError{ + Field: "LogLevel", + Message: fmt.Sprintf("invalid log level %q", c.LogLevel), + }, + ) } // Validate LogFormat @@ -83,10 +91,12 @@ func (c *Config) Validate() error { case "json", "": // valid default: - errs = append(errs, ValidationError{ - Field: "LogFormat", - Message: fmt.Sprintf("invalid log format %q, must be \"json\" or empty", c.LogFormat), - }) + errs = append( + errs, ValidationError{ + Field: "LogFormat", + Message: fmt.Sprintf("invalid log format %q, must be \"json\" or empty", c.LogFormat), + }, + ) } // Normalize IgnoredResources to lowercase for consistent comparison @@ -136,13 +146,3 @@ func ParseSelectors(selectorStrings []string) ([]labels.Selector, error) { } return selectors, nil } - -// MustParseSelectors parses selectors and panics on error. -// Use only when selectors are known to be valid (e.g., from validated config). 
-func MustParseSelectors(selectorStrings []string) []labels.Selector { - selectors, err := ParseSelectors(selectorStrings) - if err != nil { - panic(err) - } - return selectors -} diff --git a/internal/pkg/config/validation_test.go b/internal/pkg/config/validation_test.go index 2972333c1..54e0a47d7 100644 --- a/internal/pkg/config/validation_test.go +++ b/internal/pkg/config/validation_test.go @@ -19,28 +19,30 @@ func TestConfig_Validate_ReloadStrategy(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := NewDefault() - cfg.ReloadStrategy = tt.strategy - - err := cfg.Validate() - - if tt.wantErr { - if err == nil { - t.Error("Validate() should return error for invalid strategy") + t.Run( + tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.ReloadStrategy = tt.strategy + + err := cfg.Validate() + + if tt.wantErr { + if err == nil { + t.Error("Validate() should return error for invalid strategy") + } + return } - return - } - if err != nil { - t.Errorf("Validate() error = %v", err) - return - } + if err != nil { + t.Errorf("Validate() error = %v", err) + return + } - if cfg.ReloadStrategy != tt.wantVal { - t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, tt.wantVal) - } - }) + if cfg.ReloadStrategy != tt.wantVal { + t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, tt.wantVal) + } + }, + ) } } @@ -58,51 +60,57 @@ func TestConfig_Validate_ArgoRolloutStrategy(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := NewDefault() - cfg.ArgoRolloutStrategy = tt.strategy - - err := cfg.Validate() - - if tt.wantErr { - if err == nil { - t.Error("Validate() should return error for invalid strategy") + t.Run( + tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.ArgoRolloutStrategy = tt.strategy + + err := cfg.Validate() + + if tt.wantErr { + if err == nil { + t.Error("Validate() should return error for invalid strategy") + } + return } - return - } - if err != nil { - 
t.Errorf("Validate() error = %v", err) - return - } + if err != nil { + t.Errorf("Validate() error = %v", err) + return + } - if cfg.ArgoRolloutStrategy != tt.wantVal { - t.Errorf("ArgoRolloutStrategy = %v, want %v", cfg.ArgoRolloutStrategy, tt.wantVal) - } - }) + if cfg.ArgoRolloutStrategy != tt.wantVal { + t.Errorf("ArgoRolloutStrategy = %v, want %v", cfg.ArgoRolloutStrategy, tt.wantVal) + } + }, + ) } } func TestConfig_Validate_LogLevel(t *testing.T) { validLevels := []string{"trace", "debug", "info", "warn", "warning", "error", "fatal", "panic", ""} for _, level := range validLevels { - t.Run("valid_"+level, func(t *testing.T) { - cfg := NewDefault() - cfg.LogLevel = level - if err := cfg.Validate(); err != nil { - t.Errorf("Validate() error for level %q: %v", level, err) - } - }) + t.Run( + "valid_"+level, func(t *testing.T) { + cfg := NewDefault() + cfg.LogLevel = level + if err := cfg.Validate(); err != nil { + t.Errorf("Validate() error for level %q: %v", level, err) + } + }, + ) } - t.Run("invalid level", func(t *testing.T) { - cfg := NewDefault() - cfg.LogLevel = "invalid" - err := cfg.Validate() - if err == nil { - t.Error("Validate() should return error for invalid log level") - } - }) + t.Run( + "invalid level", func(t *testing.T) { + cfg := NewDefault() + cfg.LogLevel = "invalid" + err := cfg.Validate() + if err == nil { + t.Error("Validate() should return error for invalid log level") + } + }, + ) } func TestConfig_Validate_LogFormat(t *testing.T) { @@ -117,14 +125,16 @@ func TestConfig_Validate_LogFormat(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := NewDefault() - cfg.LogFormat = tt.format - err := cfg.Validate() - if (err != nil) != tt.wantErr { - t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cfg := NewDefault() + cfg.LogFormat = tt.format + err := cfg.Validate() + if (err != nil) != tt.wantErr { + t.Errorf("Validate() error = %v, 
wantErr %v", err, tt.wantErr) + } + }, + ) } } @@ -203,35 +213,41 @@ func TestValidationError_Error(t *testing.T) { } func TestValidationErrors_Error(t *testing.T) { - t.Run("empty", func(t *testing.T) { - var errs ValidationErrors - if errs.Error() != "" { - t.Errorf("Empty errors should return empty string, got %q", errs.Error()) - } - }) - - t.Run("single error", func(t *testing.T) { - errs := ValidationErrors{ - {Field: "Field1", Message: "error1"}, - } - if !strings.Contains(errs.Error(), "Field1") { - t.Errorf("Error() should contain field name, got %q", errs.Error()) - } - }) + t.Run( + "empty", func(t *testing.T) { + var errs ValidationErrors + if errs.Error() != "" { + t.Errorf("Empty errors should return empty string, got %q", errs.Error()) + } + }, + ) - t.Run("multiple errors", func(t *testing.T) { - errs := ValidationErrors{ - {Field: "Field1", Message: "error1"}, - {Field: "Field2", Message: "error2"}, - } - errStr := errs.Error() - if !strings.Contains(errStr, "multiple configuration errors") { - t.Errorf("Error() should mention multiple errors, got %q", errStr) - } - if !strings.Contains(errStr, "Field1") || !strings.Contains(errStr, "Field2") { - t.Errorf("Error() should contain all field names, got %q", errStr) - } - }) + t.Run( + "single error", func(t *testing.T) { + errs := ValidationErrors{ + {Field: "Field1", Message: "error1"}, + } + if !strings.Contains(errs.Error(), "Field1") { + t.Errorf("Error() should contain field name, got %q", errs.Error()) + } + }, + ) + + t.Run( + "multiple errors", func(t *testing.T) { + errs := ValidationErrors{ + {Field: "Field1", Message: "error1"}, + {Field: "Field2", Message: "error2"}, + } + errStr := errs.Error() + if !strings.Contains(errStr, "multiple configuration errors") { + t.Errorf("Error() should mention multiple errors, got %q", errStr) + } + if !strings.Contains(errStr, "Field1") || !strings.Contains(errStr, "Field2") { + t.Errorf("Error() should contain all field names, got %q", errStr) + } + }, 
+ ) } func TestParseSelectors(t *testing.T) { @@ -252,37 +268,21 @@ func TestParseSelectors(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - selectors, err := ParseSelectors(tt.selectors) - if (err != nil) != tt.wantErr { - t.Errorf("ParseSelectors() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !tt.wantErr && len(selectors) != tt.wantLen { - t.Errorf("ParseSelectors() returned %d selectors, want %d", len(selectors), tt.wantLen) - } - }) + t.Run( + tt.name, func(t *testing.T) { + selectors, err := ParseSelectors(tt.selectors) + if (err != nil) != tt.wantErr { + t.Errorf("ParseSelectors() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && len(selectors) != tt.wantLen { + t.Errorf("ParseSelectors() returned %d selectors, want %d", len(selectors), tt.wantLen) + } + }, + ) } } -func TestMustParseSelectors(t *testing.T) { - t.Run("valid selectors", func(t *testing.T) { - selectors := MustParseSelectors([]string{"env=production"}) - if len(selectors) != 1 { - t.Errorf("MustParseSelectors() returned %d selectors, want 1", len(selectors)) - } - }) - - t.Run("panics on invalid", func(t *testing.T) { - defer func() { - if r := recover(); r == nil { - t.Error("MustParseSelectors() should panic on invalid selector") - } - }() - MustParseSelectors([]string{"env in (prod,staging"}) // missing closing paren - }) -} - func TestNormalizeToLower(t *testing.T) { tests := []struct { name string @@ -300,21 +300,23 @@ func TestNormalizeToLower(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := normalizeToLower(tt.input) - if tt.want == nil && got != nil { - t.Errorf("normalizeToLower() = %v, want nil", got) - return - } - if len(got) != len(tt.want) { - t.Errorf("normalizeToLower() length = %d, want %d", len(got), len(tt.want)) - return - } - for i := range got { - if got[i] != tt.want[i] { - t.Errorf("normalizeToLower()[%d] = %q, want %q", i, got[i], tt.want[i]) + t.Run( + 
tt.name, func(t *testing.T) { + got := normalizeToLower(tt.input) + if tt.want == nil && got != nil { + t.Errorf("normalizeToLower() = %v, want nil", got) + return } - } - }) + if len(got) != len(tt.want) { + t.Errorf("normalizeToLower() length = %d, want %d", len(got), len(tt.want)) + return + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("normalizeToLower()[%d] = %q, want %q", i, got[i], tt.want[i]) + } + } + }, + ) } } diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index 447348c00..3f744ea57 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -3,7 +3,6 @@ package controller import ( "context" "sync" - "time" "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/alerting" @@ -13,14 +12,10 @@ import ( "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -36,6 +31,7 @@ type ConfigMapReconciler struct { WebhookClient *webhook.Client Alerter alerting.Alerter + handler *ReloadHandler initialized bool initOnce sync.Once } @@ -44,17 +40,14 @@ type ConfigMapReconciler struct { func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("configmap", req.NamespacedName) - // Mark as initialized after first reconcile (caches are synced at this point) r.initOnce.Do(func() { r.initialized = true log.Info("ConfigMap controller initialized") }) - // Fetch the ConfigMap var cm corev1.ConfigMap if err := 
r.Get(ctx, req.NamespacedName, &cm); err != nil { if errors.IsNotFound(err) { - // ConfigMap was deleted - handle if ReloadOnDelete is enabled if r.Config.ReloadOnDelete { return r.handleDelete(ctx, req, log) } @@ -64,364 +57,63 @@ func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, err } - // Check if namespace should be ignored if r.Config.IsNamespaceIgnored(cm.Namespace) { log.V(1).Info("skipping ConfigMap in ignored namespace") return ctrl.Result{}, nil } - // Get all workloads in the same namespace - workloads, err := r.listWorkloads(ctx, cm.Namespace) - if err != nil { - log.Error(err, "failed to list workloads") - return ctrl.Result{}, err - } - - // Evaluate which workloads should be reloaded - change := reload.ConfigMapChange{ - ConfigMap: &cm, - EventType: reload.EventTypeUpdate, - } - decisions := r.ReloadService.ProcessConfigMap(change, workloads) - - // Collect workloads that should be reloaded - var workloadsToReload []reload.ReloadDecision - for _, decision := range decisions { - if decision.ShouldReload { - workloadsToReload = append(workloadsToReload, decision) - } - } - - // If webhook is configured, send notification instead of modifying workloads - if r.WebhookClient.IsConfigured() && len(workloadsToReload) > 0 { - return r.sendWebhookNotification(ctx, cm.Name, cm.Namespace, reload.ResourceTypeConfigMap, workloadsToReload, log) - } - - // Apply reloads with conflict retry - for _, decision := range workloadsToReload { - log.Info("reloading workload", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - "reason", decision.Reason, - ) - - updated, err := UpdateWorkloadWithRetry( - ctx, - r.Client, - r.ReloadService, - decision.Workload, - cm.Name, - reload.ResourceTypeConfigMap, - cm.Namespace, - decision.Hash, - decision.AutoReload, - ) - if err != nil { - log.Error(err, "failed to update workload", - "workload", decision.Workload.GetName(), - "kind", 
decision.Workload.Kind(), - ) - r.EventRecorder.ReloadFailed(decision.Workload.GetObject(), "ConfigMap", cm.Name, err) - r.recordMetrics(false, cm.Namespace) - continue - } - - if updated { - r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "ConfigMap", cm.Name) - r.recordMetrics(true, cm.Namespace) - log.Info("workload reloaded successfully", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - ) - - // Send alert notification - if err := r.Alerter.Send(ctx, alerting.AlertMessage{ - WorkloadKind: string(decision.Workload.Kind()), - WorkloadName: decision.Workload.GetName(), - WorkloadNamespace: decision.Workload.GetNamespace(), - ResourceKind: "ConfigMap", - ResourceName: cm.Name, - ResourceNamespace: cm.Namespace, - Timestamp: time.Now(), - }); err != nil { - log.Error(err, "failed to send alert") - } - } - } - - return ctrl.Result{}, nil + return r.reloadHandler().Process(ctx, cm.Namespace, cm.Name, "ConfigMap", reload.ResourceTypeConfigMap, + func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { + return r.ReloadService.ProcessConfigMap(reload.ConfigMapChange{ + ConfigMap: &cm, + EventType: reload.EventTypeUpdate, + }, workloads) + }, log) } // FieldManager is the field manager name used for server-side apply. const FieldManager = "reloader" -// handleDelete handles ConfigMap deletion events. 
func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { log.Info("handling ConfigMap deletion") - // Get all workloads in the namespace - workloads, err := r.listWorkloads(ctx, req.Namespace) - if err != nil { - log.Error(err, "failed to list workloads") - return ctrl.Result{}, err - } - - // For delete events, we create a change with empty ConfigMap - change := reload.ConfigMapChange{ - ConfigMap: &corev1.ConfigMap{}, - EventType: reload.EventTypeDelete, - } - change.ConfigMap.Name = req.Name - change.ConfigMap.Namespace = req.Namespace - - decisions := r.ReloadService.ProcessConfigMap(change, workloads) - - // Collect workloads that should be reloaded - var workloadsToReload []reload.ReloadDecision - for _, decision := range decisions { - if decision.ShouldReload { - workloadsToReload = append(workloadsToReload, decision) - } - } - - // If webhook is configured, send notification instead of modifying workloads - if r.WebhookClient.IsConfigured() && len(workloadsToReload) > 0 { - return r.sendWebhookNotification(ctx, req.Name, req.Namespace, reload.ResourceTypeConfigMap, workloadsToReload, log) - } - - // Apply reloads for delete with conflict retry - for _, decision := range workloadsToReload { - log.Info("reloading workload due to ConfigMap deletion", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - ) - - updated, err := UpdateWorkloadWithRetry( - ctx, - r.Client, - r.ReloadService, - decision.Workload, - req.Name, - reload.ResourceTypeConfigMap, - req.Namespace, - decision.Hash, - decision.AutoReload, - ) - if err != nil { - log.Error(err, "failed to update workload") - r.EventRecorder.ReloadFailed(decision.Workload.GetObject(), "ConfigMap", req.Name, err) - r.recordMetrics(false, req.Namespace) - continue - } - - if updated { - r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "ConfigMap", req.Name) - r.recordMetrics(true, req.Namespace) - - // Send 
alert notification - if err := r.Alerter.Send(ctx, alerting.AlertMessage{ - WorkloadKind: string(decision.Workload.Kind()), - WorkloadName: decision.Workload.GetName(), - WorkloadNamespace: decision.Workload.GetNamespace(), - ResourceKind: "ConfigMap", - ResourceName: req.Name, - ResourceNamespace: req.Namespace, - Timestamp: time.Now(), - }); err != nil { - log.Error(err, "failed to send alert") - } - } - } - - return ctrl.Result{}, nil + cm := &corev1.ConfigMap{} + cm.Name = req.Name + cm.Namespace = req.Namespace + + return r.reloadHandler().Process(ctx, req.Namespace, req.Name, "ConfigMap", reload.ResourceTypeConfigMap, + func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { + return r.ReloadService.ProcessConfigMap(reload.ConfigMapChange{ + ConfigMap: cm, + EventType: reload.EventTypeDelete, + }, workloads) + }, log) } -// listWorkloads returns all workloads in the given namespace. -func (r *ConfigMapReconciler) listWorkloads(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var result []workload.WorkloadAccessor - - for _, kind := range r.Registry.SupportedKinds() { - // Skip ignored workload types - if r.Config.IsWorkloadIgnored(string(kind)) { - continue +func (r *ConfigMapReconciler) reloadHandler() *ReloadHandler { + if r.handler == nil { + r.handler = &ReloadHandler{ + Client: r.Client, + Lister: workload.NewLister(r.Client, r.Registry, r.Config), + ReloadService: r.ReloadService, + WebhookClient: r.WebhookClient, + Collectors: r.Collectors, + EventRecorder: r.EventRecorder, + Alerter: r.Alerter, } - - workloads, err := r.listWorkloadsByKind(ctx, namespace, kind) - if err != nil { - return nil, err - } - result = append(result, workloads...) } - - return result, nil -} - -// listWorkloadsByKind lists workloads of a specific kind in the namespace. 
-func (r *ConfigMapReconciler) listWorkloadsByKind(ctx context.Context, namespace string, kind workload.Kind) ([]workload.WorkloadAccessor, error) { - switch kind { - case workload.KindDeployment: - return r.listDeployments(ctx, namespace) - case workload.KindDaemonSet: - return r.listDaemonSets(ctx, namespace) - case workload.KindStatefulSet: - return r.listStatefulSets(ctx, namespace) - case workload.KindJob: - return r.listJobs(ctx, namespace) - case workload.KindCronJob: - return r.listCronJobs(ctx, namespace) - default: - return nil, nil - } -} - -func (r *ConfigMapReconciler) listDeployments(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list appsv1.DeploymentList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewDeploymentWorkload(&list.Items[i]) - } - return result, nil -} - -func (r *ConfigMapReconciler) listDaemonSets(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list appsv1.DaemonSetList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewDaemonSetWorkload(&list.Items[i]) - } - return result, nil -} - -func (r *ConfigMapReconciler) listStatefulSets(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list appsv1.StatefulSetList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewStatefulSetWorkload(&list.Items[i]) - } - return result, nil -} - -func (r *ConfigMapReconciler) listJobs(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - 
var list batchv1.JobList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewJobWorkload(&list.Items[i]) - } - return result, nil -} - -func (r *ConfigMapReconciler) listCronJobs(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list batchv1.CronJobList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewCronJobWorkload(&list.Items[i]) - } - return result, nil -} - -// recordMetrics records reload metrics. -func (r *ConfigMapReconciler) recordMetrics(success bool, namespace string) { - r.Collectors.RecordReload(success, namespace) -} - -// sendWebhookNotification sends a webhook notification instead of modifying workloads. -func (r *ConfigMapReconciler) sendWebhookNotification( - ctx context.Context, - resourceName, namespace string, - resourceType reload.ResourceType, - decisions []reload.ReloadDecision, - log logr.Logger, -) (ctrl.Result, error) { - var workloads []webhook.WorkloadInfo - var hash string - for _, d := range decisions { - workloads = append(workloads, webhook.WorkloadInfo{ - Kind: string(d.Workload.Kind()), - Name: d.Workload.GetName(), - Namespace: d.Workload.GetNamespace(), - }) - if hash == "" { - hash = d.Hash - } - } - - payload := webhook.Payload{ - Kind: string(resourceType), - Namespace: namespace, - ResourceName: resourceName, - ResourceType: string(resourceType), - Hash: hash, - Timestamp: time.Now().UTC(), - Workloads: workloads, - } - - if err := r.WebhookClient.Send(ctx, payload); err != nil { - log.Error(err, "failed to send webhook notification") - r.recordMetrics(false, namespace) - return ctrl.Result{}, err - } - - log.Info("webhook notification sent", - "resource", 
resourceName, - "workloadCount", len(workloads), - ) - r.recordMetrics(true, namespace) - return ctrl.Result{}, nil + return r.handler } // SetupWithManager sets up the controller with the Manager. func (r *ConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error { - hasher := r.ReloadService.Hasher() - return ctrl.NewControllerManagedBy(mgr). For(&corev1.ConfigMap{}). - WithEventFilter(predicate.And( - reload.ConfigMapPredicates(r.Config, hasher), - reload.NamespaceFilterPredicate(r.Config), - reload.LabelSelectorPredicate(r.Config), - reload.IgnoreAnnotationPredicate(r.Config), - r.createEventFilter(), + WithEventFilter(BuildEventFilter( + reload.ConfigMapPredicates(r.Config, r.ReloadService.Hasher()), + r.Config, &r.initialized, )). Complete(r) } -// createEventFilter filters create events based on initialization state. -func (r *ConfigMapReconciler) createEventFilter() predicate.Predicate { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - // During startup, skip create events unless SyncAfterRestart is enabled - if !r.initialized && !r.Config.SyncAfterRestart { - return false - } - // After initialization, only process creates if ReloadOnCreate is enabled - return r.Config.ReloadOnCreate - }, - UpdateFunc: func(e event.UpdateEvent) bool { - return true - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return r.Config.ReloadOnDelete - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} - -// Ensure ConfigMapReconciler implements reconcile.Reconciler var _ reconcile.Reconciler = &ConfigMapReconciler{} diff --git a/internal/pkg/controller/handler.go b/internal/pkg/controller/handler.go new file mode 100644 index 000000000..b3ac4d2f4 --- /dev/null +++ b/internal/pkg/controller/handler.go @@ -0,0 +1,187 @@ +package controller + +import ( + "context" + "time" + + "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + 
"github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// ReloadHandler handles the common reload workflow. +type ReloadHandler struct { + Client client.Client + Lister *workload.Lister + ReloadService *reload.Service + WebhookClient *webhook.Client + Collectors *metrics.Collectors + EventRecorder *events.Recorder + Alerter alerting.Alerter +} + +// Process handles the reload workflow: list workloads, get decisions, webhook or apply. +func (h *ReloadHandler) Process( + ctx context.Context, + namespace, resourceName, resourceKind string, + resourceType reload.ResourceType, + getDecisions func([]workload.WorkloadAccessor) []reload.ReloadDecision, + log logr.Logger, +) (ctrl.Result, error) { + workloads, err := h.Lister.List(ctx, namespace) + if err != nil { + log.Error(err, "failed to list workloads") + return ctrl.Result{}, err + } + + decisions := reload.FilterDecisions(getDecisions(workloads)) + + if h.WebhookClient.IsConfigured() && len(decisions) > 0 { + return h.sendWebhook(ctx, resourceName, namespace, resourceType, decisions, log) + } + + h.applyReloads(ctx, resourceName, namespace, resourceKind, resourceType, decisions, log) + return ctrl.Result{}, nil +} + +func (h *ReloadHandler) sendWebhook( + ctx context.Context, + resourceName, namespace string, + resourceType reload.ResourceType, + decisions []reload.ReloadDecision, + log logr.Logger, +) (ctrl.Result, error) { + var workloads []webhook.WorkloadInfo + var hash string + for _, d := range decisions { + workloads = append(workloads, webhook.WorkloadInfo{ + Kind: string(d.Workload.Kind()), + Name: 
d.Workload.GetName(), + Namespace: d.Workload.GetNamespace(), + }) + if hash == "" { + hash = d.Hash + } + } + + payload := webhook.Payload{ + Kind: string(resourceType), + Namespace: namespace, + ResourceName: resourceName, + ResourceType: string(resourceType), + Hash: hash, + Timestamp: time.Now().UTC(), + Workloads: workloads, + } + + if err := h.WebhookClient.Send(ctx, payload); err != nil { + log.Error(err, "failed to send webhook notification") + h.Collectors.RecordReload(false, namespace) + return ctrl.Result{}, err + } + + log.Info("webhook notification sent", + "resource", resourceName, + "workloadCount", len(workloads), + ) + h.Collectors.RecordReload(true, namespace) + return ctrl.Result{}, nil +} + +func (h *ReloadHandler) applyReloads( + ctx context.Context, + resourceName, resourceNamespace, resourceKind string, + resourceType reload.ResourceType, + decisions []reload.ReloadDecision, + log logr.Logger, +) { + for _, decision := range decisions { + log.Info("reloading workload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + "reason", decision.Reason, + ) + + updated, err := UpdateWorkloadWithRetry( + ctx, + h.Client, + h.ReloadService, + decision.Workload, + resourceName, + resourceType, + resourceNamespace, + decision.Hash, + decision.AutoReload, + ) + if err != nil { + log.Error(err, "failed to update workload", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + h.EventRecorder.ReloadFailed(decision.Workload.GetObject(), resourceKind, resourceName, err) + h.Collectors.RecordReload(false, resourceNamespace) + continue + } + + if updated { + h.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), resourceKind, resourceName) + h.Collectors.RecordReload(true, resourceNamespace) + log.Info("workload reloaded successfully", + "workload", decision.Workload.GetName(), + "kind", decision.Workload.Kind(), + ) + + if err := h.Alerter.Send(ctx, alerting.AlertMessage{ + WorkloadKind: 
string(decision.Workload.Kind()), + WorkloadName: decision.Workload.GetName(), + WorkloadNamespace: decision.Workload.GetNamespace(), + ResourceKind: resourceKind, + ResourceName: resourceName, + ResourceNamespace: resourceNamespace, + Timestamp: time.Now(), + }); err != nil { + log.Error(err, "failed to send alert") + } + } + } +} + +// BuildEventFilter combines a resource-specific predicate with common filters. +func BuildEventFilter(resourcePredicate predicate.Predicate, cfg *config.Config, initialized *bool) predicate.Predicate { + return predicate.And( + resourcePredicate, + reload.NamespaceFilterPredicate(cfg), + reload.LabelSelectorPredicate(cfg), + reload.IgnoreAnnotationPredicate(cfg), + createEventPredicate(cfg, initialized), + ) +} + +func createEventPredicate(cfg *config.Config, initialized *bool) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if !*initialized && !cfg.SyncAfterRestart { + return false + } + return cfg.ReloadOnCreate + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return cfg.ReloadOnDelete + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } +} diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index 8c5dbd2b2..c6c79131c 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ b/internal/pkg/controller/secret_reconciler.go @@ -3,7 +3,6 @@ package controller import ( "context" "sync" - "time" "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/alerting" @@ -13,14 +12,10 @@ import ( "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -36,6 +31,7 @@ type SecretReconciler struct { WebhookClient *webhook.Client Alerter alerting.Alerter + handler *ReloadHandler initialized bool initOnce sync.Once } @@ -44,17 +40,14 @@ type SecretReconciler struct { func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("secret", req.NamespacedName) - // Mark as initialized after first reconcile (caches are synced at this point) r.initOnce.Do(func() { r.initialized = true log.Info("Secret controller initialized") }) - // Fetch the Secret var secret corev1.Secret if err := r.Get(ctx, req.NamespacedName, &secret); err != nil { if errors.IsNotFound(err) { - // Secret was deleted - handle if ReloadOnDelete is enabled if r.Config.ReloadOnDelete { return r.handleDelete(ctx, req, log) } @@ -64,361 +57,60 @@ func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, err } - // Check if namespace should be ignored if r.Config.IsNamespaceIgnored(secret.Namespace) { log.V(1).Info("skipping Secret in ignored namespace") return ctrl.Result{}, nil } - // Get all workloads in the same namespace - workloads, err := r.listWorkloads(ctx, secret.Namespace) - if err != nil { - log.Error(err, "failed to list workloads") - return ctrl.Result{}, err - } - - // Evaluate which workloads should be reloaded - change := reload.SecretChange{ - Secret: &secret, - EventType: reload.EventTypeUpdate, - } - decisions := r.ReloadService.ProcessSecret(change, workloads) - - // Collect workloads that should be reloaded - var workloadsToReload []reload.ReloadDecision - for _, decision := range decisions { - if decision.ShouldReload { - workloadsToReload = append(workloadsToReload, decision) - } - } - - // If webhook is configured, send notification instead 
of modifying workloads - if r.WebhookClient.IsConfigured() && len(workloadsToReload) > 0 { - return r.sendWebhookNotification(ctx, secret.Name, secret.Namespace, reload.ResourceTypeSecret, workloadsToReload, log) - } - - // Apply reloads with conflict retry - for _, decision := range workloadsToReload { - log.Info("reloading workload", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - "reason", decision.Reason, - ) - - updated, err := UpdateWorkloadWithRetry( - ctx, - r.Client, - r.ReloadService, - decision.Workload, - secret.Name, - reload.ResourceTypeSecret, - secret.Namespace, - decision.Hash, - decision.AutoReload, - ) - if err != nil { - log.Error(err, "failed to update workload", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - ) - r.EventRecorder.ReloadFailed(decision.Workload.GetObject(), "Secret", secret.Name, err) - r.recordMetrics(false, secret.Namespace) - continue - } - - if updated { - r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "Secret", secret.Name) - r.recordMetrics(true, secret.Namespace) - log.Info("workload reloaded successfully", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - ) - - // Send alert notification - if err := r.Alerter.Send(ctx, alerting.AlertMessage{ - WorkloadKind: string(decision.Workload.Kind()), - WorkloadName: decision.Workload.GetName(), - WorkloadNamespace: decision.Workload.GetNamespace(), - ResourceKind: "Secret", - ResourceName: secret.Name, - ResourceNamespace: secret.Namespace, - Timestamp: time.Now(), - }); err != nil { - log.Error(err, "failed to send alert") - } - } - } - - return ctrl.Result{}, nil + return r.reloadHandler().Process(ctx, secret.Namespace, secret.Name, "Secret", reload.ResourceTypeSecret, + func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { + return r.ReloadService.ProcessSecret(reload.SecretChange{ + Secret: &secret, + EventType: reload.EventTypeUpdate, + }, workloads) 
+ }, log) } -// handleDelete handles Secret deletion events. func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { log.Info("handling Secret deletion") - // Get all workloads in the namespace - workloads, err := r.listWorkloads(ctx, req.Namespace) - if err != nil { - log.Error(err, "failed to list workloads") - return ctrl.Result{}, err - } - - // For delete events, we create a change with empty Secret - change := reload.SecretChange{ - Secret: &corev1.Secret{}, - EventType: reload.EventTypeDelete, - } - change.Secret.Name = req.Name - change.Secret.Namespace = req.Namespace - - decisions := r.ReloadService.ProcessSecret(change, workloads) - - // Collect workloads that should be reloaded - var workloadsToReload []reload.ReloadDecision - for _, decision := range decisions { - if decision.ShouldReload { - workloadsToReload = append(workloadsToReload, decision) - } - } - - // If webhook is configured, send notification instead of modifying workloads - if r.WebhookClient.IsConfigured() && len(workloadsToReload) > 0 { - return r.sendWebhookNotification(ctx, req.Name, req.Namespace, reload.ResourceTypeSecret, workloadsToReload, log) - } - - // Apply reloads for delete with conflict retry - for _, decision := range workloadsToReload { - log.Info("reloading workload due to Secret deletion", - "workload", decision.Workload.GetName(), - "kind", decision.Workload.Kind(), - ) - - updated, err := UpdateWorkloadWithRetry( - ctx, - r.Client, - r.ReloadService, - decision.Workload, - req.Name, - reload.ResourceTypeSecret, - req.Namespace, - decision.Hash, - decision.AutoReload, - ) - if err != nil { - log.Error(err, "failed to update workload") - r.EventRecorder.ReloadFailed(decision.Workload.GetObject(), "Secret", req.Name, err) - r.recordMetrics(false, req.Namespace) - continue - } - - if updated { - r.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), "Secret", req.Name) - r.recordMetrics(true, 
req.Namespace) - - // Send alert notification - if err := r.Alerter.Send(ctx, alerting.AlertMessage{ - WorkloadKind: string(decision.Workload.Kind()), - WorkloadName: decision.Workload.GetName(), - WorkloadNamespace: decision.Workload.GetNamespace(), - ResourceKind: "Secret", - ResourceName: req.Name, - ResourceNamespace: req.Namespace, - Timestamp: time.Now(), - }); err != nil { - log.Error(err, "failed to send alert") - } - } - } - - return ctrl.Result{}, nil + secret := &corev1.Secret{} + secret.Name = req.Name + secret.Namespace = req.Namespace + + return r.reloadHandler().Process(ctx, req.Namespace, req.Name, "Secret", reload.ResourceTypeSecret, + func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { + return r.ReloadService.ProcessSecret(reload.SecretChange{ + Secret: secret, + EventType: reload.EventTypeDelete, + }, workloads) + }, log) } -// listWorkloads returns all workloads in the given namespace. -func (r *SecretReconciler) listWorkloads(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var result []workload.WorkloadAccessor - - for _, kind := range r.Registry.SupportedKinds() { - // Skip ignored workload types - if r.Config.IsWorkloadIgnored(string(kind)) { - continue +func (r *SecretReconciler) reloadHandler() *ReloadHandler { + if r.handler == nil { + r.handler = &ReloadHandler{ + Client: r.Client, + Lister: workload.NewLister(r.Client, r.Registry, r.Config), + ReloadService: r.ReloadService, + WebhookClient: r.WebhookClient, + Collectors: r.Collectors, + EventRecorder: r.EventRecorder, + Alerter: r.Alerter, } - - workloads, err := r.listWorkloadsByKind(ctx, namespace, kind) - if err != nil { - return nil, err - } - result = append(result, workloads...) } - - return result, nil -} - -// listWorkloadsByKind lists workloads of a specific kind in the namespace. 
-func (r *SecretReconciler) listWorkloadsByKind(ctx context.Context, namespace string, kind workload.Kind) ([]workload.WorkloadAccessor, error) { - switch kind { - case workload.KindDeployment: - return r.listDeployments(ctx, namespace) - case workload.KindDaemonSet: - return r.listDaemonSets(ctx, namespace) - case workload.KindStatefulSet: - return r.listStatefulSets(ctx, namespace) - case workload.KindJob: - return r.listJobs(ctx, namespace) - case workload.KindCronJob: - return r.listCronJobs(ctx, namespace) - default: - return nil, nil - } -} - -func (r *SecretReconciler) listDeployments(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list appsv1.DeploymentList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewDeploymentWorkload(&list.Items[i]) - } - return result, nil -} - -func (r *SecretReconciler) listDaemonSets(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list appsv1.DaemonSetList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewDaemonSetWorkload(&list.Items[i]) - } - return result, nil -} - -func (r *SecretReconciler) listStatefulSets(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list appsv1.StatefulSetList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewStatefulSetWorkload(&list.Items[i]) - } - return result, nil -} - -func (r *SecretReconciler) listJobs(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list 
batchv1.JobList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewJobWorkload(&list.Items[i]) - } - return result, nil -} - -func (r *SecretReconciler) listCronJobs(ctx context.Context, namespace string) ([]workload.WorkloadAccessor, error) { - var list batchv1.CronJobList - if err := r.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]workload.WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = workload.NewCronJobWorkload(&list.Items[i]) - } - return result, nil -} - -// recordMetrics records reload metrics. -func (r *SecretReconciler) recordMetrics(success bool, namespace string) { - r.Collectors.RecordReload(success, namespace) -} - -// sendWebhookNotification sends a webhook notification instead of modifying workloads. -func (r *SecretReconciler) sendWebhookNotification( - ctx context.Context, - resourceName, namespace string, - resourceType reload.ResourceType, - decisions []reload.ReloadDecision, - log logr.Logger, -) (ctrl.Result, error) { - var workloads []webhook.WorkloadInfo - var hash string - for _, d := range decisions { - workloads = append(workloads, webhook.WorkloadInfo{ - Kind: string(d.Workload.Kind()), - Name: d.Workload.GetName(), - Namespace: d.Workload.GetNamespace(), - }) - if hash == "" { - hash = d.Hash - } - } - - payload := webhook.Payload{ - Kind: string(resourceType), - Namespace: namespace, - ResourceName: resourceName, - ResourceType: string(resourceType), - Hash: hash, - Timestamp: time.Now().UTC(), - Workloads: workloads, - } - - if err := r.WebhookClient.Send(ctx, payload); err != nil { - log.Error(err, "failed to send webhook notification") - r.recordMetrics(false, namespace) - return ctrl.Result{}, err - } - - log.Info("webhook notification sent", - "resource", resourceName, - 
"workloadCount", len(workloads), - ) - r.recordMetrics(true, namespace) - return ctrl.Result{}, nil + return r.handler } // SetupWithManager sets up the controller with the Manager. func (r *SecretReconciler) SetupWithManager(mgr ctrl.Manager) error { - hasher := r.ReloadService.Hasher() - return ctrl.NewControllerManagedBy(mgr). For(&corev1.Secret{}). - WithEventFilter(predicate.And( - reload.SecretPredicates(r.Config, hasher), - reload.NamespaceFilterPredicate(r.Config), - reload.LabelSelectorPredicate(r.Config), - reload.IgnoreAnnotationPredicate(r.Config), - r.createEventFilter(), + WithEventFilter(BuildEventFilter( + reload.SecretPredicates(r.Config, r.ReloadService.Hasher()), + r.Config, &r.initialized, )). Complete(r) } -// createEventFilter filters create events based on initialization state. -func (r *SecretReconciler) createEventFilter() predicate.Predicate { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - // During startup, skip create events unless SyncAfterRestart is enabled - if !r.initialized && !r.Config.SyncAfterRestart { - return false - } - // After initialization, only process creates if ReloadOnCreate is enabled - return r.Config.ReloadOnCreate - }, - UpdateFunc: func(e event.UpdateEvent) bool { - return true - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return r.Config.ReloadOnDelete - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} - -// Ensure SecretReconciler implements reconcile.Reconciler var _ reconcile.Reconciler = &SecretReconciler{} diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go index 6d7825fe1..e9ff12b65 100644 --- a/internal/pkg/reload/service.go +++ b/internal/pkg/reload/service.go @@ -66,6 +66,17 @@ type ReloadDecision struct { Hash string } +// FilterDecisions returns only decisions where ShouldReload is true. 
+func FilterDecisions(decisions []ReloadDecision) []ReloadDecision { + var result []ReloadDecision + for _, d := range decisions { + if d.ShouldReload { + result = append(result, d) + } + } + return result +} + // ProcessConfigMap evaluates all workloads to determine which should be reloaded. func (s *Service) ProcessConfigMap(change ConfigMapChange, workloads []workload.WorkloadAccessor) []ReloadDecision { if change.ConfigMap == nil { diff --git a/internal/pkg/workload/lister.go b/internal/pkg/workload/lister.go new file mode 100644 index 000000000..a1487bb74 --- /dev/null +++ b/internal/pkg/workload/lister.go @@ -0,0 +1,111 @@ +package workload + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// IgnoreChecker checks if a workload kind should be ignored. +type IgnoreChecker interface { + IsWorkloadIgnored(kind string) bool +} + +// Lister lists workloads from the cluster. +type Lister struct { + Client client.Client + Registry *Registry + Checker IgnoreChecker +} + +// NewLister creates a new workload lister. +func NewLister(c client.Client, registry *Registry, checker IgnoreChecker) *Lister { + return &Lister{ + Client: c, + Registry: registry, + Checker: checker, + } +} + +// List returns all workloads in the given namespace. +func (l *Lister) List(ctx context.Context, namespace string) ([]WorkloadAccessor, error) { + var result []WorkloadAccessor + + for _, kind := range l.Registry.SupportedKinds() { + if l.Checker != nil && l.Checker.IsWorkloadIgnored(string(kind)) { + continue + } + + workloads, err := l.listByKind(ctx, namespace, kind) + if err != nil { + return nil, err + } + result = append(result, workloads...) 
+ } + + return result, nil +} + +func (l *Lister) listByKind(ctx context.Context, namespace string, kind Kind) ([]WorkloadAccessor, error) { + switch kind { + case KindDeployment: + var list appsv1.DeploymentList + if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewDeploymentWorkload(&list.Items[i]) + } + return result, nil + + case KindDaemonSet: + var list appsv1.DaemonSetList + if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewDaemonSetWorkload(&list.Items[i]) + } + return result, nil + + case KindStatefulSet: + var list appsv1.StatefulSetList + if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewStatefulSetWorkload(&list.Items[i]) + } + return result, nil + + case KindJob: + var list batchv1.JobList + if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewJobWorkload(&list.Items[i]) + } + return result, nil + + case KindCronJob: + var list batchv1.CronJobList + if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewCronJobWorkload(&list.Items[i]) + } + return result, nil + + default: + return nil, nil + } +} From c19058a66e4d6594a9fe2fd7c4150a4cc789e0c0 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:55 +0100 Subject: [PATCH 13/35] 
refactor: Re-use a lot of code and move to specific packages --- .../pkg/controller/configmap_reconciler.go | 8 +- .../pkg/controller/deployment_reconciler.go | 21 +++-- internal/pkg/controller/handler.go | 12 +-- internal/pkg/controller/retry.go | 86 ++++++----------- internal/pkg/controller/secret_reconciler.go | 8 +- internal/pkg/reload/matcher.go | 12 +++ internal/pkg/reload/predicate.go | 67 +++++--------- internal/pkg/reload/service.go | 71 +++++++------- internal/pkg/reload/service_test.go | 16 ++-- internal/pkg/workload/cronjob.go | 92 +------------------ internal/pkg/workload/daemonset.go | 88 +----------------- internal/pkg/workload/deployment.go | 90 +----------------- internal/pkg/workload/job.go | 88 +----------------- internal/pkg/workload/rollout.go | 92 +------------------ internal/pkg/workload/statefulset.go | 88 +----------------- internal/pkg/workload/uses.go | 77 ++++++++++++++++ 16 files changed, 225 insertions(+), 691 deletions(-) create mode 100644 internal/pkg/workload/uses.go diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index 3f744ea57..ef60e9fca 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -62,9 +62,9 @@ func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } - return r.reloadHandler().Process(ctx, cm.Namespace, cm.Name, "ConfigMap", reload.ResourceTypeConfigMap, + return r.reloadHandler().Process(ctx, cm.Namespace, cm.Name, reload.ResourceTypeConfigMap, func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { - return r.ReloadService.ProcessConfigMap(reload.ConfigMapChange{ + return r.ReloadService.Process(reload.ConfigMapChange{ ConfigMap: &cm, EventType: reload.EventTypeUpdate, }, workloads) @@ -81,9 +81,9 @@ func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request cm.Name = req.Name cm.Namespace = req.Namespace 
- return r.reloadHandler().Process(ctx, req.Namespace, req.Name, "ConfigMap", reload.ResourceTypeConfigMap, + return r.reloadHandler().Process(ctx, req.Namespace, req.Name, reload.ResourceTypeConfigMap, func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { - return r.ReloadService.ProcessConfigMap(reload.ConfigMapChange{ + return r.ReloadService.Process(reload.ConfigMapChange{ ConfigMap: cm, EventType: reload.EventTypeDelete, }, workloads) diff --git a/internal/pkg/controller/deployment_reconciler.go b/internal/pkg/controller/deployment_reconciler.go index d28272c56..08c71ab49 100644 --- a/internal/pkg/controller/deployment_reconciler.go +++ b/internal/pkg/controller/deployment_reconciler.go @@ -26,6 +26,7 @@ type DeploymentReconciler struct { // Reconcile handles Deployment pause expiration. func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("deployment", req.NamespacedName) + log.Info("Deployment reconciling ", "namespace", req.Namespace, "name", req.Name) var deploy appsv1.Deployment if err := r.Get(ctx, req.NamespacedName, &deploy); err != nil { @@ -76,14 +77,16 @@ func (r *DeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error { // pausedByReloaderPredicate returns a predicate that only selects deployments // that have been paused by Reloader (have the paused-at annotation). 
func (r *DeploymentReconciler) pausedByReloaderPredicate() predicate.Predicate { - return predicate.NewPredicateFuncs(func(obj client.Object) bool { - annotations := obj.GetAnnotations() - if annotations == nil { - return false - } + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + annotations := obj.GetAnnotations() + if annotations == nil { + return false + } - // Only process if deployment has our paused-at annotation - _, hasPausedAt := annotations[r.Config.Annotations.PausedAt] - return hasPausedAt - }) + // Only process if deployment has our paused-at annotation + _, hasPausedAt := annotations[r.Config.Annotations.PausedAt] + return hasPausedAt + }, + ) } diff --git a/internal/pkg/controller/handler.go b/internal/pkg/controller/handler.go index b3ac4d2f4..caa71fb83 100644 --- a/internal/pkg/controller/handler.go +++ b/internal/pkg/controller/handler.go @@ -32,7 +32,7 @@ type ReloadHandler struct { // Process handles the reload workflow: list workloads, get decisions, webhook or apply. 
func (h *ReloadHandler) Process( ctx context.Context, - namespace, resourceName, resourceKind string, + namespace, resourceName string, resourceType reload.ResourceType, getDecisions func([]workload.WorkloadAccessor) []reload.ReloadDecision, log logr.Logger, @@ -49,7 +49,7 @@ func (h *ReloadHandler) Process( return h.sendWebhook(ctx, resourceName, namespace, resourceType, decisions, log) } - h.applyReloads(ctx, resourceName, namespace, resourceKind, resourceType, decisions, log) + h.applyReloads(ctx, resourceName, namespace, resourceType, decisions, log) return ctrl.Result{}, nil } @@ -99,7 +99,7 @@ func (h *ReloadHandler) sendWebhook( func (h *ReloadHandler) applyReloads( ctx context.Context, - resourceName, resourceNamespace, resourceKind string, + resourceName, resourceNamespace string, resourceType reload.ResourceType, decisions []reload.ReloadDecision, log logr.Logger, @@ -127,13 +127,13 @@ func (h *ReloadHandler) applyReloads( "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) - h.EventRecorder.ReloadFailed(decision.Workload.GetObject(), resourceKind, resourceName, err) + h.EventRecorder.ReloadFailed(decision.Workload.GetObject(), resourceType.Kind(), resourceName, err) h.Collectors.RecordReload(false, resourceNamespace) continue } if updated { - h.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), resourceKind, resourceName) + h.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), resourceType.Kind(), resourceName) h.Collectors.RecordReload(true, resourceNamespace) log.Info("workload reloaded successfully", "workload", decision.Workload.GetName(), @@ -144,7 +144,7 @@ func (h *ReloadHandler) applyReloads( WorkloadKind: string(decision.Workload.Kind()), WorkloadName: decision.Workload.GetName(), WorkloadNamespace: decision.Workload.GetNamespace(), - ResourceKind: resourceKind, + ResourceKind: resourceType.Kind(), ResourceName: resourceName, ResourceNamespace: resourceNamespace, Timestamp: time.Now(), diff --git 
a/internal/pkg/controller/retry.go b/internal/pkg/controller/retry.go index f8af3de0f..b91064908 100644 --- a/internal/pkg/controller/retry.go +++ b/internal/pkg/controller/retry.go @@ -43,8 +43,9 @@ func UpdateWorkloadWithRetry( } } -// updateStandardWorkload updates Deployments, DaemonSets, StatefulSets, etc. -func updateStandardWorkload( +// retryWithReload wraps the common retry logic for workload updates. +// It handles re-fetching on conflict, applying reload changes, and calling the update function. +func retryWithReload( ctx context.Context, c client.Client, reloadService *reload.Service, @@ -54,18 +55,17 @@ func updateStandardWorkload( namespace string, hash string, autoReload bool, + updateFn func() error, ) (bool, error) { var updated bool isFirstAttempt := true err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - // On retry, re-fetch the object to get the latest ResourceVersion if !isFirstAttempt { obj := wl.GetObject() key := client.ObjectKeyFromObject(obj) if err := c.Get(ctx, key, obj); err != nil { if errors.IsNotFound(err) { - // Object was deleted, nothing to update return nil } return err @@ -73,17 +73,8 @@ func updateStandardWorkload( } isFirstAttempt = false - // Apply reload changes (this modifies the workload in-place) var applyErr error - updated, applyErr = reloadService.ApplyReload( - ctx, - wl, - resourceName, - resourceType, - namespace, - hash, - autoReload, - ) + updated, applyErr = reloadService.ApplyReload(ctx, wl, resourceName, resourceType, namespace, hash, autoReload) if applyErr != nil { return applyErr } @@ -92,13 +83,30 @@ func updateStandardWorkload( return nil } - // Attempt update with field ownership - return c.Update(ctx, wl.GetObject(), client.FieldOwner(FieldManager)) + return updateFn() }) return updated, err } +// updateStandardWorkload updates Deployments, DaemonSets, StatefulSets, etc. 
+func updateStandardWorkload( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + wl workload.WorkloadAccessor, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + return retryWithReload(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, + func() error { + return c.Update(ctx, wl.GetObject(), client.FieldOwner(FieldManager)) + }) +} + // updateJobWithRecreate deletes the Job and recreates it with the updated spec. // Jobs are immutable after creation, so we must delete and recreate. func updateJobWithRecreate( @@ -254,46 +262,8 @@ func updateArgoRollout( return false, nil } - var updated bool - isFirstAttempt := true - - err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - // On retry, re-fetch the object to get the latest ResourceVersion - if !isFirstAttempt { - obj := rolloutWl.GetObject() - key := client.ObjectKeyFromObject(obj) - if err := c.Get(ctx, key, obj); err != nil { - if errors.IsNotFound(err) { - // Object was deleted, nothing to update - return nil - } - return err - } - } - isFirstAttempt = false - - // Apply reload changes (this modifies the workload in-place) - var applyErr error - updated, applyErr = reloadService.ApplyReload( - ctx, - wl, - resourceName, - resourceType, - namespace, - hash, - autoReload, - ) - if applyErr != nil { - return applyErr - } - - if !updated { - return nil - } - - // Use the RolloutWorkload's Update method which handles the rollout strategy - return rolloutWl.Update(ctx, c) - }) - - return updated, err + return retryWithReload(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, + func() error { + return rolloutWl.Update(ctx, c) + }) } diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index c6c79131c..c28f8e38b 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ 
b/internal/pkg/controller/secret_reconciler.go @@ -62,9 +62,9 @@ func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, nil } - return r.reloadHandler().Process(ctx, secret.Namespace, secret.Name, "Secret", reload.ResourceTypeSecret, + return r.reloadHandler().Process(ctx, secret.Namespace, secret.Name, reload.ResourceTypeSecret, func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { - return r.ReloadService.ProcessSecret(reload.SecretChange{ + return r.ReloadService.Process(reload.SecretChange{ Secret: &secret, EventType: reload.EventTypeUpdate, }, workloads) @@ -78,9 +78,9 @@ func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, l secret.Name = req.Name secret.Namespace = req.Namespace - return r.reloadHandler().Process(ctx, req.Namespace, req.Name, "Secret", reload.ResourceTypeSecret, + return r.reloadHandler().Process(ctx, req.Namespace, req.Name, reload.ResourceTypeSecret, func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { - return r.ReloadService.ProcessSecret(reload.SecretChange{ + return r.ReloadService.Process(reload.SecretChange{ Secret: secret, EventType: reload.EventTypeDelete, }, workloads) diff --git a/internal/pkg/reload/matcher.go b/internal/pkg/reload/matcher.go index 6f56d8ef2..d7a26fbd8 100644 --- a/internal/pkg/reload/matcher.go +++ b/internal/pkg/reload/matcher.go @@ -17,6 +17,18 @@ const ( ResourceTypeSecret ResourceType = "secret" ) +// Kind returns the capitalized Kubernetes Kind (e.g., "ConfigMap", "Secret"). +func (r ResourceType) Kind() string { + switch r { + case ResourceTypeConfigMap: + return "ConfigMap" + case ResourceTypeSecret: + return "Secret" + default: + return string(r) + } +} + // MatchResult contains the result of checking if a workload should be reloaded. 
type MatchResult struct { ShouldReload bool diff --git a/internal/pkg/reload/predicate.go b/internal/pkg/reload/predicate.go index 030ded5dd..f24c60a72 100644 --- a/internal/pkg/reload/predicate.go +++ b/internal/pkg/reload/predicate.go @@ -8,68 +8,51 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" ) -// ConfigMapPredicates returns predicates for filtering ConfigMap events. -func ConfigMapPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { +// resourcePredicates returns predicates for filtering resource events. +// The hashFn computes a hash from old and new objects to detect content changes. +func resourcePredicates(cfg *config.Config, hashFn func(old, new client.Object) (string, string, bool)) predicate.Predicate { return predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { - // Only process create events if ReloadOnCreate is enabled - // or if SyncAfterRestart is enabled (for initial sync) return cfg.ReloadOnCreate || cfg.SyncAfterRestart }, UpdateFunc: func(e event.UpdateEvent) bool { - // Always process updates, but filter by content change - oldCM, okOld := e.ObjectOld.(*corev1.ConfigMap) - newCM, okNew := e.ObjectNew.(*corev1.ConfigMap) - if !okOld || !okNew { + oldHash, newHash, ok := hashFn(e.ObjectOld, e.ObjectNew) + if !ok { return false } - - // Check if the data actually changed - oldHash := hasher.HashConfigMap(oldCM) - newHash := hasher.HashConfigMap(newCM) return oldHash != newHash }, DeleteFunc: func(e event.DeleteEvent) bool { - // Only process delete events if ReloadOnDelete is enabled return cfg.ReloadOnDelete }, GenericFunc: func(e event.GenericEvent) bool { - // Ignore generic events return false }, } } +// ConfigMapPredicates returns predicates for filtering ConfigMap events. 
+func ConfigMapPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { + return resourcePredicates(cfg, func(old, new client.Object) (string, string, bool) { + oldCM, okOld := old.(*corev1.ConfigMap) + newCM, okNew := new.(*corev1.ConfigMap) + if !okOld || !okNew { + return "", "", false + } + return hasher.HashConfigMap(oldCM), hasher.HashConfigMap(newCM), true + }) +} + // SecretPredicates returns predicates for filtering Secret events. func SecretPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - // Only process create events if ReloadOnCreate is enabled - // or if SyncAfterRestart is enabled (for initial sync) - return cfg.ReloadOnCreate || cfg.SyncAfterRestart - }, - UpdateFunc: func(e event.UpdateEvent) bool { - // Always process updates, but filter by content change - oldSecret, okOld := e.ObjectOld.(*corev1.Secret) - newSecret, okNew := e.ObjectNew.(*corev1.Secret) - if !okOld || !okNew { - return false - } - - // Check if the data actually changed - oldHash := hasher.HashSecret(oldSecret) - newHash := hasher.HashSecret(newSecret) - return oldHash != newHash - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Only process delete events if ReloadOnDelete is enabled - return cfg.ReloadOnDelete - }, - GenericFunc: func(e event.GenericEvent) bool { - // Ignore generic events - return false - }, - } + return resourcePredicates(cfg, func(old, new client.Object) (string, string, bool) { + oldSecret, okOld := old.(*corev1.Secret) + newSecret, okNew := new.(*corev1.Secret) + if !okOld || !okNew { + return "", "", false + } + return hasher.HashSecret(oldSecret), hasher.HashSecret(newSecret), true + }) } // NamespaceChecker defines the interface for checking if a namespace is allowed. 
diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go index e9ff12b65..acec2d593 100644 --- a/internal/pkg/reload/service.go +++ b/internal/pkg/reload/service.go @@ -28,18 +28,45 @@ func NewService(cfg *config.Config) *Service { } } +// ResourceChange represents a change event for a ConfigMap or Secret. +type ResourceChange interface { + IsNil() bool + GetEventType() EventType + GetName() string + GetNamespace() string + GetAnnotations() map[string]string + GetResourceType() ResourceType + ComputeHash(hasher *Hasher) string +} + // ConfigMapChange represents a change event for a ConfigMap. type ConfigMapChange struct { ConfigMap *corev1.ConfigMap EventType EventType } +func (c ConfigMapChange) IsNil() bool { return c.ConfigMap == nil } +func (c ConfigMapChange) GetEventType() EventType { return c.EventType } +func (c ConfigMapChange) GetName() string { return c.ConfigMap.Name } +func (c ConfigMapChange) GetNamespace() string { return c.ConfigMap.Namespace } +func (c ConfigMapChange) GetAnnotations() map[string]string { return c.ConfigMap.Annotations } +func (c ConfigMapChange) GetResourceType() ResourceType { return ResourceTypeConfigMap } +func (c ConfigMapChange) ComputeHash(h *Hasher) string { return h.HashConfigMap(c.ConfigMap) } + // SecretChange represents a change event for a Secret. type SecretChange struct { Secret *corev1.Secret EventType EventType } +func (c SecretChange) IsNil() bool { return c.Secret == nil } +func (c SecretChange) GetEventType() EventType { return c.EventType } +func (c SecretChange) GetName() string { return c.Secret.Name } +func (c SecretChange) GetNamespace() string { return c.Secret.Namespace } +func (c SecretChange) GetAnnotations() map[string]string { return c.Secret.Annotations } +func (c SecretChange) GetResourceType() ResourceType { return ResourceTypeSecret } +func (c SecretChange) ComputeHash(h *Hasher) string { return h.HashSecret(c.Secret) } + // EventType represents the type of change event. 
type EventType string @@ -77,55 +104,31 @@ func FilterDecisions(decisions []ReloadDecision) []ReloadDecision { return result } -// ProcessConfigMap evaluates all workloads to determine which should be reloaded. -func (s *Service) ProcessConfigMap(change ConfigMapChange, workloads []workload.WorkloadAccessor) []ReloadDecision { - if change.ConfigMap == nil { +// Process evaluates all workloads to determine which should be reloaded. +func (s *Service) Process(change ResourceChange, workloads []workload.WorkloadAccessor) []ReloadDecision { + if change.IsNil() { return nil } - if !s.shouldProcessEvent(change.EventType) { + if !s.shouldProcessEvent(change.GetEventType()) { return nil } - hash := s.hasher.HashConfigMap(change.ConfigMap) - if change.EventType == EventTypeDelete { + hash := change.ComputeHash(s.hasher) + if change.GetEventType() == EventTypeDelete { hash = s.hasher.EmptyHash() } return s.processResource( - change.ConfigMap.Name, - change.ConfigMap.Namespace, - change.ConfigMap.Annotations, - ResourceTypeConfigMap, + change.GetName(), + change.GetNamespace(), + change.GetAnnotations(), + change.GetResourceType(), hash, workloads, ) } -// ProcessSecret evaluates all workloads to determine which should be reloaded. 
-func (s *Service) ProcessSecret(change SecretChange, workloads []workload.WorkloadAccessor) []ReloadDecision { - if change.Secret == nil { - return nil - } - - if !s.shouldProcessEvent(change.EventType) { - return nil - } - - hash := s.hasher.HashSecret(change.Secret) - if change.EventType == EventTypeDelete { - hash = s.hasher.EmptyHash() - } - - return s.processResource( - change.Secret.Name, - change.Secret.Namespace, - change.Secret.Annotations, - ResourceTypeSecret, - hash, - workloads, - ) -} func (s *Service) processResource( resourceName string, diff --git a/internal/pkg/reload/service_test.go b/internal/pkg/reload/service_test.go index 068804249..4b260a250 100644 --- a/internal/pkg/reload/service_test.go +++ b/internal/pkg/reload/service_test.go @@ -51,7 +51,7 @@ func TestService_ProcessConfigMap_AutoReload(t *testing.T) { EventType: EventTypeUpdate, } - decisions := svc.ProcessConfigMap(change, workloads) + decisions := svc.Process(change, workloads) if len(decisions) != 1 { t.Fatalf("Expected 1 decision, got %d", len(decisions)) @@ -98,7 +98,7 @@ func TestService_ProcessConfigMap_ExplicitAnnotation(t *testing.T) { EventType: EventTypeUpdate, } - decisions := svc.ProcessConfigMap(change, workloads) + decisions := svc.Process(change, workloads) if len(decisions) != 1 { t.Fatalf("Expected 1 decision, got %d", len(decisions)) @@ -157,7 +157,7 @@ func TestService_ProcessConfigMap_IgnoredResource(t *testing.T) { EventType: EventTypeUpdate, } - decisions := svc.ProcessConfigMap(change, workloads) + decisions := svc.Process(change, workloads) // Should still get a decision, but ShouldReload should be false for _, d := range decisions { @@ -205,7 +205,7 @@ func TestService_ProcessSecret_AutoReload(t *testing.T) { EventType: EventTypeUpdate, } - decisions := svc.ProcessSecret(change, workloads) + decisions := svc.Process(change, workloads) if len(decisions) != 1 { t.Fatalf("Expected 1 decision, got %d", len(decisions)) @@ -246,7 +246,7 @@ func 
TestService_ProcessConfigMap_DeleteEvent(t *testing.T) { EventType: EventTypeDelete, } - decisions := svc.ProcessConfigMap(change, workloads) + decisions := svc.Process(change, workloads) if len(decisions) != 1 { t.Fatalf("Expected 1 decision, got %d", len(decisions)) @@ -287,7 +287,7 @@ func TestService_ProcessConfigMap_DeleteEventDisabled(t *testing.T) { EventType: EventTypeDelete, } - decisions := svc.ProcessConfigMap(change, workloads) + decisions := svc.Process(change, workloads) // Should return nil when delete events are disabled if decisions != nil { @@ -496,7 +496,7 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { EventType: EventTypeUpdate, } - decisions := svc.ProcessConfigMap(change, workloads) + decisions := svc.Process(change, workloads) if len(decisions) != 3 { t.Fatalf("Expected 3 decisions, got %d", len(decisions)) @@ -572,7 +572,7 @@ func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { EventType: EventTypeUpdate, } - decisions := svc.ProcessConfigMap(change, workloads) + decisions := svc.Process(change, workloads) // Should only affect deploy1 (same namespace) reloadCount := 0 diff --git a/internal/pkg/workload/cronjob.go b/internal/pkg/workload/cronjob.go index 42df8eca6..80d672e33 100644 --- a/internal/pkg/workload/cronjob.go +++ b/internal/pkg/workload/cronjob.go @@ -103,99 +103,11 @@ func (w *CronJobWorkload) GetEnvFromSources() []corev1.EnvFromSource { } func (w *CronJobWorkload) UsesConfigMap(name string) bool { - spec := &w.cronjob.Spec.JobTemplate.Spec.Template.Spec - - // Check volumes - for _, vol := range spec.Volumes { - if vol.ConfigMap != nil && vol.ConfigMap.Name == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.ConfigMap != nil && source.ConfigMap.Name == name { - return true - } - } - } - } - - // Check containers - for _, container := range spec.Containers { - for _, envFrom := range container.EnvFrom { - if 
envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesConfigMap(&w.cronjob.Spec.JobTemplate.Spec.Template.Spec, name) } func (w *CronJobWorkload) UsesSecret(name string) bool { - spec := &w.cronjob.Spec.JobTemplate.Spec.Template.Spec - - // Check volumes - for _, vol := range spec.Volumes { - if vol.Secret != nil && vol.Secret.SecretName == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.Secret != nil && source.Secret.Name == name { - return true - } - } - } - } - - // Check containers - for _, container := range spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - return false + return 
SpecUsesSecret(&w.cronjob.Spec.JobTemplate.Spec.Template.Spec, name) } func (w *CronJobWorkload) GetOwnerReferences() []metav1.OwnerReference { diff --git a/internal/pkg/workload/daemonset.go b/internal/pkg/workload/daemonset.go index ca51f4b5d..85ac7b053 100644 --- a/internal/pkg/workload/daemonset.go +++ b/internal/pkg/workload/daemonset.go @@ -96,95 +96,11 @@ func (w *DaemonSetWorkload) GetEnvFromSources() []corev1.EnvFromSource { } func (w *DaemonSetWorkload) UsesConfigMap(name string) bool { - // Check volumes - for _, vol := range w.daemonset.Spec.Template.Spec.Volumes { - if vol.ConfigMap != nil && vol.ConfigMap.Name == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.ConfigMap != nil && source.ConfigMap.Name == name { - return true - } - } - } - } - - // Check envFrom - for _, container := range w.daemonset.Spec.Template.Spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range w.daemonset.Spec.Template.Spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesConfigMap(&w.daemonset.Spec.Template.Spec, name) } func (w *DaemonSetWorkload) UsesSecret(name string) bool { - // Check volumes - for _, vol := range w.daemonset.Spec.Template.Spec.Volumes { - if vol.Secret != nil && vol.Secret.SecretName == name { - return true - } - if vol.Projected != nil { - 
for _, source := range vol.Projected.Sources { - if source.Secret != nil && source.Secret.Name == name { - return true - } - } - } - } - - // Check envFrom - for _, container := range w.daemonset.Spec.Template.Spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range w.daemonset.Spec.Template.Spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesSecret(&w.daemonset.Spec.Template.Spec, name) } func (w *DaemonSetWorkload) GetOwnerReferences() []metav1.OwnerReference { diff --git a/internal/pkg/workload/deployment.go b/internal/pkg/workload/deployment.go index 1b5ab5f89..e4ebefb53 100644 --- a/internal/pkg/workload/deployment.go +++ b/internal/pkg/workload/deployment.go @@ -96,97 +96,11 @@ func (w *DeploymentWorkload) GetEnvFromSources() []corev1.EnvFromSource { } func (w *DeploymentWorkload) UsesConfigMap(name string) bool { - // Check volumes - for _, vol := range w.deployment.Spec.Template.Spec.Volumes { - if vol.ConfigMap != nil && vol.ConfigMap.Name == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.ConfigMap != nil && source.ConfigMap.Name == name { - return true - } - } - } - } - - // Check envFrom - for _, container := range w.deployment.Spec.Template.Spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name 
== name { - return true - } - } - // Check individual env vars - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range w.deployment.Spec.Template.Spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesConfigMap(&w.deployment.Spec.Template.Spec, name) } func (w *DeploymentWorkload) UsesSecret(name string) bool { - // Check volumes - for _, vol := range w.deployment.Spec.Template.Spec.Volumes { - if vol.Secret != nil && vol.Secret.SecretName == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.Secret != nil && source.Secret.Name == name { - return true - } - } - } - } - - // Check envFrom - for _, container := range w.deployment.Spec.Template.Spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - // Check individual env vars - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range w.deployment.Spec.Template.Spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - return false + 
return SpecUsesSecret(&w.deployment.Spec.Template.Spec, name) } func (w *DeploymentWorkload) GetOwnerReferences() []metav1.OwnerReference { diff --git a/internal/pkg/workload/job.go b/internal/pkg/workload/job.go index 85b01e9b3..4e6c9fc67 100644 --- a/internal/pkg/workload/job.go +++ b/internal/pkg/workload/job.go @@ -102,95 +102,11 @@ func (w *JobWorkload) GetEnvFromSources() []corev1.EnvFromSource { } func (w *JobWorkload) UsesConfigMap(name string) bool { - // Check volumes - for _, vol := range w.job.Spec.Template.Spec.Volumes { - if vol.ConfigMap != nil && vol.ConfigMap.Name == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.ConfigMap != nil && source.ConfigMap.Name == name { - return true - } - } - } - } - - // Check containers - for _, container := range w.job.Spec.Template.Spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range w.job.Spec.Template.Spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesConfigMap(&w.job.Spec.Template.Spec, name) } func (w *JobWorkload) UsesSecret(name string) bool { - // Check volumes - for _, vol := range w.job.Spec.Template.Spec.Volumes { - if vol.Secret != nil && vol.Secret.SecretName == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.Secret != nil 
&& source.Secret.Name == name { - return true - } - } - } - } - - // Check containers - for _, container := range w.job.Spec.Template.Spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range w.job.Spec.Template.Spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesSecret(&w.job.Spec.Template.Spec, name) } func (w *JobWorkload) GetOwnerReferences() []metav1.OwnerReference { diff --git a/internal/pkg/workload/rollout.go b/internal/pkg/workload/rollout.go index 7ea9643d1..f19c17132 100644 --- a/internal/pkg/workload/rollout.go +++ b/internal/pkg/workload/rollout.go @@ -137,99 +137,11 @@ func (w *RolloutWorkload) GetEnvFromSources() []corev1.EnvFromSource { } func (w *RolloutWorkload) UsesConfigMap(name string) bool { - spec := &w.rollout.Spec.Template.Spec - - // Check volumes - for _, vol := range spec.Volumes { - if vol.ConfigMap != nil && vol.ConfigMap.Name == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.ConfigMap != nil && source.ConfigMap.Name == name { - return true - } - } - } - } - - // Check containers - for _, container := range spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && 
env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesConfigMap(&w.rollout.Spec.Template.Spec, name) } func (w *RolloutWorkload) UsesSecret(name string) bool { - spec := &w.rollout.Spec.Template.Spec - - // Check volumes - for _, vol := range spec.Volumes { - if vol.Secret != nil && vol.Secret.SecretName == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.Secret != nil && source.Secret.Name == name { - return true - } - } - } - } - - // Check containers - for _, container := range spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesSecret(&w.rollout.Spec.Template.Spec, name) } func (w *RolloutWorkload) GetOwnerReferences() []metav1.OwnerReference { diff --git a/internal/pkg/workload/statefulset.go b/internal/pkg/workload/statefulset.go index 
003cef3d3..31dddeb29 100644 --- a/internal/pkg/workload/statefulset.go +++ b/internal/pkg/workload/statefulset.go @@ -96,95 +96,11 @@ func (w *StatefulSetWorkload) GetEnvFromSources() []corev1.EnvFromSource { } func (w *StatefulSetWorkload) UsesConfigMap(name string) bool { - // Check volumes - for _, vol := range w.statefulset.Spec.Template.Spec.Volumes { - if vol.ConfigMap != nil && vol.ConfigMap.Name == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.ConfigMap != nil && source.ConfigMap.Name == name { - return true - } - } - } - } - - // Check envFrom - for _, container := range w.statefulset.Spec.Template.Spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range w.statefulset.Spec.Template.Spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesConfigMap(&w.statefulset.Spec.Template.Spec, name) } func (w *StatefulSetWorkload) UsesSecret(name string) bool { - // Check volumes - for _, vol := range w.statefulset.Spec.Template.Spec.Volumes { - if vol.Secret != nil && vol.Secret.SecretName == name { - return true - } - if vol.Projected != nil { - for _, source := range vol.Projected.Sources { - if source.Secret != nil && source.Secret.Name == name { - return true - } - } - } - } - - // Check envFrom - for _, container := range 
w.statefulset.Spec.Template.Spec.Containers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - // Check init containers - for _, container := range w.statefulset.Spec.Template.Spec.InitContainers { - for _, envFrom := range container.EnvFrom { - if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { - return true - } - } - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { - return true - } - } - } - - return false + return SpecUsesSecret(&w.statefulset.Spec.Template.Spec, name) } func (w *StatefulSetWorkload) GetOwnerReferences() []metav1.OwnerReference { diff --git a/internal/pkg/workload/uses.go b/internal/pkg/workload/uses.go new file mode 100644 index 000000000..fd37a2f3c --- /dev/null +++ b/internal/pkg/workload/uses.go @@ -0,0 +1,77 @@ +package workload + +import corev1 "k8s.io/api/core/v1" + +// SpecUsesConfigMap checks if a PodSpec references the named ConfigMap. 
+func SpecUsesConfigMap(spec *corev1.PodSpec, name string) bool { + for _, vol := range spec.Volumes { + if vol.ConfigMap != nil && vol.ConfigMap.Name == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.ConfigMap != nil && source.ConfigMap.Name == name { + return true + } + } + } + } + + if containersUseConfigMap(spec.Containers, name) { + return true + } + return containersUseConfigMap(spec.InitContainers, name) +} + +func containersUseConfigMap(containers []corev1.Container, name string) bool { + for _, container := range containers { + for _, envFrom := range container.EnvFrom { + if envFrom.ConfigMapRef != nil && envFrom.ConfigMapRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == name { + return true + } + } + } + return false +} + +// SpecUsesSecret checks if a PodSpec references the named Secret. 
+func SpecUsesSecret(spec *corev1.PodSpec, name string) bool { + for _, vol := range spec.Volumes { + if vol.Secret != nil && vol.Secret.SecretName == name { + return true + } + if vol.Projected != nil { + for _, source := range vol.Projected.Sources { + if source.Secret != nil && source.Secret.Name == name { + return true + } + } + } + } + + if containersUseSecret(spec.Containers, name) { + return true + } + return containersUseSecret(spec.InitContainers, name) +} + +func containersUseSecret(containers []corev1.Container, name string) bool { + for _, container := range containers { + for _, envFrom := range container.EnvFrom { + if envFrom.SecretRef != nil && envFrom.SecretRef.Name == name { + return true + } + } + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == name { + return true + } + } + } + return false +} From 3cf01197487dfd7c58dffaa33d45d23cfda91ace Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:56 +0100 Subject: [PATCH 14/35] refactor(workload): centralize workload listing with registry-based listers and add Argo Rollouts support --- internal/pkg/config/config.go | 25 +- internal/pkg/config/config_test.go | 26 -- internal/pkg/config/validation.go | 11 +- internal/pkg/config/validation_test.go | 14 + .../pkg/controller/deployment_reconciler.go | 2 +- internal/pkg/events/recorder_test.go | 184 ++++++++++++ internal/pkg/metrics/prometheus_test.go | 194 ++++++++++++ internal/pkg/webhook/webhook_test.go | 283 ++++++++++++++++++ internal/pkg/workload/lister.go | 125 ++++---- internal/pkg/workload/registry.go | 59 +++- 10 files changed, 804 insertions(+), 119 deletions(-) create mode 100644 internal/pkg/events/recorder_test.go create mode 100644 internal/pkg/metrics/prometheus_test.go create mode 100644 internal/pkg/webhook/webhook_test.go diff --git a/internal/pkg/config/config.go 
b/internal/pkg/config/config.go index e7ec77f7a..c833328fa 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -2,6 +2,7 @@ package config import ( + "strings" "time" "k8s.io/apimachinery/pkg/labels" @@ -157,7 +158,7 @@ func DefaultAnnotations() AnnotationConfig { // IsResourceIgnored checks if a resource name should be ignored (case-insensitive). func (c *Config) IsResourceIgnored(name string) bool { for _, ignored := range c.IgnoredResources { - if equalFold(ignored, name) { + if strings.EqualFold(ignored, name) { return true } } @@ -167,7 +168,7 @@ func (c *Config) IsResourceIgnored(name string) bool { // IsWorkloadIgnored checks if a workload type should be ignored (case-insensitive). func (c *Config) IsWorkloadIgnored(workloadType string) bool { for _, ignored := range c.IgnoredWorkloads { - if equalFold(ignored, workloadType) { + if strings.EqualFold(ignored, workloadType) { return true } } @@ -184,23 +185,3 @@ func (c *Config) IsNamespaceIgnored(namespace string) bool { return false } -func equalFold(s, t string) bool { - if len(s) != len(t) { - return false - } - for i := 0; i < len(s); i++ { - c1, c2 := s[i], t[i] - if c1 != c2 { - if 'A' <= c1 && c1 <= 'Z' { - c1 += 'a' - 'A' - } - if 'A' <= c2 && c2 <= 'Z' { - c2 += 'a' - 'A' - } - if c1 != c2 { - return false - } - } - } - return true -} diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 38b7ab538..17a3e33df 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -201,29 +201,3 @@ func TestConfig_IsNamespaceIgnored(t *testing.T) { } } -func TestEqualFold(t *testing.T) { - tests := []struct { - s, t string - want bool - }{ - {"abc", "abc", true}, - {"ABC", "abc", true}, - {"abc", "ABC", true}, - {"aBc", "AbC", true}, - {"abc", "abcd", false}, - {"", "", true}, - {"a", "", false}, - {"", "a", false}, - } - - for _, tt := range tests { - t.Run( - tt.s+"_"+tt.t, func(t *testing.T) { - got := 
equalFold(tt.s, tt.t) - if got != tt.want { - t.Errorf("equalFold(%q, %q) = %v, want %v", tt.s, tt.t, got, tt.want) - } - }, - ) - } -} diff --git a/internal/pkg/config/validation.go b/internal/pkg/config/validation.go index 0ebce4004..161102a01 100644 --- a/internal/pkg/config/validation.go +++ b/internal/pkg/config/validation.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "github.com/stakater/Reloader/internal/pkg/workload" "k8s.io/apimachinery/pkg/labels" ) @@ -102,8 +103,16 @@ func (c *Config) Validate() error { // Normalize IgnoredResources to lowercase for consistent comparison c.IgnoredResources = normalizeToLower(c.IgnoredResources) - // Normalize IgnoredWorkloads to lowercase + // Validate and normalize IgnoredWorkloads c.IgnoredWorkloads = normalizeToLower(c.IgnoredWorkloads) + for _, w := range c.IgnoredWorkloads { + if _, err := workload.KindFromString(w); err != nil { + errs = append(errs, ValidationError{ + Field: "IgnoredWorkloads", + Message: fmt.Sprintf("unknown workload type %q", w), + }) + } + } if len(errs) > 0 { return errs diff --git a/internal/pkg/config/validation_test.go b/internal/pkg/config/validation_test.go index 54e0a47d7..45eafb732 100644 --- a/internal/pkg/config/validation_test.go +++ b/internal/pkg/config/validation_test.go @@ -178,6 +178,20 @@ func TestConfig_Validate_NormalizesIgnoredWorkloads(t *testing.T) { } } +func TestConfig_Validate_InvalidIgnoredWorkload(t *testing.T) { + cfg := NewDefault() + cfg.IgnoredWorkloads = []string{"deployment", "invalidtype"} + + err := cfg.Validate() + if err == nil { + t.Fatal("Validate() should return error for invalid workload type") + } + + if !strings.Contains(err.Error(), "invalidtype") { + t.Errorf("Error should mention invalid workload type, got: %v", err) + } +} + func TestConfig_Validate_MultipleErrors(t *testing.T) { cfg := NewDefault() cfg.ReloadStrategy = "invalid" diff --git a/internal/pkg/controller/deployment_reconciler.go b/internal/pkg/controller/deployment_reconciler.go index 
08c71ab49..b0acb643f 100644 --- a/internal/pkg/controller/deployment_reconciler.go +++ b/internal/pkg/controller/deployment_reconciler.go @@ -26,7 +26,7 @@ type DeploymentReconciler struct { // Reconcile handles Deployment pause expiration. func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("deployment", req.NamespacedName) - log.Info("Deployment reconciling ", "namespace", req.Namespace, "name", req.Name) + log.V(1).Info("reconciling deployment", "namespace", req.Namespace, "name", req.Name) var deploy appsv1.Deployment if err := r.Get(ctx, req.NamespacedName, &deploy); err != nil { diff --git a/internal/pkg/events/recorder_test.go b/internal/pkg/events/recorder_test.go new file mode 100644 index 000000000..9bf6a9170 --- /dev/null +++ b/internal/pkg/events/recorder_test.go @@ -0,0 +1,184 @@ +package events + +import ( + "errors" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" +) + +func TestNewRecorder_NilInput(t *testing.T) { + r := NewRecorder(nil) + if r != nil { + t.Error("NewRecorder(nil) should return nil") + } +} + +func TestNewRecorder_ValidInput(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + r := NewRecorder(fakeRecorder) + if r == nil { + t.Error("NewRecorder with valid recorder should not return nil") + } +} + +func TestReloadSuccess_RecordsEvent(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + r := NewRecorder(fakeRecorder) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + r.ReloadSuccess(pod, "ConfigMap", "my-config") + + select { + case event := <-fakeRecorder.Events: + if event == "" { + t.Error("Expected event to be recorded") + } + // Event format: "Normal Reloaded Reloaded due to ConfigMap my-config change" + expectedContains := []string{"Normal", "Reloaded", 
"ConfigMap", "my-config"} + for _, expected := range expectedContains { + if !contains(event, expected) { + t.Errorf("Event %q should contain %q", event, expected) + } + } + default: + t.Error("Expected event to be recorded, but none was") + } +} + +func TestReloadFailed_RecordsWarningEvent(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + r := NewRecorder(fakeRecorder) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + testErr := errors.New("update conflict") + r.ReloadFailed(pod, "Secret", "my-secret", testErr) + + select { + case event := <-fakeRecorder.Events: + if event == "" { + t.Error("Expected event to be recorded") + } + // Event format: "Warning ReloadFailed Failed to reload due to Secret my-secret change: update conflict" + expectedContains := []string{"Warning", "ReloadFailed", "Secret", "my-secret", "update conflict"} + for _, expected := range expectedContains { + if !contains(event, expected) { + t.Errorf("Event %q should contain %q", event, expected) + } + } + default: + t.Error("Expected event to be recorded, but none was") + } +} + +func TestNilRecorder_NoPanic(t *testing.T) { + var r *Recorder = nil + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + // These should not panic + r.ReloadSuccess(pod, "ConfigMap", "my-config") + r.ReloadFailed(pod, "Secret", "my-secret", errors.New("test error")) +} + +func TestRecorder_NilInternalRecorder(t *testing.T) { + // Create a Recorder with nil internal recorder (edge case) + r := &Recorder{recorder: nil} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + // These should not panic + r.ReloadSuccess(pod, "ConfigMap", "my-config") + r.ReloadFailed(pod, "Secret", "my-secret", errors.New("test error")) +} + +func TestEventConstants(t *testing.T) { + if EventTypeNormal != corev1.EventTypeNormal { + 
t.Errorf("EventTypeNormal = %q, want %q", EventTypeNormal, corev1.EventTypeNormal) + } + if EventTypeWarning != corev1.EventTypeWarning { + t.Errorf("EventTypeWarning = %q, want %q", EventTypeWarning, corev1.EventTypeWarning) + } + if ReasonReloaded != "Reloaded" { + t.Errorf("ReasonReloaded = %q, want %q", ReasonReloaded, "Reloaded") + } + if ReasonReloadFailed != "ReloadFailed" { + t.Errorf("ReasonReloadFailed = %q, want %q", ReasonReloadFailed, "ReloadFailed") + } +} + +func TestReloadSuccess_DifferentObjectTypes(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + r := NewRecorder(fakeRecorder) + + tests := []struct { + name string + object runtime.Object + }{ + { + name: "Pod", + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}, + }, + }, + { + name: "ConfigMap", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r.ReloadSuccess(tt.object, "ConfigMap", "my-config") + + select { + case event := <-fakeRecorder.Events: + if event == "" { + t.Error("Expected event to be recorded") + } + default: + t.Error("Expected event to be recorded") + } + }) + } +} + +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsSubstring(s, substr)) +} + +func containsSubstring(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/internal/pkg/metrics/prometheus_test.go b/internal/pkg/metrics/prometheus_test.go new file mode 100644 index 000000000..47b4392d4 --- /dev/null +++ b/internal/pkg/metrics/prometheus_test.go @@ -0,0 +1,194 @@ +package metrics + +import ( + "os" + "testing" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +func TestNewCollectors_CreatesCounters(t *testing.T) { 
+ collectors := NewCollectors() + + if collectors.Reloaded == nil { + t.Error("NewCollectors() should create Reloaded counter") + } + if collectors.ReloadedByNamespace == nil { + t.Error("NewCollectors() should create ReloadedByNamespace counter") + } +} + +func TestNewCollectors_InitializesWithZero(t *testing.T) { + collectors := NewCollectors() + + // Check that success=true counter is initialized to 0 + metric := &dto.Metric{} + err := collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 0 { + t.Errorf("Initial success=true counter = %v, want 0", metric.Counter.GetValue()) + } + + // Check that success=false counter is initialized to 0 + err = collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 0 { + t.Errorf("Initial success=false counter = %v, want 0", metric.Counter.GetValue()) + } +} + +func TestRecordReload_Success(t *testing.T) { + collectors := NewCollectors() + collectors.RecordReload(true, "default") + + metric := &dto.Metric{} + err := collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("success=true counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestRecordReload_Failure(t *testing.T) { + collectors := NewCollectors() + collectors.RecordReload(false, "default") + + metric := &dto.Metric{} + err := collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("success=false counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestRecordReload_MultipleIncrements(t *testing.T) { + collectors := NewCollectors() + 
collectors.RecordReload(true, "default") + collectors.RecordReload(true, "default") + collectors.RecordReload(false, "default") + + metric := &dto.Metric{} + + err := collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 2 { + t.Errorf("success=true counter = %v, want 2", metric.Counter.GetValue()) + } + + err = collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("success=false counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestRecordReload_WithNamespaceTracking(t *testing.T) { + // Enable namespace tracking + os.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") + defer os.Unsetenv("METRICS_COUNT_BY_NAMESPACE") + + collectors := NewCollectors() + collectors.RecordReload(true, "kube-system") + + metric := &dto.Metric{} + err := collectors.ReloadedByNamespace.With(prometheus.Labels{ + "success": "true", + "namespace": "kube-system", + }).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("namespace counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestRecordReload_WithoutNamespaceTracking(t *testing.T) { + // Ensure namespace tracking is disabled + os.Unsetenv("METRICS_COUNT_BY_NAMESPACE") + + collectors := NewCollectors() + collectors.RecordReload(true, "kube-system") + + // The ReloadedByNamespace counter should not be incremented + // We can verify by checking countByNamespace is false + if collectors.countByNamespace { + t.Error("countByNamespace should be false when env var is not set") + } +} + +func TestNilCollectors_NoPanic(t *testing.T) { + var c *Collectors = nil + + // This should not panic + c.RecordReload(true, "default") + c.RecordReload(false, "default") +} + +func 
TestRecordReload_DifferentNamespaces(t *testing.T) { + os.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") + defer os.Unsetenv("METRICS_COUNT_BY_NAMESPACE") + + collectors := NewCollectors() + collectors.RecordReload(true, "namespace-a") + collectors.RecordReload(true, "namespace-b") + collectors.RecordReload(true, "namespace-a") + + metric := &dto.Metric{} + + // Check namespace-a has 2 reloads + err := collectors.ReloadedByNamespace.With(prometheus.Labels{ + "success": "true", + "namespace": "namespace-a", + }).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 2 { + t.Errorf("namespace-a counter = %v, want 2", metric.Counter.GetValue()) + } + + // Check namespace-b has 1 reload + err = collectors.ReloadedByNamespace.With(prometheus.Labels{ + "success": "true", + "namespace": "namespace-b", + }).Write(metric) + if err != nil { + t.Fatalf("Failed to get metric: %v", err) + } + if metric.Counter.GetValue() != 1 { + t.Errorf("namespace-b counter = %v, want 1", metric.Counter.GetValue()) + } +} + +func TestCollectors_MetricNames(t *testing.T) { + collectors := NewCollectors() + + // Verify the Reloaded metric has correct description + ch := make(chan *prometheus.Desc, 10) + collectors.Reloaded.Describe(ch) + close(ch) + + found := false + for desc := range ch { + if desc.String() != "" { + found = true + } + } + if !found { + t.Error("Expected Reloaded metric to have a description") + } +} diff --git a/internal/pkg/webhook/webhook_test.go b/internal/pkg/webhook/webhook_test.go new file mode 100644 index 000000000..acb7b983d --- /dev/null +++ b/internal/pkg/webhook/webhook_test.go @@ -0,0 +1,283 @@ +package webhook + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/go-logr/logr" +) + +func TestNewClient_SetsURL(t *testing.T) { + c := NewClient("http://example.com/webhook", logr.Discard()) + + if c == nil { + t.Fatal("NewClient should not 
return nil") + } + if c.url != "http://example.com/webhook" { + t.Errorf("URL = %q, want %q", c.url, "http://example.com/webhook") + } + if c.httpClient == nil { + t.Error("httpClient should not be nil") + } + if c.httpClient.Timeout != 30*time.Second { + t.Errorf("Timeout = %v, want %v", c.httpClient.Timeout, 30*time.Second) + } +} + +func TestIsConfigured_NilClient(t *testing.T) { + var c *Client = nil + + if c.IsConfigured() { + t.Error("IsConfigured() should return false for nil client") + } +} + +func TestIsConfigured_EmptyURL(t *testing.T) { + c := NewClient("", logr.Discard()) + + if c.IsConfigured() { + t.Error("IsConfigured() should return false for empty URL") + } +} + +func TestIsConfigured_ValidURL(t *testing.T) { + c := NewClient("http://example.com/webhook", logr.Discard()) + + if !c.IsConfigured() { + t.Error("IsConfigured() should return true for valid URL") + } +} + +func TestSend_EmptyURL_ReturnsNil(t *testing.T) { + c := NewClient("", logr.Discard()) + + payload := Payload{ + Kind: "ConfigMap", + Namespace: "default", + ResourceName: "my-config", + ResourceType: "configmap", + } + + err := c.Send(context.Background(), payload) + if err != nil { + t.Errorf("Send() with empty URL should return nil, got %v", err) + } +} + +func TestSend_MarshalPayload(t *testing.T) { + var receivedPayload Payload + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + json.Unmarshal(body, &receivedPayload) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + + payload := Payload{ + Kind: "ConfigMap", + Namespace: "default", + ResourceName: "my-config", + ResourceType: "configmap", + Hash: "abc123", + Timestamp: time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC), + Workloads: []WorkloadInfo{ + {Kind: "Deployment", Name: "my-deploy", Namespace: "default"}, + }, + } + + err := c.Send(context.Background(), payload) + if err != nil { + 
t.Fatalf("Send() error = %v", err) + } + + if receivedPayload.Kind != "ConfigMap" { + t.Errorf("Received Kind = %q, want %q", receivedPayload.Kind, "ConfigMap") + } + if receivedPayload.Namespace != "default" { + t.Errorf("Received Namespace = %q, want %q", receivedPayload.Namespace, "default") + } + if receivedPayload.ResourceName != "my-config" { + t.Errorf("Received ResourceName = %q, want %q", receivedPayload.ResourceName, "my-config") + } + if receivedPayload.Hash != "abc123" { + t.Errorf("Received Hash = %q, want %q", receivedPayload.Hash, "abc123") + } + if len(receivedPayload.Workloads) != 1 { + t.Errorf("Received Workloads count = %d, want 1", len(receivedPayload.Workloads)) + } +} + +func TestSend_SetsCorrectHeaders(t *testing.T) { + var contentType, userAgent string + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + contentType = r.Header.Get("Content-Type") + userAgent = r.Header.Get("User-Agent") + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + + err := c.Send(context.Background(), Payload{}) + if err != nil { + t.Fatalf("Send() error = %v", err) + } + + if contentType != "application/json" { + t.Errorf("Content-Type = %q, want %q", contentType, "application/json") + } + if userAgent != "Reloader/2.0" { + t.Errorf("User-Agent = %q, want %q", userAgent, "Reloader/2.0") + } +} + +func TestSend_UsesPostMethod(t *testing.T) { + var method string + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + method = r.Method + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + + err := c.Send(context.Background(), Payload{}) + if err != nil { + t.Fatalf("Send() error = %v", err) + } + + if method != http.MethodPost { + t.Errorf("Method = %q, want %q", method, http.MethodPost) + } +} + +func TestSend_Non2xxResponse(t *testing.T) { + tests := []struct { + name 
string + statusCode int + wantErr bool + }{ + {"200 OK", 200, false}, + {"201 Created", 201, false}, + {"204 No Content", 204, false}, + {"299 upper bound", 299, false}, + {"300 redirect", 300, true}, + {"400 Bad Request", 400, true}, + {"404 Not Found", 404, true}, + {"500 Internal Error", 500, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tt.statusCode) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + err := c.Send(context.Background(), Payload{}) + + if (err != nil) != tt.wantErr { + t.Errorf("Send() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestSend_NetworkError(t *testing.T) { + // Use a URL that won't connect + c := NewClient("http://127.0.0.1:1", logr.Discard()) + + err := c.Send(context.Background(), Payload{}) + if err == nil { + t.Error("Send() should return error for network failure") + } +} + +func TestSend_ContextCancellation(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(100 * time.Millisecond) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + c := NewClient(server.URL, logr.Discard()) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + err := c.Send(ctx, Payload{}) + if err == nil { + t.Error("Send() should return error for cancelled context") + } +} + +func TestPayload_JSONSerialization(t *testing.T) { + payload := Payload{ + Kind: "ConfigMap", + Namespace: "default", + ResourceName: "my-config", + ResourceType: "configmap", + Hash: "abc123", + Timestamp: time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC), + Workloads: []WorkloadInfo{ + {Kind: "Deployment", Name: "my-deploy", Namespace: "default"}, + {Kind: "StatefulSet", Name: "my-sts", Namespace: "default"}, + }, + } + + data, err := json.Marshal(payload) + if err != nil { + 
t.Fatalf("Failed to marshal payload: %v", err) + } + + var unmarshaled Payload + if err := json.Unmarshal(data, &unmarshaled); err != nil { + t.Fatalf("Failed to unmarshal payload: %v", err) + } + + if unmarshaled.Kind != payload.Kind { + t.Errorf("Kind = %q, want %q", unmarshaled.Kind, payload.Kind) + } + if len(unmarshaled.Workloads) != 2 { + t.Errorf("Workloads count = %d, want 2", len(unmarshaled.Workloads)) + } +} + +func TestWorkloadInfo_JSONSerialization(t *testing.T) { + info := WorkloadInfo{ + Kind: "Deployment", + Name: "my-deploy", + Namespace: "production", + } + + data, err := json.Marshal(info) + if err != nil { + t.Fatalf("Failed to marshal: %v", err) + } + + var unmarshaled WorkloadInfo + if err := json.Unmarshal(data, &unmarshaled); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + + if unmarshaled.Kind != "Deployment" { + t.Errorf("Kind = %q, want %q", unmarshaled.Kind, "Deployment") + } + if unmarshaled.Name != "my-deploy" { + t.Errorf("Name = %q, want %q", unmarshaled.Name, "my-deploy") + } + if unmarshaled.Namespace != "production" { + t.Errorf("Namespace = %q, want %q", unmarshaled.Namespace, "production") + } +} diff --git a/internal/pkg/workload/lister.go b/internal/pkg/workload/lister.go index a1487bb74..a30bdfcc7 100644 --- a/internal/pkg/workload/lister.go +++ b/internal/pkg/workload/lister.go @@ -3,6 +3,7 @@ package workload import ( "context" + argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -49,63 +50,81 @@ func (l *Lister) List(ctx context.Context, namespace string) ([]WorkloadAccessor } func (l *Lister) listByKind(ctx context.Context, namespace string, kind Kind) ([]WorkloadAccessor, error) { - switch kind { - case KindDeployment: - var list appsv1.DeploymentList - if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := 
make([]WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = NewDeploymentWorkload(&list.Items[i]) - } - return result, nil + lister := l.Registry.ListerFor(kind) + if lister == nil { + return nil, nil + } + return lister(ctx, l.Client, namespace) +} - case KindDaemonSet: - var list appsv1.DaemonSetList - if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = NewDaemonSetWorkload(&list.Items[i]) - } - return result, nil +func listDeployments(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { + var list appsv1.DeploymentList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewDeploymentWorkload(&list.Items[i]) + } + return result, nil +} - case KindStatefulSet: - var list appsv1.StatefulSetList - if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = NewStatefulSetWorkload(&list.Items[i]) - } - return result, nil +func listDaemonSets(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { + var list appsv1.DaemonSetList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewDaemonSetWorkload(&list.Items[i]) + } + return result, nil +} - case KindJob: - var list batchv1.JobList - if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = NewJobWorkload(&list.Items[i]) 
- } - return result, nil +func listStatefulSets(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { + var list appsv1.StatefulSetList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewStatefulSetWorkload(&list.Items[i]) + } + return result, nil +} - case KindCronJob: - var list batchv1.CronJobList - if err := l.Client.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = NewCronJobWorkload(&list.Items[i]) - } - return result, nil +func listJobs(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { + var list batchv1.JobList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewJobWorkload(&list.Items[i]) + } + return result, nil +} - default: - return nil, nil +func listCronJobs(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { + var list batchv1.CronJobList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewCronJobWorkload(&list.Items[i]) + } + return result, nil +} + +func listRollouts(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { + var list argorolloutv1alpha1.RolloutList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewRolloutWorkload(&list.Items[i]) + } + return result, nil } diff --git 
a/internal/pkg/workload/registry.go b/internal/pkg/workload/registry.go index 8525d5624..5e7ad0c1a 100644 --- a/internal/pkg/workload/registry.go +++ b/internal/pkg/workload/registry.go @@ -1,7 +1,9 @@ package workload import ( + "context" "fmt" + "strings" argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" appsv1 "k8s.io/api/apps/v1" @@ -9,16 +11,36 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +// WorkloadLister is a function that lists workloads of a specific kind. +type WorkloadLister func(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) + // Registry provides factory methods for creating Workload instances. type Registry struct { argoRolloutsEnabled bool + listers map[Kind]WorkloadLister } // NewRegistry creates a new workload registry. func NewRegistry(argoRolloutsEnabled bool) *Registry { - return &Registry{ + r := &Registry{ argoRolloutsEnabled: argoRolloutsEnabled, + listers: map[Kind]WorkloadLister{ + KindDeployment: listDeployments, + KindDaemonSet: listDaemonSets, + KindStatefulSet: listStatefulSets, + KindJob: listJobs, + KindCronJob: listCronJobs, + }, + } + if argoRolloutsEnabled { + r.listers[KindArgoRollout] = listRollouts } + return r +} + +// ListerFor returns the lister function for the given kind, or nil if not found. +func (r *Registry) ListerFor(kind Kind) WorkloadLister { + return r.listers[kind] } // SupportedKinds returns all supported workload kinds. @@ -59,22 +81,27 @@ func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { } } +// kindAliases maps string representations to Kind constants. +// Supports lowercase, title case, and plural forms for user convenience. 
+var kindAliases = map[string]Kind{ + "deployment": KindDeployment, + "deployments": KindDeployment, + "daemonset": KindDaemonSet, + "daemonsets": KindDaemonSet, + "statefulset": KindStatefulSet, + "statefulsets": KindStatefulSet, + "rollout": KindArgoRollout, + "rollouts": KindArgoRollout, + "job": KindJob, + "jobs": KindJob, + "cronjob": KindCronJob, + "cronjobs": KindCronJob, +} + // KindFromString converts a string to a Kind. func KindFromString(s string) (Kind, error) { - switch s { - case "Deployment", "deployment", "deployments": - return KindDeployment, nil - case "DaemonSet", "daemonset", "daemonsets": - return KindDaemonSet, nil - case "StatefulSet", "statefulset", "statefulsets": - return KindStatefulSet, nil - case "Rollout", "rollout", "rollouts": - return KindArgoRollout, nil - case "Job", "job", "jobs": - return KindJob, nil - case "CronJob", "cronjob", "cronjobs": - return KindCronJob, nil - default: - return "", fmt.Errorf("unknown workload kind: %s", s) + if k, ok := kindAliases[strings.ToLower(s)]; ok { + return k, nil } + return "", fmt.Errorf("unknown workload kind: %s", s) } From 3a8c300d350acd83135cc1b15331526f7c7b1c9d Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:56 +0100 Subject: [PATCH 15/35] refactor: unify label set implementations and rename variables for clarity and consistency across packages --- internal/pkg/config/flags.go | 212 ++++++++++++------ .../pkg/controller/namespace_reconciler.go | 15 +- internal/pkg/metrics/prometheus.go | 5 +- internal/pkg/reload/predicate.go | 13 +- 4 files changed, 153 insertions(+), 92 deletions(-) diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index f3fe49552..1de5d2ef8 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -25,104 +25,174 @@ var fv flagValues // Call this before parsing flags, then call ApplyFlags after parsing. 
func BindFlags(fs *pflag.FlagSet, cfg *Config) { // Auto reload - fs.BoolVar(&cfg.AutoReloadAll, "auto-reload-all", cfg.AutoReloadAll, - "Automatically reload all resources when their configmaps/secrets are updated, without requiring annotations") + fs.BoolVar( + &cfg.AutoReloadAll, "auto-reload-all", cfg.AutoReloadAll, + "Automatically reload all resources when their configmaps/secrets are updated, without requiring annotations", + ) // Reload strategy - fs.StringVar((*string)(&cfg.ReloadStrategy), "reload-strategy", string(cfg.ReloadStrategy), - "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'") + fs.StringVar( + (*string)(&cfg.ReloadStrategy), "reload-strategy", string(cfg.ReloadStrategy), + "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'", + ) - // Argo Rollouts (note: capital A in Argo for backward compatibility) - fs.StringVar(&fv.isArgoRollouts, "is-Argo-Rollouts", "false", - "Enable Argo Rollouts support (true/false)") + // Argo Rollouts + fs.StringVar( + &fv.isArgoRollouts, "is-Argo-Rollouts", "false", + "Enable Argo Rollouts support (true/false)", + ) // Event watching - fs.StringVar(&fv.reloadOnCreate, "reload-on-create", "false", - "Reload when configmaps/secrets are created (true/false)") - fs.StringVar(&fv.reloadOnDelete, "reload-on-delete", "false", - "Reload when configmaps/secrets are deleted (true/false)") + fs.StringVar( + &fv.reloadOnCreate, "reload-on-create", "false", + "Reload when configmaps/secrets are created (true/false)", + ) + fs.StringVar( + &fv.reloadOnDelete, "reload-on-delete", "false", + "Reload when configmaps/secrets are deleted (true/false)", + ) // Sync after restart - fs.BoolVar(&cfg.SyncAfterRestart, "sync-after-restart", cfg.SyncAfterRestart, - "Trigger sync operation after restart") + fs.BoolVar( + &cfg.SyncAfterRestart, "sync-after-restart", cfg.SyncAfterRestart, + "Trigger sync operation after restart", + ) // High 
availability / Leader election - fs.BoolVar(&cfg.EnableHA, "enable-ha", cfg.EnableHA, - "Enable high-availability mode with leader election") - fs.StringVar(&cfg.LeaderElection.LockName, "leader-election-id", cfg.LeaderElection.LockName, - "Name of the lease resource for leader election") - fs.StringVar(&cfg.LeaderElection.Namespace, "leader-election-namespace", cfg.LeaderElection.Namespace, - "Namespace for the leader election lease (defaults to pod namespace)") - fs.DurationVar(&cfg.LeaderElection.LeaseDuration, "leader-election-lease-duration", cfg.LeaderElection.LeaseDuration, - "Duration that non-leader candidates will wait before attempting to acquire leadership") - fs.DurationVar(&cfg.LeaderElection.RenewDeadline, "leader-election-renew-deadline", cfg.LeaderElection.RenewDeadline, - "Duration that the acting leader will retry refreshing leadership before giving up") - fs.DurationVar(&cfg.LeaderElection.RetryPeriod, "leader-election-retry-period", cfg.LeaderElection.RetryPeriod, - "Duration between leader election retries") - fs.BoolVar(&cfg.LeaderElection.ReleaseOnCancel, "leader-election-release-on-cancel", cfg.LeaderElection.ReleaseOnCancel, - "Release the leader lock when the manager is stopped") + fs.BoolVar( + &cfg.EnableHA, "enable-ha", cfg.EnableHA, + "Enable high-availability mode with leader election", + ) + fs.StringVar( + &cfg.LeaderElection.LockName, "leader-election-id", cfg.LeaderElection.LockName, + "Name of the lease resource for leader election", + ) + fs.StringVar( + &cfg.LeaderElection.Namespace, "leader-election-namespace", cfg.LeaderElection.Namespace, + "Namespace for the leader election lease (defaults to pod namespace)", + ) + fs.DurationVar( + &cfg.LeaderElection.LeaseDuration, "leader-election-lease-duration", cfg.LeaderElection.LeaseDuration, + "Duration that non-leader candidates will wait before attempting to acquire leadership", + ) + fs.DurationVar( + &cfg.LeaderElection.RenewDeadline, "leader-election-renew-deadline", 
cfg.LeaderElection.RenewDeadline, + "Duration that the acting leader will retry refreshing leadership before giving up", + ) + fs.DurationVar( + &cfg.LeaderElection.RetryPeriod, "leader-election-retry-period", cfg.LeaderElection.RetryPeriod, + "Duration between leader election retries", + ) + fs.BoolVar( + &cfg.LeaderElection.ReleaseOnCancel, "leader-election-release-on-cancel", cfg.LeaderElection.ReleaseOnCancel, + "Release the leader lock when the manager is stopped", + ) // Webhook - fs.StringVar(&cfg.WebhookURL, "webhook-url", cfg.WebhookURL, - "URL to send notification instead of triggering reload") + fs.StringVar( + &cfg.WebhookURL, "webhook-url", cfg.WebhookURL, + "URL to send notification instead of triggering reload", + ) // Filtering - resources (use StringVar not StringSliceVar for simpler parsing) - fs.StringVar(&fv.ignoredResources, "resources-to-ignore", "", - "Comma-separated list of resources to ignore (valid options: 'configMaps' or 'secrets')") - fs.StringVar(&fv.ignoredWorkloads, "ignored-workload-types", "", - "Comma-separated list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)") - fs.StringVar(&fv.ignoredNamespaces, "namespaces-to-ignore", "", - "Comma-separated list of namespaces to ignore") + fs.StringVar( + &fv.ignoredResources, "resources-to-ignore", "", + "Comma-separated list of resources to ignore (valid options: 'configMaps' or 'secrets')", + ) + fs.StringVar( + &fv.ignoredWorkloads, "ignored-workload-types", "", + "Comma-separated list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)", + ) + fs.StringVar( + &fv.ignoredNamespaces, "namespaces-to-ignore", "", + "Comma-separated list of namespaces to ignore", + ) // Filtering - selectors - fs.StringVar(&fv.namespaceSelectors, "namespace-selector", "", - "Comma-separated list of namespace label selectors") - fs.StringVar(&fv.resourceSelectors, "resource-label-selector", "", - "Comma-separated list of resource label selectors") + 
fs.StringVar( + &fv.namespaceSelectors, "namespace-selector", "", + "Comma-separated list of namespace label selectors", + ) + fs.StringVar( + &fv.resourceSelectors, "resource-label-selector", "", + "Comma-separated list of resource label selectors", + ) // Logging - fs.StringVar(&cfg.LogFormat, "log-format", cfg.LogFormat, - "Log format: 'json' or empty for default") - fs.StringVar(&cfg.LogLevel, "log-level", cfg.LogLevel, - "Log level: trace, debug, info, warning, error, fatal, panic") + fs.StringVar( + &cfg.LogFormat, "log-format", cfg.LogFormat, + "Log format: 'json' or empty for default", + ) + fs.StringVar( + &cfg.LogLevel, "log-level", cfg.LogLevel, + "Log level: trace, debug, info, warning, error, fatal, panic", + ) // Metrics - fs.StringVar(&cfg.MetricsAddr, "metrics-addr", cfg.MetricsAddr, - "Address to serve metrics on") + fs.StringVar( + &cfg.MetricsAddr, "metrics-addr", cfg.MetricsAddr, + "Address to serve metrics on", + ) // Health probes - fs.StringVar(&cfg.HealthAddr, "health-addr", cfg.HealthAddr, - "Address to serve health probes on") + fs.StringVar( + &cfg.HealthAddr, "health-addr", cfg.HealthAddr, + "Address to serve health probes on", + ) // Profiling - fs.BoolVar(&cfg.EnablePProf, "enable-pprof", cfg.EnablePProf, - "Enable pprof profiling server") - fs.StringVar(&cfg.PProfAddr, "pprof-addr", cfg.PProfAddr, - "Address for pprof server") + fs.BoolVar( + &cfg.EnablePProf, "enable-pprof", cfg.EnablePProf, + "Enable pprof profiling server", + ) + fs.StringVar( + &cfg.PProfAddr, "pprof-addr", cfg.PProfAddr, + "Address for pprof server", + ) // Annotation customization (flag names match v1 for backward compatibility) - fs.StringVar(&cfg.Annotations.Auto, "auto-annotation", cfg.Annotations.Auto, - "Annotation to detect changes in secrets/configmaps") - fs.StringVar(&cfg.Annotations.ConfigmapAuto, "configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, - "Annotation to detect changes in configmaps") - fs.StringVar(&cfg.Annotations.SecretAuto, 
"secret-auto-annotation", cfg.Annotations.SecretAuto, - "Annotation to detect changes in secrets") - fs.StringVar(&cfg.Annotations.ConfigmapReload, "configmap-annotation", cfg.Annotations.ConfigmapReload, - "Annotation to detect changes in configmaps, specified by name") - fs.StringVar(&cfg.Annotations.SecretReload, "secret-annotation", cfg.Annotations.SecretReload, - "Annotation to detect changes in secrets, specified by name") - fs.StringVar(&cfg.Annotations.Search, "auto-search-annotation", cfg.Annotations.Search, - "Annotation to detect changes in configmaps or secrets tagged with special match annotation") - fs.StringVar(&cfg.Annotations.Match, "search-match-annotation", cfg.Annotations.Match, - "Annotation to mark secrets or configmaps to match the search") - fs.StringVar(&cfg.Annotations.PausePeriod, "pause-deployment-annotation", cfg.Annotations.PausePeriod, - "Annotation to define the time period to pause a deployment after a configmap/secret change") - fs.StringVar(&cfg.Annotations.PausedAt, "pause-deployment-time-annotation", cfg.Annotations.PausedAt, - "Annotation to indicate when a deployment was paused by Reloader") + fs.StringVar( + &cfg.Annotations.Auto, "auto-annotation", cfg.Annotations.Auto, + "Annotation to detect changes in secrets/configmaps", + ) + fs.StringVar( + &cfg.Annotations.ConfigmapAuto, "configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, + "Annotation to detect changes in configmaps", + ) + fs.StringVar( + &cfg.Annotations.SecretAuto, "secret-auto-annotation", cfg.Annotations.SecretAuto, + "Annotation to detect changes in secrets", + ) + fs.StringVar( + &cfg.Annotations.ConfigmapReload, "configmap-annotation", cfg.Annotations.ConfigmapReload, + "Annotation to detect changes in configmaps, specified by name", + ) + fs.StringVar( + &cfg.Annotations.SecretReload, "secret-annotation", cfg.Annotations.SecretReload, + "Annotation to detect changes in secrets, specified by name", + ) + fs.StringVar( + &cfg.Annotations.Search, 
"auto-search-annotation", cfg.Annotations.Search, + "Annotation to detect changes in configmaps or secrets tagged with special match annotation", + ) + fs.StringVar( + &cfg.Annotations.Match, "search-match-annotation", cfg.Annotations.Match, + "Annotation to mark secrets or configmaps to match the search", + ) + fs.StringVar( + &cfg.Annotations.PausePeriod, "pause-deployment-annotation", cfg.Annotations.PausePeriod, + "Annotation to define the time period to pause a deployment after a configmap/secret change", + ) + fs.StringVar( + &cfg.Annotations.PausedAt, "pause-deployment-time-annotation", cfg.Annotations.PausedAt, + "Annotation to indicate when a deployment was paused by Reloader", + ) // Watched namespace (for single-namespace mode) - fs.StringVar(&cfg.WatchedNamespace, "watch-namespace", cfg.WatchedNamespace, - "Namespace to watch (empty for all namespaces)") + fs.StringVar( + &cfg.WatchedNamespace, "watch-namespace", cfg.WatchedNamespace, + "Namespace to watch (empty for all namespaces)", + ) } // ApplyFlags applies flag values that need post-processing. 
diff --git a/internal/pkg/controller/namespace_reconciler.go b/internal/pkg/controller/namespace_reconciler.go index ab25fff0c..8cc03d63f 100644 --- a/internal/pkg/controller/namespace_reconciler.go +++ b/internal/pkg/controller/namespace_reconciler.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -124,7 +125,7 @@ func (r *NamespaceReconciler) matchesSelectors(ns *corev1.Namespace) bool { } for _, selector := range r.Config.NamespaceSelectors { - if selector.Matches(nsLabelsSet(nsLabels)) { + if selector.Matches(reload.LabelsSet(nsLabels)) { return true } } @@ -132,18 +133,6 @@ func (r *NamespaceReconciler) matchesSelectors(ns *corev1.Namespace) bool { return false } -// nsLabelsSet implements labels.Labels interface for a map. -type nsLabelsSet map[string]string - -func (ls nsLabelsSet) Has(key string) bool { - _, ok := ls[key] - return ok -} - -func (ls nsLabelsSet) Get(key string) string { - return ls[key] -} - // SetupWithManager sets up the controller with the Manager. func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). 
diff --git a/internal/pkg/metrics/prometheus.go b/internal/pkg/metrics/prometheus.go index f78ef03d0..87f9148be 100644 --- a/internal/pkg/metrics/prometheus.go +++ b/internal/pkg/metrics/prometheus.go @@ -48,11 +48,10 @@ func NewCollectors() Collectors { }, ) - //set 0 as default value reloaded.With(prometheus.Labels{"success": "true"}).Add(0) reloaded.With(prometheus.Labels{"success": "false"}).Add(0) - reloaded_by_namespace := prometheus.NewCounterVec( + reloadedByNamespace := prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "reloader", Name: "reload_executed_total_by_namespace", @@ -65,7 +64,7 @@ func NewCollectors() Collectors { ) return Collectors{ Reloaded: reloaded, - ReloadedByNamespace: reloaded_by_namespace, + ReloadedByNamespace: reloadedByNamespace, countByNamespace: os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled", } } diff --git a/internal/pkg/reload/predicate.go b/internal/pkg/reload/predicate.go index f24c60a72..3582dd54d 100644 --- a/internal/pkg/reload/predicate.go +++ b/internal/pkg/reload/predicate.go @@ -102,7 +102,7 @@ func LabelSelectorPredicate(cfg *config.Config) predicate.Predicate { // Check if any selector matches for _, selector := range cfg.ResourceSelectors { - if selector.Matches(labelsSet(labels)) { + if selector.Matches(LabelsSet(labels)) { return true } } @@ -111,15 +111,18 @@ func LabelSelectorPredicate(cfg *config.Config) predicate.Predicate { }) } -// labelsSet implements labels.Labels interface for a map. -type labelsSet map[string]string +// LabelsSet implements the k8s.io/apimachinery/pkg/labels.Labels interface +// for a map[string]string. This allows using label maps with label selectors. +type LabelsSet map[string]string -func (ls labelsSet) Has(key string) bool { +// Has returns whether the provided label key exists in the set. +func (ls LabelsSet) Has(key string) bool { _, ok := ls[key] return ok } -func (ls labelsSet) Get(key string) string { +// Get returns the value for the provided label key. 
+func (ls LabelsSet) Get(key string) string { return ls[key] } From fa60c1d8533e5a38863b4f8cc27d119961435898 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:57 +0100 Subject: [PATCH 16/35] refactor(reload): move resource types and reload decision structs to dedicated files and remove redundant code --- internal/pkg/controller/filter.go | 39 ++++++++++++++ internal/pkg/controller/handler.go | 34 ------------ internal/pkg/metadata/metadata.go | 72 ------------------------- internal/pkg/metadata/publisher.go | 80 ++++++++++++++++++++++++++++ internal/pkg/reload/change.go | 56 +++++++++++++++++++ internal/pkg/reload/decision.go | 30 +++++++++++ internal/pkg/reload/matcher.go | 22 -------- internal/pkg/reload/resource_type.go | 23 ++++++++ internal/pkg/reload/service.go | 77 -------------------------- 9 files changed, 228 insertions(+), 205 deletions(-) create mode 100644 internal/pkg/controller/filter.go create mode 100644 internal/pkg/metadata/publisher.go create mode 100644 internal/pkg/reload/change.go create mode 100644 internal/pkg/reload/decision.go create mode 100644 internal/pkg/reload/resource_type.go diff --git a/internal/pkg/controller/filter.go b/internal/pkg/controller/filter.go new file mode 100644 index 000000000..c3a387b4e --- /dev/null +++ b/internal/pkg/controller/filter.go @@ -0,0 +1,39 @@ +package controller + +import ( + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// BuildEventFilter combines a resource-specific predicate with common filters. 
+func BuildEventFilter(resourcePredicate predicate.Predicate, cfg *config.Config, initialized *bool) predicate.Predicate { + return predicate.And( + resourcePredicate, + reload.NamespaceFilterPredicate(cfg), + reload.LabelSelectorPredicate(cfg), + reload.IgnoreAnnotationPredicate(cfg), + createEventPredicate(cfg, initialized), + ) +} + +func createEventPredicate(cfg *config.Config, initialized *bool) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if !*initialized && !cfg.SyncAfterRestart { + return false + } + return cfg.ReloadOnCreate + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return cfg.ReloadOnDelete + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } +} diff --git a/internal/pkg/controller/handler.go b/internal/pkg/controller/handler.go index caa71fb83..00b7218a3 100644 --- a/internal/pkg/controller/handler.go +++ b/internal/pkg/controller/handler.go @@ -6,7 +6,6 @@ import ( "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/alerting" - "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/events" "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/reload" @@ -14,8 +13,6 @@ import ( "github.com/stakater/Reloader/internal/pkg/workload" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" ) // ReloadHandler handles the common reload workflow. @@ -154,34 +151,3 @@ func (h *ReloadHandler) applyReloads( } } } - -// BuildEventFilter combines a resource-specific predicate with common filters. 
-func BuildEventFilter(resourcePredicate predicate.Predicate, cfg *config.Config, initialized *bool) predicate.Predicate { - return predicate.And( - resourcePredicate, - reload.NamespaceFilterPredicate(cfg), - reload.LabelSelectorPredicate(cfg), - reload.IgnoreAnnotationPredicate(cfg), - createEventPredicate(cfg, initialized), - ) -} - -func createEventPredicate(cfg *config.Config, initialized *bool) predicate.Predicate { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - if !*initialized && !cfg.SyncAfterRestart { - return false - } - return cfg.ReloadOnCreate - }, - UpdateFunc: func(e event.UpdateEvent) bool { - return true - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return cfg.ReloadOnDelete - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} diff --git a/internal/pkg/metadata/metadata.go b/internal/pkg/metadata/metadata.go index f0e22f126..9bfae8d5e 100644 --- a/internal/pkg/metadata/metadata.go +++ b/internal/pkg/metadata/metadata.go @@ -3,19 +3,14 @@ package metadata import ( - "context" "encoding/json" - "fmt" "os" "runtime" "time" - "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -195,73 +190,6 @@ func (m *MetaInfo) ToConfigMap() *corev1.ConfigMap { } } -// Publisher handles creating and updating the metadata ConfigMap. -type Publisher struct { - client client.Client - cfg *config.Config - log logr.Logger -} - -// NewPublisher creates a new Publisher. -func NewPublisher(c client.Client, cfg *config.Config, log logr.Logger) *Publisher { - return &Publisher{ - client: c, - cfg: cfg, - log: log, - } -} - -// Publish creates or updates the metadata ConfigMap. 
-func (p *Publisher) Publish(ctx context.Context) error { - namespace := os.Getenv(EnvReloaderNamespace) - if namespace == "" { - p.log.Info("RELOADER_NAMESPACE is not set, skipping meta info configmap creation") - return nil - } - - metaInfo := NewMetaInfo(p.cfg) - configMap := metaInfo.ToConfigMap() - - existing := &corev1.ConfigMap{} - err := p.client.Get(ctx, client.ObjectKey{ - Name: ConfigMapName, - Namespace: namespace, - }, existing) - - if err != nil { - if !errors.IsNotFound(err) { - return fmt.Errorf("failed to get existing meta info configmap: %w", err) - } - p.log.Info("Creating meta info configmap") - if err := p.client.Create(ctx, configMap, client.FieldOwner(FieldManager)); err != nil { - return fmt.Errorf("failed to create meta info configmap: %w", err) - } - p.log.Info("Meta info configmap created successfully") - return nil - } - - p.log.Info("Meta info configmap already exists, updating it") - existing.Data = configMap.Data - existing.Labels = configMap.Labels - if err := p.client.Update(ctx, existing, client.FieldOwner(FieldManager)); err != nil { - return fmt.Errorf("failed to update meta info configmap: %w", err) - } - p.log.Info("Meta info configmap updated successfully") - return nil -} - -// PublishMetaInfoConfigMap is a convenience function that creates a Publisher and calls Publish. -func PublishMetaInfoConfigMap(ctx context.Context, c client.Client, cfg *config.Config, log logr.Logger) error { - publisher := NewPublisher(c, cfg, log) - return publisher.Publish(ctx) -} - -// CreateOrUpdate creates or updates the metadata ConfigMap using the provided client. 
-func CreateOrUpdate(c client.Client, cfg *config.Config, log logr.Logger) error { - ctx := context.Background() - return PublishMetaInfoConfigMap(ctx, c, cfg, log) -} - func toJSON(data interface{}) string { jsonData, err := json.Marshal(data) if err != nil { diff --git a/internal/pkg/metadata/publisher.go b/internal/pkg/metadata/publisher.go new file mode 100644 index 000000000..78bfa92ab --- /dev/null +++ b/internal/pkg/metadata/publisher.go @@ -0,0 +1,80 @@ +package metadata + +import ( + "context" + "fmt" + "os" + + "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Publisher handles creating and updating the metadata ConfigMap. +type Publisher struct { + client client.Client + cfg *config.Config + log logr.Logger +} + +// NewPublisher creates a new Publisher. +func NewPublisher(c client.Client, cfg *config.Config, log logr.Logger) *Publisher { + return &Publisher{ + client: c, + cfg: cfg, + log: log, + } +} + +// Publish creates or updates the metadata ConfigMap. 
+func (p *Publisher) Publish(ctx context.Context) error { + namespace := os.Getenv(EnvReloaderNamespace) + if namespace == "" { + p.log.Info("RELOADER_NAMESPACE is not set, skipping meta info configmap creation") + return nil + } + + metaInfo := NewMetaInfo(p.cfg) + configMap := metaInfo.ToConfigMap() + + existing := &corev1.ConfigMap{} + err := p.client.Get(ctx, client.ObjectKey{ + Name: ConfigMapName, + Namespace: namespace, + }, existing) + + if err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to get existing meta info configmap: %w", err) + } + p.log.Info("Creating meta info configmap") + if err := p.client.Create(ctx, configMap, client.FieldOwner(FieldManager)); err != nil { + return fmt.Errorf("failed to create meta info configmap: %w", err) + } + p.log.Info("Meta info configmap created successfully") + return nil + } + + p.log.Info("Meta info configmap already exists, updating it") + existing.Data = configMap.Data + existing.Labels = configMap.Labels + if err := p.client.Update(ctx, existing, client.FieldOwner(FieldManager)); err != nil { + return fmt.Errorf("failed to update meta info configmap: %w", err) + } + p.log.Info("Meta info configmap updated successfully") + return nil +} + +// PublishMetaInfoConfigMap is a convenience function that creates a Publisher and calls Publish. +func PublishMetaInfoConfigMap(ctx context.Context, c client.Client, cfg *config.Config, log logr.Logger) error { + publisher := NewPublisher(c, cfg, log) + return publisher.Publish(ctx) +} + +// CreateOrUpdate creates or updates the metadata ConfigMap using the provided client. 
+func CreateOrUpdate(c client.Client, cfg *config.Config, log logr.Logger) error { + ctx := context.Background() + return PublishMetaInfoConfigMap(ctx, c, cfg, log) +} diff --git a/internal/pkg/reload/change.go b/internal/pkg/reload/change.go new file mode 100644 index 000000000..b7fa4443d --- /dev/null +++ b/internal/pkg/reload/change.go @@ -0,0 +1,56 @@ +package reload + +import ( + corev1 "k8s.io/api/core/v1" +) + +// EventType represents the type of change event. +type EventType string + +const ( + // EventTypeCreate indicates a resource was created. + EventTypeCreate EventType = "create" + // EventTypeUpdate indicates a resource was updated. + EventTypeUpdate EventType = "update" + // EventTypeDelete indicates a resource was deleted. + EventTypeDelete EventType = "delete" +) + +// ResourceChange represents a change event for a ConfigMap or Secret. +type ResourceChange interface { + IsNil() bool + GetEventType() EventType + GetName() string + GetNamespace() string + GetAnnotations() map[string]string + GetResourceType() ResourceType + ComputeHash(hasher *Hasher) string +} + +// ConfigMapChange represents a change event for a ConfigMap. +type ConfigMapChange struct { + ConfigMap *corev1.ConfigMap + EventType EventType +} + +func (c ConfigMapChange) IsNil() bool { return c.ConfigMap == nil } +func (c ConfigMapChange) GetEventType() EventType { return c.EventType } +func (c ConfigMapChange) GetName() string { return c.ConfigMap.Name } +func (c ConfigMapChange) GetNamespace() string { return c.ConfigMap.Namespace } +func (c ConfigMapChange) GetAnnotations() map[string]string { return c.ConfigMap.Annotations } +func (c ConfigMapChange) GetResourceType() ResourceType { return ResourceTypeConfigMap } +func (c ConfigMapChange) ComputeHash(h *Hasher) string { return h.HashConfigMap(c.ConfigMap) } + +// SecretChange represents a change event for a Secret. 
+type SecretChange struct { + Secret *corev1.Secret + EventType EventType +} + +func (c SecretChange) IsNil() bool { return c.Secret == nil } +func (c SecretChange) GetEventType() EventType { return c.EventType } +func (c SecretChange) GetName() string { return c.Secret.Name } +func (c SecretChange) GetNamespace() string { return c.Secret.Namespace } +func (c SecretChange) GetAnnotations() map[string]string { return c.Secret.Annotations } +func (c SecretChange) GetResourceType() ResourceType { return ResourceTypeSecret } +func (c SecretChange) ComputeHash(h *Hasher) string { return h.HashSecret(c.Secret) } diff --git a/internal/pkg/reload/decision.go b/internal/pkg/reload/decision.go new file mode 100644 index 000000000..6002b3b26 --- /dev/null +++ b/internal/pkg/reload/decision.go @@ -0,0 +1,30 @@ +package reload + +import ( + "github.com/stakater/Reloader/internal/pkg/workload" +) + +// ReloadDecision contains the result of evaluating whether to reload a workload. +type ReloadDecision struct { + // Workload is the workload accessor. + Workload workload.WorkloadAccessor + // ShouldReload indicates whether the workload should be reloaded. + ShouldReload bool + // AutoReload indicates if this is an auto-reload. + AutoReload bool + // Reason provides a human-readable explanation. + Reason string + // Hash is the computed hash of the resource content. + Hash string +} + +// FilterDecisions returns only decisions where ShouldReload is true. +func FilterDecisions(decisions []ReloadDecision) []ReloadDecision { + var result []ReloadDecision + for _, d := range decisions { + if d.ShouldReload { + result = append(result, d) + } + } + return result +} diff --git a/internal/pkg/reload/matcher.go b/internal/pkg/reload/matcher.go index d7a26fbd8..e817f7f59 100644 --- a/internal/pkg/reload/matcher.go +++ b/internal/pkg/reload/matcher.go @@ -7,28 +7,6 @@ import ( "github.com/stakater/Reloader/internal/pkg/config" ) -// ResourceType represents the type of Kubernetes resource. 
-type ResourceType string - -const ( - // ResourceTypeConfigMap represents a ConfigMap resource. - ResourceTypeConfigMap ResourceType = "configmap" - // ResourceTypeSecret represents a Secret resource. - ResourceTypeSecret ResourceType = "secret" -) - -// Kind returns the capitalized Kubernetes Kind (e.g., "ConfigMap", "Secret"). -func (r ResourceType) Kind() string { - switch r { - case ResourceTypeConfigMap: - return "ConfigMap" - case ResourceTypeSecret: - return "Secret" - default: - return string(r) - } -} - // MatchResult contains the result of checking if a workload should be reloaded. type MatchResult struct { ShouldReload bool diff --git a/internal/pkg/reload/resource_type.go b/internal/pkg/reload/resource_type.go new file mode 100644 index 000000000..0404e815f --- /dev/null +++ b/internal/pkg/reload/resource_type.go @@ -0,0 +1,23 @@ +package reload + +// ResourceType represents the type of Kubernetes resource. +type ResourceType string + +const ( + // ResourceTypeConfigMap represents a ConfigMap resource. + ResourceTypeConfigMap ResourceType = "configmap" + // ResourceTypeSecret represents a Secret resource. + ResourceTypeSecret ResourceType = "secret" +) + +// Kind returns the capitalized Kubernetes Kind (e.g., "ConfigMap", "Secret"). +func (r ResourceType) Kind() string { + switch r { + case ResourceTypeConfigMap: + return "ConfigMap" + case ResourceTypeSecret: + return "Secret" + default: + return string(r) + } +} diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go index acec2d593..964608973 100644 --- a/internal/pkg/reload/service.go +++ b/internal/pkg/reload/service.go @@ -28,82 +28,6 @@ func NewService(cfg *config.Config) *Service { } } -// ResourceChange represents a change event for a ConfigMap or Secret. 
-type ResourceChange interface { - IsNil() bool - GetEventType() EventType - GetName() string - GetNamespace() string - GetAnnotations() map[string]string - GetResourceType() ResourceType - ComputeHash(hasher *Hasher) string -} - -// ConfigMapChange represents a change event for a ConfigMap. -type ConfigMapChange struct { - ConfigMap *corev1.ConfigMap - EventType EventType -} - -func (c ConfigMapChange) IsNil() bool { return c.ConfigMap == nil } -func (c ConfigMapChange) GetEventType() EventType { return c.EventType } -func (c ConfigMapChange) GetName() string { return c.ConfigMap.Name } -func (c ConfigMapChange) GetNamespace() string { return c.ConfigMap.Namespace } -func (c ConfigMapChange) GetAnnotations() map[string]string { return c.ConfigMap.Annotations } -func (c ConfigMapChange) GetResourceType() ResourceType { return ResourceTypeConfigMap } -func (c ConfigMapChange) ComputeHash(h *Hasher) string { return h.HashConfigMap(c.ConfigMap) } - -// SecretChange represents a change event for a Secret. -type SecretChange struct { - Secret *corev1.Secret - EventType EventType -} - -func (c SecretChange) IsNil() bool { return c.Secret == nil } -func (c SecretChange) GetEventType() EventType { return c.EventType } -func (c SecretChange) GetName() string { return c.Secret.Name } -func (c SecretChange) GetNamespace() string { return c.Secret.Namespace } -func (c SecretChange) GetAnnotations() map[string]string { return c.Secret.Annotations } -func (c SecretChange) GetResourceType() ResourceType { return ResourceTypeSecret } -func (c SecretChange) ComputeHash(h *Hasher) string { return h.HashSecret(c.Secret) } - -// EventType represents the type of change event. -type EventType string - -const ( - // EventTypeCreate indicates a resource was created. - EventTypeCreate EventType = "create" - // EventTypeUpdate indicates a resource was updated. - EventTypeUpdate EventType = "update" - // EventTypeDelete indicates a resource was deleted. 
- EventTypeDelete EventType = "delete" -) - -// ReloadDecision contains the result of evaluating whether to reload a workload. -type ReloadDecision struct { - // Workload is the workload accessor. - Workload workload.WorkloadAccessor - // ShouldReload indicates whether the workload should be reloaded. - ShouldReload bool - // AutoReload indicates if this is an auto-reload. - AutoReload bool - // Reason provides a human-readable explanation. - Reason string - // Hash is the computed hash of the resource content. - Hash string -} - -// FilterDecisions returns only decisions where ShouldReload is true. -func FilterDecisions(decisions []ReloadDecision) []ReloadDecision { - var result []ReloadDecision - for _, d := range decisions { - if d.ShouldReload { - result = append(result, d) - } - } - return result -} - // Process evaluates all workloads to determine which should be reloaded. func (s *Service) Process(change ResourceChange, workloads []workload.WorkloadAccessor) []ReloadDecision { if change.IsNil() { @@ -129,7 +53,6 @@ func (s *Service) Process(change ResourceChange, workloads []workload.WorkloadAc ) } - func (s *Service) processResource( resourceName string, resourceNamespace string, From 841f6f3868d2ebf2680ef329ec85857385babfd5 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:57 +0100 Subject: [PATCH 17/35] feat: Improve test coverage of important packages --- internal/pkg/controller/filter_test.go | 197 +++++ internal/pkg/controller/retry_test.go | 131 +++ internal/pkg/reload/decision_test.go | 113 +++ internal/pkg/reload/predicate_test.go | 330 ++++++++ internal/pkg/reload/resource_type_test.go | 36 + internal/pkg/reload/service_test.go | 736 +++++++++++++++++ internal/pkg/workload/registry_test.go | 250 ++++++ internal/pkg/workload/workload_test.go | 953 +++++++++++++++++++++- 8 files changed, 2724 insertions(+), 22 deletions(-) create mode 100644 
internal/pkg/controller/filter_test.go create mode 100644 internal/pkg/controller/retry_test.go create mode 100644 internal/pkg/reload/decision_test.go create mode 100644 internal/pkg/reload/resource_type_test.go create mode 100644 internal/pkg/workload/registry_test.go diff --git a/internal/pkg/controller/filter_test.go b/internal/pkg/controller/filter_test.go new file mode 100644 index 000000000..267c2b244 --- /dev/null +++ b/internal/pkg/controller/filter_test.go @@ -0,0 +1,197 @@ +package controller + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/config" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func TestCreateEventPredicate_CreateEvent(t *testing.T) { + tests := []struct { + name string + reloadOnCreate bool + syncAfterRestart bool + initialized bool + expectedResult bool + }{ + { + name: "reload on create enabled, initialized", + reloadOnCreate: true, + syncAfterRestart: false, + initialized: true, + expectedResult: true, + }, + { + name: "reload on create disabled, initialized", + reloadOnCreate: false, + syncAfterRestart: false, + initialized: true, + expectedResult: false, + }, + { + name: "not initialized, sync after restart enabled", + reloadOnCreate: true, + syncAfterRestart: true, + initialized: false, + expectedResult: true, + }, + { + name: "not initialized, sync after restart disabled", + reloadOnCreate: true, + syncAfterRestart: false, + initialized: false, + expectedResult: false, + }, + { + name: "not initialized, sync after restart disabled, reload on create disabled", + reloadOnCreate: false, + syncAfterRestart: false, + initialized: false, + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &config.Config{ + ReloadOnCreate: tt.reloadOnCreate, + SyncAfterRestart: tt.syncAfterRestart, + } + initialized := tt.initialized + + pred := createEventPredicate(cfg, &initialized) + + cm 
:= &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.CreateEvent{Object: cm} + result := pred.Create(e) + + if result != tt.expectedResult { + t.Errorf("CreateFunc() = %v, want %v", result, tt.expectedResult) + } + }) + } +} + +func TestCreateEventPredicate_UpdateEvent(t *testing.T) { + cfg := &config.Config{} + initialized := true + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} + result := pred.Update(e) + + // Update events should always return true + if !result { + t.Error("UpdateFunc() should always return true") + } +} + +func TestCreateEventPredicate_DeleteEvent(t *testing.T) { + tests := []struct { + name string + reloadOnDelete bool + expectedResult bool + }{ + { + name: "reload on delete enabled", + reloadOnDelete: true, + expectedResult: true, + }, + { + name: "reload on delete disabled", + reloadOnDelete: false, + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &config.Config{ + ReloadOnDelete: tt.reloadOnDelete, + } + initialized := true + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.DeleteEvent{Object: cm} + result := pred.Delete(e) + + if result != tt.expectedResult { + t.Errorf("DeleteFunc() = %v, want %v", result, tt.expectedResult) + } + }) + } +} + +func TestCreateEventPredicate_GenericEvent(t *testing.T) { + cfg := &config.Config{} + initialized := true + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.GenericEvent{Object: cm} + result := pred.Generic(e) + + // Generic events should always return false + if result { + 
t.Error("GenericFunc() should always return false") + } +} + +func TestBuildEventFilter(t *testing.T) { + cfg := &config.Config{ + ReloadOnCreate: true, + ReloadOnDelete: true, + } + initialized := true + + // Create a simple always-true predicate as the resource predicate + resourcePred := &alwaysTruePredicate{} + + filter := BuildEventFilter(resourcePred, cfg, &initialized) + + // The filter should be created without error + if filter == nil { + t.Fatal("BuildEventFilter() should return a non-nil predicate") + } + + // Test update event passes (since resourcePred returns true and update always returns true) + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} + result := filter.Update(e) + + // Since namespace filter is empty (all namespaces allowed), this should pass + if !result { + t.Error("UpdateFunc() should return true when all predicates pass") + } +} + +// alwaysTruePredicate is a helper predicate for testing +type alwaysTruePredicate struct{} + +func (p *alwaysTruePredicate) Create(_ event.CreateEvent) bool { return true } +func (p *alwaysTruePredicate) Delete(_ event.DeleteEvent) bool { return true } +func (p *alwaysTruePredicate) Update(_ event.UpdateEvent) bool { return true } +func (p *alwaysTruePredicate) Generic(_ event.GenericEvent) bool { return true } diff --git a/internal/pkg/controller/retry_test.go b/internal/pkg/controller/retry_test.go new file mode 100644 index 000000000..a3e9fc2fa --- /dev/null +++ b/internal/pkg/controller/retry_test.go @@ -0,0 +1,131 @@ +package controller + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestUpdateWorkloadWithRetry_SwitchCases(t *testing.T) { + // Test that the switch statement correctly identifies 
workload types + // Note: Full integration tests require a fake k8s client, so we just test type detection + + tests := []struct { + name string + workload workload.WorkloadAccessor + expectedKind workload.Kind + }{ + { + name: "deployment workload", + workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + }), + expectedKind: workload.KindDeployment, + }, + { + name: "daemonset workload", + workload: workload.NewDaemonSetWorkload(&appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + }), + expectedKind: workload.KindDaemonSet, + }, + { + name: "statefulset workload", + workload: workload.NewStatefulSetWorkload(&appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + }), + expectedKind: workload.KindStatefulSet, + }, + { + name: "job workload", + workload: workload.NewJobWorkload(&batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + }), + expectedKind: workload.KindJob, + }, + { + name: "cronjob workload", + workload: workload.NewCronJobWorkload(&batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + }), + expectedKind: workload.KindCronJob, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Verify the workload kind is correctly identified + if tt.workload.Kind() != tt.expectedKind { + t.Errorf("workload.Kind() = %v, want %v", tt.workload.Kind(), tt.expectedKind) + } + }) + } +} + +func TestJobWorkloadTypeCast(t *testing.T) { + // Test that JobWorkload type cast works correctly + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: "default"}, + } + jobWl := workload.NewJobWorkload(job) + + if jobWl.GetName() != "test-job" { + t.Errorf("JobWorkload.GetName() = %v, want test-job", jobWl.GetName()) + } + + // Test GetJob method + gotJob := jobWl.GetJob() + if gotJob.Name != "test-job" { + 
t.Errorf("JobWorkload.GetJob().Name = %v, want test-job", gotJob.Name) + } + + // Verify it satisfies WorkloadAccessor interface + var _ workload.WorkloadAccessor = jobWl +} + +func TestCronJobWorkloadTypeCast(t *testing.T) { + // Test that CronJobWorkload type cast works correctly + cronJob := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cronjob", Namespace: "default"}, + Spec: batchv1.CronJobSpec{ + Schedule: "*/5 * * * *", + }, + } + cronJobWl := workload.NewCronJobWorkload(cronJob) + + if cronJobWl.GetName() != "test-cronjob" { + t.Errorf("CronJobWorkload.GetName() = %v, want test-cronjob", cronJobWl.GetName()) + } + + // Test GetCronJob method + gotCronJob := cronJobWl.GetCronJob() + if gotCronJob.Name != "test-cronjob" { + t.Errorf("CronJobWorkload.GetCronJob().Name = %v, want test-cronjob", gotCronJob.Name) + } + + // Verify it satisfies WorkloadAccessor interface + var _ workload.WorkloadAccessor = cronJobWl +} + +func TestResourceTypeKind(t *testing.T) { + // Test that ResourceType.Kind() returns correct values + tests := []struct { + resourceType reload.ResourceType + expectedKind string + }{ + {reload.ResourceTypeConfigMap, "ConfigMap"}, + {reload.ResourceTypeSecret, "Secret"}, + } + + for _, tt := range tests { + t.Run(string(tt.resourceType), func(t *testing.T) { + if got := tt.resourceType.Kind(); got != tt.expectedKind { + t.Errorf("ResourceType.Kind() = %v, want %v", got, tt.expectedKind) + } + }) + } +} diff --git a/internal/pkg/reload/decision_test.go b/internal/pkg/reload/decision_test.go new file mode 100644 index 000000000..eb158d1ab --- /dev/null +++ b/internal/pkg/reload/decision_test.go @@ -0,0 +1,113 @@ +package reload + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestFilterDecisions(t *testing.T) { + // Create some mock workloads for testing + wl1 := workload.NewDeploymentWorkload(&appsv1.Deployment{ 
+ ObjectMeta: metav1.ObjectMeta{Name: "deploy1", Namespace: "default"}, + }) + wl2 := workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy2", Namespace: "default"}, + }) + wl3 := workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy3", Namespace: "default"}, + }) + + tests := []struct { + name string + decisions []ReloadDecision + wantCount int + wantNames []string + }{ + { + name: "empty list", + decisions: []ReloadDecision{}, + wantCount: 0, + wantNames: nil, + }, + { + name: "all should reload", + decisions: []ReloadDecision{ + {Workload: wl1, ShouldReload: true, Reason: "test"}, + {Workload: wl2, ShouldReload: true, Reason: "test"}, + }, + wantCount: 2, + wantNames: []string{"deploy1", "deploy2"}, + }, + { + name: "none should reload", + decisions: []ReloadDecision{ + {Workload: wl1, ShouldReload: false, Reason: "test"}, + {Workload: wl2, ShouldReload: false, Reason: "test"}, + }, + wantCount: 0, + wantNames: nil, + }, + { + name: "mixed - some should reload", + decisions: []ReloadDecision{ + {Workload: wl1, ShouldReload: true, Reason: "test"}, + {Workload: wl2, ShouldReload: false, Reason: "test"}, + {Workload: wl3, ShouldReload: true, Reason: "test"}, + }, + wantCount: 2, + wantNames: []string{"deploy1", "deploy3"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := FilterDecisions(tt.decisions) + + if len(result) != tt.wantCount { + t.Errorf("FilterDecisions() returned %d decisions, want %d", len(result), tt.wantCount) + } + + if tt.wantNames != nil { + for i, d := range result { + if d.Workload.GetName() != tt.wantNames[i] { + t.Errorf("FilterDecisions()[%d].Workload.GetName() = %s, want %s", + i, d.Workload.GetName(), tt.wantNames[i]) + } + } + } + }) + } +} + +func TestReloadDecision_Fields(t *testing.T) { + wl := workload.NewDeploymentWorkload(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, 
+ }) + + decision := ReloadDecision{ + Workload: wl, + ShouldReload: true, + AutoReload: true, + Reason: "test reason", + Hash: "abc123", + } + + if decision.Workload.GetName() != "test" { + t.Errorf("ReloadDecision.Workload.GetName() = %v, want test", decision.Workload.GetName()) + } + if !decision.ShouldReload { + t.Error("ReloadDecision.ShouldReload should be true") + } + if !decision.AutoReload { + t.Error("ReloadDecision.AutoReload should be true") + } + if decision.Reason != "test reason" { + t.Errorf("ReloadDecision.Reason = %v, want 'test reason'", decision.Reason) + } + if decision.Hash != "abc123" { + t.Errorf("ReloadDecision.Hash = %v, want 'abc123'", decision.Hash) + } +} diff --git a/internal/pkg/reload/predicate_test.go b/internal/pkg/reload/predicate_test.go index b2cce7017..285d367b7 100644 --- a/internal/pkg/reload/predicate_test.go +++ b/internal/pkg/reload/predicate_test.go @@ -607,3 +607,333 @@ func TestNamespaceFilterPredicateWithCache_NilCache(t *testing.T) { }) } } + +func TestIgnoreAnnotationPredicate_Create(t *testing.T) { + cfg := config.NewDefault() + predicate := IgnoreAnnotationPredicate(cfg) + + tests := []struct { + name string + annotations map[string]string + wantAllow bool + }{ + { + name: "no annotations", + annotations: nil, + wantAllow: true, + }, + { + name: "empty annotations", + annotations: map[string]string{}, + wantAllow: true, + }, + { + name: "other annotations only", + annotations: map[string]string{"other": "value"}, + wantAllow: true, + }, + { + name: "ignore annotation true", + annotations: map[string]string{cfg.Annotations.Ignore: "true"}, + wantAllow: false, + }, + { + name: "ignore annotation false", + annotations: map[string]string{cfg.Annotations.Ignore: "false"}, + wantAllow: true, + }, + { + name: "ignore annotation with other value", + annotations: map[string]string{cfg.Annotations.Ignore: "yes"}, + wantAllow: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cm := 
&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Annotations: tt.annotations, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }) + } +} + +func TestIgnoreAnnotationPredicate_AllEventTypes(t *testing.T) { + cfg := config.NewDefault() + predicate := IgnoreAnnotationPredicate(cfg) + + ignoredCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ignored-cm", + Namespace: "default", + Annotations: map[string]string{cfg.Annotations.Ignore: "true"}, + }, + } + + allowedCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "allowed-cm", + Namespace: "default", + }, + } + + // Test Update + if predicate.Update(event.UpdateEvent{ObjectNew: ignoredCM}) { + t.Error("Update() should block ignored resource") + } + if !predicate.Update(event.UpdateEvent{ObjectNew: allowedCM}) { + t.Error("Update() should allow non-ignored resource") + } + + // Test Delete + if predicate.Delete(event.DeleteEvent{Object: ignoredCM}) { + t.Error("Delete() should block ignored resource") + } + if !predicate.Delete(event.DeleteEvent{Object: allowedCM}) { + t.Error("Delete() should allow non-ignored resource") + } + + // Test Generic + if predicate.Generic(event.GenericEvent{Object: ignoredCM}) { + t.Error("Generic() should block ignored resource") + } + if !predicate.Generic(event.GenericEvent{Object: allowedCM}) { + t.Error("Generic() should allow non-ignored resource") + } +} + +func TestCombinedPredicates(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = []string{"kube-system"} + + nsPredicate := NamespaceFilterPredicate(cfg) + ignorePredicate := IgnoreAnnotationPredicate(cfg) + + combined := CombinedPredicates(nsPredicate, ignorePredicate) + + tests := []struct { + name string + namespace string + annotations map[string]string + wantAllow bool + }{ + { + name: "both predicates pass", + 
namespace: "default", + annotations: nil, + wantAllow: true, + }, + { + name: "namespace predicate fails", + namespace: "kube-system", + annotations: nil, + wantAllow: false, + }, + { + name: "ignore predicate fails", + namespace: "default", + annotations: map[string]string{cfg.Annotations.Ignore: "true"}, + wantAllow: false, + }, + { + name: "both predicates fail", + namespace: "kube-system", + annotations: map[string]string{cfg.Annotations.Ignore: "true"}, + wantAllow: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + Annotations: tt.annotations, + }, + } + + e := event.CreateEvent{Object: cm} + got := combined.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }) + } +} + +func TestConfigMapPredicates_Update(t *testing.T) { + cfg := config.NewDefault() + hasher := NewHasher() + predicate := ConfigMapPredicates(cfg, hasher) + + oldCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string]string{"key": "value1"}, + } + newCMSameContent := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string]string{"key": "value1"}, + } + newCMDifferentContent := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string]string{"key": "value2"}, + } + + // Same content should not trigger update + e := event.UpdateEvent{ObjectOld: oldCM, ObjectNew: newCMSameContent} + if predicate.Update(e) { + t.Error("Update() should return false when content is the same") + } + + // Different content should trigger update + e = event.UpdateEvent{ObjectOld: oldCM, ObjectNew: newCMDifferentContent} + if !predicate.Update(e) { + t.Error("Update() should return true when content changed") + } +} + +func TestConfigMapPredicates_InvalidTypes(t *testing.T) { + 
cfg := config.NewDefault() + hasher := NewHasher() + predicate := ConfigMapPredicates(cfg, hasher) + + // Test with non-ConfigMap types + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + // Old is secret, new is configmap - should return false + e := event.UpdateEvent{ObjectOld: secret, ObjectNew: cm} + if predicate.Update(e) { + t.Error("Update() should return false for mismatched types") + } + + // Both are secrets - should return false + e = event.UpdateEvent{ObjectOld: secret, ObjectNew: secret} + if predicate.Update(e) { + t.Error("Update() should return false for non-ConfigMap types") + } +} + +func TestConfigMapPredicates_CreateDeleteGeneric(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnCreate = true + cfg.ReloadOnDelete = true + hasher := NewHasher() + predicate := ConfigMapPredicates(cfg, hasher) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + // Test Create + if !predicate.Create(event.CreateEvent{Object: cm}) { + t.Error("Create() should return true when ReloadOnCreate is true") + } + + // Test Delete + if !predicate.Delete(event.DeleteEvent{Object: cm}) { + t.Error("Delete() should return true when ReloadOnDelete is true") + } + + // Test Generic (should always return false) + if predicate.Generic(event.GenericEvent{Object: cm}) { + t.Error("Generic() should always return false") + } +} + +func TestSecretPredicates_Update(t *testing.T) { + cfg := config.NewDefault() + hasher := NewHasher() + predicate := SecretPredicates(cfg, hasher) + + oldSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("value1")}, + } + newSecretSameContent := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string][]byte{"key": 
[]byte("value1")}, + } + newSecretDifferentContent := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("value2")}, + } + + // Same content should not trigger update + e := event.UpdateEvent{ObjectOld: oldSecret, ObjectNew: newSecretSameContent} + if predicate.Update(e) { + t.Error("Update() should return false when content is the same") + } + + // Different content should trigger update + e = event.UpdateEvent{ObjectOld: oldSecret, ObjectNew: newSecretDifferentContent} + if !predicate.Update(e) { + t.Error("Update() should return true when content changed") + } +} + +func TestSecretPredicates_InvalidTypes(t *testing.T) { + cfg := config.NewDefault() + hasher := NewHasher() + predicate := SecretPredicates(cfg, hasher) + + // Test with non-Secret types + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + // Old is configmap, new is secret - should return false + e := event.UpdateEvent{ObjectOld: cm, ObjectNew: secret} + if predicate.Update(e) { + t.Error("Update() should return false for mismatched types") + } + + // Both are configmaps - should return false + e = event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} + if predicate.Update(e) { + t.Error("Update() should return false for non-Secret types") + } +} + +func TestLabelsSet(t *testing.T) { + ls := LabelsSet{"app": "test", "env": "prod"} + + // Test Has + if !ls.Has("app") { + t.Error("Has(app) should return true") + } + if ls.Has("nonexistent") { + t.Error("Has(nonexistent) should return false") + } + + // Test Get + if ls.Get("app") != "test" { + t.Errorf("Get(app) = %v, want test", ls.Get("app")) + } + if ls.Get("env") != "prod" { + t.Errorf("Get(env) = %v, want prod", ls.Get("env")) + } + if ls.Get("nonexistent") != "" { + t.Errorf("Get(nonexistent) = %v, want empty string", 
ls.Get("nonexistent")) + } +} diff --git a/internal/pkg/reload/resource_type_test.go b/internal/pkg/reload/resource_type_test.go new file mode 100644 index 000000000..428f29ed7 --- /dev/null +++ b/internal/pkg/reload/resource_type_test.go @@ -0,0 +1,36 @@ +package reload + +import ( + "testing" +) + +func TestResourceType_Kind(t *testing.T) { + tests := []struct { + resourceType ResourceType + want string + }{ + {ResourceTypeConfigMap, "ConfigMap"}, + {ResourceTypeSecret, "Secret"}, + {ResourceType("unknown"), "unknown"}, + {ResourceType("custom"), "custom"}, + } + + for _, tt := range tests { + t.Run(string(tt.resourceType), func(t *testing.T) { + got := tt.resourceType.Kind() + if got != tt.want { + t.Errorf("ResourceType(%q).Kind() = %v, want %v", tt.resourceType, got, tt.want) + } + }) + } +} + +func TestResourceTypeConstants(t *testing.T) { + // Verify the constant values are as expected + if ResourceTypeConfigMap != "configmap" { + t.Errorf("ResourceTypeConfigMap = %v, want configmap", ResourceTypeConfigMap) + } + if ResourceTypeSecret != "secret" { + t.Errorf("ResourceTypeSecret = %v, want secret", ResourceTypeSecret) + } +} diff --git a/internal/pkg/reload/service_test.go b/internal/pkg/reload/service_test.go index 4b260a250..daea21ce1 100644 --- a/internal/pkg/reload/service_test.go +++ b/internal/pkg/reload/service_test.go @@ -587,6 +587,742 @@ func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { } } +func TestService_Hasher(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + hasher := svc.Hasher() + if hasher == nil { + t.Fatal("Expected Hasher to return non-nil hasher") + } + + // Verify it's functional + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Data: map[string]string{"key": "value"}, + } + hash := hasher.HashConfigMap(cm) + if hash == "" { + t.Error("Expected hasher to produce non-empty hash") + } +} + +func TestService_shouldProcessEvent(t *testing.T) { + tests := []struct { + 
name string + reloadOnCreate bool + reloadOnDelete bool + eventType EventType + expected bool + }{ + {"create enabled", true, false, EventTypeCreate, true}, + {"create disabled", false, false, EventTypeCreate, false}, + {"delete enabled", false, true, EventTypeDelete, true}, + {"delete disabled", false, false, EventTypeDelete, false}, + {"update always true", false, false, EventTypeUpdate, true}, + {"unknown event", false, false, EventType("unknown"), false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnCreate = tt.reloadOnCreate + cfg.ReloadOnDelete = tt.reloadOnDelete + svc := NewService(cfg) + + result := svc.shouldProcessEvent(tt.eventType) + if result != tt.expected { + t.Errorf("shouldProcessEvent(%s) = %v, want %v", tt.eventType, result, tt.expected) + } + }) + } +} + +func TestService_findVolumeUsingResource_ConfigMap(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + tests := []struct { + name string + volumes []corev1.Volume + resourceName string + resourceType ResourceType + wantVolume string + }{ + { + name: "direct configmap volume", + volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + }, + resourceName: "my-cm", + resourceType: ResourceTypeConfigMap, + wantVolume: "config-vol", + }, + { + name: "projected configmap volume", + volumes: []corev1.Volume{ + { + Name: "projected-vol", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: "projected-cm"}, + }, + }, + }, + }, + }, + }, + }, + resourceName: "projected-cm", + resourceType: ResourceTypeConfigMap, + wantVolume: "projected-vol", + }, + { + name: "no match", + volumes: 
[]corev1.Volume{ + { + Name: "other-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "other-cm"}, + }, + }, + }, + }, + resourceName: "my-cm", + resourceType: ResourceTypeConfigMap, + wantVolume: "", + }, + { + name: "empty volumes", + volumes: []corev1.Volume{}, + resourceName: "my-cm", + resourceType: ResourceTypeConfigMap, + wantVolume: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := svc.findVolumeUsingResource(tt.volumes, tt.resourceName, tt.resourceType) + if got != tt.wantVolume { + t.Errorf("findVolumeUsingResource() = %q, want %q", got, tt.wantVolume) + } + }) + } +} + +func TestService_findVolumeUsingResource_Secret(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + tests := []struct { + name string + volumes []corev1.Volume + resourceName string + wantVolume string + }{ + { + name: "direct secret volume", + volumes: []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "my-secret", + }, + }, + }, + }, + resourceName: "my-secret", + wantVolume: "secret-vol", + }, + { + name: "projected secret volume", + volumes: []corev1.Volume{ + { + Name: "projected-vol", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: "projected-secret"}, + }, + }, + }, + }, + }, + }, + }, + resourceName: "projected-secret", + wantVolume: "projected-vol", + }, + { + name: "no match", + volumes: []corev1.Volume{ + { + Name: "other-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "other-secret", + }, + }, + }, + }, + resourceName: "my-secret", + wantVolume: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + 
got := svc.findVolumeUsingResource(tt.volumes, tt.resourceName, ResourceTypeSecret) + if got != tt.wantVolume { + t.Errorf("findVolumeUsingResource() = %q, want %q", got, tt.wantVolume) + } + }) + } +} + +func TestService_findContainerWithVolumeMount(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + tests := []struct { + name string + containers []corev1.Container + volumeName string + wantName string + shouldMatch bool + }{ + { + name: "container with matching volume mount", + containers: []corev1.Container{ + { + Name: "container1", + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-vol", MountPath: "/config"}, + }, + }, + }, + volumeName: "config-vol", + wantName: "container1", + shouldMatch: true, + }, + { + name: "second container with matching mount", + containers: []corev1.Container{ + { + Name: "container1", + VolumeMounts: []corev1.VolumeMount{}, + }, + { + Name: "container2", + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-vol", MountPath: "/config"}, + }, + }, + }, + volumeName: "config-vol", + wantName: "container2", + shouldMatch: true, + }, + { + name: "no matching mount", + containers: []corev1.Container{ + { + Name: "container1", + VolumeMounts: []corev1.VolumeMount{ + {Name: "other-vol", MountPath: "/other"}, + }, + }, + }, + volumeName: "config-vol", + shouldMatch: false, + }, + { + name: "empty containers", + containers: []corev1.Container{}, + volumeName: "config-vol", + shouldMatch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := svc.findContainerWithVolumeMount(tt.containers, tt.volumeName) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithVolumeMount() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } + } + }) + } +} + +func 
TestService_findContainerWithEnvRef_ConfigMap(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + tests := []struct { + name string + containers []corev1.Container + resourceName string + wantName string + shouldMatch bool + }{ + { + name: "container with ConfigMapKeyRef", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + { + Name: "CONFIG_VALUE", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + Key: "key", + }, + }, + }, + }, + }, + }, + resourceName: "my-cm", + wantName: "app", + shouldMatch: true, + }, + { + name: "container with ConfigMapRef in EnvFrom", + containers: []corev1.Container{ + { + Name: "app", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + }, + }, + resourceName: "my-cm", + wantName: "app", + shouldMatch: true, + }, + { + name: "no matching env ref", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + { + Name: "SIMPLE_VAR", + Value: "value", + }, + }, + }, + }, + resourceName: "my-cm", + shouldMatch: false, + }, + { + name: "env without ValueFrom", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + {Name: "VAR1", Value: "val"}, + }, + }, + }, + resourceName: "my-cm", + shouldMatch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := svc.findContainerWithEnvRef(tt.containers, tt.resourceName, ResourceTypeConfigMap) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithEnvRef() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } + } + }) + } +} + +func 
TestService_findContainerWithEnvRef_Secret(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + tests := []struct { + name string + containers []corev1.Container + resourceName string + wantName string + shouldMatch bool + }{ + { + name: "container with SecretKeyRef", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + { + Name: "SECRET_VALUE", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"}, + Key: "password", + }, + }, + }, + }, + }, + }, + resourceName: "my-secret", + wantName: "app", + shouldMatch: true, + }, + { + name: "container with SecretRef in EnvFrom", + containers: []corev1.Container{ + { + Name: "app", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"}, + }, + }, + }, + }, + }, + resourceName: "my-secret", + wantName: "app", + shouldMatch: true, + }, + { + name: "no matching env ref", + containers: []corev1.Container{ + { + Name: "app", + Env: []corev1.EnvVar{ + { + Name: "SIMPLE_VAR", + Value: "value", + }, + }, + }, + }, + resourceName: "my-secret", + shouldMatch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := svc.findContainerWithEnvRef(tt.containers, tt.resourceName, ResourceTypeSecret) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithEnvRef() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } + } + }) + } +} + +func TestService_findTargetContainer_AutoReload(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Test with autoReload=true and volume mount + deploy := createTestDeployment("test", "default", nil) + 
deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + } + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "app", + Image: "nginx", + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-vol", MountPath: "/config"}, + }, + }, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + if container.Name != "app" { + t.Errorf("Expected container 'app', got %q", container.Name) + } +} + +func TestService_findTargetContainer_AutoReload_EnvRef(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Test with autoReload=true and env ref (no volume) + deploy := createTestDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "sidecar", + Image: "busybox", + }, + { + Name: "app", + Image: "nginx", + Env: []corev1.EnvVar{ + { + Name: "CONFIG_VAL", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + Key: "key", + }, + }, + }, + }, + }, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + if container.Name != "app" { + t.Errorf("Expected container 'app', got %q", container.Name) + } +} + +func TestService_findTargetContainer_AutoReload_InitContainer(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Test with autoReload=true where init container uses the volume + deploy := createTestDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Volumes 
= []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + } + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-vol", MountPath: "/config"}, + }, + }, + } + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "app", + Image: "nginx", + }, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + // Should return first main container when init container uses the volume + if container.Name != "app" { + t.Errorf("Expected container 'app', got %q", container.Name) + } +} + +func TestService_findTargetContainer_AutoReload_InitContainerEnvRef(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // Test with autoReload=true where init container has env ref + deploy := createTestDeployment("test", "default", nil) + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + Image: "busybox", + Env: []corev1.EnvVar{ + { + Name: "CONFIG_VAL", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cm"}, + Key: "key", + }, + }, + }, + }, + }, + } + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "app", + Image: "nginx", + }, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + // Should return first main container when init container has the env ref + if container.Name != "app" { + t.Errorf("Expected container 'app', got %q", 
container.Name) + } +} + +func TestService_findTargetContainer_NoContainers(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + deploy := createTestDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{} + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, false) + if container != nil { + t.Error("Expected nil container for empty container list") + } +} + +func TestService_findTargetContainer_NonAutoReload(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + deploy := createTestDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + {Name: "first", Image: "nginx"}, + {Name: "second", Image: "busybox"}, + } + accessor := workload.NewDeploymentWorkload(deploy) + + // Without autoReload, should return first container + container := svc.findTargetContainer(accessor, "my-cm", ResourceTypeConfigMap, false) + if container == nil { + t.Fatal("Expected to find a container") + } + if container.Name != "first" { + t.Errorf("Expected first container, got %q", container.Name) + } +} + +func TestService_findTargetContainer_AutoReload_FallbackToFirst(t *testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + // autoReload=true but no matching volume or env ref - should fallback to first container + deploy := createTestDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + {Name: "first", Image: "nginx"}, + {Name: "second", Image: "busybox"}, + } + accessor := workload.NewDeploymentWorkload(deploy) + + container := svc.findTargetContainer(accessor, "non-existent", ResourceTypeConfigMap, true) + if container == nil { + t.Fatal("Expected to find a container") + } + if container.Name != "first" { + t.Errorf("Expected first container as fallback, got %q", container.Name) + } +} + +func TestService_ProcessNilChange(t 
*testing.T) { + cfg := config.NewDefault() + svc := NewService(cfg) + + deploy := createTestDeployment("test", "default", nil) + workloads := []workload.WorkloadAccessor{workload.NewDeploymentWorkload(deploy)} + + // Test with nil ConfigMap + change := ConfigMapChange{ + ConfigMap: nil, + EventType: EventTypeUpdate, + } + + decisions := svc.Process(change, workloads) + if decisions != nil { + t.Errorf("Expected nil decisions for nil change, got %v", decisions) + } +} + +func TestService_ProcessCreateEventDisabled(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnCreate = false + svc := NewService(cfg) + + deploy := createTestDeployment("test", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }) + workloads := []workload.WorkloadAccessor{workload.NewDeploymentWorkload(deploy)} + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + } + + change := ConfigMapChange{ + ConfigMap: cm, + EventType: EventTypeCreate, + } + + decisions := svc.Process(change, workloads) + if decisions != nil { + t.Errorf("Expected nil decisions when create events disabled, got %v", decisions) + } +} + // Helper function to create a test deployment func createTestDeployment(name, namespace string, annotations map[string]string) *appsv1.Deployment { replicas := int32(1) diff --git a/internal/pkg/workload/registry_test.go b/internal/pkg/workload/registry_test.go new file mode 100644 index 000000000..0bb47d14a --- /dev/null +++ b/internal/pkg/workload/registry_test.go @@ -0,0 +1,250 @@ +package workload + +import ( + "testing" + + argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestNewRegistry_WithoutArgoRollouts(t *testing.T) { + r := NewRegistry(false) + + kinds := r.SupportedKinds() + if 
len(kinds) != 5 { + t.Errorf("SupportedKinds() = %d kinds, want 5", len(kinds)) + } + + // Should not include ArgoRollout + for _, k := range kinds { + if k == KindArgoRollout { + t.Error("SupportedKinds() should not include ArgoRollout when disabled") + } + } + + // ListerFor should return nil for ArgoRollout + if r.ListerFor(KindArgoRollout) != nil { + t.Error("ListerFor(KindArgoRollout) should return nil when disabled") + } +} + +func TestNewRegistry_WithArgoRollouts(t *testing.T) { + r := NewRegistry(true) + + kinds := r.SupportedKinds() + if len(kinds) != 6 { + t.Errorf("SupportedKinds() = %d kinds, want 6", len(kinds)) + } + + // Should include ArgoRollout + found := false + for _, k := range kinds { + if k == KindArgoRollout { + found = true + break + } + } + if !found { + t.Error("SupportedKinds() should include ArgoRollout when enabled") + } + + // ListerFor should return a function for ArgoRollout + if r.ListerFor(KindArgoRollout) == nil { + t.Error("ListerFor(KindArgoRollout) should return a function when enabled") + } +} + +func TestRegistry_ListerFor_AllKinds(t *testing.T) { + r := NewRegistry(true) + + tests := []struct { + kind Kind + wantNil bool + }{ + {KindDeployment, false}, + {KindDaemonSet, false}, + {KindStatefulSet, false}, + {KindJob, false}, + {KindCronJob, false}, + {KindArgoRollout, false}, + {Kind("unknown"), true}, + } + + for _, tt := range tests { + lister := r.ListerFor(tt.kind) + if (lister == nil) != tt.wantNil { + t.Errorf("ListerFor(%s) = nil? %v, want nil? 
%v", tt.kind, lister == nil, tt.wantNil) + } + } +} + +func TestRegistry_FromObject_Deployment(t *testing.T) { + r := NewRegistry(false) + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(deploy) + if err != nil { + t.Fatalf("FromObject(Deployment) error = %v", err) + } + if w.Kind() != KindDeployment { + t.Errorf("FromObject(Deployment).Kind() = %v, want %v", w.Kind(), KindDeployment) + } +} + +func TestRegistry_FromObject_DaemonSet(t *testing.T) { + r := NewRegistry(false) + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(ds) + if err != nil { + t.Fatalf("FromObject(DaemonSet) error = %v", err) + } + if w.Kind() != KindDaemonSet { + t.Errorf("FromObject(DaemonSet).Kind() = %v, want %v", w.Kind(), KindDaemonSet) + } +} + +func TestRegistry_FromObject_StatefulSet(t *testing.T) { + r := NewRegistry(false) + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(sts) + if err != nil { + t.Fatalf("FromObject(StatefulSet) error = %v", err) + } + if w.Kind() != KindStatefulSet { + t.Errorf("FromObject(StatefulSet).Kind() = %v, want %v", w.Kind(), KindStatefulSet) + } +} + +func TestRegistry_FromObject_Job(t *testing.T) { + r := NewRegistry(false) + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(job) + if err != nil { + t.Fatalf("FromObject(Job) error = %v", err) + } + if w.Kind() != KindJob { + t.Errorf("FromObject(Job).Kind() = %v, want %v", w.Kind(), KindJob) + } +} + +func TestRegistry_FromObject_CronJob(t *testing.T) { + r := NewRegistry(false) + cj := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(cj) + if err != nil { + t.Fatalf("FromObject(CronJob) error = %v", err) + } + if w.Kind() != 
KindCronJob { + t.Errorf("FromObject(CronJob).Kind() = %v, want %v", w.Kind(), KindCronJob) + } +} + +func TestRegistry_FromObject_Rollout_Enabled(t *testing.T) { + r := NewRegistry(true) + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(rollout) + if err != nil { + t.Fatalf("FromObject(Rollout) error = %v", err) + } + if w.Kind() != KindArgoRollout { + t.Errorf("FromObject(Rollout).Kind() = %v, want %v", w.Kind(), KindArgoRollout) + } +} + +func TestRegistry_FromObject_Rollout_Disabled(t *testing.T) { + r := NewRegistry(false) + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + _, err := r.FromObject(rollout) + if err == nil { + t.Error("FromObject(Rollout) should return error when Argo Rollouts disabled") + } +} + +func TestRegistry_FromObject_UnsupportedType(t *testing.T) { + r := NewRegistry(false) + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + _, err := r.FromObject(cm) + if err == nil { + t.Error("FromObject(ConfigMap) should return error for unsupported type") + } +} + +func TestKindFromString(t *testing.T) { + tests := []struct { + input string + want Kind + wantErr bool + }{ + // Lowercase + {"deployment", KindDeployment, false}, + {"daemonset", KindDaemonSet, false}, + {"statefulset", KindStatefulSet, false}, + {"job", KindJob, false}, + {"cronjob", KindCronJob, false}, + {"rollout", KindArgoRollout, false}, + // Plural forms + {"deployments", KindDeployment, false}, + {"daemonsets", KindDaemonSet, false}, + {"statefulsets", KindStatefulSet, false}, + {"jobs", KindJob, false}, + {"cronjobs", KindCronJob, false}, + {"rollouts", KindArgoRollout, false}, + // Mixed case + {"Deployment", KindDeployment, false}, + {"DAEMONSET", KindDaemonSet, false}, + {"StatefulSet", KindStatefulSet, false}, + // Unknown + {"unknown", "", true}, + {"replicaset", 
"", true}, + {"", "", true}, + } + + for _, tt := range tests { + got, err := KindFromString(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("KindFromString(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr) + continue + } + if got != tt.want { + t.Errorf("KindFromString(%q) = %v, want %v", tt.input, got, tt.want) + } + } +} + +func TestNewLister(t *testing.T) { + r := NewRegistry(false) + l := NewLister(nil, r, nil) + + if l == nil { + t.Fatal("NewLister should not return nil") + } + if l.Registry != r { + t.Error("NewLister should set Registry") + } +} diff --git a/internal/pkg/workload/workload_test.go b/internal/pkg/workload/workload_test.go index b616e0c60..674f7dbc3 100644 --- a/internal/pkg/workload/workload_test.go +++ b/internal/pkg/workload/workload_test.go @@ -5,6 +5,7 @@ import ( argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -597,6 +598,9 @@ func TestDaemonSetWorkload_BasicGetters(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-ds", Namespace: "test-ns", + Annotations: map[string]string{ + "key": "value", + }, }, } @@ -608,6 +612,128 @@ func TestDaemonSetWorkload_BasicGetters(t *testing.T) { if w.GetName() != "test-ds" { t.Errorf("GetName() = %v, want test-ds", w.GetName()) } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != ds { + t.Error("GetObject() should return the underlying daemonset") + } +} + +func TestDaemonSetWorkload_PodTemplateAnnotations(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: 
map[string]string{ + "existing": "annotation", + }, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestDaemonSetWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{}, + }, + } + + w := NewDaemonSetWorkload(ds) + + annotations := w.GetPodTemplateAnnotations() + if annotations == nil { + t.Error("GetPodTemplateAnnotations should initialize nil map") + } + + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil initial map") + } +} + +func TestDaemonSetWorkload_Containers(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "main", Image: "nginx"}, + }, + InitContainers: []corev1.Container{ + {Name: "init", Image: "busybox"}, + }, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + containers := w.GetContainers() + if len(containers) != 1 || containers[0].Name != "main" { + t.Errorf("GetContainers() = %v, want [main]", containers) + } + + initContainers := w.GetInitContainers() + if len(initContainers) != 1 || initContainers[0].Name != "init" { + t.Errorf("GetInitContainers() = %v, want [init]", initContainers) + } + + newContainers := []corev1.Container{{Name: "new-main", Image: "alpine"}} + w.SetContainers(newContainers) + if w.GetContainers()[0].Name != 
"new-main" { + t.Error("SetContainers should update containers") + } + + newInitContainers := []corev1.Container{{Name: "new-init", Image: "alpine"}} + w.SetInitContainers(newInitContainers) + if w.GetInitContainers()[0].Name != "new-init" { + t.Error("SetInitContainers should update init containers") + } +} + +func TestDaemonSetWorkload_Volumes(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, + }, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + volumes := w.GetVolumes() + if len(volumes) != 2 { + t.Errorf("GetVolumes() length = %d, want 2", len(volumes)) + } } func TestDaemonSetWorkload_UsesConfigMap(t *testing.T) { @@ -638,6 +764,157 @@ func TestDaemonSetWorkload_UsesConfigMap(t *testing.T) { if !w.UsesConfigMap("ds-config") { t.Error("DaemonSet UsesConfigMap should return true for ConfigMap volume") } + if w.UsesConfigMap("other-config") { + t.Error("UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestDaemonSetWorkload_UsesConfigMap_EnvFrom(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "ds-env-config", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + if !w.UsesConfigMap("ds-env-config") { + t.Error("DaemonSet UsesConfigMap should return true for envFrom ConfigMap") + } +} + +func TestDaemonSetWorkload_UsesSecret(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DaemonSetSpec{ + Template: 
corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "ds-secret", + }, + }, + }, + }, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + if !w.UsesSecret("ds-secret") { + t.Error("DaemonSet UsesSecret should return true for Secret volume") + } + if w.UsesSecret("other-secret") { + t.Error("UsesSecret should return false for non-existent Secret") + } +} + +func TestDaemonSetWorkload_GetEnvFromSources(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}, + }, + }, + }, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + sources := w.GetEnvFromSources() + if len(sources) != 2 { + t.Errorf("GetEnvFromSources() returned %d sources, want 2", len(sources)) + } +} + +func TestDaemonSetWorkload_DeepCopy(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "main", Image: "nginx"}, + }, + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + copy := w.DeepCopy() + + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +func 
TestDaemonSetWorkload_GetOwnerReferences(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "test-owner", + }, + }, + }, + } + + w := NewDaemonSetWorkload(ds) + + refs := w.GetOwnerReferences() + if len(refs) != 1 || refs[0].Name != "test-owner" { + t.Errorf("GetOwnerReferences() = %v, want owner ref to test-owner", refs) + } } // StatefulSet tests @@ -646,35 +923,344 @@ func TestStatefulSetWorkload_BasicGetters(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-sts", Namespace: "test-ns", + Annotations: map[string]string{ + "key": "value", + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + if w.Kind() != KindStatefulSet { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindStatefulSet) + } + if w.GetName() != "test-sts" { + t.Errorf("GetName() = %v, want test-sts", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != sts { + t.Error("GetObject() should return the underlying statefulset") + } +} + +func TestStatefulSetWorkload_PodTemplateAnnotations(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "existing": "annotation", + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + 
t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestStatefulSetWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{}, + }, + } + + w := NewStatefulSetWorkload(sts) + + annotations := w.GetPodTemplateAnnotations() + if annotations == nil { + t.Error("GetPodTemplateAnnotations should initialize nil map") + } + + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil initial map") + } +} + +func TestStatefulSetWorkload_Containers(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "main", Image: "nginx"}, + }, + InitContainers: []corev1.Container{ + {Name: "init", Image: "busybox"}, + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + containers := w.GetContainers() + if len(containers) != 1 || containers[0].Name != "main" { + t.Errorf("GetContainers() = %v, want [main]", containers) + } + + initContainers := w.GetInitContainers() + if len(initContainers) != 1 || initContainers[0].Name != "init" { + t.Errorf("GetInitContainers() = %v, want [init]", initContainers) + } + + newContainers := []corev1.Container{{Name: "new-main", Image: "alpine"}} + w.SetContainers(newContainers) + if w.GetContainers()[0].Name != "new-main" { + t.Error("SetContainers should update containers") + } + + newInitContainers := []corev1.Container{{Name: "new-init", Image: "alpine"}} + w.SetInitContainers(newInitContainers) + if w.GetInitContainers()[0].Name != "new-init" { + t.Error("SetInitContainers should update init containers") + } +} + +func TestStatefulSetWorkload_Volumes(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: 
metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + volumes := w.GetVolumes() + if len(volumes) != 2 { + t.Errorf("GetVolumes() length = %d, want 2", len(volumes)) + } +} + +func TestStatefulSetWorkload_UsesConfigMap(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "sts-config", + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesConfigMap("sts-config") { + t.Error("StatefulSet UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestStatefulSetWorkload_UsesConfigMap_EnvFrom(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "sts-env-config", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesConfigMap("sts-env-config") { + t.Error("StatefulSet UsesConfigMap should return true for envFrom ConfigMap") + } +} + +func TestStatefulSetWorkload_UsesSecret(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + 
Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "sts-secret", + }, + }, + }, + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesSecret("sts-secret") { + t.Error("StatefulSet UsesSecret should return true for Secret volume") + } + if w.UsesSecret("other-secret") { + t.Error("UsesSecret should return false for non-existent Secret") + } +} + +func TestStatefulSetWorkload_UsesSecret_EnvFrom(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "sts-env-secret", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + if !w.UsesSecret("sts-env-secret") { + t.Error("StatefulSet UsesSecret should return true for envFrom Secret") + } +} + +func TestStatefulSetWorkload_GetEnvFromSources(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}, + }, + }, + }, + }, + }, + }, + } + + w := NewStatefulSetWorkload(sts) + + sources := w.GetEnvFromSources() + if len(sources) != 2 { + t.Errorf("GetEnvFromSources() returned %d 
sources, want 2", len(sources)) + } +} + +func TestStatefulSetWorkload_DeepCopy(t *testing.T) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "main", Image: "nginx"}, + }, + }, + }, }, } w := NewStatefulSetWorkload(sts) + copy := w.DeepCopy() - if w.Kind() != KindStatefulSet { - t.Errorf("Kind() = %v, want %v", w.Kind(), KindStatefulSet) - } - if w.GetName() != "test-sts" { - t.Errorf("GetName() = %v, want test-sts", w.GetName()) + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") } } -func TestStatefulSetWorkload_UsesSecret(t *testing.T) { +func TestStatefulSetWorkload_GetOwnerReferences(t *testing.T) { sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "secret-vol", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "sts-secret", - }, - }, - }, - }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "StatefulSet", + Name: "test-owner", }, }, }, @@ -682,8 +1268,9 @@ func TestStatefulSetWorkload_UsesSecret(t *testing.T) { w := NewStatefulSetWorkload(sts) - if !w.UsesSecret("sts-secret") { - t.Error("StatefulSet UsesSecret should return true for Secret volume") + refs := w.GetOwnerReferences() + if len(refs) != 1 || refs[0].Name != "test-owner" { + t.Errorf("GetOwnerReferences() = %v, want owner ref to test-owner", refs) } } @@ -915,3 +1502,325 @@ func TestToRolloutStrategy(t *testing.T) { } } } + +// Job tests +func TestJobWorkload_BasicGetters(t 
*testing.T) { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: "test-ns", + Annotations: map[string]string{ + "key": "value", + }, + }, + } + + w := NewJobWorkload(job) + + if w.Kind() != KindJob { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindJob) + } + if w.GetName() != "test-job" { + t.Errorf("GetName() = %v, want test-job", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != job { + t.Error("GetObject() should return the underlying job") + } +} + +func TestJobWorkload_PodTemplateAnnotations(t *testing.T) { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "existing": "annotation", + }, + }, + }, + }, + } + + w := NewJobWorkload(job) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestJobWorkload_UsesConfigMap(t *testing.T) { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "job-config", + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewJobWorkload(job) + + if !w.UsesConfigMap("job-config") { + t.Error("Job 
UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("Job UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestJobWorkload_UsesSecret(t *testing.T) { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "job-secret", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewJobWorkload(job) + + if !w.UsesSecret("job-secret") { + t.Error("Job UsesSecret should return true for Secret envFrom") + } +} + +func TestJobWorkload_DeepCopy(t *testing.T) { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "original": "value", + }, + }, + }, + }, + } + + w := NewJobWorkload(job) + copy := w.DeepCopy() + + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +// CronJob tests +func TestCronJobWorkload_BasicGetters(t *testing.T) { + cj := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cronjob", + Namespace: "test-ns", + Annotations: map[string]string{ + "key": "value", + }, + }, + } + + w := NewCronJobWorkload(cj) + + if w.Kind() != KindCronJob { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindCronJob) + } + if w.GetName() != "test-cronjob" { + t.Errorf("GetName() = %v, want test-cronjob", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + 
t.Errorf("GetAnnotations()[key] = %v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != cj { + t.Error("GetObject() should return the underlying cronjob") + } +} + +func TestCronJobWorkload_PodTemplateAnnotations(t *testing.T) { + cj := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: batchv1.CronJobSpec{ + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "existing": "annotation", + }, + }, + }, + }, + }, + }, + } + + w := NewCronJobWorkload(cj) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestCronJobWorkload_UsesConfigMap(t *testing.T) { + cj := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: batchv1.CronJobSpec{ + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "cronjob-config", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewCronJobWorkload(cj) + + if !w.UsesConfigMap("cronjob-config") { + t.Error("CronJob UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("CronJob UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestCronJobWorkload_UsesSecret(t *testing.T) { + cj := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: batchv1.CronJobSpec{ + 
JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Env: []corev1.EnvVar{ + { + Name: "SECRET_VALUE", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "cronjob-secret", + }, + Key: "key", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + w := NewCronJobWorkload(cj) + + if !w.UsesSecret("cronjob-secret") { + t.Error("CronJob UsesSecret should return true for Secret envVar") + } +} + +func TestCronJobWorkload_DeepCopy(t *testing.T) { + cj := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: batchv1.CronJobSpec{ + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "original": "value", + }, + }, + }, + }, + }, + }, + } + + w := NewCronJobWorkload(cj) + copy := w.DeepCopy() + + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +// Test that Job and CronJob implement the interface +func TestJobCronJobWorkloadInterface(t *testing.T) { + var _ WorkloadAccessor = (*JobWorkload)(nil) + var _ WorkloadAccessor = (*CronJobWorkload)(nil) +} From 0c40064872efc8c8440c8eeb0cdb7095785617cb Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:57 +0100 Subject: [PATCH 18/35] build: update build configuration to use internal metadata package and improve ldflags injection, fix defer resp.Body.Close() usage, replace os.Setenv with t.Setenv in tests, correct error message casing, and adjust Dockerfile and Makefile for cmd/reloader structure --- .goreleaser.yml | 9 ++++++- Dockerfile | 12 
+++++----- Dockerfile.ubi | 1 + Makefile | 15 ++++++++---- internal/pkg/alerting/http.go | 2 +- internal/pkg/metadata/metadata_test.go | 32 +++++++------------------ internal/pkg/metrics/prometheus_test.go | 11 ++++----- internal/pkg/webhook/webhook.go | 2 +- internal/pkg/webhook/webhook_test.go | 2 +- internal/pkg/workload/registry.go | 2 +- 10 files changed, 43 insertions(+), 45 deletions(-) diff --git a/.goreleaser.yml b/.goreleaser.yml index 08953b788..b49ad2293 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,5 +1,7 @@ builds: -- env: +- main: ./cmd/reloader + binary: reloader + env: - CGO_ENABLED=0 goos: - windows @@ -11,6 +13,11 @@ builds: - arm - arm64 - ppc64le + ldflags: + - -s -w + - -X github.com/stakater/Reloader/internal/pkg/metadata.Version={{.Version}} + - -X github.com/stakater/Reloader/internal/pkg/metadata.Commit={{.Commit}} + - -X github.com/stakater/Reloader/internal/pkg/metadata.BuildDate={{.Date}} archives: - name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" snapshot: diff --git a/Dockerfile b/Dockerfile index 53cc26d8b..0391463c5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,9 +23,8 @@ COPY go.sum go.sum RUN go mod download # Copy the go source -COPY main.go main.go +COPY cmd/ cmd/ COPY internal/ internal/ -COPY pkg/ pkg/ # Build RUN CGO_ENABLED=0 \ @@ -34,10 +33,11 @@ RUN CGO_ENABLED=0 \ GOPROXY=${GOPROXY} \ GOPRIVATE=${GOPRIVATE} \ GO111MODULE=on \ - go build -ldflags="-s -w -X github.com/stakater/Reloader/pkg/common.Version=${VERSION} \ - -X github.com/stakater/Reloader/pkg/common.Commit=${COMMIT} \ - -X github.com/stakater/Reloader/pkg/common.BuildDate=${BUILD_DATE}" \ - -installsuffix 'static' -mod=mod -a -o manager ./ + go build -ldflags="-s -w \ + -X github.com/stakater/Reloader/internal/pkg/metadata.Version=${VERSION} \ + -X github.com/stakater/Reloader/internal/pkg/metadata.Commit=${COMMIT} \ + -X 
github.com/stakater/Reloader/internal/pkg/metadata.BuildDate=${BUILD_DATE}" \ + -installsuffix 'static' -mod=mod -a -o manager ./cmd/reloader # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 435973004..b33a79908 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,6 +1,7 @@ ARG BUILDER_IMAGE ARG BASE_IMAGE +# First stage: Build the binary (using the standard Dockerfile as builder) FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE} AS SRC FROM ${BASE_IMAGE:-registry.access.redhat.com/ubi9/ubi:latest} AS ubi diff --git a/Makefile b/Makefile index 8444e1f76..1f7565375 100644 --- a/Makefile +++ b/Makefile @@ -20,10 +20,17 @@ BUILD= GOCMD = go GOFLAGS ?= $(GOFLAGS:) -LDFLAGS = GOPROXY ?= GOPRIVATE ?= +# Version information for ldflags +GIT_COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") +BUILD_DATE ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ") +LDFLAGS = -s -w \ + -X github.com/stakater/Reloader/internal/pkg/metadata.Version=$(VERSION) \ + -X github.com/stakater/Reloader/internal/pkg/metadata.Commit=$(GIT_COMMIT) \ + -X github.com/stakater/Reloader/internal/pkg/metadata.BuildDate=$(BUILD_DATE) + ## Location to install dependencies to LOCALBIN ?= $(shell pwd)/bin $(LOCALBIN): @@ -97,10 +104,10 @@ install: "$(GOCMD)" mod download run: - go run ./main.go + go run ./cmd/reloader build: - "$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}" + "$(GOCMD)" build ${GOFLAGS} -ldflags '${LDFLAGS}' -o "${BINARY}" ./cmd/reloader lint: golangci-lint ## Run golangci-lint on the codebase $(GOLANGCI_LINT) run ./... @@ -140,7 +147,7 @@ manifest: docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH) test: - "$(GOCMD)" test -timeout 1800s -v ./... + "$(GOCMD)" test -timeout 1800s -v ./cmd/... ./internal/... 
stop: @docker stop "${BINARY}" diff --git a/internal/pkg/alerting/http.go b/internal/pkg/alerting/http.go index 827091e71..ab086e576 100644 --- a/internal/pkg/alerting/http.go +++ b/internal/pkg/alerting/http.go @@ -46,7 +46,7 @@ func (c *httpClient) post(ctx context.Context, url string, body []byte) error { if err != nil { return fmt.Errorf("sending request: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() if resp.StatusCode < 200 || resp.StatusCode >= 300 { body, _ := io.ReadAll(resp.Body) diff --git a/internal/pkg/metadata/metadata_test.go b/internal/pkg/metadata/metadata_test.go index b001da6ad..2ef4f5755 100644 --- a/internal/pkg/metadata/metadata_test.go +++ b/internal/pkg/metadata/metadata_test.go @@ -3,7 +3,6 @@ package metadata import ( "context" "encoding/json" - "os" "testing" "github.com/go-logr/logr" @@ -112,12 +111,8 @@ func TestNewReloaderOptions(t *testing.T) { func TestMetaInfo_ToConfigMap(t *testing.T) { // Set environment variables - os.Setenv(EnvReloaderNamespace, "reloader-ns") - os.Setenv(EnvReloaderDeploymentName, "reloader-deploy") - defer func() { - os.Unsetenv(EnvReloaderNamespace) - os.Unsetenv(EnvReloaderDeploymentName) - }() + t.Setenv(EnvReloaderNamespace, "reloader-ns") + t.Setenv(EnvReloaderDeploymentName, "reloader-deploy") cfg := config.NewDefault() metaInfo := NewMetaInfo(cfg) @@ -164,8 +159,8 @@ func TestMetaInfo_ToConfigMap(t *testing.T) { } func TestPublisher_Publish_NoNamespace(t *testing.T) { - // Ensure RELOADER_NAMESPACE is not set - os.Unsetenv(EnvReloaderNamespace) + // Ensure RELOADER_NAMESPACE is not set (empty value) + t.Setenv(EnvReloaderNamespace, "") scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -182,12 +177,8 @@ func TestPublisher_Publish_NoNamespace(t *testing.T) { func TestPublisher_Publish_CreateNew(t *testing.T) { // Set environment variables - os.Setenv(EnvReloaderNamespace, "test-ns") - os.Setenv(EnvReloaderDeploymentName, "test-deploy") - defer func() { - 
os.Unsetenv(EnvReloaderNamespace) - os.Unsetenv(EnvReloaderDeploymentName) - }() + t.Setenv(EnvReloaderNamespace, "test-ns") + t.Setenv(EnvReloaderDeploymentName, "test-deploy") scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -215,12 +206,8 @@ func TestPublisher_Publish_CreateNew(t *testing.T) { func TestPublisher_Publish_UpdateExisting(t *testing.T) { // Set environment variables - os.Setenv(EnvReloaderNamespace, "test-ns") - os.Setenv(EnvReloaderDeploymentName, "test-deploy") - defer func() { - os.Unsetenv(EnvReloaderNamespace) - os.Unsetenv(EnvReloaderDeploymentName) - }() + t.Setenv(EnvReloaderNamespace, "test-ns") + t.Setenv(EnvReloaderDeploymentName, "test-deploy") scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -273,8 +260,7 @@ func TestPublisher_Publish_UpdateExisting(t *testing.T) { func TestPublishMetaInfoConfigMap(t *testing.T) { // Set environment variables - os.Setenv(EnvReloaderNamespace, "test-ns") - defer os.Unsetenv(EnvReloaderNamespace) + t.Setenv(EnvReloaderNamespace, "test-ns") scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) diff --git a/internal/pkg/metrics/prometheus_test.go b/internal/pkg/metrics/prometheus_test.go index 47b4392d4..b1b851010 100644 --- a/internal/pkg/metrics/prometheus_test.go +++ b/internal/pkg/metrics/prometheus_test.go @@ -1,7 +1,6 @@ package metrics import ( - "os" "testing" "github.com/prometheus/client_golang/prometheus" @@ -97,8 +96,7 @@ func TestRecordReload_MultipleIncrements(t *testing.T) { func TestRecordReload_WithNamespaceTracking(t *testing.T) { // Enable namespace tracking - os.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") - defer os.Unsetenv("METRICS_COUNT_BY_NAMESPACE") + t.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") collectors := NewCollectors() collectors.RecordReload(true, "kube-system") @@ -117,8 +115,8 @@ func TestRecordReload_WithNamespaceTracking(t *testing.T) { } func TestRecordReload_WithoutNamespaceTracking(t *testing.T) { - // Ensure namespace 
tracking is disabled - os.Unsetenv("METRICS_COUNT_BY_NAMESPACE") + // Ensure namespace tracking is disabled (t.Setenv to empty resets it) + t.Setenv("METRICS_COUNT_BY_NAMESPACE", "") collectors := NewCollectors() collectors.RecordReload(true, "kube-system") @@ -139,8 +137,7 @@ func TestNilCollectors_NoPanic(t *testing.T) { } func TestRecordReload_DifferentNamespaces(t *testing.T) { - os.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") - defer os.Unsetenv("METRICS_COUNT_BY_NAMESPACE") + t.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") collectors := NewCollectors() collectors.RecordReload(true, "namespace-a") diff --git a/internal/pkg/webhook/webhook.go b/internal/pkg/webhook/webhook.go index a40c5a5c6..d5b3c4cd7 100644 --- a/internal/pkg/webhook/webhook.go +++ b/internal/pkg/webhook/webhook.go @@ -73,7 +73,7 @@ func (c *Client) Send(ctx context.Context, payload Payload) error { if err != nil { return fmt.Errorf("sending request: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() if resp.StatusCode < 200 || resp.StatusCode >= 300 { return fmt.Errorf("webhook returned status %d", resp.StatusCode) diff --git a/internal/pkg/webhook/webhook_test.go b/internal/pkg/webhook/webhook_test.go index acb7b983d..b88ed246a 100644 --- a/internal/pkg/webhook/webhook_test.go +++ b/internal/pkg/webhook/webhook_test.go @@ -74,7 +74,7 @@ func TestSend_MarshalPayload(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, _ := io.ReadAll(r.Body) - json.Unmarshal(body, &receivedPayload) + _ = json.Unmarshal(body, &receivedPayload) w.WriteHeader(http.StatusOK) })) defer server.Close() diff --git a/internal/pkg/workload/registry.go b/internal/pkg/workload/registry.go index 5e7ad0c1a..920a7eabd 100644 --- a/internal/pkg/workload/registry.go +++ b/internal/pkg/workload/registry.go @@ -73,7 +73,7 @@ func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { return NewCronJobWorkload(o), 
nil case *argorolloutv1alpha1.Rollout: if !r.argoRolloutsEnabled { - return nil, fmt.Errorf("Argo Rollouts support is not enabled") + return nil, fmt.Errorf("argo Rollouts support is not enabled") } return NewRolloutWorkload(o), nil default: From 172517512bf4f5435288f955c81823a3eea31028 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:58 +0100 Subject: [PATCH 19/35] feat: e2e tests and a lot of refactoring for existing tests --- .github/workflows/pull_request.yaml | 5 +- Makefile | 5 +- go.mod | 2 +- internal/pkg/alerting/alerter_test.go | 6 +- internal/pkg/config/config.go | 115 +- internal/pkg/config/config_test.go | 2 - internal/pkg/config/flags.go | 2 +- internal/pkg/config/flags_test.go | 84 +- internal/pkg/config/validation.go | 2 +- internal/pkg/constants/constants.go | 7 - .../controller/configmap_reconciler_test.go | 837 ++------------ internal/pkg/controller/filter_test.go | 148 ++- internal/pkg/controller/manager.go | 32 + .../controller/namespace_reconciler_test.go | 182 +-- internal/pkg/controller/retry_test.go | 324 ++++-- .../pkg/controller/secret_reconciler_test.go | 1021 ++--------------- internal/pkg/controller/test_helpers_test.go | 418 +++++++ internal/pkg/events/recorder_test.go | 39 +- internal/pkg/metadata/metadata.go | 98 +- internal/pkg/metadata/metadata_test.go | 100 +- internal/pkg/metrics/prometheus_test.go | 40 +- internal/pkg/reload/decision_test.go | 77 +- internal/pkg/reload/hasher_test.go | 72 +- internal/pkg/reload/matcher_test.go | 176 ++- internal/pkg/reload/pause.go | 3 - internal/pkg/reload/predicate.go | 111 +- internal/pkg/reload/predicate_test.go | 380 +++--- internal/pkg/reload/resource_type_test.go | 24 +- internal/pkg/reload/service_test.go | 251 ++-- internal/pkg/testutil/rand.go | 21 + internal/pkg/testutil/testutil.go | 465 ++++++++ internal/pkg/workload/registry.go | 22 +- internal/pkg/workload/registry_test.go | 4 +- 
test/e2e/e2e_test.go | 519 +++++++++ 34 files changed, 2719 insertions(+), 2875 deletions(-) delete mode 100644 internal/pkg/constants/constants.go create mode 100644 internal/pkg/controller/test_helpers_test.go create mode 100644 internal/pkg/testutil/rand.go create mode 100644 internal/pkg/testutil/testutil.go create mode 100644 test/e2e/e2e_test.go diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index e4b1c6f2c..3eb0f6c7f 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -106,9 +106,12 @@ jobs: kubectl cluster-info - - name: Test + - name: Unit Tests run: make test + - name: E2E Tests + run: make e2e + - name: Generate Tags id: generate_tag run: | diff --git a/Makefile b/Makefile index 1f7565375..57dd27b34 100644 --- a/Makefile +++ b/Makefile @@ -147,7 +147,10 @@ manifest: docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH) test: - "$(GOCMD)" test -timeout 1800s -v ./cmd/... ./internal/... + "$(GOCMD)" test -timeout 1800s -v -short ./cmd/... ./internal/... + +e2e: + "$(GOCMD)" test -timeout 1800s -v ./test/... 
stop: @docker stop "${BINARY}" diff --git a/go.mod b/go.mod index 206a9d1b2..eb84625cc 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/go-logr/logr v1.4.2 github.com/go-logr/zerologr v1.2.3 github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_model v0.6.2 github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.9 @@ -43,7 +44,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.63.0 // indirect github.com/prometheus/procfs v0.16.0 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/internal/pkg/alerting/alerter_test.go b/internal/pkg/alerting/alerter_test.go index 74cc95e61..c7dd2e8d7 100644 --- a/internal/pkg/alerting/alerter_test.go +++ b/internal/pkg/alerting/alerter_test.go @@ -45,9 +45,9 @@ func testAlertMessage() AlertMessage { func TestNewAlerter(t *testing.T) { tests := []struct { - name string - setup func(*config.Config) - wantType string + name string + setup func(*config.Config) + wantType string }{ { name: "disabled", diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index c833328fa..0bb972d90 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -26,75 +26,75 @@ const ( // Config holds all configuration for Reloader. 
type Config struct { - Annotations AnnotationConfig - AutoReloadAll bool - ReloadStrategy ReloadStrategy - ArgoRolloutsEnabled bool - ArgoRolloutStrategy ArgoRolloutStrategy - ReloadOnCreate bool - ReloadOnDelete bool - SyncAfterRestart bool - EnableHA bool - WebhookURL string - - IgnoredResources []string - IgnoredWorkloads []string - IgnoredNamespaces []string - NamespaceSelectors []labels.Selector - ResourceSelectors []labels.Selector - NamespaceSelectorStrings []string - ResourceSelectorStrings []string - - LogFormat string - LogLevel string - MetricsAddr string - HealthAddr string - EnablePProf bool - PProfAddr string - - Alerting AlertingConfig - LeaderElection LeaderElectionConfig - WatchedNamespace string - SyncPeriod time.Duration + Annotations AnnotationConfig `json:"annotations"` + AutoReloadAll bool `json:"autoReloadAll"` + ReloadStrategy ReloadStrategy `json:"reloadStrategy"` + ArgoRolloutsEnabled bool `json:"argoRolloutsEnabled"` + ArgoRolloutStrategy ArgoRolloutStrategy `json:"argoRolloutStrategy"` + ReloadOnCreate bool `json:"reloadOnCreate"` + ReloadOnDelete bool `json:"reloadOnDelete"` + SyncAfterRestart bool `json:"syncAfterRestart"` + EnableHA bool `json:"enableHA"` + WebhookURL string `json:"webhookUrl,omitempty"` + + IgnoredResources []string `json:"ignoredResources,omitempty"` + IgnoredWorkloads []string `json:"ignoredWorkloads,omitempty"` + IgnoredNamespaces []string `json:"ignoredNamespaces,omitempty"` + NamespaceSelectors []labels.Selector `json:"-"` + ResourceSelectors []labels.Selector `json:"-"` + NamespaceSelectorStrings []string `json:"namespaceSelectors,omitempty"` + ResourceSelectorStrings []string `json:"resourceSelectors,omitempty"` + + LogFormat string `json:"logFormat,omitempty"` + LogLevel string `json:"logLevel"` + MetricsAddr string `json:"metricsAddr"` + HealthAddr string `json:"healthAddr"` + EnablePProf bool `json:"enablePProf"` + PProfAddr string `json:"pprofAddr,omitempty"` + + Alerting AlertingConfig `json:"alerting"` + 
LeaderElection LeaderElectionConfig `json:"leaderElection"` + WatchedNamespace string `json:"watchedNamespace,omitempty"` + SyncPeriod time.Duration `json:"syncPeriod"` } // AnnotationConfig holds customizable annotation keys. type AnnotationConfig struct { - Prefix string - Auto string - ConfigmapAuto string - SecretAuto string - ConfigmapReload string - SecretReload string - ConfigmapExclude string - SecretExclude string - Ignore string - Search string - Match string - RolloutStrategy string - PausePeriod string - PausedAt string - LastReloadedFrom string + Prefix string `json:"prefix"` + Auto string `json:"auto"` + ConfigmapAuto string `json:"configmapAuto"` + SecretAuto string `json:"secretAuto"` + ConfigmapReload string `json:"configmapReload"` + SecretReload string `json:"secretReload"` + ConfigmapExclude string `json:"configmapExclude"` + SecretExclude string `json:"secretExclude"` + Ignore string `json:"ignore"` + Search string `json:"search"` + Match string `json:"match"` + RolloutStrategy string `json:"rolloutStrategy"` + PausePeriod string `json:"pausePeriod"` + PausedAt string `json:"pausedAt"` + LastReloadedFrom string `json:"lastReloadedFrom"` } // AlertingConfig holds configuration for alerting integrations. type AlertingConfig struct { - Enabled bool - WebhookURL string - Sink string - Proxy string - Additional string + Enabled bool `json:"enabled"` + WebhookURL string `json:"webhookUrl,omitempty"` + Sink string `json:"sink,omitempty"` + Proxy string `json:"proxy,omitempty"` + Additional string `json:"additional,omitempty"` } // LeaderElectionConfig holds configuration for leader election. 
type LeaderElectionConfig struct { - LockName string - Namespace string - Identity string - LeaseDuration time.Duration - RenewDeadline time.Duration - RetryPeriod time.Duration - ReleaseOnCancel bool + LockName string `json:"lockName"` + Namespace string `json:"namespace,omitempty"` + Identity string `json:"identity,omitempty"` + LeaseDuration time.Duration `json:"leaseDuration"` + RenewDeadline time.Duration `json:"renewDeadline"` + RetryPeriod time.Duration `json:"retryPeriod"` + ReleaseOnCancel bool `json:"releaseOnCancel"` } // NewDefault creates a Config with default values. @@ -184,4 +184,3 @@ func (c *Config) IsNamespaceIgnored(namespace string) bool { } return false } - diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 17a3e33df..5ec7f7584 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -12,7 +12,6 @@ func TestNewDefault(t *testing.T) { t.Fatal("NewDefault() returned nil") } - // Test default values if cfg.ReloadStrategy != ReloadStrategyEnvVars { t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyEnvVars) } @@ -200,4 +199,3 @@ func TestConfig_IsNamespaceIgnored(t *testing.T) { ) } } - diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index 1de5d2ef8..e30be2ffd 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -208,7 +208,7 @@ func ApplyFlags(cfg *Config) error { cfg.IgnoredWorkloads = splitAndTrim(fv.ignoredWorkloads) cfg.IgnoredNamespaces = splitAndTrim(fv.ignoredNamespaces) - // Store raw selector strings (for backward compatibility) + // Store raw selector strings cfg.NamespaceSelectorStrings = splitAndTrim(fv.namespaceSelectors) cfg.ResourceSelectorStrings = splitAndTrim(fv.resourceSelectors) diff --git a/internal/pkg/config/flags_test.go b/internal/pkg/config/flags_test.go index 4ddcbaeb9..06638d6bb 100644 --- a/internal/pkg/config/flags_test.go +++ b/internal/pkg/config/flags_test.go @@ 
-12,7 +12,6 @@ func TestBindFlags(t *testing.T) { BindFlags(fs, cfg) - // Verify flags are registered expectedFlags := []string{ "auto-reload-all", "reload-strategy", @@ -64,12 +63,10 @@ func TestBindFlags_DefaultValues(t *testing.T) { BindFlags(fs, cfg) - // Parse empty args to use defaults if err := fs.Parse([]string{}); err != nil { t.Fatalf("Parse() error = %v", err) } - // Check default values are preserved if cfg.ReloadStrategy != ReloadStrategyEnvVars { t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyEnvVars) } @@ -146,33 +143,33 @@ func TestApplyFlags_BooleanStrings(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Reset flag values - fv = flagValues{} + t.Run( + tt.name, func(t *testing.T) { + fv = flagValues{} - cfg := NewDefault() - fs := pflag.NewFlagSet("test", pflag.ContinueOnError) - BindFlags(fs, cfg) + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) - if err := fs.Parse(tt.args); err != nil { - t.Fatalf("Parse() error = %v", err) - } + if err := fs.Parse(tt.args); err != nil { + t.Fatalf("Parse() error = %v", err) + } - err := ApplyFlags(cfg) - if (err != nil) != tt.wantErr { - t.Errorf("ApplyFlags() error = %v, wantErr %v", err, tt.wantErr) - return - } + err := ApplyFlags(cfg) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyFlags() error = %v, wantErr %v", err, tt.wantErr) + return + } - if cfg.ArgoRolloutsEnabled != tt.want { - t.Errorf("ArgoRolloutsEnabled = %v, want %v", cfg.ArgoRolloutsEnabled, tt.want) - } - }) + if cfg.ArgoRolloutsEnabled != tt.want { + t.Errorf("ArgoRolloutsEnabled = %v, want %v", cfg.ArgoRolloutsEnabled, tt.want) + } + }, + ) } } func TestApplyFlags_CommaSeparatedLists(t *testing.T) { - // Reset flag values fv = flagValues{} cfg := NewDefault() @@ -193,7 +190,6 @@ func TestApplyFlags_CommaSeparatedLists(t *testing.T) { t.Fatalf("ApplyFlags() error = %v", err) } - // Check ignored resources if 
len(cfg.IgnoredResources) != 2 { t.Errorf("IgnoredResources length = %d, want 2", len(cfg.IgnoredResources)) } @@ -201,7 +197,6 @@ func TestApplyFlags_CommaSeparatedLists(t *testing.T) { t.Errorf("IgnoredResources = %v", cfg.IgnoredResources) } - // Check ignored workloads if len(cfg.IgnoredWorkloads) != 2 { t.Errorf("IgnoredWorkloads length = %d, want 2", len(cfg.IgnoredWorkloads)) } @@ -213,7 +208,6 @@ func TestApplyFlags_CommaSeparatedLists(t *testing.T) { } func TestApplyFlags_Selectors(t *testing.T) { - // Reset flag values fv = flagValues{} cfg := NewDefault() @@ -241,14 +235,12 @@ func TestApplyFlags_Selectors(t *testing.T) { t.Errorf("ResourceSelectors length = %d, want 1", len(cfg.ResourceSelectors)) } - // Check string versions are preserved if len(cfg.NamespaceSelectorStrings) != 2 { t.Errorf("NamespaceSelectorStrings length = %d, want 2", len(cfg.NamespaceSelectorStrings)) } } func TestApplyFlags_InvalidSelector(t *testing.T) { - // Reset flag values fv = flagValues{} cfg := NewDefault() @@ -290,12 +282,14 @@ func TestParseBoolString(t *testing.T) { } for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - got := parseBoolString(tt.input) - if got != tt.want { - t.Errorf("parseBoolString(%q) = %v, want %v", tt.input, got, tt.want) - } - }) + t.Run( + tt.input, func(t *testing.T) { + got := parseBoolString(tt.input) + if got != tt.want { + t.Errorf("parseBoolString(%q) = %v, want %v", tt.input, got, tt.want) + } + }, + ) } } @@ -314,17 +308,19 @@ func TestSplitAndTrim(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := splitAndTrim(tt.input) - if len(got) != len(tt.want) { - t.Errorf("splitAndTrim(%q) length = %d, want %d", tt.input, len(got), len(tt.want)) - return - } - for i := range got { - if got[i] != tt.want[i] { - t.Errorf("splitAndTrim(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i]) + t.Run( + tt.name, func(t *testing.T) { + got := splitAndTrim(tt.input) + if len(got) != len(tt.want) { 
+ t.Errorf("splitAndTrim(%q) length = %d, want %d", tt.input, len(got), len(tt.want)) + return + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("splitAndTrim(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i]) + } } - } - }) + }, + ) } } diff --git a/internal/pkg/config/validation.go b/internal/pkg/config/validation.go index 161102a01..85aa0c770 100644 --- a/internal/pkg/config/validation.go +++ b/internal/pkg/config/validation.go @@ -66,7 +66,7 @@ func (c *Config) Validate() error { default: errs = append( errs, ValidationError{ - Field: "ArgoRolloutStrategy", + Field: "ArgoRolloutStrategy", Message: fmt.Sprintf( "invalid value %q, must be %q or %q", c.ArgoRolloutStrategy, ArgoRolloutStrategyRestart, ArgoRolloutStrategyRollout, ), diff --git a/internal/pkg/constants/constants.go b/internal/pkg/constants/constants.go deleted file mode 100644 index c1dc81a03..000000000 --- a/internal/pkg/constants/constants.go +++ /dev/null @@ -1,7 +0,0 @@ -package constants - -// Environment variable names for pod identity in HA mode. 
-const ( - PodNameEnv string = "POD_NAME" - PodNamespaceEnv string = "POD_NAMESPACE" -) diff --git a/internal/pkg/controller/configmap_reconciler_test.go b/internal/pkg/controller/configmap_reconciler_test.go index cd4e8d35c..ad457d987 100644 --- a/internal/pkg/controller/configmap_reconciler_test.go +++ b/internal/pkg/controller/configmap_reconciler_test.go @@ -1,844 +1,159 @@ package controller_test import ( - "context" "testing" - "github.com/go-logr/logr/testr" - "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/events" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/reload" - "github.com/stakater/Reloader/internal/pkg/webhook" - "github.com/stakater/Reloader/internal/pkg/workload" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func newTestScheme() *runtime.Scheme { - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - _ = batchv1.AddToScheme(scheme) - return scheme -} - -func newTestConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.ConfigMapReconciler { - scheme := newTestScheme() - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithRuntimeObjects(objects...). 
- Build() - - collectors := metrics.NewCollectors() - - return &controller.ConfigMapReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - ReloadService: reload.NewService(cfg), - Registry: workload.NewRegistry(cfg.ArgoRolloutsEnabled), - Collectors: &collectors, - EventRecorder: events.NewRecorder(nil), - WebhookClient: webhook.NewClient("", testr.New(t)), - Alerter: &alerting.NoOpAlerter{}, - } -} - func TestConfigMapReconciler_NotFound(t *testing.T) { cfg := config.NewDefault() - reconciler := newTestConfigMapReconciler(t, cfg) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "nonexistent-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue for NotFound") - } + reconciler := newConfigMapReconciler(t, cfg) + assertReconcileSuccess(t, reconciler, reconcileRequest("nonexistent-cm", "default")) } func TestConfigMapReconciler_NotFound_ReloadOnDelete(t *testing.T) { cfg := config.NewDefault() cfg.ReloadOnDelete = true - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "deleted-cm", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "deleted-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile 
failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + deployment := testDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "deleted-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("deleted-cm", "default")) } func TestConfigMapReconciler_IgnoredNamespace(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredNamespaces = []string{"kube-system"} - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "kube-system", - }, - Data: map[string]string{"key": "value"}, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "kube-system", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue for ignored namespace") - } + cm := testConfigMap("test-cm", "kube-system") + reconciler := newConfigMapReconciler(t, cfg, cm) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "kube-system")) } func TestConfigMapReconciler_NoMatchingWorkloads(t *testing.T) { cfg := config.NewDefault() - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - }, - Data: map[string]string{"key": "value"}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := 
newTestConfigMapReconciler(t, cfg, cm, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMap("test-cm", "default") + deployment := testDeployment("test-deployment", "default", nil) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } func TestConfigMapReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - }, - Data: map[string]string{"key": "value"}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - EnvFrom: []corev1.EnvFromSource{{ - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "test-cm", - }, - }, - }}, - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMap("test-cm", "default") + deployment := 
testDeploymentWithEnvFrom("test-deployment", "default", "test-cm", "") + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } func TestConfigMapReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { cfg := config.NewDefault() - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - }, - Data: map[string]string{"key": "value"}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "test-cm", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMap("test-cm", "default") + deployment := testDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } func TestConfigMapReconciler_WorkloadInDifferentNamespace(t *testing.T) { cfg := config.NewDefault() - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "namespace-a", - }, - Data: map[string]string{"key": "value"}, - 
} - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace-b", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "test-cm", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "namespace-a", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMap("test-cm", "namespace-a") + deployment := testDeployment("test-deployment", "namespace-b", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "namespace-a")) } func TestConfigMapReconciler_IgnoredWorkloadType(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredWorkloads = []string{"deployment"} - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - }, - Data: map[string]string{"key": "value"}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "test-cm", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: 
map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMap("test-cm", "default") + deployment := testDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } func TestConfigMapReconciler_DaemonSet(t *testing.T) { cfg := config.NewDefault() - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - }, - Data: map[string]string{"key": "value"}, - } - - daemonset := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-daemonset", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "test-cm", - }, - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, daemonset) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - 
t.Error("Should not requeue") - } + cm := testConfigMap("test-cm", "default") + daemonset := testDaemonSet("test-daemonset", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, daemonset) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } func TestConfigMapReconciler_StatefulSet(t *testing.T) { cfg := config.NewDefault() - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - }, - Data: map[string]string{"key": "value"}, - } - - statefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-statefulset", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "test-cm", - }, - }, - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, statefulset) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMap("test-cm", "default") + statefulset := testStatefulSet("test-statefulset", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, statefulset) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } func TestConfigMapReconciler_MultipleWorkloads(t *testing.T) { cfg := config.NewDefault() - cm := &corev1.ConfigMap{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "shared-cm", - Namespace: "default", - }, - Data: map[string]string{"key": "value"}, - } - - deployment1 := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "deployment-1", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "shared-cm", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test1"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test1"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - deployment2 := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "deployment-2", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "shared-cm", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test2"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test2"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } + cm := testConfigMap("shared-cm", "default") + deployment1 := testDeployment("deployment-1", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }) + deployment2 := testDeployment("deployment-2", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }) + daemonset := testDaemonSet("daemonset-1", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "shared-cm", + }) - daemonset := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "daemonset-1", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.ConfigmapReload: "shared-cm", - }, - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: 
map[string]string{"app": "daemon"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "daemon"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment1, deployment2, daemonset) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "shared-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + reconciler := newConfigMapReconciler(t, cfg, cm, deployment1, deployment2, daemonset) + assertReconcileSuccess(t, reconciler, reconcileRequest("shared-cm", "default")) } func TestConfigMapReconciler_VolumeMount(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "volume-cm", - Namespace: "default", - }, - Data: map[string]string{"config.yaml": "key: value"}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - VolumeMounts: []corev1.VolumeMount{{ - Name: "config", - MountPath: "/etc/config", - }}, - }}, - Volumes: []corev1.Volume{{ - Name: "config", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "volume-cm", - }, - }, - }, - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) - - 
req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "volume-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMap("volume-cm", "default") + deployment := testDeploymentWithVolume("test-deployment", "default", "volume-cm", "") + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("volume-cm", "default")) } func TestConfigMapReconciler_ProjectedVolume(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "projected-cm", - Namespace: "default", - }, - Data: map[string]string{"config.yaml": "key: value"}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - VolumeMounts: []corev1.VolumeMount{{ - Name: "config", - MountPath: "/etc/config", - }}, - }}, - Volumes: []corev1.Volume{{ - Name: "config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{{ - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "projected-cm", - }, - }, - }}, - }, - }, - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "projected-cm", - Namespace: "default", - }, - } - - result, err := 
reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMap("projected-cm", "default") + deployment := testDeploymentWithProjectedVolume("test-deployment", "default", "projected-cm", "") + reconciler := newConfigMapReconciler(t, cfg, cm, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("projected-cm", "default")) } func TestConfigMapReconciler_SearchAnnotation(t *testing.T) { cfg := config.NewDefault() - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.Match: "true", - }, - }, - Data: map[string]string{"key": "value"}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.Search: "true", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestConfigMapReconciler(t, cfg, cm, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-cm", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + cm := testConfigMapWithAnnotations("test-cm", "default", map[string]string{ + cfg.Annotations.Match: "true", + }) + deployment := testDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.Search: "true", + }) + reconciler := newConfigMapReconciler(t, cfg, cm, 
deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } diff --git a/internal/pkg/controller/filter_test.go b/internal/pkg/controller/filter_test.go index 267c2b244..0d551e643 100644 --- a/internal/pkg/controller/filter_test.go +++ b/internal/pkg/controller/filter_test.go @@ -11,70 +11,72 @@ import ( func TestCreateEventPredicate_CreateEvent(t *testing.T) { tests := []struct { - name string - reloadOnCreate bool - syncAfterRestart bool - initialized bool - expectedResult bool + name string + reloadOnCreate bool + syncAfterRestart bool + initialized bool + expectedResult bool }{ { - name: "reload on create enabled, initialized", - reloadOnCreate: true, - syncAfterRestart: false, - initialized: true, - expectedResult: true, + name: "reload on create enabled, initialized", + reloadOnCreate: true, + syncAfterRestart: false, + initialized: true, + expectedResult: true, }, { - name: "reload on create disabled, initialized", - reloadOnCreate: false, - syncAfterRestart: false, - initialized: true, - expectedResult: false, + name: "reload on create disabled, initialized", + reloadOnCreate: false, + syncAfterRestart: false, + initialized: true, + expectedResult: false, }, { - name: "not initialized, sync after restart enabled", - reloadOnCreate: true, - syncAfterRestart: true, - initialized: false, - expectedResult: true, + name: "not initialized, sync after restart enabled", + reloadOnCreate: true, + syncAfterRestart: true, + initialized: false, + expectedResult: true, }, { - name: "not initialized, sync after restart disabled", - reloadOnCreate: true, - syncAfterRestart: false, - initialized: false, - expectedResult: false, + name: "not initialized, sync after restart disabled", + reloadOnCreate: true, + syncAfterRestart: false, + initialized: false, + expectedResult: false, }, { - name: "not initialized, sync after restart disabled, reload on create disabled", - reloadOnCreate: false, - syncAfterRestart: false, - initialized: false, - 
expectedResult: false, + name: "not initialized, sync after restart disabled, reload on create disabled", + reloadOnCreate: false, + syncAfterRestart: false, + initialized: false, + expectedResult: false, }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := &config.Config{ - ReloadOnCreate: tt.reloadOnCreate, - SyncAfterRestart: tt.syncAfterRestart, - } - initialized := tt.initialized - - pred := createEventPredicate(cfg, &initialized) - - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, - } - - e := event.CreateEvent{Object: cm} - result := pred.Create(e) - - if result != tt.expectedResult { - t.Errorf("CreateFunc() = %v, want %v", result, tt.expectedResult) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cfg := &config.Config{ + ReloadOnCreate: tt.reloadOnCreate, + SyncAfterRestart: tt.syncAfterRestart, + } + initialized := tt.initialized + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.CreateEvent{Object: cm} + result := pred.Create(e) + + if result != tt.expectedResult { + t.Errorf("CreateFunc() = %v, want %v", result, tt.expectedResult) + } + }, + ) } } @@ -91,7 +93,6 @@ func TestCreateEventPredicate_UpdateEvent(t *testing.T) { e := event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} result := pred.Update(e) - // Update events should always return true if !result { t.Error("UpdateFunc() should always return true") } @@ -116,25 +117,27 @@ func TestCreateEventPredicate_DeleteEvent(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := &config.Config{ - ReloadOnDelete: tt.reloadOnDelete, - } - initialized := true - - pred := createEventPredicate(cfg, &initialized) - - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, - } - - e := event.DeleteEvent{Object: cm} - result := pred.Delete(e) - - if 
result != tt.expectedResult { - t.Errorf("DeleteFunc() = %v, want %v", result, tt.expectedResult) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cfg := &config.Config{ + ReloadOnDelete: tt.reloadOnDelete, + } + initialized := true + + pred := createEventPredicate(cfg, &initialized) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + e := event.DeleteEvent{Object: cm} + result := pred.Delete(e) + + if result != tt.expectedResult { + t.Errorf("DeleteFunc() = %v, want %v", result, tt.expectedResult) + } + }, + ) } } @@ -151,7 +154,6 @@ func TestCreateEventPredicate_GenericEvent(t *testing.T) { e := event.GenericEvent{Object: cm} result := pred.Generic(e) - // Generic events should always return false if result { t.Error("GenericFunc() should always return false") } @@ -164,17 +166,14 @@ func TestBuildEventFilter(t *testing.T) { } initialized := true - // Create a simple always-true predicate as the resource predicate resourcePred := &alwaysTruePredicate{} filter := BuildEventFilter(resourcePred, cfg, &initialized) - // The filter should be created without error if filter == nil { t.Fatal("BuildEventFilter() should return a non-nil predicate") } - // Test update event passes (since resourcePred returns true and update always returns true) cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -182,7 +181,6 @@ func TestBuildEventFilter(t *testing.T) { e := event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} result := filter.Update(e) - // Since namespace filter is empty (all namespaces allowed), this should pass if !result { t.Error("UpdateFunc() should return true when all predicates pass") } diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go index 2c887340a..bc83ca705 100644 --- a/internal/pkg/controller/manager.go +++ b/internal/pkg/controller/manager.go @@ -16,6 +16,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime 
"k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -81,6 +82,37 @@ func NewManager(opts ManagerOptions) (ctrl.Manager, error) { return mgr, nil } +// NewManagerWithRestConfig creates a new controller-runtime manager with the given rest.Config. +// This is useful for testing where you have a pre-existing cluster configuration. +func NewManagerWithRestConfig(opts ManagerOptions, restConfig *rest.Config) (ctrl.Manager, error) { + cfg := opts.Config + le := cfg.LeaderElection + + mgrOpts := ctrl.Options{ + Scheme: runtimeScheme, + Metrics: ctrlmetrics.Options{ + BindAddress: "0", // Disable metrics server in tests + }, + HealthProbeBindAddress: "0", // Disable health probes in tests + + // Leader election configuration + LeaderElection: cfg.EnableHA, + LeaderElectionID: le.LockName, + LeaderElectionNamespace: le.Namespace, + LeaderElectionReleaseOnCancel: le.ReleaseOnCancel, + LeaseDuration: &le.LeaseDuration, + RenewDeadline: &le.RenewDeadline, + RetryPeriod: &le.RetryPeriod, + } + + mgr, err := ctrl.NewManager(restConfig, mgrOpts) + if err != nil { + return nil, fmt.Errorf("creating manager: %w", err) + } + + return mgr, nil +} + // SetupReconcilers sets up all reconcilers with the manager. 
func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, collectors *metrics.Collectors) error { registry := workload.NewRegistry(cfg.ArgoRolloutsEnabled) diff --git a/internal/pkg/controller/namespace_reconciler_test.go b/internal/pkg/controller/namespace_reconciler_test.go index 2ad83d1e1..3d4fcc657 100644 --- a/internal/pkg/controller/namespace_reconciler_test.go +++ b/internal/pkg/controller/namespace_reconciler_test.go @@ -1,25 +1,16 @@ package controller_test import ( - "context" "testing" - "github.com/go-logr/logr/testr" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/controller" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestNamespaceCache_Basic(t *testing.T) { cache := controller.NewNamespaceCache(true) - // Test Add and Contains cache.Add("namespace-1") if !cache.Contains("namespace-1") { t.Error("Cache should contain namespace-1") @@ -28,7 +19,6 @@ func TestNamespaceCache_Basic(t *testing.T) { t.Error("Cache should not contain namespace-2") } - // Test Remove cache.Remove("namespace-1") if cache.Contains("namespace-1") { t.Error("Cache should not contain namespace-1 after removal") @@ -38,13 +28,9 @@ func TestNamespaceCache_Basic(t *testing.T) { func TestNamespaceCache_Disabled(t *testing.T) { cache := controller.NewNamespaceCache(false) - // When disabled, Contains should always return true if !cache.Contains("any-namespace") { t.Error("Disabled cache should return true for any namespace") } - if !cache.Contains("other-namespace") { - t.Error("Disabled cache should return true for any namespace") - } } func TestNamespaceCache_List(t *testing.T) { @@ -58,7 +44,6 @@ func TestNamespaceCache_List(t *testing.T) { t.Errorf("Expected 3 namespaces, got %d", 
len(list)) } - // Check all namespaces are in the list found := make(map[string]bool) for _, ns := range list { found[ns] = true @@ -71,53 +56,24 @@ func TestNamespaceCache_List(t *testing.T) { } func TestNamespaceCache_IsEnabled(t *testing.T) { - enabledCache := controller.NewNamespaceCache(true) - disabledCache := controller.NewNamespaceCache(false) - - if !enabledCache.IsEnabled() { + if !controller.NewNamespaceCache(true).IsEnabled() { t.Error("EnabledCache.IsEnabled() should return true") } - if disabledCache.IsEnabled() { + if controller.NewNamespaceCache(false).IsEnabled() { t.Error("DisabledCache.IsEnabled() should return false") } } func TestNamespaceReconciler_Add(t *testing.T) { - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-ns", - Labels: map[string]string{"env": "production"}, - }, - } - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(ns). - Build() - cfg := config.NewDefault() selector, _ := labels.Parse("env=production") cfg.NamespaceSelectors = []labels.Selector{selector} cache := controller.NewNamespaceCache(true) - reconciler := &controller.NamespaceReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - Cache: cache, - } - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{Name: "test-ns"}, - } + ns := testNamespace("test-ns", map[string]string{"env": "production"}) + reconciler := newNamespaceReconciler(t, cfg, cache, ns) - _, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } + assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) if !cache.Contains("test-ns") { t.Error("Cache should contain test-ns after reconcile") @@ -125,45 +81,17 @@ func TestNamespaceReconciler_Add(t *testing.T) { } func TestNamespaceReconciler_Remove_LabelChange(t *testing.T) { - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - 
// Namespace with non-matching labels - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-ns", - Labels: map[string]string{"env": "staging"}, - }, - } - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(ns). - Build() - cfg := config.NewDefault() selector, _ := labels.Parse("env=production") cfg.NamespaceSelectors = []labels.Selector{selector} cache := controller.NewNamespaceCache(true) - // Pre-populate cache - cache.Add("test-ns") - - reconciler := &controller.NamespaceReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - Cache: cache, - } + cache.Add("test-ns") // Pre-populate - req := ctrl.Request{ - NamespacedName: types.NamespacedName{Name: "test-ns"}, - } + ns := testNamespace("test-ns", map[string]string{"env": "staging"}) // Non-matching + reconciler := newNamespaceReconciler(t, cfg, cache, ns) - _, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } + assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) if cache.Contains("test-ns") { t.Error("Cache should not contain test-ns after reconcile (labels no longer match)") @@ -171,37 +99,16 @@ func TestNamespaceReconciler_Remove_LabelChange(t *testing.T) { } func TestNamespaceReconciler_Remove_Delete(t *testing.T) { - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - // No namespace in cluster (simulates delete) - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). 
- Build() - cfg := config.NewDefault() selector, _ := labels.Parse("env=production") cfg.NamespaceSelectors = []labels.Selector{selector} cache := controller.NewNamespaceCache(true) - // Pre-populate cache - cache.Add("deleted-ns") - - reconciler := &controller.NamespaceReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - Cache: cache, - } + cache.Add("deleted-ns") // Pre-populate - req := ctrl.Request{ - NamespacedName: types.NamespacedName{Name: "deleted-ns"}, - } + reconciler := newNamespaceReconciler(t, cfg, cache) // No namespace in cluster - _, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } + assertReconcileSuccess(t, reconciler, namespaceRequest("deleted-ns")) if cache.Contains("deleted-ns") { t.Error("Cache should not contain deleted-ns after reconcile") @@ -209,85 +116,32 @@ func TestNamespaceReconciler_Remove_Delete(t *testing.T) { } func TestNamespaceReconciler_MultipleSelectors(t *testing.T) { - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-ns", - Labels: map[string]string{"team": "platform"}, - }, - } - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(ns). 
- Build() - cfg := config.NewDefault() selector1, _ := labels.Parse("env=production") selector2, _ := labels.Parse("team=platform") cfg.NamespaceSelectors = []labels.Selector{selector1, selector2} cache := controller.NewNamespaceCache(true) - reconciler := &controller.NamespaceReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - Cache: cache, - } + ns := testNamespace("test-ns", map[string]string{"team": "platform"}) + reconciler := newNamespaceReconciler(t, cfg, cache, ns) - req := ctrl.Request{ - NamespacedName: types.NamespacedName{Name: "test-ns"}, - } + assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) - _, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - - // Should be added because it matches second selector (team=platform) if !cache.Contains("test-ns") { t.Error("Cache should contain test-ns (matches second selector)") } } func TestNamespaceReconciler_NoLabels(t *testing.T) { - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - // Namespace with no labels - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-ns", - }, - } - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(ns). 
- Build() - cfg := config.NewDefault() selector, _ := labels.Parse("env=production") cfg.NamespaceSelectors = []labels.Selector{selector} cache := controller.NewNamespaceCache(true) - reconciler := &controller.NamespaceReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - Cache: cache, - } - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{Name: "test-ns"}, - } + ns := testNamespace("test-ns", nil) // No labels + reconciler := newNamespaceReconciler(t, cfg, cache, ns) - _, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } + assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) if cache.Contains("test-ns") { t.Error("Cache should not contain test-ns (no labels)") diff --git a/internal/pkg/controller/retry_test.go b/internal/pkg/controller/retry_test.go index a3e9fc2fa..a6237586c 100644 --- a/internal/pkg/controller/retry_test.go +++ b/internal/pkg/controller/retry_test.go @@ -1,118 +1,286 @@ -package controller +package controller_test import ( + "context" "testing" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/workload" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func TestUpdateWorkloadWithRetry_SwitchCases(t *testing.T) { - // Test that the switch statement correctly identifies workload types - // Note: Full integration tests require a fake k8s client, so we just test type detection - +func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { tests := []struct { name string - workload workload.WorkloadAccessor - expectedKind workload.Kind 
+ object runtime.Object + workload func(runtime.Object) workload.WorkloadAccessor + resourceType reload.ResourceType + verify func(t *testing.T, c client.Client) }{ { - name: "deployment workload", - workload: workload.NewDeploymentWorkload(&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, - }), - expectedKind: workload.KindDeployment, + name: "Deployment", + object: testDeployment("test-deployment", "default", nil), + workload: func(o runtime.Object) workload.WorkloadAccessor { + return workload.NewDeploymentWorkload(o.(*appsv1.Deployment)) + }, + resourceType: reload.ResourceTypeConfigMap, + verify: func(t *testing.T, c client.Client) { + var result appsv1.Deployment + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + }, }, { - name: "daemonset workload", - workload: workload.NewDaemonSetWorkload(&appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, - }), - expectedKind: workload.KindDaemonSet, + name: "DaemonSet", + object: testDaemonSet("test-daemonset", "default", nil), + workload: func(o runtime.Object) workload.WorkloadAccessor { + return workload.NewDaemonSetWorkload(o.(*appsv1.DaemonSet)) + }, + resourceType: reload.ResourceTypeSecret, + verify: func(t *testing.T, c client.Client) { + var result appsv1.DaemonSet + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-daemonset", Namespace: "default"}, &result); err != nil { + t.Fatalf("Failed to get daemonset: %v", err) + } + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + }, }, { - name: "statefulset workload", - workload: workload.NewStatefulSetWorkload(&appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test", 
Namespace: "default"}, - }), - expectedKind: workload.KindStatefulSet, + name: "StatefulSet", + object: testStatefulSet("test-statefulset", "default", nil), + workload: func(o runtime.Object) workload.WorkloadAccessor { + return workload.NewStatefulSetWorkload(o.(*appsv1.StatefulSet)) + }, + resourceType: reload.ResourceTypeConfigMap, + verify: func(t *testing.T, c client.Client) { + var result appsv1.StatefulSet + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-statefulset", Namespace: "default"}, &result); err != nil { + t.Fatalf("Failed to get statefulset: %v", err) + } + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + }, }, { - name: "job workload", - workload: workload.NewJobWorkload(&batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, - }), - expectedKind: workload.KindJob, + name: "Job", + object: testJob("test-job", "default"), + workload: func(o runtime.Object) workload.WorkloadAccessor { + return workload.NewJobWorkload(o.(*batchv1.Job)) + }, + resourceType: reload.ResourceTypeConfigMap, + verify: func(t *testing.T, c client.Client) { + var jobs batchv1.JobList + if err := c.List(context.Background(), &jobs, client.InNamespace("default")); err != nil { + t.Fatalf("Failed to list jobs: %v", err) + } + if len(jobs.Items) != 1 { + t.Errorf("Expected 1 job (recreated), got %d", len(jobs.Items)) + } + }, }, { - name: "cronjob workload", - workload: workload.NewCronJobWorkload(&batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, - }), - expectedKind: workload.KindCronJob, + name: "CronJob", + object: testCronJob("test-cronjob", "default"), + workload: func(o runtime.Object) workload.WorkloadAccessor { + return workload.NewCronJobWorkload(o.(*batchv1.CronJob)) + }, + resourceType: reload.ResourceTypeSecret, + verify: func(t *testing.T, c client.Client) { + var jobs batchv1.JobList + if err := 
c.List(context.Background(), &jobs, client.InNamespace("default")); err != nil { + t.Fatalf("Failed to list jobs: %v", err) + } + if len(jobs.Items) != 1 { + t.Errorf("Expected 1 job from cronjob, got %d", len(jobs.Items)) + } + if len(jobs.Items) > 0 && jobs.Items[0].Annotations["cronjob.kubernetes.io/instantiate"] != "manual" { + t.Error("Expected job to have manual instantiate annotation") + } + }, }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Verify the workload kind is correctly identified - if tt.workload.Kind() != tt.expectedKind { - t.Errorf("workload.Kind() = %v, want %v", tt.workload.Kind(), tt.expectedKind) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testScheme()). + WithRuntimeObjects(tt.object). + Build() + + wl := tt.workload(tt.object) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + wl, + "test-resource", + tt.resourceType, + "default", + "abc123", + false, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + tt.verify(t, fakeClient) + }, + ) } } -func TestJobWorkloadTypeCast(t *testing.T) { - // Test that JobWorkload type cast works correctly - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: "default"}, +func TestUpdateWorkloadWithRetry_Strategies(t *testing.T) { + tests := []struct { + name string + strategy config.ReloadStrategy + verify func(t *testing.T, cfg *config.Config, result *appsv1.Deployment) + }{ + { + name: "EnvVarStrategy", + strategy: config.ReloadStrategyEnvVars, + verify: func(t *testing.T, cfg *config.Config, result *appsv1.Deployment) { + found := false + for _, env := range result.Spec.Template.Spec.Containers[0].Env { + if env.Name == "STAKATER_TEST_CM_CONFIGMAP" && env.Value 
== "abc123" { + found = true + break + } + } + if !found { + t.Error("Expected STAKATER_TEST_CM_CONFIGMAP env var to be set") + } + }, + }, + { + name: "AnnotationStrategy", + strategy: config.ReloadStrategyAnnotations, + verify: func(t *testing.T, cfg *config.Config, result *appsv1.Deployment) { + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + if _, ok := result.Spec.Template.Annotations[cfg.Annotations.LastReloadedFrom]; !ok { + t.Errorf("Expected %s annotation to be set", cfg.Annotations.LastReloadedFrom) + } + for _, env := range result.Spec.Template.Spec.Containers[0].Env { + if env.Name == "STAKATER_TEST_CM_CONFIGMAP" { + t.Error("Annotation strategy should not add env vars") + } + } + }, + }, } - jobWl := workload.NewJobWorkload(job) - if jobWl.GetName() != "test-job" { - t.Errorf("JobWorkload.GetName() = %v, want test-job", jobWl.GetName()) - } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadStrategy = tt.strategy + reloadService := reload.NewService(cfg) - // Test GetJob method - gotJob := jobWl.GetJob() - if gotJob.Name != "test-job" { - t.Errorf("JobWorkload.GetJob().Name = %v, want test-job", gotJob.Name) - } + deployment := testDeployment("test-deployment", "default", nil) + fakeClient := fake.NewClientBuilder(). + WithScheme(testScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", + false, + ) - // Verify it satisfies WorkloadAccessor interface - var _ workload.WorkloadAccessor = jobWl + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + tt.verify(t, cfg, &result) + }, + ) + } } -func TestCronJobWorkloadTypeCast(t *testing.T) { - // Test that CronJobWorkload type cast works correctly - cronJob := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{Name: "test-cronjob", Namespace: "default"}, - Spec: batchv1.CronJobSpec{ - Schedule: "*/5 * * * *", +func TestUpdateWorkloadWithRetry_NoUpdate(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg) + + deployment := testDeployment("test-deployment", "default", nil) + deployment.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ + { + Name: "STAKATER_TEST_CM_CONFIGMAP", + Value: "abc123", }, } - cronJobWl := workload.NewCronJobWorkload(cronJob) - if cronJobWl.GetName() != "test-cronjob" { - t.Errorf("CronJobWorkload.GetName() = %v, want test-cronjob", cronJobWl.GetName()) - } + fakeClient := fake.NewClientBuilder(). + WithScheme(testScheme()). + WithObjects(deployment). 
+ Build() - // Test GetCronJob method - gotCronJob := cronJobWl.GetCronJob() - if gotCronJob.Name != "test-cronjob" { - t.Errorf("CronJobWorkload.GetCronJob().Name = %v, want test-cronjob", gotCronJob.Name) - } + wl := workload.NewDeploymentWorkload(deployment) - // Verify it satisfies WorkloadAccessor interface - var _ workload.WorkloadAccessor = cronJobWl + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", // Same hash as already set + false, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if updated { + t.Error("Expected workload NOT to be updated (same hash)") + } } func TestResourceTypeKind(t *testing.T) { - // Test that ResourceType.Kind() returns correct values tests := []struct { resourceType reload.ResourceType expectedKind string @@ -122,10 +290,12 @@ func TestResourceTypeKind(t *testing.T) { } for _, tt := range tests { - t.Run(string(tt.resourceType), func(t *testing.T) { - if got := tt.resourceType.Kind(); got != tt.expectedKind { - t.Errorf("ResourceType.Kind() = %v, want %v", got, tt.expectedKind) - } - }) + t.Run( + string(tt.resourceType), func(t *testing.T) { + if got := tt.resourceType.Kind(); got != tt.expectedKind { + t.Errorf("ResourceType.Kind() = %v, want %v", got, tt.expectedKind) + } + }, + ) } } diff --git a/internal/pkg/controller/secret_reconciler_test.go b/internal/pkg/controller/secret_reconciler_test.go index 155324aa2..11a879b46 100644 --- a/internal/pkg/controller/secret_reconciler_test.go +++ b/internal/pkg/controller/secret_reconciler_test.go @@ -1,1017 +1,172 @@ package controller_test import ( - "context" "testing" - "github.com/go-logr/logr/testr" - "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/events" - 
"github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/reload" - "github.com/stakater/Reloader/internal/pkg/webhook" - "github.com/stakater/Reloader/internal/pkg/workload" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func newTestSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.SecretReconciler { - scheme := newTestScheme() - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithRuntimeObjects(objects...). - Build() - - collectors := metrics.NewCollectors() - - return &controller.SecretReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - ReloadService: reload.NewService(cfg), - Registry: workload.NewRegistry(cfg.ArgoRolloutsEnabled), - Collectors: &collectors, - EventRecorder: events.NewRecorder(nil), - WebhookClient: webhook.NewClient("", testr.New(t)), - Alerter: &alerting.NoOpAlerter{}, - } -} - func TestSecretReconciler_NotFound(t *testing.T) { cfg := config.NewDefault() - reconciler := newTestSecretReconciler(t, cfg) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "nonexistent-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue for NotFound") - } + reconciler := newSecretReconciler(t, cfg) + assertReconcileSuccess(t, reconciler, reconcileRequest("nonexistent-secret", "default")) } func TestSecretReconciler_NotFound_ReloadOnDelete(t *testing.T) { cfg := config.NewDefault() cfg.ReloadOnDelete = true - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: 
map[string]string{ - cfg.Annotations.SecretReload: "deleted-secret", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "deleted-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + deployment := testDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.SecretReload: "deleted-secret", + }) + reconciler := newSecretReconciler(t, cfg, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("deleted-secret", "default")) } func TestSecretReconciler_IgnoredNamespace(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredNamespaces = []string{"kube-system"} - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "kube-system", - }, - Data: map[string][]byte{"key": []byte("value")}, - } - - reconciler := newTestSecretReconciler(t, cfg, secret) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-secret", - Namespace: "kube-system", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue for ignored namespace") - } + secret := testSecret("test-secret", "kube-system") + reconciler := newSecretReconciler(t, cfg, secret) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "kube-system")) } func 
TestSecretReconciler_NoMatchingWorkloads(t *testing.T) { cfg := config.NewDefault() - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "default", - }, - Data: map[string][]byte{"key": []byte("value")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("test-secret", "default") + deployment := testDeployment("test-deployment", "default", nil) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } func TestSecretReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "default", - }, - Data: map[string][]byte{"password": []byte("secret123")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ 
- Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - EnvFrom: []corev1.EnvFromSource{{ - SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "test-secret", - }, - }, - }}, - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("test-secret", "default") + deployment := testDeploymentWithEnvFrom("test-deployment", "default", "", "test-secret") + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } func TestSecretReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { cfg := config.NewDefault() - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "default", - }, - Data: map[string][]byte{"password": []byte("secret123")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "test-secret", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: 
"test-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("test-secret", "default") + deployment := testDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } func TestSecretReconciler_WorkloadInDifferentNamespace(t *testing.T) { cfg := config.NewDefault() - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "namespace-a", - }, - Data: map[string][]byte{"key": []byte("value")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace-b", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "test-secret", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-secret", - Namespace: "namespace-a", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("test-secret", "namespace-a") + deployment := testDeployment("test-deployment", "namespace-b", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := 
newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "namespace-a")) } func TestSecretReconciler_IgnoredWorkloadType(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredWorkloads = []string{"deployment"} - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "default", - }, - Data: map[string][]byte{"key": []byte("value")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "test-secret", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("test-secret", "default") + deployment := testDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } func TestSecretReconciler_DaemonSet(t *testing.T) { cfg := config.NewDefault() - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "default", - }, - Data: map[string][]byte{"key": []byte("value")}, - } - - daemonset := 
&appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-daemonset", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "test-secret", - }, - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, daemonset) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("test-secret", "default") + daemonset := testDaemonSet("test-daemonset", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, daemonset) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } func TestSecretReconciler_StatefulSet(t *testing.T) { cfg := config.NewDefault() - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "default", - }, - Data: map[string][]byte{"key": []byte("value")}, - } - - statefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-statefulset", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "test-secret", - }, - }, - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: 
[]corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, statefulset) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "test-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("test-secret", "default") + statefulset := testStatefulSet("test-statefulset", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", + }) + reconciler := newSecretReconciler(t, cfg, secret, statefulset) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } -func TestSecretReconciler_VolumeMount(t *testing.T) { +func TestSecretReconciler_MultipleWorkloads(t *testing.T) { cfg := config.NewDefault() - cfg.AutoReloadAll = true - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "volume-secret", - Namespace: "default", - }, - Data: map[string][]byte{"credentials": []byte("supersecret")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - VolumeMounts: []corev1.VolumeMount{{ - Name: "secrets", - MountPath: "/etc/secrets", - }}, - }}, - Volumes: []corev1.Volume{{ - Name: "secrets", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "volume-secret", - }, - }, - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - 
NamespacedName: types.NamespacedName{ - Name: "volume-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } -} - -func TestSecretReconciler_ProjectedVolume(t *testing.T) { - cfg := config.NewDefault() - cfg.AutoReloadAll = true + secret := testSecret("shared-secret", "default") + deployment1 := testDeployment("deployment-1", "default", map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }) + deployment2 := testDeployment("deployment-2", "default", map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }) + daemonset := testDaemonSet("daemonset-1", "default", map[string]string{ + cfg.Annotations.SecretReload: "shared-secret", + }) - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "projected-secret", - Namespace: "default", - }, - Data: map[string][]byte{"credentials": []byte("supersecret")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - VolumeMounts: []corev1.VolumeMount{{ - Name: "secrets", - MountPath: "/etc/secrets", - }}, - }}, - Volumes: []corev1.Volume{{ - Name: "secrets", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{{ - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "projected-secret", - }, - }, - }}, - }, - }, - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - 
req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "projected-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + reconciler := newSecretReconciler(t, cfg, secret, deployment1, deployment2, daemonset) + assertReconcileSuccess(t, reconciler, reconcileRequest("shared-secret", "default")) } -func TestSecretReconciler_EnvKeyRef(t *testing.T) { +func TestSecretReconciler_VolumeMount(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "key-ref-secret", - Namespace: "default", - }, - Data: map[string][]byte{"password": []byte("secret123")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - Env: []corev1.EnvVar{{ - Name: "DB_PASSWORD", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "key-ref-secret", - }, - Key: "password", - }, - }, - }}, - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "key-ref-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("volume-secret", "default") + deployment 
:= testDeploymentWithVolume("test-deployment", "default", "", "volume-secret") + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("volume-secret", "default")) } -func TestSecretReconciler_MultipleWorkloads(t *testing.T) { +func TestSecretReconciler_ProjectedVolume(t *testing.T) { cfg := config.NewDefault() + cfg.AutoReloadAll = true - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "shared-secret", - Namespace: "default", - }, - Data: map[string][]byte{"key": []byte("value")}, - } - - deployment1 := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "deployment-1", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "shared-secret", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test1"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test1"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - deployment2 := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "deployment-2", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "shared-secret", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test2"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test2"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - statefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "statefulset-1", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "shared-secret", - }, - }, - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ 
- MatchLabels: map[string]string{"app": "stateful"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "stateful"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment1, deployment2, statefulset) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "shared-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecret("projected-secret", "default") + deployment := testDeploymentWithProjectedVolume("test-deployment", "default", "", "projected-secret") + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("projected-secret", "default")) } func TestSecretReconciler_SearchAnnotation(t *testing.T) { cfg := config.NewDefault() - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.Match: "true", - }, - }, - Data: map[string][]byte{"key": []byte("value")}, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.Search: "true", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - 
NamespacedName: types.NamespacedName{ - Name: "test-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + secret := testSecretWithAnnotations("test-secret", "default", map[string]string{ + cfg.Annotations.Match: "true", + }) + deployment := testDeployment("test-deployment", "default", map[string]string{ + cfg.Annotations.Search: "true", + }) + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } -func TestSecretReconciler_TLSSecret(t *testing.T) { +func TestSecretReconciler_ServiceAccountTokenIgnored(t *testing.T) { cfg := config.NewDefault() + cfg.AutoReloadAll = true - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "tls-secret", - Namespace: "default", - }, - Type: corev1.SecretTypeTLS, - Data: map[string][]byte{ - "tls.crt": []byte("-----BEGIN CERTIFICATE-----\ntest\n-----END CERTIFICATE-----"), - "tls.key": []byte("-----BEGIN RSA PRIVATE KEY-----\ntest\n-----END RSA PRIVATE KEY-----"), - }, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "tls-secret", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "tls-secret", - Namespace: "default", - }, - } - - result, err := 
reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } -} - -func TestSecretReconciler_ImagePullSecret(t *testing.T) { - cfg := config.NewDefault() + // Service account tokens should be ignored + secret := testSecret("sa-token", "default") + secret.Type = "kubernetes.io/service-account-token" - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "registry-secret", - Namespace: "default", - }, - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - ".dockerconfigjson": []byte(`{"auths":{}}`), - }, - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - Annotations: map[string]string{ - cfg.Annotations.SecretReload: "registry-secret", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, - ImagePullSecrets: []corev1.LocalObjectReference{{ - Name: "registry-secret", - }}, - }, - }, - }, - } - - reconciler := newTestSecretReconciler(t, cfg, secret, deployment) - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "registry-secret", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile failed: %v", err) - } - if result.Requeue { - t.Error("Should not requeue") - } + deployment := testDeploymentWithEnvFrom("test-deployment", "default", "", "sa-token") + reconciler := newSecretReconciler(t, cfg, secret, deployment) + assertReconcileSuccess(t, reconciler, reconcileRequest("sa-token", "default")) } diff --git a/internal/pkg/controller/test_helpers_test.go 
b/internal/pkg/controller/test_helpers_test.go new file mode 100644 index 000000000..b33152329 --- /dev/null +++ b/internal/pkg/controller/test_helpers_test.go @@ -0,0 +1,418 @@ +package controller_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr/testr" + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +// testScheme is a shared scheme for all controller tests. +func testScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = batchv1.AddToScheme(scheme) + return scheme +} + +// newConfigMapReconciler creates a ConfigMapReconciler for testing. +func newConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.ConfigMapReconciler { + t.Helper() + fakeClient := fake.NewClientBuilder(). + WithScheme(testScheme()). + WithRuntimeObjects(objects...). 
+ Build() + + collectors := metrics.NewCollectors() + + return &controller.ConfigMapReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + ReloadService: reload.NewService(cfg), + Registry: workload.NewRegistry(cfg.ArgoRolloutsEnabled), + Collectors: &collectors, + EventRecorder: events.NewRecorder(nil), + WebhookClient: webhook.NewClient("", testr.New(t)), + Alerter: &alerting.NoOpAlerter{}, + } +} + +// newSecretReconciler creates a SecretReconciler for testing. +func newSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.SecretReconciler { + t.Helper() + fakeClient := fake.NewClientBuilder(). + WithScheme(testScheme()). + WithRuntimeObjects(objects...). + Build() + + collectors := metrics.NewCollectors() + + return &controller.SecretReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + ReloadService: reload.NewService(cfg), + Registry: workload.NewRegistry(cfg.ArgoRolloutsEnabled), + Collectors: &collectors, + EventRecorder: events.NewRecorder(nil), + WebhookClient: webhook.NewClient("", testr.New(t)), + Alerter: &alerting.NoOpAlerter{}, + } +} + +// testConfigMap creates a ConfigMap for testing. +func testConfigMap(name, namespace string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string]string{"key": "value"}, + } +} + +// testConfigMapWithAnnotations creates a ConfigMap with annotations. +func testConfigMapWithAnnotations(name, namespace string, annotations map[string]string) *corev1.ConfigMap { + cm := testConfigMap(name, namespace) + cm.Annotations = annotations + return cm +} + +// testSecret creates a Secret for testing. 
+func testSecret(name, namespace string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{"key": []byte("value")}, + } +} + +// testSecretWithAnnotations creates a Secret with annotations. +func testSecretWithAnnotations(name, namespace string, annotations map[string]string) *corev1.Secret { + secret := testSecret(name, namespace) + secret.Annotations = annotations + return secret +} + +// testDeployment creates a minimal Deployment for testing. +func testDeployment(name, namespace string, annotations map[string]string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, + }, + }, + }, + } +} + +// testDeploymentWithEnvFrom creates a Deployment with EnvFrom referencing a ConfigMap or Secret. 
+func testDeploymentWithEnvFrom(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := testDeployment(name, namespace, nil) + if configMapName != "" { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + ) + } + if secretName != "" { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }, + ) + } + return d +} + +// testDeploymentWithVolume creates a Deployment with a volume from ConfigMap or Secret. +func testDeploymentWithVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := testDeployment(name, namespace, nil) + d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/etc/config", + }, + } + + if configMapName != "" { + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + }, + } + } + if secretName != "" { + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }, + } + } + return d +} + +// testDeploymentWithProjectedVolume creates a Deployment with a projected volume. 
+func testDeploymentWithProjectedVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := testDeployment(name, namespace, nil) + d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/etc/config", + }, + } + + var sources []corev1.VolumeProjection + if configMapName != "" { + sources = append( + sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + ) + } + if secretName != "" { + sources = append( + sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }, + ) + } + + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{Sources: sources}, + }, + }, + } + return d +} + +// testDaemonSet creates a minimal DaemonSet for testing. +func testDaemonSet(name, namespace string, annotations map[string]string) *appsv1.DaemonSet { + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, + }, + }, + }, + } +} + +// testStatefulSet creates a minimal StatefulSet for testing. 
+func testStatefulSet(name, namespace string, annotations map[string]string) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, + }, + }, + }, + } +} + +// reconcileRequest creates a ctrl.Request for the given name and namespace. +func reconcileRequest(name, namespace string) ctrl.Request { + return ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + Namespace: namespace, + }, + } +} + +// namespaceRequest creates a ctrl.Request for a namespace (no namespace field needed). +func namespaceRequest(name string) ctrl.Request { + return ctrl.Request{ + NamespacedName: types.NamespacedName{Name: name}, + } +} + +// testNamespace creates a Namespace with optional labels. +func testNamespace(name string, labels map[string]string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + } +} + +// newNamespaceReconciler creates a NamespaceReconciler for testing. +func newNamespaceReconciler(t *testing.T, cfg *config.Config, cache *controller.NamespaceCache, objects ...runtime.Object) *controller.NamespaceReconciler { + t.Helper() + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(objects...). + Build() + + return &controller.NamespaceReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + Cache: cache, + } +} + +// assertReconcileSuccess runs reconcile and asserts no error and no requeue. 
+func assertReconcileSuccess(t *testing.T, reconciler interface { + Reconcile(context.Context, ctrl.Request) (ctrl.Result, error) +}, req ctrl.Request) { + t.Helper() + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Fatalf("Reconcile failed: %v", err) + } + if result.Requeue { + t.Error("Should not requeue") + } +} + +// testJob creates a minimal Job for testing. +func testJob(name, namespace string) *batchv1.Job { + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox", + }, + }, + }, + }, + }, + } +} + +// testCronJob creates a minimal CronJob for testing. +func testCronJob(name, namespace string) *batchv1.CronJob { + return &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: "test-uid", + }, + Spec: batchv1.CronJobSpec{ + Schedule: "*/5 * * * *", + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox", + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/internal/pkg/events/recorder_test.go b/internal/pkg/events/recorder_test.go index 9bf6a9170..475173ee2 100644 --- a/internal/pkg/events/recorder_test.go +++ b/internal/pkg/events/recorder_test.go @@ -102,7 +102,6 @@ func TestNilRecorder_NoPanic(t *testing.T) { } func TestRecorder_NilInternalRecorder(t *testing.T) { - // Create a Recorder with nil internal recorder (edge case) r := &Recorder{recorder: nil} pod := &corev1.Pod{ @@ -112,26 +111,10 @@ func TestRecorder_NilInternalRecorder(t *testing.T) { }, } - // These should not panic r.ReloadSuccess(pod, "ConfigMap", "my-config") r.ReloadFailed(pod, "Secret", 
"my-secret", errors.New("test error")) } -func TestEventConstants(t *testing.T) { - if EventTypeNormal != corev1.EventTypeNormal { - t.Errorf("EventTypeNormal = %q, want %q", EventTypeNormal, corev1.EventTypeNormal) - } - if EventTypeWarning != corev1.EventTypeWarning { - t.Errorf("EventTypeWarning = %q, want %q", EventTypeWarning, corev1.EventTypeWarning) - } - if ReasonReloaded != "Reloaded" { - t.Errorf("ReasonReloaded = %q, want %q", ReasonReloaded, "Reloaded") - } - if ReasonReloadFailed != "ReloadFailed" { - t.Errorf("ReasonReloadFailed = %q, want %q", ReasonReloadFailed, "ReloadFailed") - } -} - func TestReloadSuccess_DifferentObjectTypes(t *testing.T) { fakeRecorder := record.NewFakeRecorder(10) r := NewRecorder(fakeRecorder) @@ -155,18 +138,20 @@ func TestReloadSuccess_DifferentObjectTypes(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r.ReloadSuccess(tt.object, "ConfigMap", "my-config") - - select { - case event := <-fakeRecorder.Events: - if event == "" { + t.Run( + tt.name, func(t *testing.T) { + r.ReloadSuccess(tt.object, "ConfigMap", "my-config") + + select { + case event := <-fakeRecorder.Events: + if event == "" { + t.Error("Expected event to be recorded") + } + default: t.Error("Expected event to be recorded") } - default: - t.Error("Expected event to be recorded") - } - }) + }, + ) } } diff --git a/internal/pkg/metadata/metadata.go b/internal/pkg/metadata/metadata.go index 9bfae8d5e..616d987a5 100644 --- a/internal/pkg/metadata/metadata.go +++ b/internal/pkg/metadata/metadata.go @@ -40,8 +40,8 @@ var ( type MetaInfo struct { // BuildInfo contains information about the build version, commit, and compilation details. BuildInfo BuildInfo `json:"buildInfo"` - // ReloaderOptions contains all the configuration options used by this Reloader instance. - ReloaderOptions ReloaderOptions `json:"reloaderOptions"` + // Config contains all the configuration options used by this Reloader instance. 
+ Config *config.Config `json:"config"` // DeploymentInfo contains metadata about the Kubernetes deployment of this instance. DeploymentInfo DeploymentInfo `json:"deploymentInfo"` } @@ -66,56 +66,6 @@ type DeploymentInfo struct { Namespace string `json:"namespace"` } -// ReloaderOptions contains the configuration options for Reloader. -// This is a subset of config.Config that's relevant for the metadata ConfigMap. -type ReloaderOptions struct { - // AutoReloadAll enables automatic reloading of all resources. - AutoReloadAll bool `json:"autoReloadAll"` - // ReloadStrategy specifies the strategy used to trigger resource reloads. - ReloadStrategy string `json:"reloadStrategy"` - // IsArgoRollouts indicates whether support for Argo Rollouts is enabled. - IsArgoRollouts bool `json:"isArgoRollouts"` - // ReloadOnCreate indicates whether to trigger reloads when resources are created. - ReloadOnCreate bool `json:"reloadOnCreate"` - // ReloadOnDelete indicates whether to trigger reloads when resources are deleted. - ReloadOnDelete bool `json:"reloadOnDelete"` - // SyncAfterRestart indicates whether to sync add events after Reloader restarts. - SyncAfterRestart bool `json:"syncAfterRestart"` - // EnableHA indicates whether High Availability mode is enabled. - EnableHA bool `json:"enableHA"` - // WebhookURL is the URL to send webhook notifications to. - WebhookURL string `json:"webhookUrl"` - // LogFormat specifies the log format to use. - LogFormat string `json:"logFormat"` - // LogLevel specifies the log level to use. - LogLevel string `json:"logLevel"` - // ResourcesToIgnore is a list of resource types to ignore. - ResourcesToIgnore []string `json:"resourcesToIgnore"` - // WorkloadTypesToIgnore is a list of workload types to ignore. - WorkloadTypesToIgnore []string `json:"workloadTypesToIgnore"` - // NamespacesToIgnore is a list of namespaces to ignore. 
- NamespacesToIgnore []string `json:"namespacesToIgnore"` - // NamespaceSelectors is a list of namespace label selectors. - NamespaceSelectors []string `json:"namespaceSelectors"` - // ResourceSelectors is a list of resource label selectors. - ResourceSelectors []string `json:"resourceSelectors"` - - // Annotations - ConfigmapUpdateOnChangeAnnotation string `json:"configmapUpdateOnChangeAnnotation"` - SecretUpdateOnChangeAnnotation string `json:"secretUpdateOnChangeAnnotation"` - ReloaderAutoAnnotation string `json:"reloaderAutoAnnotation"` - ConfigmapReloaderAutoAnnotation string `json:"configmapReloaderAutoAnnotation"` - SecretReloaderAutoAnnotation string `json:"secretReloaderAutoAnnotation"` - IgnoreResourceAnnotation string `json:"ignoreResourceAnnotation"` - ConfigmapExcludeReloaderAnnotation string `json:"configmapExcludeReloaderAnnotation"` - SecretExcludeReloaderAnnotation string `json:"secretExcludeReloaderAnnotation"` - AutoSearchAnnotation string `json:"autoSearchAnnotation"` - SearchMatchAnnotation string `json:"searchMatchAnnotation"` - RolloutStrategyAnnotation string `json:"rolloutStrategyAnnotation"` - PauseDeploymentAnnotation string `json:"pauseDeploymentAnnotation"` - PauseDeploymentTimeAnnotation string `json:"pauseDeploymentTimeAnnotation"` -} - // NewBuildInfo creates a new BuildInfo with current build information. func NewBuildInfo() BuildInfo { return BuildInfo{ @@ -126,45 +76,11 @@ func NewBuildInfo() BuildInfo { } } -// NewReloaderOptions creates ReloaderOptions from a Config. 
-func NewReloaderOptions(cfg *config.Config) ReloaderOptions { - return ReloaderOptions{ - AutoReloadAll: cfg.AutoReloadAll, - ReloadStrategy: string(cfg.ReloadStrategy), - IsArgoRollouts: cfg.ArgoRolloutsEnabled, - ReloadOnCreate: cfg.ReloadOnCreate, - ReloadOnDelete: cfg.ReloadOnDelete, - SyncAfterRestart: cfg.SyncAfterRestart, - EnableHA: cfg.EnableHA, - WebhookURL: cfg.WebhookURL, - LogFormat: cfg.LogFormat, - LogLevel: cfg.LogLevel, - ResourcesToIgnore: cfg.IgnoredResources, - WorkloadTypesToIgnore: cfg.IgnoredWorkloads, - NamespacesToIgnore: cfg.IgnoredNamespaces, - NamespaceSelectors: cfg.NamespaceSelectorStrings, - ResourceSelectors: cfg.ResourceSelectorStrings, - ConfigmapUpdateOnChangeAnnotation: cfg.Annotations.ConfigmapReload, - SecretUpdateOnChangeAnnotation: cfg.Annotations.SecretReload, - ReloaderAutoAnnotation: cfg.Annotations.Auto, - ConfigmapReloaderAutoAnnotation: cfg.Annotations.ConfigmapAuto, - SecretReloaderAutoAnnotation: cfg.Annotations.SecretAuto, - IgnoreResourceAnnotation: cfg.Annotations.Ignore, - ConfigmapExcludeReloaderAnnotation: cfg.Annotations.ConfigmapExclude, - SecretExcludeReloaderAnnotation: cfg.Annotations.SecretExclude, - AutoSearchAnnotation: cfg.Annotations.Search, - SearchMatchAnnotation: cfg.Annotations.Match, - RolloutStrategyAnnotation: cfg.Annotations.RolloutStrategy, - PauseDeploymentAnnotation: cfg.Annotations.PausePeriod, - PauseDeploymentTimeAnnotation: cfg.Annotations.PausedAt, - } -} - // NewMetaInfo creates a new MetaInfo from configuration. 
func NewMetaInfo(cfg *config.Config) *MetaInfo { return &MetaInfo{ - BuildInfo: NewBuildInfo(), - ReloaderOptions: NewReloaderOptions(cfg), + BuildInfo: NewBuildInfo(), + Config: cfg, DeploymentInfo: DeploymentInfo{ Name: os.Getenv(EnvReloaderDeploymentName), Namespace: os.Getenv(EnvReloaderNamespace), @@ -183,9 +99,9 @@ func (m *MetaInfo) ToConfigMap() *corev1.ConfigMap { }, }, Data: map[string]string{ - "buildInfo": toJSON(m.BuildInfo), - "reloaderOptions": toJSON(m.ReloaderOptions), - "deploymentInfo": toJSON(m.DeploymentInfo), + "buildInfo": toJSON(m.BuildInfo), + "config": toJSON(m.Config), + "deploymentInfo": toJSON(m.DeploymentInfo), }, } } diff --git a/internal/pkg/metadata/metadata_test.go b/internal/pkg/metadata/metadata_test.go index 2ef4f5755..fd87d1d65 100644 --- a/internal/pkg/metadata/metadata_test.go +++ b/internal/pkg/metadata/metadata_test.go @@ -19,7 +19,6 @@ func testLogger() logr.Logger { } func TestNewBuildInfo(t *testing.T) { - // Set build variables for testing oldVersion := Version oldCommit := Commit oldBuildDate := BuildDate @@ -49,7 +48,10 @@ func TestNewBuildInfo(t *testing.T) { } } -func TestNewReloaderOptions(t *testing.T) { +func TestNewMetaInfo(t *testing.T) { + t.Setenv(EnvReloaderNamespace, "test-ns") + t.Setenv(EnvReloaderDeploymentName, "test-deploy") + cfg := config.NewDefault() cfg.AutoReloadAll = true cfg.ReloadStrategy = config.ReloadStrategyAnnotations @@ -64,53 +66,39 @@ func TestNewReloaderOptions(t *testing.T) { cfg.IgnoredWorkloads = []string{"jobs"} cfg.IgnoredNamespaces = []string{"kube-system"} - opts := NewReloaderOptions(cfg) + metaInfo := NewMetaInfo(cfg) - if !opts.AutoReloadAll { + if !metaInfo.Config.AutoReloadAll { t.Error("AutoReloadAll should be true") } - if opts.ReloadStrategy != "annotations" { - t.Errorf("ReloadStrategy = %s, want annotations", opts.ReloadStrategy) + if metaInfo.Config.ReloadStrategy != config.ReloadStrategyAnnotations { + t.Errorf("ReloadStrategy = %s, want annotations", 
metaInfo.Config.ReloadStrategy) } - if !opts.IsArgoRollouts { - t.Error("IsArgoRollouts should be true") + if !metaInfo.Config.ArgoRolloutsEnabled { + t.Error("ArgoRolloutsEnabled should be true") } - if !opts.ReloadOnCreate { + if !metaInfo.Config.ReloadOnCreate { t.Error("ReloadOnCreate should be true") } - if !opts.ReloadOnDelete { + if !metaInfo.Config.ReloadOnDelete { t.Error("ReloadOnDelete should be true") } - if !opts.EnableHA { + if !metaInfo.Config.EnableHA { t.Error("EnableHA should be true") } - if opts.WebhookURL != "https://example.com/webhook" { - t.Errorf("WebhookURL = %s, want https://example.com/webhook", opts.WebhookURL) - } - if opts.LogFormat != "json" { - t.Errorf("LogFormat = %s, want json", opts.LogFormat) - } - if opts.LogLevel != "debug" { - t.Errorf("LogLevel = %s, want debug", opts.LogLevel) - } - if len(opts.ResourcesToIgnore) != 1 || opts.ResourcesToIgnore[0] != "configmaps" { - t.Errorf("ResourcesToIgnore = %v, want [configmaps]", opts.ResourcesToIgnore) - } - if len(opts.WorkloadTypesToIgnore) != 1 || opts.WorkloadTypesToIgnore[0] != "jobs" { - t.Errorf("WorkloadTypesToIgnore = %v, want [jobs]", opts.WorkloadTypesToIgnore) - } - if len(opts.NamespacesToIgnore) != 1 || opts.NamespacesToIgnore[0] != "kube-system" { - t.Errorf("NamespacesToIgnore = %v, want [kube-system]", opts.NamespacesToIgnore) + if metaInfo.Config.WebhookURL != "https://example.com/webhook" { + t.Errorf("WebhookURL = %s, want https://example.com/webhook", metaInfo.Config.WebhookURL) } - // Check annotations - if opts.ReloaderAutoAnnotation != "reloader.stakater.com/auto" { - t.Errorf("ReloaderAutoAnnotation = %s, want reloader.stakater.com/auto", opts.ReloaderAutoAnnotation) + if metaInfo.DeploymentInfo.Namespace != "test-ns" { + t.Errorf("DeploymentInfo.Namespace = %s, want test-ns", metaInfo.DeploymentInfo.Namespace) + } + if metaInfo.DeploymentInfo.Name != "test-deploy" { + t.Errorf("DeploymentInfo.Name = %s, want test-deploy", metaInfo.DeploymentInfo.Name) } } 
func TestMetaInfo_ToConfigMap(t *testing.T) { - // Set environment variables t.Setenv(EnvReloaderNamespace, "reloader-ns") t.Setenv(EnvReloaderDeploymentName, "reloader-deploy") @@ -128,12 +116,11 @@ func TestMetaInfo_ToConfigMap(t *testing.T) { t.Errorf("Label = %s, want %s", cm.Labels[ConfigMapLabelKey], ConfigMapLabelValue) } - // Check data fields exist if _, ok := cm.Data["buildInfo"]; !ok { t.Error("buildInfo data key missing") } - if _, ok := cm.Data["reloaderOptions"]; !ok { - t.Error("reloaderOptions data key missing") + if _, ok := cm.Data["config"]; !ok { + t.Error("config data key missing") } if _, ok := cm.Data["deploymentInfo"]; !ok { t.Error("deploymentInfo data key missing") @@ -145,6 +132,11 @@ func TestMetaInfo_ToConfigMap(t *testing.T) { t.Errorf("buildInfo is not valid JSON: %v", err) } + var parsedConfig config.Config + if err := json.Unmarshal([]byte(cm.Data["config"]), &parsedConfig); err != nil { + t.Errorf("config is not valid JSON: %v", err) + } + // Verify deploymentInfo contains expected values var deployInfo DeploymentInfo if err := json.Unmarshal([]byte(cm.Data["deploymentInfo"]), &deployInfo); err != nil { @@ -159,7 +151,6 @@ func TestMetaInfo_ToConfigMap(t *testing.T) { } func TestPublisher_Publish_NoNamespace(t *testing.T) { - // Ensure RELOADER_NAMESPACE is not set (empty value) t.Setenv(EnvReloaderNamespace, "") scheme := runtime.NewScheme() @@ -176,7 +167,6 @@ func TestPublisher_Publish_NoNamespace(t *testing.T) { } func TestPublisher_Publish_CreateNew(t *testing.T) { - // Set environment variables t.Setenv(EnvReloaderNamespace, "test-ns") t.Setenv(EnvReloaderDeploymentName, "test-deploy") @@ -193,7 +183,6 @@ func TestPublisher_Publish_CreateNew(t *testing.T) { t.Errorf("Publish() error = %v", err) } - // Verify ConfigMap was created cm := &corev1.ConfigMap{} err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) if err != nil { @@ -205,14 +194,12 @@ func TestPublisher_Publish_CreateNew(t 
*testing.T) { } func TestPublisher_Publish_UpdateExisting(t *testing.T) { - // Set environment variables t.Setenv(EnvReloaderNamespace, "test-ns") t.Setenv(EnvReloaderDeploymentName, "test-deploy") scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) - // Create existing ConfigMap with old data existingCM := &corev1.ConfigMap{} existingCM.Name = ConfigMapName existingCM.Namespace = "test-ns" @@ -234,32 +221,28 @@ func TestPublisher_Publish_UpdateExisting(t *testing.T) { t.Errorf("Publish() error = %v", err) } - // Verify ConfigMap was updated cm := &corev1.ConfigMap{} err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) if err != nil { t.Errorf("Failed to get updated ConfigMap: %v", err) } - // Check that all data keys are present if _, ok := cm.Data["buildInfo"]; !ok { t.Error("buildInfo data key missing after update") } - if _, ok := cm.Data["reloaderOptions"]; !ok { - t.Error("reloaderOptions data key missing after update") + if _, ok := cm.Data["config"]; !ok { + t.Error("config data key missing after update") } if _, ok := cm.Data["deploymentInfo"]; !ok { t.Error("deploymentInfo data key missing after update") } - // Verify labels were added if cm.Labels[ConfigMapLabelKey] != ConfigMapLabelValue { t.Errorf("Label not updated: %s", cm.Labels[ConfigMapLabelKey]) } } func TestPublishMetaInfoConfigMap(t *testing.T) { - // Set environment variables t.Setenv(EnvReloaderNamespace, "test-ns") scheme := runtime.NewScheme() @@ -274,7 +257,6 @@ func TestPublishMetaInfoConfigMap(t *testing.T) { t.Errorf("PublishMetaInfoConfigMap() error = %v", err) } - // Verify ConfigMap was created cm := &corev1.ConfigMap{} err = fakeClient.Get(ctx, client.ObjectKey{Name: ConfigMapName, Namespace: "test-ns"}, cm) if err != nil { @@ -306,17 +288,19 @@ func TestParseUTCTime(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseUTCTime(tt.input) - if tt.wantErr { - if !result.IsZero() { - 
t.Errorf("parseUTCTime(%s) should return zero time", tt.input) - } - } else { - if result.IsZero() { - t.Errorf("parseUTCTime(%s) should not return zero time", tt.input) + t.Run( + tt.name, func(t *testing.T) { + result := parseUTCTime(tt.input) + if tt.wantErr { + if !result.IsZero() { + t.Errorf("parseUTCTime(%s) should return zero time", tt.input) + } + } else { + if result.IsZero() { + t.Errorf("parseUTCTime(%s) should not return zero time", tt.input) + } } - } - }) + }, + ) } } diff --git a/internal/pkg/metrics/prometheus_test.go b/internal/pkg/metrics/prometheus_test.go index b1b851010..5715c243e 100644 --- a/internal/pkg/metrics/prometheus_test.go +++ b/internal/pkg/metrics/prometheus_test.go @@ -21,7 +21,6 @@ func TestNewCollectors_CreatesCounters(t *testing.T) { func TestNewCollectors_InitializesWithZero(t *testing.T) { collectors := NewCollectors() - // Check that success=true counter is initialized to 0 metric := &dto.Metric{} err := collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Write(metric) if err != nil { @@ -31,7 +30,6 @@ func TestNewCollectors_InitializesWithZero(t *testing.T) { t.Errorf("Initial success=true counter = %v, want 0", metric.Counter.GetValue()) } - // Check that success=false counter is initialized to 0 err = collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Write(metric) if err != nil { t.Fatalf("Failed to get metric: %v", err) @@ -95,17 +93,18 @@ func TestRecordReload_MultipleIncrements(t *testing.T) { } func TestRecordReload_WithNamespaceTracking(t *testing.T) { - // Enable namespace tracking t.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") collectors := NewCollectors() collectors.RecordReload(true, "kube-system") metric := &dto.Metric{} - err := collectors.ReloadedByNamespace.With(prometheus.Labels{ - "success": "true", - "namespace": "kube-system", - }).Write(metric) + err := collectors.ReloadedByNamespace.With( + prometheus.Labels{ + "success": "true", + "namespace": "kube-system", + }, + 
).Write(metric) if err != nil { t.Fatalf("Failed to get metric: %v", err) } @@ -115,14 +114,11 @@ func TestRecordReload_WithNamespaceTracking(t *testing.T) { } func TestRecordReload_WithoutNamespaceTracking(t *testing.T) { - // Ensure namespace tracking is disabled (t.Setenv to empty resets it) t.Setenv("METRICS_COUNT_BY_NAMESPACE", "") collectors := NewCollectors() collectors.RecordReload(true, "kube-system") - // The ReloadedByNamespace counter should not be incremented - // We can verify by checking countByNamespace is false if collectors.countByNamespace { t.Error("countByNamespace should be false when env var is not set") } @@ -131,7 +127,6 @@ func TestRecordReload_WithoutNamespaceTracking(t *testing.T) { func TestNilCollectors_NoPanic(t *testing.T) { var c *Collectors = nil - // This should not panic c.RecordReload(true, "default") c.RecordReload(false, "default") } @@ -146,11 +141,12 @@ func TestRecordReload_DifferentNamespaces(t *testing.T) { metric := &dto.Metric{} - // Check namespace-a has 2 reloads - err := collectors.ReloadedByNamespace.With(prometheus.Labels{ - "success": "true", - "namespace": "namespace-a", - }).Write(metric) + err := collectors.ReloadedByNamespace.With( + prometheus.Labels{ + "success": "true", + "namespace": "namespace-a", + }, + ).Write(metric) if err != nil { t.Fatalf("Failed to get metric: %v", err) } @@ -158,11 +154,12 @@ func TestRecordReload_DifferentNamespaces(t *testing.T) { t.Errorf("namespace-a counter = %v, want 2", metric.Counter.GetValue()) } - // Check namespace-b has 1 reload - err = collectors.ReloadedByNamespace.With(prometheus.Labels{ - "success": "true", - "namespace": "namespace-b", - }).Write(metric) + err = collectors.ReloadedByNamespace.With( + prometheus.Labels{ + "success": "true", + "namespace": "namespace-b", + }, + ).Write(metric) if err != nil { t.Fatalf("Failed to get metric: %v", err) } @@ -174,7 +171,6 @@ func TestRecordReload_DifferentNamespaces(t *testing.T) { func TestCollectors_MetricNames(t 
*testing.T) { collectors := NewCollectors() - // Verify the Reloaded metric has correct description ch := make(chan *prometheus.Desc, 10) collectors.Reloaded.Describe(ch) close(ch) diff --git a/internal/pkg/reload/decision_test.go b/internal/pkg/reload/decision_test.go index eb158d1ab..fdc011a6b 100644 --- a/internal/pkg/reload/decision_test.go +++ b/internal/pkg/reload/decision_test.go @@ -9,28 +9,33 @@ import ( ) func TestFilterDecisions(t *testing.T) { - // Create some mock workloads for testing - wl1 := workload.NewDeploymentWorkload(&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "deploy1", Namespace: "default"}, - }) - wl2 := workload.NewDeploymentWorkload(&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "deploy2", Namespace: "default"}, - }) - wl3 := workload.NewDeploymentWorkload(&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "deploy3", Namespace: "default"}, - }) + wl1 := workload.NewDeploymentWorkload( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy1", Namespace: "default"}, + }, + ) + wl2 := workload.NewDeploymentWorkload( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy2", Namespace: "default"}, + }, + ) + wl3 := workload.NewDeploymentWorkload( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy3", Namespace: "default"}, + }, + ) tests := []struct { - name string - decisions []ReloadDecision - wantCount int - wantNames []string + name string + decisions []ReloadDecision + wantCount int + wantNames []string }{ { - name: "empty list", - decisions: []ReloadDecision{}, - wantCount: 0, - wantNames: nil, + name: "empty list", + decisions: []ReloadDecision{}, + wantCount: 0, + wantNames: nil, }, { name: "all should reload", @@ -63,29 +68,35 @@ func TestFilterDecisions(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := FilterDecisions(tt.decisions) + t.Run( + tt.name, func(t *testing.T) { + result := FilterDecisions(tt.decisions) - if 
len(result) != tt.wantCount { - t.Errorf("FilterDecisions() returned %d decisions, want %d", len(result), tt.wantCount) - } + if len(result) != tt.wantCount { + t.Errorf("FilterDecisions() returned %d decisions, want %d", len(result), tt.wantCount) + } - if tt.wantNames != nil { - for i, d := range result { - if d.Workload.GetName() != tt.wantNames[i] { - t.Errorf("FilterDecisions()[%d].Workload.GetName() = %s, want %s", - i, d.Workload.GetName(), tt.wantNames[i]) + if tt.wantNames != nil { + for i, d := range result { + if d.Workload.GetName() != tt.wantNames[i] { + t.Errorf( + "FilterDecisions()[%d].Workload.GetName() = %s, want %s", + i, d.Workload.GetName(), tt.wantNames[i], + ) + } } } - } - }) + }, + ) } } func TestReloadDecision_Fields(t *testing.T) { - wl := workload.NewDeploymentWorkload(&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, - }) + wl := workload.NewDeploymentWorkload( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + }, + ) decision := ReloadDecision{ Workload: wl, diff --git a/internal/pkg/reload/hasher_test.go b/internal/pkg/reload/hasher_test.go index 92edbf34e..ff5693ff7 100644 --- a/internal/pkg/reload/hasher_test.go +++ b/internal/pkg/reload/hasher_test.go @@ -20,7 +20,6 @@ func TestHasher_HashConfigMap(t *testing.T) { Data: nil, BinaryData: nil, }, - // Empty configmap gets a valid hash (hash of empty data) wantHash: hasher.HashConfigMap(&corev1.ConfigMap{}), }, { @@ -31,13 +30,14 @@ func TestHasher_HashConfigMap(t *testing.T) { "key2": "value2", }, }, - // Hash should be deterministic - wantHash: hasher.HashConfigMap(&corev1.ConfigMap{ - Data: map[string]string{ - "key1": "value1", - "key2": "value2", + wantHash: hasher.HashConfigMap( + &corev1.ConfigMap{ + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, }, - }), + ), }, { name: "configmap with binary data", @@ -46,21 +46,25 @@ func TestHasher_HashConfigMap(t *testing.T) { "binary1": 
[]byte("binaryvalue1"), }, }, - wantHash: hasher.HashConfigMap(&corev1.ConfigMap{ - BinaryData: map[string][]byte{ - "binary1": []byte("binaryvalue1"), + wantHash: hasher.HashConfigMap( + &corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "binary1": []byte("binaryvalue1"), + }, }, - }), + ), }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := hasher.HashConfigMap(tt.cm) - if got != tt.wantHash { - t.Errorf("HashConfigMap() = %v, want %v", got, tt.wantHash) - } - }) + t.Run( + tt.name, func(t *testing.T) { + got := hasher.HashConfigMap(tt.cm) + if got != tt.wantHash { + t.Errorf("HashConfigMap() = %v, want %v", got, tt.wantHash) + } + }, + ) } } @@ -75,7 +79,6 @@ func TestHasher_HashConfigMap_Deterministic(t *testing.T) { }, } - // Hash should be the same regardless of iteration order hash1 := hasher.HashConfigMap(cm) hash2 := hasher.HashConfigMap(cm) hash3 := hasher.HashConfigMap(cm) @@ -121,7 +124,6 @@ func TestHasher_HashSecret(t *testing.T) { secret: &corev1.Secret{ Data: nil, }, - // Empty secret gets a valid hash (hash of empty data) wantHash: hasher.HashSecret(&corev1.Secret{}), }, { @@ -132,22 +134,26 @@ func TestHasher_HashSecret(t *testing.T) { "key2": []byte("value2"), }, }, - wantHash: hasher.HashSecret(&corev1.Secret{ - Data: map[string][]byte{ - "key1": []byte("value1"), - "key2": []byte("value2"), + wantHash: hasher.HashSecret( + &corev1.Secret{ + Data: map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + }, }, - }), + ), }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := hasher.HashSecret(tt.secret) - if got != tt.wantHash { - t.Errorf("HashSecret() = %v, want %v", got, tt.wantHash) - } - }) + t.Run( + tt.name, func(t *testing.T) { + got := hasher.HashSecret(tt.secret) + if got != tt.wantHash { + t.Errorf("HashSecret() = %v, want %v", got, tt.wantHash) + } + }, + ) } } @@ -162,7 +168,6 @@ func TestHasher_HashSecret_Deterministic(t *testing.T) { }, } - // Hash should be 
the same regardless of iteration order hash1 := hasher.HashSecret(secret) hash2 := hasher.HashSecret(secret) hash3 := hasher.HashSecret(secret) @@ -198,20 +203,17 @@ func TestHasher_HashSecret_DifferentValues(t *testing.T) { func TestHasher_EmptyHash(t *testing.T) { hasher := NewHasher() - // EmptyHash returns empty string to signal deletion emptyHash := hasher.EmptyHash() if emptyHash != "" { t.Errorf("EmptyHash should be empty string, got %s", emptyHash) } - // Empty ConfigMap should have a valid hash (not empty) cm := &corev1.ConfigMap{} cmHash := hasher.HashConfigMap(cm) if cmHash == "" { t.Error("Empty ConfigMap should have a non-empty hash") } - // Empty Secret should have a valid hash (not empty) secret := &corev1.Secret{} secretHash := hasher.HashSecret(secret) if secretHash == "" { @@ -222,13 +224,11 @@ func TestHasher_EmptyHash(t *testing.T) { func TestHasher_NilInput(t *testing.T) { hasher := NewHasher() - // Test nil ConfigMap - returns hash of empty content (not EmptyHash) cmHash := hasher.HashConfigMap(nil) if cmHash == "" { t.Error("nil ConfigMap should return a valid hash") } - // Test nil Secret - returns hash of empty content (not EmptyHash) secretHash := hasher.HashSecret(nil) if secretHash == "" { t.Error("nil Secret should return a valid hash") diff --git a/internal/pkg/reload/matcher_test.go b/internal/pkg/reload/matcher_test.go index 8595d0abf..1c58fd303 100644 --- a/internal/pkg/reload/matcher_test.go +++ b/internal/pkg/reload/matcher_test.go @@ -17,7 +17,6 @@ func TestMatcher_ShouldReload(t *testing.T) { wantAutoReload bool description string }{ - // Ignore annotation tests { name: "ignore annotation on resource skips reload", input: MatchInput{ @@ -46,8 +45,6 @@ func TestMatcher_ShouldReload(t *testing.T) { wantAutoReload: true, description: "Resources with ignore=false should allow reload", }, - - // Exclude annotation tests { name: "exclude annotation skips reload", input: MatchInput{ @@ -82,8 +79,6 @@ func TestMatcher_ShouldReload(t 
*testing.T) { wantAutoReload: false, description: "ConfigMaps in comma-separated exclude list should not trigger reload", }, - - // BUG FIX: Explicit annotation checked BEFORE auto { name: "explicit reload annotation with auto enabled - should reload", input: MatchInput{ @@ -133,8 +128,6 @@ func TestMatcher_ShouldReload(t *testing.T) { wantAutoReload: false, description: "ConfigMaps not in reload list should not trigger reload", }, - - // Auto annotation tests { name: "auto annotation on workload triggers reload", input: MatchInput{ @@ -205,8 +198,6 @@ func TestMatcher_ShouldReload(t *testing.T) { wantAutoReload: false, description: "ConfigMap-specific auto annotation should not match secrets", }, - - // Search/Match annotation tests { name: "search annotation with matching resource", input: MatchInput{ @@ -235,8 +226,6 @@ func TestMatcher_ShouldReload(t *testing.T) { wantAutoReload: false, description: "Search annotation without matching resource should not trigger reload", }, - - // No annotations - should not reload { name: "no annotations does not trigger reload", input: MatchInput{ @@ -251,8 +240,6 @@ func TestMatcher_ShouldReload(t *testing.T) { wantAutoReload: false, description: "Without any annotations, should not trigger reload", }, - - // Secret tests { name: "secret reload annotation", input: MatchInput{ @@ -289,19 +276,21 @@ func TestMatcher_ShouldReload(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := matcher.ShouldReload(tt.input) + t.Run( + tt.name, func(t *testing.T) { + result := matcher.ShouldReload(tt.input) - if result.ShouldReload != tt.wantReload { - t.Errorf("ShouldReload = %v, want %v (%s)", result.ShouldReload, tt.wantReload, tt.description) - } + if result.ShouldReload != tt.wantReload { + t.Errorf("ShouldReload = %v, want %v (%s)", result.ShouldReload, tt.wantReload, tt.description) + } - if result.AutoReload != tt.wantAutoReload { - t.Errorf("AutoReload = %v, want %v (%s)", 
result.AutoReload, tt.wantAutoReload, tt.description) - } + if result.AutoReload != tt.wantAutoReload { + t.Errorf("AutoReload = %v, want %v (%s)", result.AutoReload, tt.wantAutoReload, tt.description) + } - t.Logf("✓ %s", tt.description) - }) + t.Logf("✓ %s", tt.description) + }, + ) } } @@ -364,39 +353,31 @@ func TestMatcher_ShouldReload_AutoReloadAll(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := matcher.ShouldReload(tt.input) + t.Run( + tt.name, func(t *testing.T) { + result := matcher.ShouldReload(tt.input) - if result.ShouldReload != tt.wantReload { - t.Errorf("ShouldReload = %v, want %v (%s)", result.ShouldReload, tt.wantReload, tt.description) - } + if result.ShouldReload != tt.wantReload { + t.Errorf("ShouldReload = %v, want %v (%s)", result.ShouldReload, tt.wantReload, tt.description) + } - if result.AutoReload != tt.wantAutoReload { - t.Errorf("AutoReload = %v, want %v (%s)", result.AutoReload, tt.wantAutoReload, tt.description) - } + if result.AutoReload != tt.wantAutoReload { + t.Errorf("AutoReload = %v, want %v (%s)", result.AutoReload, tt.wantAutoReload, tt.description) + } - t.Logf("✓ %s", tt.description) - }) + t.Logf("✓ %s", tt.description) + }, + ) } } -// TestMatcher_BugFix_AutoDoesNotIgnoreExplicit tests the fix for the bug where +// TestMatcher_AutoDoesNotIgnoreExplicit tests the fix for the bug where // having reloader.stakater.com/auto: "true" would cause explicit reload annotations // to be ignored due to an early return. 
-func TestMatcher_BugFix_AutoDoesNotIgnoreExplicit(t *testing.T) { +func TestMatcher_AutoDoesNotIgnoreExplicit(t *testing.T) { cfg := config.NewDefault() matcher := NewMatcher(cfg) - // This is the exact scenario from the bug report: - // Workload has: - // reloader.stakater.com/auto: "true" (watches all referenced CMs) - // configmap.reloader.stakater.com/reload: "external-config" (ALSO watches this one) - // Container references: app-config - // - // When "external-config" changes: - // - Expected: Reload (explicitly listed) - // - Bug behavior: No reload (auto annotation causes early return) - input := MatchInput{ ResourceName: "external-config", // Not referenced by workload ResourceNamespace: "default", @@ -416,12 +397,11 @@ func TestMatcher_BugFix_AutoDoesNotIgnoreExplicit(t *testing.T) { t.Errorf("Expected ShouldReload=true for explicitly listed ConfigMap, got false") } - // Should be marked as non-auto since it matched the explicit list if result.AutoReload { t.Errorf("Expected AutoReload=false for explicit match, got true") } - t.Log("✓ Bug fixed: Explicit reload annotation works even when auto is enabled") + t.Log("✓ Explicit reload annotation works even when auto is enabled") } // TestMatcher_PrecedenceOrder verifies the correct order of precedence: @@ -435,54 +415,60 @@ func TestMatcher_PrecedenceOrder(t *testing.T) { cfg := config.NewDefault() matcher := NewMatcher(cfg) - t.Run("explicit takes precedence over auto", func(t *testing.T) { - input := MatchInput{ - ResourceName: "my-config", - ResourceNamespace: "default", - ResourceType: ResourceTypeConfigMap, - WorkloadAnnotations: map[string]string{ - "reloader.stakater.com/auto": "true", - "configmap.reloader.stakater.com/reload": "my-config", - }, - } - result := matcher.ShouldReload(input) - if result.AutoReload { - t.Error("Expected explicit match (AutoReload=false), got auto match") - } - if !result.ShouldReload { - t.Error("Expected ShouldReload=true") - } - }) + t.Run( + "explicit takes 
precedence over auto", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + WorkloadAnnotations: map[string]string{ + "reloader.stakater.com/auto": "true", + "configmap.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.AutoReload { + t.Error("Expected explicit match (AutoReload=false), got auto match") + } + if !result.ShouldReload { + t.Error("Expected ShouldReload=true") + } + }, + ) - t.Run("ignore takes precedence over explicit", func(t *testing.T) { - input := MatchInput{ - ResourceName: "my-config", - ResourceNamespace: "default", - ResourceType: ResourceTypeConfigMap, - ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "true"}, - WorkloadAnnotations: map[string]string{ - "configmap.reloader.stakater.com/reload": "my-config", - }, - } - result := matcher.ShouldReload(input) - if result.ShouldReload { - t.Error("Expected ignore to take precedence, but got ShouldReload=true") - } - }) + t.Run( + "ignore takes precedence over explicit", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + ResourceAnnotations: map[string]string{"reloader.stakater.com/ignore": "true"}, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.ShouldReload { + t.Error("Expected ignore to take precedence, but got ShouldReload=true") + } + }, + ) - t.Run("exclude takes precedence over explicit", func(t *testing.T) { - input := MatchInput{ - ResourceName: "my-config", - ResourceNamespace: "default", - ResourceType: ResourceTypeConfigMap, - WorkloadAnnotations: map[string]string{ - "configmap.reloader.stakater.com/reload": "my-config", - "configmaps.exclude.reloader.stakater.com/reload": "my-config", - }, - } - result := 
matcher.ShouldReload(input) - if result.ShouldReload { - t.Error("Expected exclude to take precedence, but got ShouldReload=true") - } - }) + t.Run( + "exclude takes precedence over explicit", func(t *testing.T) { + input := MatchInput{ + ResourceName: "my-config", + ResourceNamespace: "default", + ResourceType: ResourceTypeConfigMap, + WorkloadAnnotations: map[string]string{ + "configmap.reloader.stakater.com/reload": "my-config", + "configmaps.exclude.reloader.stakater.com/reload": "my-config", + }, + } + result := matcher.ShouldReload(input) + if result.ShouldReload { + t.Error("Expected exclude to take precedence, but got ShouldReload=true") + } + }, + ) } diff --git a/internal/pkg/reload/pause.go b/internal/pkg/reload/pause.go index 1d00d313d..e6e33366a 100644 --- a/internal/pkg/reload/pause.go +++ b/internal/pkg/reload/pause.go @@ -58,10 +58,8 @@ func (h *PauseHandler) ApplyPause(wl workload.WorkloadAccessor) error { deploy := deployWl.GetDeployment() - // Set paused flag deploy.Spec.Paused = true - // Set paused-at annotation if deploy.Annotations == nil { deploy.Annotations = make(map[string]string) } @@ -109,7 +107,6 @@ func (h *PauseHandler) CheckPauseExpired(deploy *appsv1.Deployment) (expired boo func (h *PauseHandler) ClearPause(deploy *appsv1.Deployment) { deploy.Spec.Paused = false delete(deploy.Annotations, h.cfg.Annotations.PausedAt) - // Keep pause-period annotation (user's config) } // IsPausedByReloader checks if a deployment was paused by Reloader. diff --git a/internal/pkg/reload/predicate.go b/internal/pkg/reload/predicate.go index 3582dd54d..f504694a3 100644 --- a/internal/pkg/reload/predicate.go +++ b/internal/pkg/reload/predicate.go @@ -33,26 +33,30 @@ func resourcePredicates(cfg *config.Config, hashFn func(old, new client.Object) // ConfigMapPredicates returns predicates for filtering ConfigMap events. 
func ConfigMapPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { - return resourcePredicates(cfg, func(old, new client.Object) (string, string, bool) { - oldCM, okOld := old.(*corev1.ConfigMap) - newCM, okNew := new.(*corev1.ConfigMap) - if !okOld || !okNew { - return "", "", false - } - return hasher.HashConfigMap(oldCM), hasher.HashConfigMap(newCM), true - }) + return resourcePredicates( + cfg, func(old, new client.Object) (string, string, bool) { + oldCM, okOld := old.(*corev1.ConfigMap) + newCM, okNew := new.(*corev1.ConfigMap) + if !okOld || !okNew { + return "", "", false + } + return hasher.HashConfigMap(oldCM), hasher.HashConfigMap(newCM), true + }, + ) } // SecretPredicates returns predicates for filtering Secret events. func SecretPredicates(cfg *config.Config, hasher *Hasher) predicate.Predicate { - return resourcePredicates(cfg, func(old, new client.Object) (string, string, bool) { - oldSecret, okOld := old.(*corev1.Secret) - newSecret, okNew := new.(*corev1.Secret) - if !okOld || !okNew { - return "", "", false - } - return hasher.HashSecret(oldSecret), hasher.HashSecret(newSecret), true - }) + return resourcePredicates( + cfg, func(old, new client.Object) (string, string, bool) { + oldSecret, okOld := old.(*corev1.Secret) + newSecret, okNew := new.(*corev1.Secret) + if !okOld || !okNew { + return "", "", false + } + return hasher.HashSecret(oldSecret), hasher.HashSecret(newSecret), true + }, + ) } // NamespaceChecker defines the interface for checking if a namespace is allowed. @@ -68,47 +72,49 @@ func NamespaceFilterPredicate(cfg *config.Config) predicate.Predicate { // NamespaceFilterPredicateWithCache returns a predicate that filters resources by namespace, // using the provided NamespaceChecker for namespace selector filtering. 
func NamespaceFilterPredicateWithCache(cfg *config.Config, nsCache NamespaceChecker) predicate.Predicate { - return predicate.NewPredicateFuncs(func(obj client.Object) bool { - namespace := obj.GetNamespace() + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + namespace := obj.GetNamespace() - // Check if namespace should be ignored - if cfg.IsNamespaceIgnored(namespace) { - return false - } + if cfg.IsNamespaceIgnored(namespace) { + return false + } - // Check namespace selector cache if provided - if nsCache != nil && !nsCache.Contains(namespace) { - return false - } + if nsCache != nil && !nsCache.Contains(namespace) { + return false + } - return true - }) + return true + }, + ) } // LabelSelectorPredicate returns a predicate that filters resources by labels. func LabelSelectorPredicate(cfg *config.Config) predicate.Predicate { if len(cfg.ResourceSelectors) == 0 { - // No selectors configured, allow all - return predicate.NewPredicateFuncs(func(obj client.Object) bool { - return true - }) + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + return true + }, + ) } - return predicate.NewPredicateFuncs(func(obj client.Object) bool { - labels := obj.GetLabels() - if labels == nil { - labels = make(map[string]string) - } + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + labels := obj.GetLabels() + if labels == nil { + labels = make(map[string]string) + } - // Check if any selector matches - for _, selector := range cfg.ResourceSelectors { - if selector.Matches(LabelsSet(labels)) { - return true + for _, selector := range cfg.ResourceSelectors { + if selector.Matches(LabelsSet(labels)) { + return true + } } - } - return false - }) + return false + }, + ) } // LabelsSet implements the k8s.io/apimachinery/pkg/labels.Labels interface @@ -128,15 +134,16 @@ func (ls LabelsSet) Get(key string) string { // IgnoreAnnotationPredicate returns a predicate that filters out resources with the ignore annotation. 
func IgnoreAnnotationPredicate(cfg *config.Config) predicate.Predicate { - return predicate.NewPredicateFuncs(func(obj client.Object) bool { - annotations := obj.GetAnnotations() - if annotations == nil { - return true - } + return predicate.NewPredicateFuncs( + func(obj client.Object) bool { + annotations := obj.GetAnnotations() + if annotations == nil { + return true + } - // Check for ignore annotation - return annotations[cfg.Annotations.Ignore] != "true" - }) + return annotations[cfg.Annotations.Ignore] != "true" + }, + ) } // CombinedPredicates combines multiple predicates with AND logic. diff --git a/internal/pkg/reload/predicate_test.go b/internal/pkg/reload/predicate_test.go index 285d367b7..1ccb0475a 100644 --- a/internal/pkg/reload/predicate_test.go +++ b/internal/pkg/reload/predicate_test.go @@ -44,25 +44,27 @@ func TestNamespaceFilterPredicate_Create(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := config.NewDefault() - cfg.IgnoredNamespaces = tt.ignoredNamespaces - predicate := NamespaceFilterPredicate(cfg) - - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: tt.eventNamespace, - }, - } - - e := event.CreateEvent{Object: cm} - got := predicate.Create(e) - - if got != tt.wantAllow { - t.Errorf("Create() = %v, want %v", got, tt.wantAllow) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = tt.ignoredNamespaces + predicate := NamespaceFilterPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.eventNamespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) } } @@ -172,36 +174,37 @@ func TestLabelSelectorPredicate_Create(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := config.NewDefault() - 
selector, err := labels.Parse(tt.selector) - if err != nil { - t.Fatalf("Failed to parse selector: %v", err) - } - cfg.ResourceSelectors = []labels.Selector{selector} - predicate := LabelSelectorPredicate(cfg) - - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - Labels: tt.objectLabels, - }, - } - - e := event.CreateEvent{Object: cm} - got := predicate.Create(e) - - if got != tt.wantAllow { - t.Errorf("Create() = %v, want %v", got, tt.wantAllow) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + selector, err := labels.Parse(tt.selector) + if err != nil { + t.Fatalf("Failed to parse selector: %v", err) + } + cfg.ResourceSelectors = []labels.Selector{selector} + predicate := LabelSelectorPredicate(cfg) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.objectLabels, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) } } func TestLabelSelectorPredicate_NoSelectors(t *testing.T) { cfg := config.NewDefault() - // No selectors configured predicate := LabelSelectorPredicate(cfg) cm := &corev1.ConfigMap{ @@ -253,22 +256,24 @@ func TestLabelSelectorPredicate_MultipleSelectors(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - Labels: tt.labels, - }, - } - - e := event.CreateEvent{Object: cm} - got := predicate.Create(e) - - if got != tt.wantAllow { - t.Errorf("Create() = %v, want %v", got, tt.wantAllow) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != 
tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) } } @@ -392,34 +397,35 @@ func TestCombinedFiltering(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: tt.namespace, - Labels: tt.labels, - }, - } - - e := event.CreateEvent{Object: cm} - - gotNS := nsPredicate.Create(e) - if gotNS != tt.wantNSAllow { - t.Errorf("Namespace predicate Create() = %v, want %v", gotNS, tt.wantNSAllow) - } - - gotLabel := labelPredicate.Create(e) - if gotLabel != tt.wantLabelAllow { - t.Errorf("Label predicate Create() = %v, want %v", gotLabel, tt.wantLabelAllow) - } - - // Both must be true for the event to pass through - combinedAllow := gotNS && gotLabel - expectedCombined := tt.wantNSAllow && tt.wantLabelAllow - if combinedAllow != expectedCombined { - t.Errorf("Combined allow = %v, want %v", combinedAllow, expectedCombined) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + + gotNS := nsPredicate.Create(e) + if gotNS != tt.wantNSAllow { + t.Errorf("Namespace predicate Create() = %v, want %v", gotNS, tt.wantNSAllow) + } + + gotLabel := labelPredicate.Create(e) + if gotLabel != tt.wantLabelAllow { + t.Errorf("Label predicate Create() = %v, want %v", gotLabel, tt.wantLabelAllow) + } + + combinedAllow := gotNS && gotLabel + expectedCombined := tt.wantNSAllow && tt.wantLabelAllow + if combinedAllow != expectedCombined { + t.Errorf("Combined allow = %v, want %v", combinedAllow, expectedCombined) + } + }, + ) } } @@ -449,7 +455,6 @@ func TestFilteringWithSecrets(t *testing.T) { func TestExistsLabelSelector(t *testing.T) { cfg := config.NewDefault() - // Selector that checks if label exists (any value) selector, _ := labels.Parse("managed") cfg.ResourceSelectors 
= []labels.Selector{selector} predicate := LabelSelectorPredicate(cfg) @@ -482,22 +487,24 @@ func TestExistsLabelSelector(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - Labels: tt.labels, - }, - } - - e := event.CreateEvent{Object: cm} - got := predicate.Create(e) - - if got != tt.wantAllow { - t.Errorf("Create() = %v, want %v", got, tt.wantAllow) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Labels: tt.labels, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) } } @@ -549,27 +556,29 @@ func TestNamespaceFilterPredicateWithCache(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := config.NewDefault() - cfg.IgnoredNamespaces = tt.ignoredNamespaces - - cache := &mockNamespaceChecker{allowed: tt.cacheAllowed} - predicate := NamespaceFilterPredicateWithCache(cfg, cache) - - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: tt.eventNamespace, - }, - } - - e := event.CreateEvent{Object: cm} - got := predicate.Create(e) - - if got != tt.wantAllow { - t.Errorf("Create() = %v, want %v", got, tt.wantAllow) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.IgnoredNamespaces = tt.ignoredNamespaces + + cache := &mockNamespaceChecker{allowed: tt.cacheAllowed} + predicate := NamespaceFilterPredicateWithCache(cfg, cache) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.eventNamespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + 
) } } @@ -577,7 +586,6 @@ func TestNamespaceFilterPredicateWithCache_NilCache(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredNamespaces = []string{"kube-system"} - // Nil cache should allow all namespaces (only check ignore list) predicate := NamespaceFilterPredicateWithCache(cfg, nil) tests := []struct { @@ -590,21 +598,23 @@ func TestNamespaceFilterPredicateWithCache_NilCache(t *testing.T) { } for _, tt := range tests { - t.Run(tt.namespace, func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: tt.namespace, - }, - } - - e := event.CreateEvent{Object: cm} - got := predicate.Create(e) - - if got != tt.wantAllow { - t.Errorf("Create() = %v, want %v for namespace %s", got, tt.wantAllow, tt.namespace) - } - }) + t.Run( + tt.namespace, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v for namespace %s", got, tt.wantAllow, tt.namespace) + } + }, + ) } } @@ -650,22 +660,24 @@ func TestIgnoreAnnotationPredicate_Create(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", - Annotations: tt.annotations, - }, - } - - e := event.CreateEvent{Object: cm} - got := predicate.Create(e) - - if got != tt.wantAllow { - t.Errorf("Create() = %v, want %v", got, tt.wantAllow) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + Annotations: tt.annotations, + }, + } + + e := event.CreateEvent{Object: cm} + got := predicate.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) } } @@ -688,7 +700,6 @@ func 
TestIgnoreAnnotationPredicate_AllEventTypes(t *testing.T) { }, } - // Test Update if predicate.Update(event.UpdateEvent{ObjectNew: ignoredCM}) { t.Error("Update() should block ignored resource") } @@ -696,7 +707,6 @@ func TestIgnoreAnnotationPredicate_AllEventTypes(t *testing.T) { t.Error("Update() should allow non-ignored resource") } - // Test Delete if predicate.Delete(event.DeleteEvent{Object: ignoredCM}) { t.Error("Delete() should block ignored resource") } @@ -704,7 +714,6 @@ func TestIgnoreAnnotationPredicate_AllEventTypes(t *testing.T) { t.Error("Delete() should allow non-ignored resource") } - // Test Generic if predicate.Generic(event.GenericEvent{Object: ignoredCM}) { t.Error("Generic() should block ignored resource") } @@ -755,22 +764,24 @@ func TestCombinedPredicates(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: tt.namespace, - Annotations: tt.annotations, - }, - } - - e := event.CreateEvent{Object: cm} - got := combined.Create(e) - - if got != tt.wantAllow { - t.Errorf("Create() = %v, want %v", got, tt.wantAllow) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: tt.namespace, + Annotations: tt.annotations, + }, + } + + e := event.CreateEvent{Object: cm} + got := combined.Create(e) + + if got != tt.wantAllow { + t.Errorf("Create() = %v, want %v", got, tt.wantAllow) + } + }, + ) } } @@ -792,13 +803,11 @@ func TestConfigMapPredicates_Update(t *testing.T) { Data: map[string]string{"key": "value2"}, } - // Same content should not trigger update e := event.UpdateEvent{ObjectOld: oldCM, ObjectNew: newCMSameContent} if predicate.Update(e) { t.Error("Update() should return false when content is the same") } - // Different content should trigger update e = event.UpdateEvent{ObjectOld: oldCM, ObjectNew: newCMDifferentContent} if 
!predicate.Update(e) { t.Error("Update() should return true when content changed") @@ -810,7 +819,6 @@ func TestConfigMapPredicates_InvalidTypes(t *testing.T) { hasher := NewHasher() predicate := ConfigMapPredicates(cfg, hasher) - // Test with non-ConfigMap types secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -818,13 +826,11 @@ func TestConfigMapPredicates_InvalidTypes(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } - // Old is secret, new is configmap - should return false e := event.UpdateEvent{ObjectOld: secret, ObjectNew: cm} if predicate.Update(e) { t.Error("Update() should return false for mismatched types") } - // Both are secrets - should return false e = event.UpdateEvent{ObjectOld: secret, ObjectNew: secret} if predicate.Update(e) { t.Error("Update() should return false for non-ConfigMap types") @@ -842,17 +848,14 @@ func TestConfigMapPredicates_CreateDeleteGeneric(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } - // Test Create if !predicate.Create(event.CreateEvent{Object: cm}) { t.Error("Create() should return true when ReloadOnCreate is true") } - // Test Delete if !predicate.Delete(event.DeleteEvent{Object: cm}) { t.Error("Delete() should return true when ReloadOnDelete is true") } - // Test Generic (should always return false) if predicate.Generic(event.GenericEvent{Object: cm}) { t.Error("Generic() should always return false") } @@ -876,13 +879,11 @@ func TestSecretPredicates_Update(t *testing.T) { Data: map[string][]byte{"key": []byte("value2")}, } - // Same content should not trigger update e := event.UpdateEvent{ObjectOld: oldSecret, ObjectNew: newSecretSameContent} if predicate.Update(e) { t.Error("Update() should return false when content is the same") } - // Different content should trigger update e = event.UpdateEvent{ObjectOld: oldSecret, ObjectNew: newSecretDifferentContent} if !predicate.Update(e) { 
t.Error("Update() should return true when content changed") @@ -894,7 +895,6 @@ func TestSecretPredicates_InvalidTypes(t *testing.T) { hasher := NewHasher() predicate := SecretPredicates(cfg, hasher) - // Test with non-Secret types cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -902,13 +902,11 @@ func TestSecretPredicates_InvalidTypes(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } - // Old is configmap, new is secret - should return false e := event.UpdateEvent{ObjectOld: cm, ObjectNew: secret} if predicate.Update(e) { t.Error("Update() should return false for mismatched types") } - // Both are configmaps - should return false e = event.UpdateEvent{ObjectOld: cm, ObjectNew: cm} if predicate.Update(e) { t.Error("Update() should return false for non-Secret types") @@ -918,7 +916,6 @@ func TestSecretPredicates_InvalidTypes(t *testing.T) { func TestLabelsSet(t *testing.T) { ls := LabelsSet{"app": "test", "env": "prod"} - // Test Has if !ls.Has("app") { t.Error("Has(app) should return true") } @@ -926,7 +923,6 @@ func TestLabelsSet(t *testing.T) { t.Error("Has(nonexistent) should return false") } - // Test Get if ls.Get("app") != "test" { t.Errorf("Get(app) = %v, want test", ls.Get("app")) } diff --git a/internal/pkg/reload/resource_type_test.go b/internal/pkg/reload/resource_type_test.go index 428f29ed7..e577e82b6 100644 --- a/internal/pkg/reload/resource_type_test.go +++ b/internal/pkg/reload/resource_type_test.go @@ -16,21 +16,13 @@ func TestResourceType_Kind(t *testing.T) { } for _, tt := range tests { - t.Run(string(tt.resourceType), func(t *testing.T) { - got := tt.resourceType.Kind() - if got != tt.want { - t.Errorf("ResourceType(%q).Kind() = %v, want %v", tt.resourceType, got, tt.want) - } - }) - } -} - -func TestResourceTypeConstants(t *testing.T) { - // Verify the constant values are as expected - if ResourceTypeConfigMap != "configmap" { - t.Errorf("ResourceTypeConfigMap = 
%v, want configmap", ResourceTypeConfigMap) - } - if ResourceTypeSecret != "secret" { - t.Errorf("ResourceTypeSecret = %v, want secret", ResourceTypeSecret) + t.Run( + string(tt.resourceType), func(t *testing.T) { + got := tt.resourceType.Kind() + if got != tt.want { + t.Errorf("ResourceType(%q).Kind() = %v, want %v", tt.resourceType, got, tt.want) + } + }, + ) } } diff --git a/internal/pkg/reload/service_test.go b/internal/pkg/reload/service_test.go index daea21ce1..cd94873fc 100644 --- a/internal/pkg/reload/service_test.go +++ b/internal/pkg/reload/service_test.go @@ -16,9 +16,11 @@ func TestService_ProcessConfigMap_AutoReload(t *testing.T) { svc := NewService(cfg) // Create a deployment with auto annotation that uses the configmap - deploy := createTestDeployment("test-deploy", "default", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy := createTestDeployment( + "test-deploy", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "config-vol", @@ -74,10 +76,11 @@ func TestService_ProcessConfigMap_ExplicitAnnotation(t *testing.T) { cfg := config.NewDefault() svc := NewService(cfg) - // Create a deployment with explicit configmap annotation - deploy := createTestDeployment("test-deploy", "default", map[string]string{ - "configmap.reloader.stakater.com/reload": "test-cm", - }) + deploy := createTestDeployment( + "test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }, + ) workloads := []workload.WorkloadAccessor{ workload.NewDeploymentWorkload(deploy), @@ -118,9 +121,11 @@ func TestService_ProcessConfigMap_IgnoredResource(t *testing.T) { svc := NewService(cfg) // Create a deployment with auto annotation - deploy := createTestDeployment("test-deploy", "default", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy := createTestDeployment( + "test-deploy", "default", map[string]string{ + 
"reloader.stakater.com/auto": "true", + }, + ) deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "config-vol", @@ -172,9 +177,11 @@ func TestService_ProcessSecret_AutoReload(t *testing.T) { svc := NewService(cfg) // Create a deployment with auto annotation that uses the secret - deploy := createTestDeployment("test-deploy", "default", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy := createTestDeployment( + "test-deploy", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "secret-vol", @@ -226,9 +233,11 @@ func TestService_ProcessConfigMap_DeleteEvent(t *testing.T) { svc := NewService(cfg) // Create a deployment with explicit configmap annotation - deploy := createTestDeployment("test-deploy", "default", map[string]string{ - "configmap.reloader.stakater.com/reload": "test-cm", - }) + deploy := createTestDeployment( + "test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }, + ) workloads := []workload.WorkloadAccessor{ workload.NewDeploymentWorkload(deploy), @@ -267,9 +276,11 @@ func TestService_ProcessConfigMap_DeleteEventDisabled(t *testing.T) { cfg.ReloadOnDelete = false // Disabled by default svc := NewService(cfg) - deploy := createTestDeployment("test-deploy", "default", map[string]string{ - "configmap.reloader.stakater.com/reload": "test-cm", - }) + deploy := createTestDeployment( + "test-deploy", "default", map[string]string{ + "configmap.reloader.stakater.com/reload": "test-cm", + }, + ) workloads := []workload.WorkloadAccessor{ workload.NewDeploymentWorkload(deploy), @@ -440,9 +451,11 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { svc := NewService(cfg) // Create multiple workloads - deploy1 := createTestDeployment("deploy1", "default", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy1 := createTestDeployment( + "deploy1", "default", 
map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) deploy1.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "config-vol", @@ -456,9 +469,11 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { }, } - deploy2 := createTestDeployment("deploy2", "default", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy2 := createTestDeployment( + "deploy2", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) deploy2.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "config-vol", @@ -473,9 +488,11 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { } // Deploy3 doesn't use the configmap - deploy3 := createTestDeployment("deploy3", "default", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy3 := createTestDeployment( + "deploy3", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) workloads := []workload.WorkloadAccessor{ workload.NewDeploymentWorkload(deploy1), @@ -521,9 +538,11 @@ func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { svc := NewService(cfg) // Create deployments in different namespaces - deploy1 := createTestDeployment("deploy1", "namespace-a", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy1 := createTestDeployment( + "deploy1", "namespace-a", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) deploy1.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "config-vol", @@ -537,9 +556,11 @@ func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { }, } - deploy2 := createTestDeployment("deploy2", "namespace-b", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy2 := createTestDeployment( + "deploy2", "namespace-b", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) deploy2.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "config-vol", @@ -624,17 +645,19 @@ func TestService_shouldProcessEvent(t 
*testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := config.NewDefault() - cfg.ReloadOnCreate = tt.reloadOnCreate - cfg.ReloadOnDelete = tt.reloadOnDelete - svc := NewService(cfg) - - result := svc.shouldProcessEvent(tt.eventType) - if result != tt.expected { - t.Errorf("shouldProcessEvent(%s) = %v, want %v", tt.eventType, result, tt.expected) - } - }) + t.Run( + tt.name, func(t *testing.T) { + cfg := config.NewDefault() + cfg.ReloadOnCreate = tt.reloadOnCreate + cfg.ReloadOnDelete = tt.reloadOnDelete + svc := NewService(cfg) + + result := svc.shouldProcessEvent(tt.eventType) + if result != tt.expected { + t.Errorf("shouldProcessEvent(%s) = %v, want %v", tt.eventType, result, tt.expected) + } + }, + ) } } @@ -713,12 +736,14 @@ func TestService_findVolumeUsingResource_ConfigMap(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := svc.findVolumeUsingResource(tt.volumes, tt.resourceName, tt.resourceType) - if got != tt.wantVolume { - t.Errorf("findVolumeUsingResource() = %q, want %q", got, tt.wantVolume) - } - }) + t.Run( + tt.name, func(t *testing.T) { + got := svc.findVolumeUsingResource(tt.volumes, tt.resourceName, tt.resourceType) + if got != tt.wantVolume { + t.Errorf("findVolumeUsingResource() = %q, want %q", got, tt.wantVolume) + } + }, + ) } } @@ -786,12 +811,14 @@ func TestService_findVolumeUsingResource_Secret(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := svc.findVolumeUsingResource(tt.volumes, tt.resourceName, ResourceTypeSecret) - if got != tt.wantVolume { - t.Errorf("findVolumeUsingResource() = %q, want %q", got, tt.wantVolume) - } - }) + t.Run( + tt.name, func(t *testing.T) { + got := svc.findVolumeUsingResource(tt.volumes, tt.resourceName, ResourceTypeSecret) + if got != tt.wantVolume { + t.Errorf("findVolumeUsingResource() = %q, want %q", got, tt.wantVolume) + } + }, + ) } } @@ -860,20 +887,22 @@ func 
TestService_findContainerWithVolumeMount(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := svc.findContainerWithVolumeMount(tt.containers, tt.volumeName) - if tt.shouldMatch { - if got == nil { - t.Error("Expected to find a container, got nil") - } else if got.Name != tt.wantName { - t.Errorf("findContainerWithVolumeMount() container name = %q, want %q", got.Name, tt.wantName) + t.Run( + tt.name, func(t *testing.T) { + got := svc.findContainerWithVolumeMount(tt.containers, tt.volumeName) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithVolumeMount() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } } - } else { - if got != nil { - t.Errorf("Expected nil, got container %q", got.Name) - } - } - }) + }, + ) } } @@ -882,11 +911,11 @@ func TestService_findContainerWithEnvRef_ConfigMap(t *testing.T) { svc := NewService(cfg) tests := []struct { - name string - containers []corev1.Container + name string + containers []corev1.Container resourceName string - wantName string - shouldMatch bool + wantName string + shouldMatch bool }{ { name: "container with ConfigMapKeyRef", @@ -935,7 +964,7 @@ func TestService_findContainerWithEnvRef_ConfigMap(t *testing.T) { Name: "app", Env: []corev1.EnvVar{ { - Name: "SIMPLE_VAR", + Name: "SIMPLE_VAR", Value: "value", }, }, @@ -960,20 +989,22 @@ func TestService_findContainerWithEnvRef_ConfigMap(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := svc.findContainerWithEnvRef(tt.containers, tt.resourceName, ResourceTypeConfigMap) - if tt.shouldMatch { - if got == nil { - t.Error("Expected to find a container, got nil") - } else if got.Name != tt.wantName { - t.Errorf("findContainerWithEnvRef() container name = %q, want %q", got.Name, tt.wantName) - } - } else 
{ - if got != nil { - t.Errorf("Expected nil, got container %q", got.Name) + t.Run( + tt.name, func(t *testing.T) { + got := svc.findContainerWithEnvRef(tt.containers, tt.resourceName, ResourceTypeConfigMap) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithEnvRef() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } } - } - }) + }, + ) } } @@ -982,11 +1013,11 @@ func TestService_findContainerWithEnvRef_Secret(t *testing.T) { svc := NewService(cfg) tests := []struct { - name string - containers []corev1.Container + name string + containers []corev1.Container resourceName string - wantName string - shouldMatch bool + wantName string + shouldMatch bool }{ { name: "container with SecretKeyRef", @@ -1047,20 +1078,22 @@ func TestService_findContainerWithEnvRef_Secret(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := svc.findContainerWithEnvRef(tt.containers, tt.resourceName, ResourceTypeSecret) - if tt.shouldMatch { - if got == nil { - t.Error("Expected to find a container, got nil") - } else if got.Name != tt.wantName { - t.Errorf("findContainerWithEnvRef() container name = %q, want %q", got.Name, tt.wantName) - } - } else { - if got != nil { - t.Errorf("Expected nil, got container %q", got.Name) + t.Run( + tt.name, func(t *testing.T) { + got := svc.findContainerWithEnvRef(tt.containers, tt.resourceName, ResourceTypeSecret) + if tt.shouldMatch { + if got == nil { + t.Error("Expected to find a container, got nil") + } else if got.Name != tt.wantName { + t.Errorf("findContainerWithEnvRef() container name = %q, want %q", got.Name, tt.wantName) + } + } else { + if got != nil { + t.Errorf("Expected nil, got container %q", got.Name) + } } - } - }) + }, + ) } } @@ -1302,9 +1335,11 @@ func TestService_ProcessCreateEventDisabled(t 
*testing.T) { cfg.ReloadOnCreate = false svc := NewService(cfg) - deploy := createTestDeployment("test", "default", map[string]string{ - "reloader.stakater.com/auto": "true", - }) + deploy := createTestDeployment( + "test", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) workloads := []workload.WorkloadAccessor{workload.NewDeploymentWorkload(deploy)} cm := &corev1.ConfigMap{ diff --git a/internal/pkg/testutil/rand.go b/internal/pkg/testutil/rand.go new file mode 100644 index 000000000..a20d8ad07 --- /dev/null +++ b/internal/pkg/testutil/rand.go @@ -0,0 +1,21 @@ +package testutil + +import ( + "math/rand" + "time" +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyz" + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// RandSeq generates a random string of the specified length. +func RandSeq(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + return string(b) +} diff --git a/internal/pkg/testutil/testutil.go b/internal/pkg/testutil/testutil.go new file mode 100644 index 000000000..9dcde17ed --- /dev/null +++ b/internal/pkg/testutil/testutil.go @@ -0,0 +1,465 @@ +package testutil + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "fmt" + "time" + + "github.com/stakater/Reloader/internal/pkg/config" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +const ( + // ConfigmapResourceType represents ConfigMap resource type + ConfigmapResourceType = "configmap" + // SecretResourceType represents Secret resource type + SecretResourceType = "secret" +) + +// CreateNamespace creates a namespace with the given name. 
+func CreateNamespace(name string, client kubernetes.Interface) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + _, err := client.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + return err +} + +// DeleteNamespace deletes the namespace with the given name. +func DeleteNamespace(name string, client kubernetes.Interface) error { + return client.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateConfigMap creates a ConfigMap with the given name and data. +func CreateConfigMap(client kubernetes.Interface, namespace, name, data string) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string]string{ + "url": data, + }, + } + return client.CoreV1().ConfigMaps(namespace).Create(context.Background(), cm, metav1.CreateOptions{}) +} + +// UpdateConfigMap updates the ConfigMap with new label and/or data. +func UpdateConfigMap(cm *corev1.ConfigMap, namespace, name, label, data string) error { + if label != "" { + if cm.Labels == nil { + cm.Labels = make(map[string]string) + } + cm.Labels["test-label"] = label + } + if data != "" { + cm.Data["url"] = data + } + // Note: caller must have a client to update + return nil +} + +// UpdateConfigMapWithClient updates the ConfigMap with new label and/or data. 
+func UpdateConfigMapWithClient(client kubernetes.Interface, namespace, name, label, data string) error { + ctx := context.Background() + cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + if label != "" { + if cm.Labels == nil { + cm.Labels = make(map[string]string) + } + cm.Labels["test-label"] = label + } + if data != "" { + cm.Data["url"] = data + } + _, err = client.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) + return err +} + +// DeleteConfigMap deletes the ConfigMap with the given name. +func DeleteConfigMap(client kubernetes.Interface, namespace, name string) error { + return client.CoreV1().ConfigMaps(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateSecret creates a Secret with the given name and data. +func CreateSecret(client kubernetes.Interface, namespace, name, data string) (*corev1.Secret, error) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{ + "password": []byte(data), + }, + } + return client.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) +} + +// UpdateSecretWithClient updates the Secret with new label and/or data. +func UpdateSecretWithClient(client kubernetes.Interface, namespace, name, label, data string) error { + ctx := context.Background() + secret, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + if label != "" { + if secret.Labels == nil { + secret.Labels = make(map[string]string) + } + secret.Labels["test-label"] = label + } + if data != "" { + secret.Data["password"] = []byte(data) + } + _, err = client.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) + return err +} + +// DeleteSecret deletes the Secret with the given name. 
+func DeleteSecret(client kubernetes.Interface, namespace, name string) error { + return client.CoreV1().Secrets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateDeployment creates a Deployment that references a ConfigMap/Secret. +func CreateDeployment(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.Deployment, error) { + replicas := int32(1) + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox:1.36", + Command: []string{"sh", "-c", "while true; do sleep 3600; done"}, + }, + }, + }, + }, + }, + } + + if useConfigMap { + deployment.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + } else { + deployment.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + } + + return client.AppsV1().Deployments(namespace).Create(context.Background(), deployment, metav1.CreateOptions{}) +} + +// DeleteDeployment deletes the Deployment with the given name. +func DeleteDeployment(client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().Deployments(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateDaemonSet creates a DaemonSet that references a ConfigMap/Secret. 
+func CreateDaemonSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.DaemonSet, error) { + daemonset := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox:1.36", + Command: []string{"sh", "-c", "while true; do sleep 3600; done"}, + }, + }, + }, + }, + }, + } + + if useConfigMap { + daemonset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + } else { + daemonset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + } + + return client.AppsV1().DaemonSets(namespace).Create(context.Background(), daemonset, metav1.CreateOptions{}) +} + +// DeleteDaemonSet deletes the DaemonSet with the given name. +func DeleteDaemonSet(client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().DaemonSets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateStatefulSet creates a StatefulSet that references a ConfigMap/Secret. 
+func CreateStatefulSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.StatefulSet, error) { + replicas := int32(1) + statefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + ServiceName: name, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox:1.36", + Command: []string{"sh", "-c", "while true; do sleep 3600; done"}, + }, + }, + }, + }, + }, + } + + if useConfigMap { + statefulset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + } else { + statefulset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + } + + return client.AppsV1().StatefulSets(namespace).Create(context.Background(), statefulset, metav1.CreateOptions{}) +} + +// DeleteStatefulSet deletes the StatefulSet with the given name. +func DeleteStatefulSet(client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().StatefulSets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// CreateCronJob creates a CronJob that references a ConfigMap/Secret. 
+func CreateCronJob(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*batchv1.CronJob, error) { + cronjob := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "*/5 * * * *", + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox:1.36", + Command: []string{"sh", "-c", "echo hello"}, + }, + }, + }, + }, + }, + }, + }, + } + + if useConfigMap { + cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + } else { + cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + } + + return client.BatchV1().CronJobs(namespace).Create(context.Background(), cronjob, metav1.CreateOptions{}) +} + +// DeleteCronJob deletes the CronJob with the given name. +func DeleteCronJob(client kubernetes.Interface, namespace, name string) error { + return client.BatchV1().CronJobs(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// ConvertResourceToSHA converts a resource data to SHA256 hash. +func ConvertResourceToSHA(resourceType, namespace, name, data string) string { + content := fmt.Sprintf("%s/%s/%s:%s", resourceType, namespace, name, data) + hash := sha256.Sum256([]byte(content)) + return base64.StdEncoding.EncodeToString(hash[:]) +} + +// WaitForDeploymentAnnotation waits for a deployment to have the specified annotation value. 
+func WaitForDeploymentAnnotation(client kubernetes.Interface, namespace, name, annotation, expectedValue string, timeout time.Duration) error { + return wait.PollImmediate(time.Second, timeout, func() (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + value, ok := deployment.Spec.Template.Annotations[annotation] + if !ok { + return false, nil // Keep waiting + } + return value == expectedValue, nil + }) +} + +// WaitForDeploymentReloadedAnnotation waits for a deployment to have any reloaded annotation. +func WaitForDeploymentReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + // Check for the last-reloaded-from annotation in pod template + if deployment.Spec.Template.Annotations != nil { + if _, ok := deployment.Spec.Template.Annotations[cfg.Annotations.LastReloadedFrom]; ok { + found = true + return true, nil + } + } + return false, nil + }) + if err == wait.ErrWaitTimeout { + return found, nil + } + return found, err +} + +// WaitForDaemonSetReloadedAnnotation waits for a daemonset to have any reloaded annotation. 
+func WaitForDaemonSetReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { + daemonset, err := client.AppsV1().DaemonSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + // Check for the last-reloaded-from annotation in pod template + if daemonset.Spec.Template.Annotations != nil { + if _, ok := daemonset.Spec.Template.Annotations[cfg.Annotations.LastReloadedFrom]; ok { + found = true + return true, nil + } + } + return false, nil + }) + if err == wait.ErrWaitTimeout { + return found, nil + } + return found, err +} + +// WaitForStatefulSetReloadedAnnotation waits for a statefulset to have any reloaded annotation. +func WaitForStatefulSetReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { + statefulset, err := client.AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + // Check for the last-reloaded-from annotation in pod template + if statefulset.Spec.Template.Annotations != nil { + if _, ok := statefulset.Spec.Template.Annotations[cfg.Annotations.LastReloadedFrom]; ok { + found = true + return true, nil + } + } + return false, nil + }) + if err == wait.ErrWaitTimeout { + return found, nil + } + return found, err +} diff --git a/internal/pkg/workload/registry.go b/internal/pkg/workload/registry.go index 920a7eabd..696b11c55 100644 --- a/internal/pkg/workload/registry.go +++ b/internal/pkg/workload/registry.go @@ -84,18 +84,18 @@ func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { // kindAliases maps string representations to Kind constants. 
// Supports lowercase, title case, and plural forms for user convenience. var kindAliases = map[string]Kind{ - "deployment": KindDeployment, - "deployments": KindDeployment, - "daemonset": KindDaemonSet, - "daemonsets": KindDaemonSet, - "statefulset": KindStatefulSet, + "deployment": KindDeployment, + "deployments": KindDeployment, + "daemonset": KindDaemonSet, + "daemonsets": KindDaemonSet, + "statefulset": KindStatefulSet, "statefulsets": KindStatefulSet, - "rollout": KindArgoRollout, - "rollouts": KindArgoRollout, - "job": KindJob, - "jobs": KindJob, - "cronjob": KindCronJob, - "cronjobs": KindCronJob, + "rollout": KindArgoRollout, + "rollouts": KindArgoRollout, + "job": KindJob, + "jobs": KindJob, + "cronjob": KindCronJob, + "cronjobs": KindCronJob, } // KindFromString converts a string to a Kind. diff --git a/internal/pkg/workload/registry_test.go b/internal/pkg/workload/registry_test.go index 0bb47d14a..4ebfb870c 100644 --- a/internal/pkg/workload/registry_test.go +++ b/internal/pkg/workload/registry_test.go @@ -61,8 +61,8 @@ func TestRegistry_ListerFor_AllKinds(t *testing.T) { r := NewRegistry(true) tests := []struct { - kind Kind - wantNil bool + kind Kind + wantNil bool }{ {KindDeployment, false}, {KindDaemonSet, false}, diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go new file mode 100644 index 000000000..32106d49f --- /dev/null +++ b/test/e2e/e2e_test.go @@ -0,0 +1,519 @@ +// Package e2e contains end-to-end tests for Reloader. +// These tests run against a real Kubernetes cluster (or envtest). +// +// To run these tests against a real cluster: +// +// KUBECONFIG=~/.kube/config go test -v ./test/e2e/... -count=1 +// +// To skip these tests when running unit tests: +// +// go test -v ./... 
-short +package e2e + +import ( + "context" + "flag" + "log" + "os" + "testing" + "time" + + "github.com/go-logr/zerologr" + "github.com/rs/zerolog" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/testutil" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + ctrl "sigs.k8s.io/controller-runtime" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + testNamespacePrefix = "test-reloader-e2e-" + waitTimeout = 30 * time.Second + setupDelay = 2 * time.Second + negativeTestTimeout = 5 * time.Second +) + +var ( + k8sClient kubernetes.Interface + cfg *config.Config + namespace string + skipE2ETests bool + cancelManager context.CancelFunc + restCfg *rest.Config +) + +// testFixture provides a clean way to set up and tear down test resources. +type testFixture struct { + t *testing.T + name string + configMaps []string + secrets []string + workloads []workloadInfo +} + +type workloadInfo struct { + name string + kind string // "deployment", "daemonset", "statefulset" +} + +// newFixture creates a new test fixture with a unique name prefix. +func newFixture(t *testing.T, prefix string) *testFixture { + t.Helper() + skipIfNoCluster(t) + return &testFixture{ + t: t, + name: prefix + "-" + testutil.RandSeq(5), + } +} + +// createConfigMap creates a ConfigMap and registers it for cleanup. +func (f *testFixture) createConfigMap(name, data string) { + f.t.Helper() + _, err := testutil.CreateConfigMap(k8sClient, namespace, name, data) + if err != nil { + f.t.Fatalf("Failed to create ConfigMap %s: %v", name, err) + } + f.configMaps = append(f.configMaps, name) +} + +// createSecret creates a Secret and registers it for cleanup. 
+func (f *testFixture) createSecret(name, data string) { + f.t.Helper() + _, err := testutil.CreateSecret(k8sClient, namespace, name, data) + if err != nil { + f.t.Fatalf("Failed to create Secret %s: %v", name, err) + } + f.secrets = append(f.secrets, name) +} + +// createDeployment creates a Deployment and registers it for cleanup. +func (f *testFixture) createDeployment(name string, useConfigMap bool, annotations map[string]string) { + f.t.Helper() + _, err := testutil.CreateDeployment(k8sClient, name, namespace, useConfigMap, annotations) + if err != nil { + f.t.Fatalf("Failed to create Deployment %s: %v", name, err) + } + f.workloads = append(f.workloads, workloadInfo{name: name, kind: "deployment"}) +} + +// createDaemonSet creates a DaemonSet and registers it for cleanup. +func (f *testFixture) createDaemonSet(name string, useConfigMap bool, annotations map[string]string) { + f.t.Helper() + _, err := testutil.CreateDaemonSet(k8sClient, name, namespace, useConfigMap, annotations) + if err != nil { + f.t.Fatalf("Failed to create DaemonSet %s: %v", name, err) + } + f.workloads = append(f.workloads, workloadInfo{name: name, kind: "daemonset"}) +} + +// createStatefulSet creates a StatefulSet and registers it for cleanup. +func (f *testFixture) createStatefulSet(name string, useConfigMap bool, annotations map[string]string) { + f.t.Helper() + _, err := testutil.CreateStatefulSet(k8sClient, name, namespace, useConfigMap, annotations) + if err != nil { + f.t.Fatalf("Failed to create StatefulSet %s: %v", name, err) + } + f.workloads = append(f.workloads, workloadInfo{name: name, kind: "statefulset"}) +} + +// waitForReady waits for all workloads to be ready. +func (f *testFixture) waitForReady() { + time.Sleep(setupDelay) +} + +// updateConfigMap updates a ConfigMap's data. 
+func (f *testFixture) updateConfigMap(name, data string) { + f.t.Helper() + if err := testutil.UpdateConfigMapWithClient(k8sClient, namespace, name, "", data); err != nil { + f.t.Fatalf("Failed to update ConfigMap %s: %v", name, err) + } +} + +// updateConfigMapLabel updates only a ConfigMap's label (not data). +func (f *testFixture) updateConfigMapLabel(name, label string) { + f.t.Helper() + // Get current data first + cm, err := k8sClient.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + f.t.Fatalf("Failed to get ConfigMap %s: %v", name, err) + } + data := cm.Data["url"] + if err := testutil.UpdateConfigMapWithClient(k8sClient, namespace, name, label, data); err != nil { + f.t.Fatalf("Failed to update ConfigMap label %s: %v", name, err) + } +} + +// updateSecret updates a Secret's data. +func (f *testFixture) updateSecret(name, data string) { + f.t.Helper() + if err := testutil.UpdateSecretWithClient(k8sClient, namespace, name, "", data); err != nil { + f.t.Fatalf("Failed to update Secret %s: %v", name, err) + } +} + +// assertDeploymentReloaded asserts that a deployment was reloaded. +func (f *testFixture) assertDeploymentReloaded(name string, testCfg *config.Config) { + f.t.Helper() + if testCfg == nil { + testCfg = cfg + } + updated, err := testutil.WaitForDeploymentReloadedAnnotation(k8sClient, namespace, name, testCfg, waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for deployment %s update: %v", name, err) + } + if !updated { + f.t.Errorf("Deployment %s was not updated after resource change", name) + } +} + +// assertDeploymentNotReloaded asserts that a deployment was NOT reloaded. 
+func (f *testFixture) assertDeploymentNotReloaded(name string, testCfg *config.Config) { + f.t.Helper() + if testCfg == nil { + testCfg = cfg + } + time.Sleep(negativeTestTimeout) + updated, _ := testutil.WaitForDeploymentReloadedAnnotation(k8sClient, namespace, name, testCfg, negativeTestTimeout) + if updated { + f.t.Errorf("Deployment %s should not have been updated", name) + } +} + +// assertDaemonSetReloaded asserts that a daemonset was reloaded. +func (f *testFixture) assertDaemonSetReloaded(name string) { + f.t.Helper() + updated, err := testutil.WaitForDaemonSetReloadedAnnotation(k8sClient, namespace, name, cfg, waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for daemonset %s update: %v", name, err) + } + if !updated { + f.t.Errorf("DaemonSet %s was not updated after resource change", name) + } +} + +// assertStatefulSetReloaded asserts that a statefulset was reloaded. +func (f *testFixture) assertStatefulSetReloaded(name string) { + f.t.Helper() + updated, err := testutil.WaitForStatefulSetReloadedAnnotation(k8sClient, namespace, name, cfg, waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for statefulset %s update: %v", name, err) + } + if !updated { + f.t.Errorf("StatefulSet %s was not updated after resource change", name) + } +} + +// cleanup removes all created resources. 
+func (f *testFixture) cleanup() { + for _, w := range f.workloads { + switch w.kind { + case "deployment": + _ = testutil.DeleteDeployment(k8sClient, namespace, w.name) + case "daemonset": + _ = testutil.DeleteDaemonSet(k8sClient, namespace, w.name) + case "statefulset": + _ = testutil.DeleteStatefulSet(k8sClient, namespace, w.name) + } + } + for _, name := range f.configMaps { + _ = testutil.DeleteConfigMap(k8sClient, namespace, name) + } + for _, name := range f.secrets { + _ = testutil.DeleteSecret(k8sClient, namespace, name) + } +} + +func TestMain(m *testing.M) { + flag.Parse() + + if testing.Short() { + os.Exit(0) + } + + // Set up zerolog as the controller-runtime logger + zl := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}). + Level(zerolog.WarnLevel). + With(). + Timestamp(). + Logger() + ctrllog.SetLogger(zerologr.New(&zl)) + + kubeconfig := os.Getenv("KUBECONFIG") + if kubeconfig == "" { + kubeconfig = os.Getenv("HOME") + "/.kube/config" + } + + var err error + restCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + skipE2ETests = true + os.Exit(0) + } + + k8sClient, err = kubernetes.NewForConfig(restCfg) + if err != nil { + skipE2ETests = true + os.Exit(0) + } + + if _, err = k8sClient.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}); err != nil { + skipE2ETests = true + os.Exit(0) + } + + namespace = testNamespacePrefix + testutil.RandSeq(5) + if err := testutil.CreateNamespace(namespace, k8sClient); err != nil { + panic(err) + } + + cfg = config.NewDefault() + cfg.AutoReloadAll = false + + _, cancelManager = startManagerWithConfig(cfg, restCfg) + + code := m.Run() + + if cancelManager != nil { + cancelManager() + time.Sleep(2 * time.Second) + } + + _ = testutil.DeleteNamespace(namespace, k8sClient) + os.Exit(code) +} + +func skipIfNoCluster(t *testing.T) { + if skipE2ETests { + t.Skip("Skipping e2e test: no Kubernetes cluster available") + } +} + +// TestConfigMapUpdate 
tests that updating a ConfigMap triggers a workload reload. +func TestConfigMapUpdate(t *testing.T) { + f := newFixture(t, "cm-update") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment(f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestSecretUpdate tests that updating a Secret triggers a workload reload. +func TestSecretUpdate(t *testing.T) { + f := newFixture(t, "secret-update") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createDeployment(f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestAutoReloadAll tests the auto-reload-all feature. +func TestAutoReloadAll(t *testing.T) { + f := newFixture(t, "auto-reload") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment(f.name, true, map[string]string{ + cfg.Annotations.Auto: "true", + }) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestDaemonSetReload tests that DaemonSets are reloaded when ConfigMaps change. +func TestDaemonSetReload(t *testing.T) { + f := newFixture(t, "ds-reload") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDaemonSet(f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDaemonSetReloaded(f.name) +} + +// TestStatefulSetReload tests that StatefulSets are reloaded when Secrets change. 
+func TestStatefulSetReload(t *testing.T) { + f := newFixture(t, "sts-reload") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createStatefulSet(f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret") + f.assertStatefulSetReloaded(f.name) +} + +// TestLabelOnlyChange tests that label-only changes don't trigger reloads. +func TestLabelOnlyChange(t *testing.T) { + f := newFixture(t, "label-only") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment(f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }) + f.waitForReady() + + f.updateConfigMapLabel(f.name, "new-label") + f.assertDeploymentNotReloaded(f.name, nil) +} + +// TestMultipleConfigMaps tests watching multiple ConfigMaps in a single annotation. +func TestMultipleConfigMaps(t *testing.T) { + f := newFixture(t, "multi-cm") + defer f.cleanup() + + cm1 := f.name + "-a" + cm2 := f.name + "-b" + + f.createConfigMap(cm1, "data-a") + f.createConfigMap(cm2, "data-b") + f.createDeployment(f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: cm1 + "," + cm2, + }) + f.waitForReady() + + f.updateConfigMap(cm1, "updated-data-a") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestAutoAnnotationDisabled tests that auto: "false" disables auto-reload. +func TestAutoAnnotationDisabled(t *testing.T) { + f := newFixture(t, "auto-disabled") + defer f.cleanup() + + testCfg := config.NewDefault() + testCfg.AutoReloadAll = true + + f.createConfigMap(f.name, "initial-data") + f.createDeployment(f.name, true, map[string]string{ + testCfg.Annotations.Auto: "false", + }) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentNotReloaded(f.name, testCfg) +} + +// TestAutoWithExplicitConfigMapAnnotation tests that a deployment with auto=true +// also reloads when an explicitly annotated (non-referenced) ConfigMap changes. 
+func TestAutoWithExplicitConfigMapAnnotation(t *testing.T) { + f := newFixture(t, "auto-explicit-cm") + defer f.cleanup() + + referencedCM := f.name + "-ref" + explicitCM := f.name + "-explicit" + + f.createConfigMap(referencedCM, "referenced-data") + f.createConfigMap(explicitCM, "explicit-data") + f.createDeployment(referencedCM, true, map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.ConfigmapReload: explicitCM, + }) + f.waitForReady() + + // Update the EXPLICIT ConfigMap (not the referenced one) + f.updateConfigMap(explicitCM, "updated-explicit-data") + f.assertDeploymentReloaded(referencedCM, nil) +} + +// TestAutoWithExplicitSecretAnnotation tests that a deployment with auto=true +// also reloads when an explicitly annotated (non-referenced) Secret changes. +func TestAutoWithExplicitSecretAnnotation(t *testing.T) { + f := newFixture(t, "auto-explicit-secret") + defer f.cleanup() + + referencedSecret := f.name + "-ref" + explicitSecret := f.name + "-explicit" + + f.createSecret(referencedSecret, "referenced-secret") + f.createSecret(explicitSecret, "explicit-secret") + f.createDeployment(referencedSecret, false, map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.SecretReload: explicitSecret, + }) + f.waitForReady() + + // Update the EXPLICIT Secret (not the referenced one) + f.updateSecret(explicitSecret, "updated-explicit-secret") + f.assertDeploymentReloaded(referencedSecret, nil) +} + +// TestAutoWithBothExplicitAndReferencedChange tests that auto + explicit annotations +// work correctly when the referenced resource changes. 
+func TestAutoWithBothExplicitAndReferencedChange(t *testing.T) { + f := newFixture(t, "auto-both") + defer f.cleanup() + + referencedCM := f.name + "-ref" + explicitCM := f.name + "-explicit" + + f.createConfigMap(referencedCM, "referenced-data") + f.createConfigMap(explicitCM, "explicit-data") + f.createDeployment(referencedCM, true, map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.ConfigmapReload: explicitCM, + }) + f.waitForReady() + + // Update the REFERENCED ConfigMap - should trigger reload via auto + f.updateConfigMap(referencedCM, "updated-referenced-data") + f.assertDeploymentReloaded(referencedCM, nil) +} + +// startManagerWithConfig creates and starts a controller-runtime manager for e2e testing. +func startManagerWithConfig(cfg *config.Config, restConfig *rest.Config) (manager.Manager, context.CancelFunc) { + collectors := metrics.NewCollectors() + mgr, err := controller.NewManagerWithRestConfig( + controller.ManagerOptions{ + Config: cfg, + Log: ctrl.Log.WithName("test-manager"), + Collectors: &collectors, + }, restConfig, + ) + if err != nil { + log.Fatalf("Failed to create manager: %v", err) + } + + if err := controller.SetupReconcilers(mgr, cfg, ctrl.Log.WithName("test-reconcilers"), &collectors); err != nil { + log.Fatalf("Failed to setup reconcilers: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + if err := controller.RunManager(ctx, mgr, ctrl.Log.WithName("test-runner")); err != nil { + log.Printf("Manager exited: %v", err) + } + }() + + time.Sleep(3 * time.Second) + return mgr, cancel +} From ced6ffabd122d001c86398374094e1b53c636afe Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:47:59 +0100 Subject: [PATCH 20/35] refactor: Move test helpers to testutil --- .../controller/configmap_reconciler_test.go | 53 +-- .../controller/namespace_reconciler_test.go | 9 +- internal/pkg/controller/retry_test.go | 
21 +- .../pkg/controller/secret_reconciler_test.go | 57 +-- internal/pkg/controller/test_helpers_test.go | 334 +----------------- internal/pkg/reload/service_test.go | 82 ++--- internal/pkg/testutil/fixtures.go | 286 +++++++++++++++ internal/pkg/testutil/testutil.go | 174 ++------- internal/pkg/workload/registry_test.go | 4 - test/e2e/e2e_test.go | 107 +++--- 10 files changed, 486 insertions(+), 641 deletions(-) create mode 100644 internal/pkg/testutil/fixtures.go diff --git a/internal/pkg/controller/configmap_reconciler_test.go b/internal/pkg/controller/configmap_reconciler_test.go index ad457d987..1b1140577 100644 --- a/internal/pkg/controller/configmap_reconciler_test.go +++ b/internal/pkg/controller/configmap_reconciler_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/testutil" ) func TestConfigMapReconciler_NotFound(t *testing.T) { @@ -16,7 +17,7 @@ func TestConfigMapReconciler_NotFound_ReloadOnDelete(t *testing.T) { cfg := config.NewDefault() cfg.ReloadOnDelete = true - deployment := testDeployment("test-deployment", "default", map[string]string{ + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ cfg.Annotations.ConfigmapReload: "deleted-cm", }) reconciler := newConfigMapReconciler(t, cfg, deployment) @@ -27,7 +28,7 @@ func TestConfigMapReconciler_IgnoredNamespace(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredNamespaces = []string{"kube-system"} - cm := testConfigMap("test-cm", "kube-system") + cm := testutil.NewConfigMap("test-cm", "kube-system") reconciler := newConfigMapReconciler(t, cfg, cm) assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "kube-system")) } @@ -35,8 +36,8 @@ func TestConfigMapReconciler_IgnoredNamespace(t *testing.T) { func TestConfigMapReconciler_NoMatchingWorkloads(t *testing.T) { cfg := config.NewDefault() - cm := testConfigMap("test-cm", "default") - deployment := 
testDeployment("test-deployment", "default", nil) + cm := testutil.NewConfigMap("test-cm", "default") + deployment := testutil.NewDeployment("test-deployment", "default", nil) reconciler := newConfigMapReconciler(t, cfg, cm, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } @@ -45,8 +46,8 @@ func TestConfigMapReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - cm := testConfigMap("test-cm", "default") - deployment := testDeploymentWithEnvFrom("test-deployment", "default", "test-cm", "") + cm := testutil.NewConfigMap("test-cm", "default") + deployment := testutil.NewDeploymentWithEnvFrom("test-deployment", "default", "test-cm", "") reconciler := newConfigMapReconciler(t, cfg, cm, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("test-cm", "default")) } @@ -54,8 +55,8 @@ func TestConfigMapReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { func TestConfigMapReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { cfg := config.NewDefault() - cm := testConfigMap("test-cm", "default") - deployment := testDeployment("test-deployment", "default", map[string]string{ + cm := testutil.NewConfigMap("test-cm", "default") + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ cfg.Annotations.ConfigmapReload: "test-cm", }) reconciler := newConfigMapReconciler(t, cfg, cm, deployment) @@ -65,8 +66,8 @@ func TestConfigMapReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) func TestConfigMapReconciler_WorkloadInDifferentNamespace(t *testing.T) { cfg := config.NewDefault() - cm := testConfigMap("test-cm", "namespace-a") - deployment := testDeployment("test-deployment", "namespace-b", map[string]string{ + cm := testutil.NewConfigMap("test-cm", "namespace-a") + deployment := testutil.NewDeployment("test-deployment", "namespace-b", map[string]string{ cfg.Annotations.ConfigmapReload: "test-cm", }) 
reconciler := newConfigMapReconciler(t, cfg, cm, deployment) @@ -77,8 +78,8 @@ func TestConfigMapReconciler_IgnoredWorkloadType(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredWorkloads = []string{"deployment"} - cm := testConfigMap("test-cm", "default") - deployment := testDeployment("test-deployment", "default", map[string]string{ + cm := testutil.NewConfigMap("test-cm", "default") + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ cfg.Annotations.ConfigmapReload: "test-cm", }) reconciler := newConfigMapReconciler(t, cfg, cm, deployment) @@ -88,8 +89,8 @@ func TestConfigMapReconciler_IgnoredWorkloadType(t *testing.T) { func TestConfigMapReconciler_DaemonSet(t *testing.T) { cfg := config.NewDefault() - cm := testConfigMap("test-cm", "default") - daemonset := testDaemonSet("test-daemonset", "default", map[string]string{ + cm := testutil.NewConfigMap("test-cm", "default") + daemonset := testutil.NewDaemonSet("test-daemonset", "default", map[string]string{ cfg.Annotations.ConfigmapReload: "test-cm", }) reconciler := newConfigMapReconciler(t, cfg, cm, daemonset) @@ -99,8 +100,8 @@ func TestConfigMapReconciler_DaemonSet(t *testing.T) { func TestConfigMapReconciler_StatefulSet(t *testing.T) { cfg := config.NewDefault() - cm := testConfigMap("test-cm", "default") - statefulset := testStatefulSet("test-statefulset", "default", map[string]string{ + cm := testutil.NewConfigMap("test-cm", "default") + statefulset := testutil.NewStatefulSet("test-statefulset", "default", map[string]string{ cfg.Annotations.ConfigmapReload: "test-cm", }) reconciler := newConfigMapReconciler(t, cfg, cm, statefulset) @@ -110,14 +111,14 @@ func TestConfigMapReconciler_StatefulSet(t *testing.T) { func TestConfigMapReconciler_MultipleWorkloads(t *testing.T) { cfg := config.NewDefault() - cm := testConfigMap("shared-cm", "default") - deployment1 := testDeployment("deployment-1", "default", map[string]string{ + cm := testutil.NewConfigMap("shared-cm", 
"default") + deployment1 := testutil.NewDeployment("deployment-1", "default", map[string]string{ cfg.Annotations.ConfigmapReload: "shared-cm", }) - deployment2 := testDeployment("deployment-2", "default", map[string]string{ + deployment2 := testutil.NewDeployment("deployment-2", "default", map[string]string{ cfg.Annotations.ConfigmapReload: "shared-cm", }) - daemonset := testDaemonSet("daemonset-1", "default", map[string]string{ + daemonset := testutil.NewDaemonSet("daemonset-1", "default", map[string]string{ cfg.Annotations.ConfigmapReload: "shared-cm", }) @@ -129,8 +130,8 @@ func TestConfigMapReconciler_VolumeMount(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - cm := testConfigMap("volume-cm", "default") - deployment := testDeploymentWithVolume("test-deployment", "default", "volume-cm", "") + cm := testutil.NewConfigMap("volume-cm", "default") + deployment := testutil.NewDeploymentWithVolume("test-deployment", "default", "volume-cm", "") reconciler := newConfigMapReconciler(t, cfg, cm, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("volume-cm", "default")) } @@ -139,8 +140,8 @@ func TestConfigMapReconciler_ProjectedVolume(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - cm := testConfigMap("projected-cm", "default") - deployment := testDeploymentWithProjectedVolume("test-deployment", "default", "projected-cm", "") + cm := testutil.NewConfigMap("projected-cm", "default") + deployment := testutil.NewDeploymentWithProjectedVolume("test-deployment", "default", "projected-cm", "") reconciler := newConfigMapReconciler(t, cfg, cm, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("projected-cm", "default")) } @@ -148,10 +149,10 @@ func TestConfigMapReconciler_ProjectedVolume(t *testing.T) { func TestConfigMapReconciler_SearchAnnotation(t *testing.T) { cfg := config.NewDefault() - cm := testConfigMapWithAnnotations("test-cm", "default", map[string]string{ + cm := 
testutil.NewConfigMapWithAnnotations("test-cm", "default", map[string]string{ cfg.Annotations.Match: "true", }) - deployment := testDeployment("test-deployment", "default", map[string]string{ + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ cfg.Annotations.Search: "true", }) reconciler := newConfigMapReconciler(t, cfg, cm, deployment) diff --git a/internal/pkg/controller/namespace_reconciler_test.go b/internal/pkg/controller/namespace_reconciler_test.go index 3d4fcc657..516b58161 100644 --- a/internal/pkg/controller/namespace_reconciler_test.go +++ b/internal/pkg/controller/namespace_reconciler_test.go @@ -5,6 +5,7 @@ import ( "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/testutil" "k8s.io/apimachinery/pkg/labels" ) @@ -70,7 +71,7 @@ func TestNamespaceReconciler_Add(t *testing.T) { cfg.NamespaceSelectors = []labels.Selector{selector} cache := controller.NewNamespaceCache(true) - ns := testNamespace("test-ns", map[string]string{"env": "production"}) + ns := testutil.NewNamespace("test-ns", map[string]string{"env": "production"}) reconciler := newNamespaceReconciler(t, cfg, cache, ns) assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) @@ -88,7 +89,7 @@ func TestNamespaceReconciler_Remove_LabelChange(t *testing.T) { cache := controller.NewNamespaceCache(true) cache.Add("test-ns") // Pre-populate - ns := testNamespace("test-ns", map[string]string{"env": "staging"}) // Non-matching + ns := testutil.NewNamespace("test-ns", map[string]string{"env": "staging"}) // Non-matching reconciler := newNamespaceReconciler(t, cfg, cache, ns) assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) @@ -122,7 +123,7 @@ func TestNamespaceReconciler_MultipleSelectors(t *testing.T) { cfg.NamespaceSelectors = []labels.Selector{selector1, selector2} cache := controller.NewNamespaceCache(true) - ns := 
testNamespace("test-ns", map[string]string{"team": "platform"}) + ns := testutil.NewNamespace("test-ns", map[string]string{"team": "platform"}) reconciler := newNamespaceReconciler(t, cfg, cache, ns) assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) @@ -138,7 +139,7 @@ func TestNamespaceReconciler_NoLabels(t *testing.T) { cfg.NamespaceSelectors = []labels.Selector{selector} cache := controller.NewNamespaceCache(true) - ns := testNamespace("test-ns", nil) // No labels + ns := testutil.NewNamespace("test-ns", nil) // No labels reconciler := newNamespaceReconciler(t, cfg, cache, ns) assertReconcileSuccess(t, reconciler, namespaceRequest("test-ns")) diff --git a/internal/pkg/controller/retry_test.go b/internal/pkg/controller/retry_test.go index a6237586c..b62bc7a4f 100644 --- a/internal/pkg/controller/retry_test.go +++ b/internal/pkg/controller/retry_test.go @@ -7,6 +7,7 @@ import ( "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/controller" "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/testutil" "github.com/stakater/Reloader/internal/pkg/workload" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -27,7 +28,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { }{ { name: "Deployment", - object: testDeployment("test-deployment", "default", nil), + object: testutil.NewDeployment("test-deployment", "default", nil), workload: func(o runtime.Object) workload.WorkloadAccessor { return workload.NewDeploymentWorkload(o.(*appsv1.Deployment)) }, @@ -44,7 +45,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { }, { name: "DaemonSet", - object: testDaemonSet("test-daemonset", "default", nil), + object: testutil.NewDaemonSet("test-daemonset", "default", nil), workload: func(o runtime.Object) workload.WorkloadAccessor { return workload.NewDaemonSetWorkload(o.(*appsv1.DaemonSet)) }, @@ -61,7 +62,7 @@ func 
TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { }, { name: "StatefulSet", - object: testStatefulSet("test-statefulset", "default", nil), + object: testutil.NewStatefulSet("test-statefulset", "default", nil), workload: func(o runtime.Object) workload.WorkloadAccessor { return workload.NewStatefulSetWorkload(o.(*appsv1.StatefulSet)) }, @@ -78,7 +79,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { }, { name: "Job", - object: testJob("test-job", "default"), + object: testutil.NewJob("test-job", "default"), workload: func(o runtime.Object) workload.WorkloadAccessor { return workload.NewJobWorkload(o.(*batchv1.Job)) }, @@ -95,7 +96,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { }, { name: "CronJob", - object: testCronJob("test-cronjob", "default"), + object: testutil.NewCronJob("test-cronjob", "default"), workload: func(o runtime.Object) workload.WorkloadAccessor { return workload.NewCronJobWorkload(o.(*batchv1.CronJob)) }, @@ -122,7 +123,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { reloadService := reload.NewService(cfg) fakeClient := fake.NewClientBuilder(). - WithScheme(testScheme()). + WithScheme(testutil.NewScheme()). WithRuntimeObjects(tt.object). Build() @@ -201,9 +202,9 @@ func TestUpdateWorkloadWithRetry_Strategies(t *testing.T) { cfg.ReloadStrategy = tt.strategy reloadService := reload.NewService(cfg) - deployment := testDeployment("test-deployment", "default", nil) + deployment := testutil.NewDeployment("test-deployment", "default", nil) fakeClient := fake.NewClientBuilder(). - WithScheme(testScheme()). + WithScheme(testutil.NewScheme()). WithObjects(deployment). 
Build() @@ -245,7 +246,7 @@ func TestUpdateWorkloadWithRetry_NoUpdate(t *testing.T) { cfg := config.NewDefault() reloadService := reload.NewService(cfg) - deployment := testDeployment("test-deployment", "default", nil) + deployment := testutil.NewDeployment("test-deployment", "default", nil) deployment.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ { Name: "STAKATER_TEST_CM_CONFIGMAP", @@ -254,7 +255,7 @@ func TestUpdateWorkloadWithRetry_NoUpdate(t *testing.T) { } fakeClient := fake.NewClientBuilder(). - WithScheme(testScheme()). + WithScheme(testutil.NewScheme()). WithObjects(deployment). Build() diff --git a/internal/pkg/controller/secret_reconciler_test.go b/internal/pkg/controller/secret_reconciler_test.go index 11a879b46..f55e84a80 100644 --- a/internal/pkg/controller/secret_reconciler_test.go +++ b/internal/pkg/controller/secret_reconciler_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/testutil" ) func TestSecretReconciler_NotFound(t *testing.T) { @@ -16,7 +17,7 @@ func TestSecretReconciler_NotFound_ReloadOnDelete(t *testing.T) { cfg := config.NewDefault() cfg.ReloadOnDelete = true - deployment := testDeployment("test-deployment", "default", map[string]string{ + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ cfg.Annotations.SecretReload: "deleted-secret", }) reconciler := newSecretReconciler(t, cfg, deployment) @@ -27,7 +28,7 @@ func TestSecretReconciler_IgnoredNamespace(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredNamespaces = []string{"kube-system"} - secret := testSecret("test-secret", "kube-system") + secret := testutil.NewSecret("test-secret", "kube-system") reconciler := newSecretReconciler(t, cfg, secret) assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "kube-system")) } @@ -35,8 +36,8 @@ func TestSecretReconciler_IgnoredNamespace(t *testing.T) { func 
TestSecretReconciler_NoMatchingWorkloads(t *testing.T) { cfg := config.NewDefault() - secret := testSecret("test-secret", "default") - deployment := testDeployment("test-deployment", "default", nil) + secret := testutil.NewSecret("test-secret", "default") + deployment := testutil.NewDeployment("test-deployment", "default", nil) reconciler := newSecretReconciler(t, cfg, secret, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } @@ -45,8 +46,8 @@ func TestSecretReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - secret := testSecret("test-secret", "default") - deployment := testDeploymentWithEnvFrom("test-deployment", "default", "", "test-secret") + secret := testutil.NewSecret("test-secret", "default") + deployment := testutil.NewDeploymentWithEnvFrom("test-deployment", "default", "", "test-secret") reconciler := newSecretReconciler(t, cfg, secret, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("test-secret", "default")) } @@ -54,8 +55,8 @@ func TestSecretReconciler_MatchingDeployment_AutoAnnotation(t *testing.T) { func TestSecretReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { cfg := config.NewDefault() - secret := testSecret("test-secret", "default") - deployment := testDeployment("test-deployment", "default", map[string]string{ + secret := testutil.NewSecret("test-secret", "default") + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ cfg.Annotations.SecretReload: "test-secret", }) reconciler := newSecretReconciler(t, cfg, secret, deployment) @@ -65,8 +66,8 @@ func TestSecretReconciler_MatchingDeployment_ExplicitAnnotation(t *testing.T) { func TestSecretReconciler_WorkloadInDifferentNamespace(t *testing.T) { cfg := config.NewDefault() - secret := testSecret("test-secret", "namespace-a") - deployment := testDeployment("test-deployment", "namespace-b", map[string]string{ + secret := 
testutil.NewSecret("test-secret", "namespace-a") + deployment := testutil.NewDeployment("test-deployment", "namespace-b", map[string]string{ cfg.Annotations.SecretReload: "test-secret", }) reconciler := newSecretReconciler(t, cfg, secret, deployment) @@ -77,8 +78,8 @@ func TestSecretReconciler_IgnoredWorkloadType(t *testing.T) { cfg := config.NewDefault() cfg.IgnoredWorkloads = []string{"deployment"} - secret := testSecret("test-secret", "default") - deployment := testDeployment("test-deployment", "default", map[string]string{ + secret := testutil.NewSecret("test-secret", "default") + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ cfg.Annotations.SecretReload: "test-secret", }) reconciler := newSecretReconciler(t, cfg, secret, deployment) @@ -88,8 +89,8 @@ func TestSecretReconciler_IgnoredWorkloadType(t *testing.T) { func TestSecretReconciler_DaemonSet(t *testing.T) { cfg := config.NewDefault() - secret := testSecret("test-secret", "default") - daemonset := testDaemonSet("test-daemonset", "default", map[string]string{ + secret := testutil.NewSecret("test-secret", "default") + daemonset := testutil.NewDaemonSet("test-daemonset", "default", map[string]string{ cfg.Annotations.SecretReload: "test-secret", }) reconciler := newSecretReconciler(t, cfg, secret, daemonset) @@ -99,8 +100,8 @@ func TestSecretReconciler_DaemonSet(t *testing.T) { func TestSecretReconciler_StatefulSet(t *testing.T) { cfg := config.NewDefault() - secret := testSecret("test-secret", "default") - statefulset := testStatefulSet("test-statefulset", "default", map[string]string{ + secret := testutil.NewSecret("test-secret", "default") + statefulset := testutil.NewStatefulSet("test-statefulset", "default", map[string]string{ cfg.Annotations.SecretReload: "test-secret", }) reconciler := newSecretReconciler(t, cfg, secret, statefulset) @@ -110,14 +111,14 @@ func TestSecretReconciler_StatefulSet(t *testing.T) { func TestSecretReconciler_MultipleWorkloads(t 
*testing.T) { cfg := config.NewDefault() - secret := testSecret("shared-secret", "default") - deployment1 := testDeployment("deployment-1", "default", map[string]string{ + secret := testutil.NewSecret("shared-secret", "default") + deployment1 := testutil.NewDeployment("deployment-1", "default", map[string]string{ cfg.Annotations.SecretReload: "shared-secret", }) - deployment2 := testDeployment("deployment-2", "default", map[string]string{ + deployment2 := testutil.NewDeployment("deployment-2", "default", map[string]string{ cfg.Annotations.SecretReload: "shared-secret", }) - daemonset := testDaemonSet("daemonset-1", "default", map[string]string{ + daemonset := testutil.NewDaemonSet("daemonset-1", "default", map[string]string{ cfg.Annotations.SecretReload: "shared-secret", }) @@ -129,8 +130,8 @@ func TestSecretReconciler_VolumeMount(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - secret := testSecret("volume-secret", "default") - deployment := testDeploymentWithVolume("test-deployment", "default", "", "volume-secret") + secret := testutil.NewSecret("volume-secret", "default") + deployment := testutil.NewDeploymentWithVolume("test-deployment", "default", "", "volume-secret") reconciler := newSecretReconciler(t, cfg, secret, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("volume-secret", "default")) } @@ -139,8 +140,8 @@ func TestSecretReconciler_ProjectedVolume(t *testing.T) { cfg := config.NewDefault() cfg.AutoReloadAll = true - secret := testSecret("projected-secret", "default") - deployment := testDeploymentWithProjectedVolume("test-deployment", "default", "", "projected-secret") + secret := testutil.NewSecret("projected-secret", "default") + deployment := testutil.NewDeploymentWithProjectedVolume("test-deployment", "default", "", "projected-secret") reconciler := newSecretReconciler(t, cfg, secret, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("projected-secret", "default")) } @@ -148,10 +149,10 @@ 
func TestSecretReconciler_ProjectedVolume(t *testing.T) { func TestSecretReconciler_SearchAnnotation(t *testing.T) { cfg := config.NewDefault() - secret := testSecretWithAnnotations("test-secret", "default", map[string]string{ + secret := testutil.NewSecretWithAnnotations("test-secret", "default", map[string]string{ cfg.Annotations.Match: "true", }) - deployment := testDeployment("test-deployment", "default", map[string]string{ + deployment := testutil.NewDeployment("test-deployment", "default", map[string]string{ cfg.Annotations.Search: "true", }) reconciler := newSecretReconciler(t, cfg, secret, deployment) @@ -163,10 +164,10 @@ func TestSecretReconciler_ServiceAccountTokenIgnored(t *testing.T) { cfg.AutoReloadAll = true // Service account tokens should be ignored - secret := testSecret("sa-token", "default") + secret := testutil.NewSecret("sa-token", "default") secret.Type = "kubernetes.io/service-account-token" - deployment := testDeploymentWithEnvFrom("test-deployment", "default", "", "sa-token") + deployment := testutil.NewDeploymentWithEnvFrom("test-deployment", "default", "", "sa-token") reconciler := newSecretReconciler(t, cfg, secret, deployment) assertReconcileSuccess(t, reconciler, reconcileRequest("sa-token", "default")) } diff --git a/internal/pkg/controller/test_helpers_test.go b/internal/pkg/controller/test_helpers_test.go index b33152329..317039ea5 100644 --- a/internal/pkg/controller/test_helpers_test.go +++ b/internal/pkg/controller/test_helpers_test.go @@ -11,32 +11,21 @@ import ( "github.com/stakater/Reloader/internal/pkg/events" "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/testutil" "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -// testScheme is a shared scheme for all controller tests. -func testScheme() *runtime.Scheme { - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - _ = batchv1.AddToScheme(scheme) - return scheme -} - // newConfigMapReconciler creates a ConfigMapReconciler for testing. func newConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.ConfigMapReconciler { t.Helper() fakeClient := fake.NewClientBuilder(). - WithScheme(testScheme()). + WithScheme(testutil.NewScheme()). WithRuntimeObjects(objects...). Build() @@ -59,7 +48,7 @@ func newConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime func newSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.SecretReconciler { t.Helper() fakeClient := fake.NewClientBuilder(). - WithScheme(testScheme()). + WithScheme(testutil.NewScheme()). WithRuntimeObjects(objects...). Build() @@ -78,230 +67,22 @@ func newSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Ob } } -// testConfigMap creates a ConfigMap for testing. -func testConfigMap(name, namespace string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Data: map[string]string{"key": "value"}, - } -} - -// testConfigMapWithAnnotations creates a ConfigMap with annotations. -func testConfigMapWithAnnotations(name, namespace string, annotations map[string]string) *corev1.ConfigMap { - cm := testConfigMap(name, namespace) - cm.Annotations = annotations - return cm -} - -// testSecret creates a Secret for testing. 
-func testSecret(name, namespace string) *corev1.Secret { - return &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Data: map[string][]byte{"key": []byte("value")}, - } -} - -// testSecretWithAnnotations creates a Secret with annotations. -func testSecretWithAnnotations(name, namespace string, annotations map[string]string) *corev1.Secret { - secret := testSecret(name, namespace) - secret.Annotations = annotations - return secret -} - -// testDeployment creates a minimal Deployment for testing. -func testDeployment(name, namespace string, annotations map[string]string) *appsv1.Deployment { - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": name}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": name}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Image: "nginx", - }, - }, - }, - }, - }, - } -} - -// testDeploymentWithEnvFrom creates a Deployment with EnvFrom referencing a ConfigMap or Secret. 
-func testDeploymentWithEnvFrom(name, namespace string, configMapName, secretName string) *appsv1.Deployment { - d := testDeployment(name, namespace, nil) - if configMapName != "" { - d.Spec.Template.Spec.Containers[0].EnvFrom = append( - d.Spec.Template.Spec.Containers[0].EnvFrom, - corev1.EnvFromSource{ - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, - }, - }, - ) - } - if secretName != "" { - d.Spec.Template.Spec.Containers[0].EnvFrom = append( - d.Spec.Template.Spec.Containers[0].EnvFrom, - corev1.EnvFromSource{ - SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, - }, - }, - ) - } - return d -} - -// testDeploymentWithVolume creates a Deployment with a volume from ConfigMap or Secret. -func testDeploymentWithVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { - d := testDeployment(name, namespace, nil) - d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ - { - Name: "config", - MountPath: "/etc/config", - }, - } - - if configMapName != "" { - d.Spec.Template.Spec.Volumes = []corev1.Volume{ - { - Name: "config", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, - }, - }, - }, - } - } - if secretName != "" { - d.Spec.Template.Spec.Volumes = []corev1.Volume{ - { - Name: "config", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: secretName, - }, - }, - }, - } - } - return d -} - -// testDeploymentWithProjectedVolume creates a Deployment with a projected volume. 
-func testDeploymentWithProjectedVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { - d := testDeployment(name, namespace, nil) - d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ - { - Name: "config", - MountPath: "/etc/config", - }, - } - - var sources []corev1.VolumeProjection - if configMapName != "" { - sources = append( - sources, corev1.VolumeProjection{ - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, - }, - }, - ) - } - if secretName != "" { - sources = append( - sources, corev1.VolumeProjection{ - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, - }, - }, - ) - } - - d.Spec.Template.Spec.Volumes = []corev1.Volume{ - { - Name: "config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{Sources: sources}, - }, - }, - } - return d -} +// newNamespaceReconciler creates a NamespaceReconciler for testing. +func newNamespaceReconciler(t *testing.T, cfg *config.Config, cache *controller.NamespaceCache, objects ...runtime.Object) *controller.NamespaceReconciler { + t.Helper() + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) -// testDaemonSet creates a minimal DaemonSet for testing. -func testDaemonSet(name, namespace string, annotations map[string]string) *appsv1.DaemonSet { - return &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": name}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": name}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Image: "nginx", - }, - }, - }, - }, - }, - } -} + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ WithRuntimeObjects(objects...). + Build() -// testStatefulSet creates a minimal StatefulSet for testing. -func testStatefulSet(name, namespace string, annotations map[string]string) *appsv1.StatefulSet { - return &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": name}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": name}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Image: "nginx", - }, - }, - }, - }, - }, + return &controller.NamespaceReconciler{ + Client: fakeClient, + Log: testr.New(t), + Config: cfg, + Cache: cache, } } @@ -322,35 +103,6 @@ func namespaceRequest(name string) ctrl.Request { } } -// testNamespace creates a Namespace with optional labels. -func testNamespace(name string, labels map[string]string) *corev1.Namespace { - return &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - } -} - -// newNamespaceReconciler creates a NamespaceReconciler for testing. -func newNamespaceReconciler(t *testing.T, cfg *config.Config, cache *controller.NamespaceCache, objects ...runtime.Object) *controller.NamespaceReconciler { - t.Helper() - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithRuntimeObjects(objects...). - Build() - - return &controller.NamespaceReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - Cache: cache, - } -} - // assertReconcileSuccess runs reconcile and asserts no error and no requeue. 
func assertReconcileSuccess(t *testing.T, reconciler interface { Reconcile(context.Context, ctrl.Request) (ctrl.Result, error) @@ -364,55 +116,3 @@ func assertReconcileSuccess(t *testing.T, reconciler interface { t.Error("Should not requeue") } } - -// testJob creates a minimal Job for testing. -func testJob(name, namespace string) *batchv1.Job { - return &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{ - { - Name: "main", - Image: "busybox", - }, - }, - }, - }, - }, - } -} - -// testCronJob creates a minimal CronJob for testing. -func testCronJob(name, namespace string) *batchv1.CronJob { - return &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - UID: "test-uid", - }, - Spec: batchv1.CronJobSpec{ - Schedule: "*/5 * * * *", - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{ - { - Name: "main", - Image: "busybox", - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/internal/pkg/reload/service_test.go b/internal/pkg/reload/service_test.go index cd94873fc..dae653f72 100644 --- a/internal/pkg/reload/service_test.go +++ b/internal/pkg/reload/service_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/testutil" "github.com/stakater/Reloader/internal/pkg/workload" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -16,7 +16,7 @@ func TestService_ProcessConfigMap_AutoReload(t *testing.T) { svc := NewService(cfg) // Create a deployment with auto annotation that uses the configmap - deploy := createTestDeployment( + deploy := 
testutil.NewDeployment( "test-deploy", "default", map[string]string{ "reloader.stakater.com/auto": "true", }, @@ -76,7 +76,7 @@ func TestService_ProcessConfigMap_ExplicitAnnotation(t *testing.T) { cfg := config.NewDefault() svc := NewService(cfg) - deploy := createTestDeployment( + deploy := testutil.NewDeployment( "test-deploy", "default", map[string]string{ "configmap.reloader.stakater.com/reload": "test-cm", }, @@ -121,7 +121,7 @@ func TestService_ProcessConfigMap_IgnoredResource(t *testing.T) { svc := NewService(cfg) // Create a deployment with auto annotation - deploy := createTestDeployment( + deploy := testutil.NewDeployment( "test-deploy", "default", map[string]string{ "reloader.stakater.com/auto": "true", }, @@ -177,7 +177,7 @@ func TestService_ProcessSecret_AutoReload(t *testing.T) { svc := NewService(cfg) // Create a deployment with auto annotation that uses the secret - deploy := createTestDeployment( + deploy := testutil.NewDeployment( "test-deploy", "default", map[string]string{ "reloader.stakater.com/auto": "true", }, @@ -233,7 +233,7 @@ func TestService_ProcessConfigMap_DeleteEvent(t *testing.T) { svc := NewService(cfg) // Create a deployment with explicit configmap annotation - deploy := createTestDeployment( + deploy := testutil.NewDeployment( "test-deploy", "default", map[string]string{ "configmap.reloader.stakater.com/reload": "test-cm", }, @@ -276,7 +276,7 @@ func TestService_ProcessConfigMap_DeleteEventDisabled(t *testing.T) { cfg.ReloadOnDelete = false // Disabled by default svc := NewService(cfg) - deploy := createTestDeployment( + deploy := testutil.NewDeployment( "test-deploy", "default", map[string]string{ "configmap.reloader.stakater.com/reload": "test-cm", }, @@ -311,7 +311,7 @@ func TestService_ApplyReload_EnvVarStrategy(t *testing.T) { cfg.ReloadStrategy = config.ReloadStrategyEnvVars svc := NewService(cfg) - deploy := createTestDeployment("test-deploy", "default", nil) + deploy := testutil.NewDeployment("test-deploy", "default", nil) 
accessor := workload.NewDeploymentWorkload(deploy) ctx := context.Background() @@ -355,7 +355,7 @@ func TestService_ApplyReload_AnnotationStrategy(t *testing.T) { cfg.ReloadStrategy = config.ReloadStrategyAnnotations svc := NewService(cfg) - deploy := createTestDeployment("test-deploy", "default", nil) + deploy := testutil.NewDeployment("test-deploy", "default", nil) accessor := workload.NewDeploymentWorkload(deploy) ctx := context.Background() @@ -381,7 +381,7 @@ func TestService_ApplyReload_EnvVarDeletion(t *testing.T) { cfg.ReloadStrategy = config.ReloadStrategyEnvVars svc := NewService(cfg) - deploy := createTestDeployment("test-deploy", "default", nil) + deploy := testutil.NewDeployment("test-deploy", "default", nil) // Pre-add an env var deploy.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ {Name: "STAKATER_TEST_CM_CONFIGMAP", Value: "oldhash"}, @@ -427,7 +427,7 @@ func TestService_ApplyReload_NoChangeIfSameHash(t *testing.T) { cfg.ReloadStrategy = config.ReloadStrategyEnvVars svc := NewService(cfg) - deploy := createTestDeployment("test-deploy", "default", nil) + deploy := testutil.NewDeployment("test-deploy", "default", nil) // Pre-add env var with same hash deploy.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ {Name: "STAKATER_TEST_CM_CONFIGMAP", Value: "abc123hash"}, @@ -451,7 +451,7 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { svc := NewService(cfg) // Create multiple workloads - deploy1 := createTestDeployment( + deploy1 := testutil.NewDeployment( "deploy1", "default", map[string]string{ "reloader.stakater.com/auto": "true", }, @@ -469,7 +469,7 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { }, } - deploy2 := createTestDeployment( + deploy2 := testutil.NewDeployment( "deploy2", "default", map[string]string{ "reloader.stakater.com/auto": "true", }, @@ -488,7 +488,7 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { } // Deploy3 doesn't use the configmap - deploy3 := 
createTestDeployment( + deploy3 := testutil.NewDeployment( "deploy3", "default", map[string]string{ "reloader.stakater.com/auto": "true", }, @@ -538,7 +538,7 @@ func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { svc := NewService(cfg) // Create deployments in different namespaces - deploy1 := createTestDeployment( + deploy1 := testutil.NewDeployment( "deploy1", "namespace-a", map[string]string{ "reloader.stakater.com/auto": "true", }, @@ -556,7 +556,7 @@ func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { }, } - deploy2 := createTestDeployment( + deploy2 := testutil.NewDeployment( "deploy2", "namespace-b", map[string]string{ "reloader.stakater.com/auto": "true", }, @@ -1102,7 +1102,7 @@ func TestService_findTargetContainer_AutoReload(t *testing.T) { svc := NewService(cfg) // Test with autoReload=true and volume mount - deploy := createTestDeployment("test", "default", nil) + deploy := testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "config-vol", @@ -1138,7 +1138,7 @@ func TestService_findTargetContainer_AutoReload_EnvRef(t *testing.T) { svc := NewService(cfg) // Test with autoReload=true and env ref (no volume) - deploy := createTestDeployment("test", "default", nil) + deploy := testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.Containers = []corev1.Container{ { Name: "sidecar", @@ -1176,7 +1176,7 @@ func TestService_findTargetContainer_AutoReload_InitContainer(t *testing.T) { svc := NewService(cfg) // Test with autoReload=true where init container uses the volume - deploy := createTestDeployment("test", "default", nil) + deploy := testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "config-vol", @@ -1219,7 +1219,7 @@ func TestService_findTargetContainer_AutoReload_InitContainerEnvRef(t *testing.T svc := NewService(cfg) // Test with autoReload=true where init container has env ref - deploy := 
createTestDeployment("test", "default", nil) + deploy := testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ { Name: "init", @@ -1259,7 +1259,7 @@ func TestService_findTargetContainer_NoContainers(t *testing.T) { cfg := config.NewDefault() svc := NewService(cfg) - deploy := createTestDeployment("test", "default", nil) + deploy := testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.Containers = []corev1.Container{} accessor := workload.NewDeploymentWorkload(deploy) @@ -1273,7 +1273,7 @@ func TestService_findTargetContainer_NonAutoReload(t *testing.T) { cfg := config.NewDefault() svc := NewService(cfg) - deploy := createTestDeployment("test", "default", nil) + deploy := testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.Containers = []corev1.Container{ {Name: "first", Image: "nginx"}, {Name: "second", Image: "busybox"}, @@ -1295,7 +1295,7 @@ func TestService_findTargetContainer_AutoReload_FallbackToFirst(t *testing.T) { svc := NewService(cfg) // autoReload=true but no matching volume or env ref - should fallback to first container - deploy := createTestDeployment("test", "default", nil) + deploy := testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.Containers = []corev1.Container{ {Name: "first", Image: "nginx"}, {Name: "second", Image: "busybox"}, @@ -1315,7 +1315,7 @@ func TestService_ProcessNilChange(t *testing.T) { cfg := config.NewDefault() svc := NewService(cfg) - deploy := createTestDeployment("test", "default", nil) + deploy := testutil.NewDeployment("test", "default", nil) workloads := []workload.WorkloadAccessor{workload.NewDeploymentWorkload(deploy)} // Test with nil ConfigMap @@ -1335,7 +1335,7 @@ func TestService_ProcessCreateEventDisabled(t *testing.T) { cfg.ReloadOnCreate = false svc := NewService(cfg) - deploy := createTestDeployment( + deploy := testutil.NewDeployment( "test", "default", map[string]string{ 
"reloader.stakater.com/auto": "true", }, @@ -1357,35 +1357,3 @@ func TestService_ProcessCreateEventDisabled(t *testing.T) { t.Errorf("Expected nil decisions when create events disabled, got %v", decisions) } } - -// Helper function to create a test deployment -func createTestDeployment(name, namespace string, annotations map[string]string) *appsv1.Deployment { - replicas := int32(1) - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": name}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": name}, - Annotations: map[string]string{}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Image: "nginx:latest", - }, - }, - }, - }, - }, - } -} diff --git a/internal/pkg/testutil/fixtures.go b/internal/pkg/testutil/fixtures.go new file mode 100644 index 000000000..1deb85a19 --- /dev/null +++ b/internal/pkg/testutil/fixtures.go @@ -0,0 +1,286 @@ +package testutil + +import ( + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// NewScheme creates a scheme with common types for testing. +func NewScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = batchv1.AddToScheme(scheme) + return scheme +} + +// NewDeployment creates a minimal Deployment for unit testing. 
+func NewDeployment(name, namespace string, annotations map[string]string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } +} + +// NewDeploymentWithEnvFrom creates a Deployment with EnvFrom referencing a ConfigMap or Secret. +func NewDeploymentWithEnvFrom(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := NewDeployment(name, namespace, nil) + if configMapName != "" { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + ) + } + if secretName != "" { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }, + ) + } + return d +} + +// NewDeploymentWithVolume creates a Deployment with a volume from ConfigMap or Secret. 
+func NewDeploymentWithVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := NewDeployment(name, namespace, nil) + d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{ + Name: "config", + MountPath: "/etc/config", + }} + + if configMapName != "" { + d.Spec.Template.Spec.Volumes = []corev1.Volume{{ + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + }} + } + if secretName != "" { + d.Spec.Template.Spec.Volumes = []corev1.Volume{{ + Name: "config", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }} + } + return d +} + +// NewDeploymentWithProjectedVolume creates a Deployment with a projected volume. +func NewDeploymentWithProjectedVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { + d := NewDeployment(name, namespace, nil) + d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{ + Name: "config", + MountPath: "/etc/config", + }} + + sources := []corev1.VolumeProjection{} + if configMapName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + d.Spec.Template.Spec.Volumes = []corev1.Volume{{ + Name: "config", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{Sources: sources}, + }, + }} + return d +} + +// NewDaemonSet creates a minimal DaemonSet for unit testing. 
+func NewDaemonSet(name, namespace string, annotations map[string]string) *appsv1.DaemonSet { + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } +} + +// NewStatefulSet creates a minimal StatefulSet for unit testing. +func NewStatefulSet(name, namespace string, annotations map[string]string) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "nginx", + }}, + }, + }, + }, + } +} + +// NewJob creates a minimal Job for unit testing. +func NewJob(name, namespace string) *batchv1.Job { + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{{ + Name: "main", + Image: "busybox", + }}, + }, + }, + }, + } +} + +// NewCronJob creates a minimal CronJob for unit testing. 
+func NewCronJob(name, namespace string) *batchv1.CronJob { + return &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: "test-uid", + }, + Spec: batchv1.CronJobSpec{ + Schedule: "*/5 * * * *", + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{{ + Name: "main", + Image: "busybox", + }}, + }, + }, + }, + }, + }, + } +} + +// NewConfigMap creates a ConfigMap for unit testing. +func NewConfigMap(name, namespace string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string]string{"key": "value"}, + } +} + +// NewConfigMapWithAnnotations creates a ConfigMap with annotations. +func NewConfigMapWithAnnotations(name, namespace string, annotations map[string]string) *corev1.ConfigMap { + cm := NewConfigMap(name, namespace) + cm.Annotations = annotations + return cm +} + +// NewSecret creates a Secret for unit testing. +func NewSecret(name, namespace string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{"key": []byte("value")}, + } +} + +// NewSecretWithAnnotations creates a Secret with annotations. +func NewSecretWithAnnotations(name, namespace string, annotations map[string]string) *corev1.Secret { + secret := NewSecret(name, namespace) + secret.Annotations = annotations + return secret +} + +// NewNamespace creates a Namespace with optional labels. 
+func NewNamespace(name string, labels map[string]string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + } +} diff --git a/internal/pkg/testutil/testutil.go b/internal/pkg/testutil/testutil.go index 9dcde17ed..b7f7e0dd3 100644 --- a/internal/pkg/testutil/testutil.go +++ b/internal/pkg/testutil/testutil.go @@ -134,56 +134,16 @@ func DeleteSecret(client kubernetes.Interface, namespace, name string) error { // CreateDeployment creates a Deployment that references a ConfigMap/Secret. func CreateDeployment(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.Deployment, error) { - replicas := int32(1) - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": name}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": name}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Image: "busybox:1.36", - Command: []string{"sh", "-c", "while true; do sleep 3600; done"}, - }, - }, - }, - }, - }, - } - + var deployment *appsv1.Deployment if useConfigMap { - deployment.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: name, - }, - }, - }, - } + deployment = NewDeploymentWithEnvFrom(name, namespace, name, "") } else { - deployment.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ - { - SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: name, - }, - }, - }, - } + deployment = NewDeploymentWithEnvFrom(name, namespace, "", name) } + deployment.Annotations = annotations + // 
Override image for integration tests + deployment.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + deployment.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} return client.AppsV1().Deployments(namespace).Create(context.Background(), deployment, metav1.CreateOptions{}) } @@ -195,40 +155,16 @@ func DeleteDeployment(client kubernetes.Interface, namespace, name string) error // CreateDaemonSet creates a DaemonSet that references a ConfigMap/Secret. func CreateDaemonSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.DaemonSet, error) { - daemonset := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": name}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": name}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Image: "busybox:1.36", - Command: []string{"sh", "-c", "while true; do sleep 3600; done"}, - }, - }, - }, - }, - }, - } + daemonset := NewDaemonSet(name, namespace, annotations) + // Override image for integration tests + daemonset.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + daemonset.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} if useConfigMap { daemonset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ { ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: name, - }, + LocalObjectReference: corev1.LocalObjectReference{Name: name}, }, }, } @@ -236,9 +172,7 @@ func CreateDaemonSet(client kubernetes.Interface, name, namespace string, useCon daemonset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ { SecretRef: 
&corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: name, - }, + LocalObjectReference: corev1.LocalObjectReference{Name: name}, }, }, } @@ -254,43 +188,17 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace, name string) error // CreateStatefulSet creates a StatefulSet that references a ConfigMap/Secret. func CreateStatefulSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.StatefulSet, error) { - replicas := int32(1) - statefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": name}, - }, - ServiceName: name, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": name}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Image: "busybox:1.36", - Command: []string{"sh", "-c", "while true; do sleep 3600; done"}, - }, - }, - }, - }, - }, - } + statefulset := NewStatefulSet(name, namespace, annotations) + statefulset.Spec.ServiceName = name + // Override image for integration tests + statefulset.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + statefulset.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} if useConfigMap { statefulset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ { ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: name, - }, + LocalObjectReference: corev1.LocalObjectReference{Name: name}, }, }, } @@ -298,9 +206,7 @@ func CreateStatefulSet(client kubernetes.Interface, name, namespace string, useC statefulset.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ { SecretRef: &corev1.SecretEnvSource{ - 
LocalObjectReference: corev1.LocalObjectReference{ - Name: name, - }, + LocalObjectReference: corev1.LocalObjectReference{Name: name}, }, }, } @@ -316,40 +222,18 @@ func DeleteStatefulSet(client kubernetes.Interface, namespace, name string) erro // CreateCronJob creates a CronJob that references a ConfigMap/Secret. func CreateCronJob(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*batchv1.CronJob, error) { - cronjob := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: batchv1.CronJobSpec{ - Schedule: "*/5 * * * *", - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyOnFailure, - Containers: []corev1.Container{ - { - Name: "main", - Image: "busybox:1.36", - Command: []string{"sh", "-c", "echo hello"}, - }, - }, - }, - }, - }, - }, - }, - } + cronjob := NewCronJob(name, namespace) + cronjob.Annotations = annotations + // Override image for integration tests + cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "echo hello"} + cronjob.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure if useConfigMap { cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ { ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: name, - }, + LocalObjectReference: corev1.LocalObjectReference{Name: name}, }, }, } @@ -357,9 +241,7 @@ func CreateCronJob(client kubernetes.Interface, name, namespace string, useConfi cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ { SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: name, - }, + 
LocalObjectReference: corev1.LocalObjectReference{Name: name}, }, }, } diff --git a/internal/pkg/workload/registry_test.go b/internal/pkg/workload/registry_test.go index 4ebfb870c..e681438d1 100644 --- a/internal/pkg/workload/registry_test.go +++ b/internal/pkg/workload/registry_test.go @@ -18,14 +18,12 @@ func TestNewRegistry_WithoutArgoRollouts(t *testing.T) { t.Errorf("SupportedKinds() = %d kinds, want 5", len(kinds)) } - // Should not include ArgoRollout for _, k := range kinds { if k == KindArgoRollout { t.Error("SupportedKinds() should not include ArgoRollout when disabled") } } - // ListerFor should return nil for ArgoRollout if r.ListerFor(KindArgoRollout) != nil { t.Error("ListerFor(KindArgoRollout) should return nil when disabled") } @@ -39,7 +37,6 @@ func TestNewRegistry_WithArgoRollouts(t *testing.T) { t.Errorf("SupportedKinds() = %d kinds, want 6", len(kinds)) } - // Should include ArgoRollout found := false for _, k := range kinds { if k == KindArgoRollout { @@ -51,7 +48,6 @@ func TestNewRegistry_WithArgoRollouts(t *testing.T) { t.Error("SupportedKinds() should include ArgoRollout when enabled") } - // ListerFor should return a function for ArgoRollout if r.ListerFor(KindArgoRollout) == nil { t.Error("ListerFor(KindArgoRollout) should return a function when enabled") } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 32106d49f..6d3dcc4c5 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -1,13 +1,4 @@ // Package e2e contains end-to-end tests for Reloader. -// These tests run against a real Kubernetes cluster (or envtest). -// -// To run these tests against a real cluster: -// -// KUBECONFIG=~/.kube/config go test -v ./test/e2e/... -count=1 -// -// To skip these tests when running unit tests: -// -// go test -v ./... 
-short package e2e import ( @@ -237,7 +228,6 @@ func TestMain(m *testing.M) { os.Exit(0) } - // Set up zerolog as the controller-runtime logger zl := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}). Level(zerolog.WarnLevel). With(). @@ -301,9 +291,11 @@ func TestConfigMapUpdate(t *testing.T) { defer f.cleanup() f.createConfigMap(f.name, "initial-data") - f.createDeployment(f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }) + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) f.waitForReady() f.updateConfigMap(f.name, "updated-data") @@ -316,9 +308,11 @@ func TestSecretUpdate(t *testing.T) { defer f.cleanup() f.createSecret(f.name, "initial-secret") - f.createDeployment(f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }) + f.createDeployment( + f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }, + ) f.waitForReady() f.updateSecret(f.name, "updated-secret") @@ -331,9 +325,11 @@ func TestAutoReloadAll(t *testing.T) { defer f.cleanup() f.createConfigMap(f.name, "initial-data") - f.createDeployment(f.name, true, map[string]string{ - cfg.Annotations.Auto: "true", - }) + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.Auto: "true", + }, + ) f.waitForReady() f.updateConfigMap(f.name, "updated-data") @@ -346,9 +342,11 @@ func TestDaemonSetReload(t *testing.T) { defer f.cleanup() f.createConfigMap(f.name, "initial-data") - f.createDaemonSet(f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }) + f.createDaemonSet( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) f.waitForReady() f.updateConfigMap(f.name, "updated-data") @@ -361,9 +359,11 @@ func TestStatefulSetReload(t *testing.T) { defer f.cleanup() f.createSecret(f.name, "initial-secret") - f.createStatefulSet(f.name, false, map[string]string{ - 
cfg.Annotations.SecretReload: f.name, - }) + f.createStatefulSet( + f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }, + ) f.waitForReady() f.updateSecret(f.name, "updated-secret") @@ -376,9 +376,11 @@ func TestLabelOnlyChange(t *testing.T) { defer f.cleanup() f.createConfigMap(f.name, "initial-data") - f.createDeployment(f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }) + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) f.waitForReady() f.updateConfigMapLabel(f.name, "new-label") @@ -395,9 +397,11 @@ func TestMultipleConfigMaps(t *testing.T) { f.createConfigMap(cm1, "data-a") f.createConfigMap(cm2, "data-b") - f.createDeployment(f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: cm1 + "," + cm2, - }) + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: cm1 + "," + cm2, + }, + ) f.waitForReady() f.updateConfigMap(cm1, "updated-data-a") @@ -413,9 +417,11 @@ func TestAutoAnnotationDisabled(t *testing.T) { testCfg.AutoReloadAll = true f.createConfigMap(f.name, "initial-data") - f.createDeployment(f.name, true, map[string]string{ - testCfg.Annotations.Auto: "false", - }) + f.createDeployment( + f.name, true, map[string]string{ + testCfg.Annotations.Auto: "false", + }, + ) f.waitForReady() f.updateConfigMap(f.name, "updated-data") @@ -433,13 +439,14 @@ func TestAutoWithExplicitConfigMapAnnotation(t *testing.T) { f.createConfigMap(referencedCM, "referenced-data") f.createConfigMap(explicitCM, "explicit-data") - f.createDeployment(referencedCM, true, map[string]string{ - cfg.Annotations.Auto: "true", - cfg.Annotations.ConfigmapReload: explicitCM, - }) + f.createDeployment( + referencedCM, true, map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.ConfigmapReload: explicitCM, + }, + ) f.waitForReady() - // Update the EXPLICIT ConfigMap (not the referenced one) 
f.updateConfigMap(explicitCM, "updated-explicit-data") f.assertDeploymentReloaded(referencedCM, nil) } @@ -455,13 +462,14 @@ func TestAutoWithExplicitSecretAnnotation(t *testing.T) { f.createSecret(referencedSecret, "referenced-secret") f.createSecret(explicitSecret, "explicit-secret") - f.createDeployment(referencedSecret, false, map[string]string{ - cfg.Annotations.Auto: "true", - cfg.Annotations.SecretReload: explicitSecret, - }) + f.createDeployment( + referencedSecret, false, map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.SecretReload: explicitSecret, + }, + ) f.waitForReady() - // Update the EXPLICIT Secret (not the referenced one) f.updateSecret(explicitSecret, "updated-explicit-secret") f.assertDeploymentReloaded(referencedSecret, nil) } @@ -477,13 +485,14 @@ func TestAutoWithBothExplicitAndReferencedChange(t *testing.T) { f.createConfigMap(referencedCM, "referenced-data") f.createConfigMap(explicitCM, "explicit-data") - f.createDeployment(referencedCM, true, map[string]string{ - cfg.Annotations.Auto: "true", - cfg.Annotations.ConfigmapReload: explicitCM, - }) + f.createDeployment( + referencedCM, true, map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.ConfigmapReload: explicitCM, + }, + ) f.waitForReady() - // Update the REFERENCED ConfigMap - should trigger reload via auto f.updateConfigMap(referencedCM, "updated-referenced-data") f.assertDeploymentReloaded(referencedCM, nil) } From cca7383dafeb259f0fa59813ba45104ffbae3554 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 08:59:01 +0100 Subject: [PATCH 21/35] fix: Linting issues --- internal/pkg/testutil/rand.go | 9 ++------- internal/pkg/testutil/testutil.go | 30 +++++++++++++++++++----------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/internal/pkg/testutil/rand.go b/internal/pkg/testutil/rand.go index a20d8ad07..bf88d4261 100644 --- a/internal/pkg/testutil/rand.go 
+++ b/internal/pkg/testutil/rand.go @@ -1,21 +1,16 @@ package testutil import ( - "math/rand" - "time" + "math/rand/v2" ) const letterBytes = "abcdefghijklmnopqrstuvwxyz" -func init() { - rand.Seed(time.Now().UnixNano()) -} - // RandSeq generates a random string of the specified length. func RandSeq(n int) string { b := make([]byte, n) for i := range b { - b[i] = letterBytes[rand.Intn(len(letterBytes))] + b[i] = letterBytes[rand.IntN(len(letterBytes))] } return string(b) } diff --git a/internal/pkg/testutil/testutil.go b/internal/pkg/testutil/testutil.go index b7f7e0dd3..96347e4cb 100644 --- a/internal/pkg/testutil/testutil.go +++ b/internal/pkg/testutil/testutil.go @@ -264,8 +264,10 @@ func ConvertResourceToSHA(resourceType, namespace, name, data string) string { // WaitForDeploymentAnnotation waits for a deployment to have the specified annotation value. func WaitForDeploymentAnnotation(client kubernetes.Interface, namespace, name, annotation, expectedValue string, timeout time.Duration) error { - return wait.PollImmediate(time.Second, timeout, func() (bool, error) { - deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, nil // Keep waiting } @@ -280,8 +282,10 @@ func WaitForDeploymentAnnotation(client kubernetes.Interface, namespace, name, a // WaitForDeploymentReloadedAnnotation waits for a deployment to have any reloaded annotation. 
func WaitForDeploymentReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { var found bool - err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { - deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, nil // Keep waiting } @@ -294,7 +298,7 @@ func WaitForDeploymentReloadedAnnotation(client kubernetes.Interface, namespace, } return false, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { return found, nil } return found, err @@ -303,8 +307,10 @@ func WaitForDeploymentReloadedAnnotation(client kubernetes.Interface, namespace, // WaitForDaemonSetReloadedAnnotation waits for a daemonset to have any reloaded annotation. 
func WaitForDaemonSetReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { var found bool - err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { - daemonset, err := client.AppsV1().DaemonSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + daemonset, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, nil // Keep waiting } @@ -317,7 +323,7 @@ func WaitForDaemonSetReloadedAnnotation(client kubernetes.Interface, namespace, } return false, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { return found, nil } return found, err @@ -326,8 +332,10 @@ func WaitForDaemonSetReloadedAnnotation(client kubernetes.Interface, namespace, // WaitForStatefulSetReloadedAnnotation waits for a statefulset to have any reloaded annotation. 
func WaitForStatefulSetReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { var found bool - err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { - statefulset, err := client.AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + statefulset, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, nil // Keep waiting } @@ -340,7 +348,7 @@ func WaitForStatefulSetReloadedAnnotation(client kubernetes.Interface, namespace } return false, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { return found, nil } return found, err From 612006c0910aee4997afa36cd9ee8a4e7c29fc60 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 09:02:51 +0100 Subject: [PATCH 22/35] feat: Upgrade all go packages --- go.mod | 95 ++++---- go.sum | 221 +++++++++++-------- internal/pkg/controller/test_helpers_test.go | 2 +- internal/pkg/reload/predicate.go | 6 + 4 files changed, 188 insertions(+), 136 deletions(-) diff --git a/go.mod b/go.mod index eb84625cc..3ef99f1f8 100644 --- a/go.mod +++ b/go.mod @@ -4,69 +4,80 @@ go 1.25.5 require ( github.com/argoproj/argo-rollouts v1.8.3 - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/go-logr/zerologr v1.2.3 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 github.com/rs/zerolog v1.34.0 - github.com/spf13/cobra v1.10.1 - github.com/spf13/pflag v1.0.9 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 - sigs.k8s.io/controller-runtime v0.19.4 + 
github.com/spf13/cobra v1.10.2 + github.com/spf13/pflag v1.0.10 + k8s.io/api v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/client-go v0.35.0 + sigs.k8s.io/controller-runtime v0.22.4 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect - github.com/evanphx/json-patch/v5 v5.9.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.8.0 // indirect - github.com/go-openapi/jsonpointer v0.21.1 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.1 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-openapi/jsonpointer v0.22.4 // indirect + github.com/go-openapi/jsonreference v0.21.4 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.1 // indirect 
github.com/google/go-cmp v0.7.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.9.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/common v0.63.0 // indirect - github.com/prometheus/procfs v0.16.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/common v0.67.4 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.29.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect - golang.org/x/time v0.11.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.40.0 // 
indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/apiextensions-apiserver v0.35.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect - k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect + k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) // Replacements for argo-rollouts diff --git a/go.sum b/go.sum index 02ee8daab..0e81e0d94 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/argoproj/argo-rollouts v1.8.3 h1:blbtQva4IK9r6gFh+dWkCrLnFdPOWiv9ubQYu36qeaA= github.com/argoproj/argo-rollouts v1.8.3/go.mod h1:kCAUvIfMGfOyVf3lvQbBt0nqQn4Pd+zB5/YwKv+UBa8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -6,55 +8,81 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty 
v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= -github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= -github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fsnotify/fsnotify v1.9.0 
h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-logr/zerologr v1.2.3 h1:up5N9vcH9Xck3jJkXzgyOxozT14R47IyDODz8LM1KSs= github.com/go-logr/zerologr v1.2.3/go.mod h1:BxwGo7y5zgSHYR1BjbnHPyF/5ZjVKfKxAZANVu6E8Ho= -github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= -github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= -github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= +github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= +github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= +github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 
h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 
h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= +github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp 
v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -67,10 +95,9 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 
-github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -78,41 +105,44 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= 
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= -github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= -github.com/prometheus/procfs v0.16.0 
h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= -github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= -github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 
h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -121,87 +151,92 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= -golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= -golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.32.0 
h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod 
h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= 
+k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= -sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE= +k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= 
+sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/pkg/controller/test_helpers_test.go b/internal/pkg/controller/test_helpers_test.go index 317039ea5..778abac7b 100644 --- a/internal/pkg/controller/test_helpers_test.go +++ b/internal/pkg/controller/test_helpers_test.go @@ -112,7 +112,7 @@ func assertReconcileSuccess(t *testing.T, reconciler interface { if err != nil { t.Fatalf("Reconcile failed: %v", err) } - if result.Requeue { + if result.RequeueAfter > 0 { t.Error("Should not requeue") } } diff --git a/internal/pkg/reload/predicate.go b/internal/pkg/reload/predicate.go index f504694a3..0c913f87c 100644 --- a/internal/pkg/reload/predicate.go +++ b/internal/pkg/reload/predicate.go @@ -132,6 +132,12 @@ func (ls LabelsSet) Get(key string) string { return ls[key] } +// Lookup 
returns the value for the provided label key and whether it exists. +func (ls LabelsSet) Lookup(key string) (string, bool) { + value, ok := ls[key] + return value, ok +} + // IgnoreAnnotationPredicate returns a predicate that filters out resources with the ignore annotation. func IgnoreAnnotationPredicate(cfg *config.Config) predicate.Predicate { return predicate.NewPredicateFuncs( From 2eeb44dd18f0bdc3d11ec6ac4ba33c4dd054c8e3 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 09:05:32 +0100 Subject: [PATCH 23/35] fix: Add missing cmd for reloader due to gitignore issues --- .gitignore | 3 +- cmd/reloader/main.go | 219 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 221 insertions(+), 1 deletion(-) create mode 100644 cmd/reloader/main.go diff --git a/.gitignore b/.gitignore index 73da63e55..defc67d2d 100644 --- a/.gitignore +++ b/.gitignore @@ -9,7 +9,8 @@ _gopath/ .vscode vendor dist -Reloader +/reloader +/Reloader !**/chart/reloader *.tgz styles/ diff --git a/cmd/reloader/main.go b/cmd/reloader/main.go new file mode 100644 index 000000000..1d8c76ec0 --- /dev/null +++ b/cmd/reloader/main.go @@ -0,0 +1,219 @@ +package main + +import ( + "context" + "fmt" + "net/http" + _ "net/http/pprof" + "os" + "os/signal" + "syscall" + "time" + + "github.com/go-logr/logr" + "github.com/go-logr/zerologr" + "github.com/rs/zerolog" + "github.com/spf13/cobra" + controllerruntime "sigs.k8s.io/controller-runtime" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/metadata" + "github.com/stakater/Reloader/internal/pkg/metrics" +) + +// Environment variable names for pod identity in HA mode. +const ( + podNameEnv = "POD_NAME" + podNamespaceEnv = "POD_NAMESPACE" +) + +// cfg holds the configuration for this reloader instance. 
+var cfg *config.Config + +func main() { + if err := newReloaderCommand().Execute(); err != nil { + os.Exit(1) + } +} + +func newReloaderCommand() *cobra.Command { + cfg = config.NewDefault() + + cmd := &cobra.Command{ + Use: "reloader", + Short: "A watcher for your Kubernetes cluster", + RunE: run, + } + + config.BindFlags(cmd.PersistentFlags(), cfg) + return cmd +} + +func run(cmd *cobra.Command, args []string) error { + // Apply post-parse flag processing + if err := config.ApplyFlags(cfg); err != nil { + return fmt.Errorf("applying flags: %w", err) + } + + // Validate the configuration + if err := cfg.Validate(); err != nil { + return fmt.Errorf("validating config: %w", err) + } + + // Validate HA environment variables + if cfg.EnableHA { + if err := validateHAEnvs(); err != nil { + return err + } + cfg.LeaderElection.Identity = os.Getenv(podNameEnv) + if cfg.LeaderElection.Namespace == "" { + cfg.LeaderElection.Namespace = os.Getenv(podNamespaceEnv) + } + } + + // Configure logging + log, err := configureLogging(cfg.LogFormat, cfg.LogLevel) + if err != nil { + return fmt.Errorf("configuring logging: %w", err) + } + + controllerruntime.SetLogger(log) + + log.Info("Starting Reloader") + + // Log configuration + if ns := os.Getenv("KUBERNETES_NAMESPACE"); ns == "" { + log.Info("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces") + } + + if len(cfg.NamespaceSelectors) > 0 { + log.Info("namespace-selector is set", "selectors", cfg.NamespaceSelectorStrings) + } + + if len(cfg.ResourceSelectors) > 0 { + log.Info("resource-label-selector is set", "selectors", cfg.ResourceSelectorStrings) + } + + if cfg.WebhookURL != "" { + log.Info("webhook-url is set, will only send webhook, no resources will be reloaded", "url", cfg.WebhookURL) + } + + if cfg.EnableHA { + log.Info( + "high-availability mode enabled", + "leaderElectionID", cfg.LeaderElection.LockName, + "leaderElectionNamespace", cfg.LeaderElection.Namespace, + ) + } + + // Setup Prometheus metrics 
+ collectors := metrics.SetupPrometheusEndpoint() + + // Create the controller-runtime manager + mgr, err := controller.NewManager( + controller.ManagerOptions{ + Config: cfg, + Log: log, + Collectors: &collectors, + }, + ) + if err != nil { + return fmt.Errorf("creating manager: %w", err) + } + + // Setup all reconcilers + if err := controller.SetupReconcilers(mgr, cfg, log, &collectors); err != nil { + return fmt.Errorf("setting up reconcilers: %w", err) + } + + // Create metadata ConfigMap + if err := metadata.CreateOrUpdate(mgr.GetClient(), cfg, log); err != nil { + log.Error(err, "Failed to create metadata ConfigMap") + // Non-fatal, continue starting + } + + // Start pprof server if enabled + if cfg.EnablePProf { + go startPProfServer(log) + } + + // Setup signal handling for graceful shutdown + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + sig := <-sigCh + log.Info("Received signal, shutting down", "signal", sig) + cancel() + }() + + // Start the manager + log.Info("Starting controller manager") + if err := controller.RunManager(ctx, mgr, log); err != nil { + return fmt.Errorf("manager exited with error: %w", err) + } + + log.Info("Reloader shutdown complete") + return nil +} + +func configureLogging(logFormat, logLevel string) (logr.Logger, error) { + // Parse log level + var level zerolog.Level + switch logLevel { + case "trace": + level = zerolog.TraceLevel + case "debug": + level = zerolog.DebugLevel + case "info", "": + level = zerolog.InfoLevel + case "warn", "warning": + level = zerolog.WarnLevel + case "error": + level = zerolog.ErrorLevel + default: + return logr.Logger{}, fmt.Errorf("unsupported log level: %q", logLevel) + } + + // Configure output format + var zl zerolog.Logger + switch logFormat { + case "json": + zl = zerolog.New(os.Stdout).Level(level).With().Timestamp().Logger() + case "": + // Human-readable 
console output + zl = zerolog.New( + zerolog.ConsoleWriter{ + Out: os.Stdout, + TimeFormat: time.RFC3339, + }, + ).Level(level).With().Timestamp().Logger() + default: + return logr.Logger{}, fmt.Errorf("unsupported log format: %q", logFormat) + } + + return zerologr.New(&zl), nil +} + +func validateHAEnvs() error { + podName := os.Getenv(podNameEnv) + podNamespace := os.Getenv(podNamespaceEnv) + + if podName == "" { + return fmt.Errorf("%s not set, cannot run in HA mode", podNameEnv) + } + if podNamespace == "" { + return fmt.Errorf("%s not set, cannot run in HA mode", podNamespaceEnv) + } + return nil +} + +func startPProfServer(log logr.Logger) { + log.Info("Starting pprof server", "addr", cfg.PProfAddr) + if err := http.ListenAndServe(cfg.PProfAddr, nil); err != nil { + log.Error(err, "Failed to start pprof server") + } +} From 9a5fbf190da636d00e9ebfba12634b7d944a1e9e Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 11:31:04 +0100 Subject: [PATCH 24/35] feat: Improve slack alerts --- internal/pkg/alerting/alerter.go | 2 +- internal/pkg/alerting/alerter_test.go | 68 +++++++++++++++++------- internal/pkg/alerting/http.go | 12 ++++- internal/pkg/alerting/raw.go | 39 ++++++++++++-- internal/pkg/alerting/slack.go | 75 ++++++++++++++++++++++++++- internal/pkg/config/config.go | 1 + 6 files changed, 172 insertions(+), 25 deletions(-) diff --git a/internal/pkg/alerting/alerter.go b/internal/pkg/alerting/alerter.go index 5213d382a..edbc22812 100644 --- a/internal/pkg/alerting/alerter.go +++ b/internal/pkg/alerting/alerter.go @@ -39,7 +39,7 @@ func NewAlerter(cfg *config.Config) Alerter { case "gchat": return NewGChatAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) default: - return NewRawAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional) + return NewRawAlerter(alertCfg.WebhookURL, alertCfg.Proxy, alertCfg.Additional, alertCfg.Structured) } } diff --git 
a/internal/pkg/alerting/alerter_test.go b/internal/pkg/alerting/alerter_test.go index c7dd2e8d7..d6ae4ad40 100644 --- a/internal/pkg/alerting/alerter_test.go +++ b/internal/pkg/alerting/alerter_test.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "net/http/httptest" + "strings" "testing" "time" @@ -14,15 +15,15 @@ import ( // testServer creates a test HTTP server that captures the request body. // Returns the server and a function to retrieve the captured body. -func testServer(t *testing.T) (*httptest.Server, func() []byte) { +func testServer(t *testing.T, expectedContentType string) (*httptest.Server, func() []byte) { t.Helper() var body []byte server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { t.Errorf("Expected POST request, got %s", r.Method) } - if r.Header.Get("Content-Type") != "application/json" { - t.Errorf("Expected Content-Type application/json, got %s", r.Header.Get("Content-Type")) + if r.Header.Get("Content-Type") != expectedContentType { + t.Errorf("Expected Content-Type %s, got %s", expectedContentType, r.Header.Get("Content-Type")) } body, _ = io.ReadAll(r.Body) w.WriteHeader(http.StatusOK) @@ -150,26 +151,38 @@ func TestNoOpAlerter_Send(t *testing.T) { func TestAlerter_Send(t *testing.T) { tests := []struct { - name string - newAlert func(url string) Alerter - validate func(t *testing.T, body []byte) + name string + contentType string + newAlert func(url string) Alerter + validate func(t *testing.T, body []byte) }{ { - name: "slack", - newAlert: func(url string) Alerter { return NewSlackAlerter(url, "", "Test Cluster") }, + name: "slack", + contentType: "application/json", + newAlert: func(url string) Alerter { return NewSlackAlerter(url, "", "Test Cluster") }, validate: func(t *testing.T, body []byte) { var msg slackMessage if err := json.Unmarshal(body, &msg); err != nil { t.Fatalf("Failed to unmarshal: %v", err) } - if msg.Text == "" { - t.Error("Expected non-empty text") + 
if len(msg.Attachments) != 1 { + t.Fatalf("Expected 1 attachment, got %d", len(msg.Attachments)) + } + if msg.Attachments[0].Text == "" { + t.Error("Expected non-empty attachment text") + } + if msg.Attachments[0].Color != "good" { + t.Errorf("Expected color 'good', got %s", msg.Attachments[0].Color) + } + if msg.Attachments[0].AuthorName != "Reloader" { + t.Errorf("Expected author_name 'Reloader', got %s", msg.Attachments[0].AuthorName) } }, }, { - name: "teams", - newAlert: func(url string) Alerter { return NewTeamsAlerter(url, "", "") }, + name: "teams", + contentType: "application/json", + newAlert: func(url string) Alerter { return NewTeamsAlerter(url, "", "") }, validate: func(t *testing.T, body []byte) { var msg teamsMessage if err := json.Unmarshal(body, &msg); err != nil { @@ -181,8 +194,9 @@ func TestAlerter_Send(t *testing.T) { }, }, { - name: "gchat", - newAlert: func(url string) Alerter { return NewGChatAlerter(url, "", "") }, + name: "gchat", + contentType: "application/json", + newAlert: func(url string) Alerter { return NewGChatAlerter(url, "", "") }, validate: func(t *testing.T, body []byte) { var msg gchatMessage if err := json.Unmarshal(body, &msg); err != nil { @@ -194,8 +208,26 @@ func TestAlerter_Send(t *testing.T) { }, }, { - name: "raw", - newAlert: func(url string) Alerter { return NewRawAlerter(url, "", "custom-info") }, + name: "raw plain text (default)", + contentType: "text/plain", + newAlert: func(url string) Alerter { return NewRawAlerter(url, "", "custom-info", false) }, + validate: func(t *testing.T, body []byte) { + text := string(body) + if text == "" { + t.Error("Expected non-empty text") + } + if !strings.Contains(text, "custom-info") { + t.Error("Expected text to contain 'custom-info'") + } + if !strings.Contains(text, "nginx") { + t.Error("Expected text to contain workload name 'nginx'") + } + }, + }, + { + name: "raw structured JSON", + contentType: "application/json", + newAlert: func(url string) Alerter { return 
NewRawAlerter(url, "", "custom-info", true) }, validate: func(t *testing.T, body []byte) { var msg rawMessage if err := json.Unmarshal(body, &msg); err != nil { @@ -216,7 +248,7 @@ func TestAlerter_Send(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - server, getBody := testServer(t) + server, getBody := testServer(t, tt.contentType) defer server.Close() alerter := tt.newAlert(server.URL) @@ -234,7 +266,7 @@ func TestAlerter_WebhookError(t *testing.T) { })) defer server.Close() - alerter := NewRawAlerter(server.URL, "", "") + alerter := NewRawAlerter(server.URL, "", "", false) if err := alerter.Send(context.Background(), AlertMessage{}); err == nil { t.Error("Expected error for non-2xx response") } diff --git a/internal/pkg/alerting/http.go b/internal/pkg/alerting/http.go index ab086e576..e5bb3890c 100644 --- a/internal/pkg/alerting/http.go +++ b/internal/pkg/alerting/http.go @@ -36,11 +36,21 @@ func newHTTPClient(proxyURL string) *httpClient { // post sends a POST request with JSON body. func (c *httpClient) post(ctx context.Context, url string, body []byte) error { + return c.doPost(ctx, url, body, "application/json") +} + +// postText sends a POST request with plain text body. +func (c *httpClient) postText(ctx context.Context, url string, text string) error { + return c.doPost(ctx, url, []byte(text), "text/plain") +} + +// doPost sends a POST request with the specified content type. 
+func (c *httpClient) doPost(ctx context.Context, url string, body []byte, contentType string) error { req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) if err != nil { return fmt.Errorf("creating request: %w", err) } - req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Type", contentType) resp, err := c.client.Do(req) if err != nil { diff --git a/internal/pkg/alerting/raw.go b/internal/pkg/alerting/raw.go index ad0add08e..d8ea3046a 100644 --- a/internal/pkg/alerting/raw.go +++ b/internal/pkg/alerting/raw.go @@ -4,25 +4,29 @@ import ( "context" "encoding/json" "fmt" + "strings" ) -// RawAlerter sends alerts as raw JSON to a webhook. +// RawAlerter sends alerts to a webhook as plain text (default) or structured JSON. type RawAlerter struct { webhookURL string additional string + structured bool client *httpClient } // NewRawAlerter creates a new RawAlerter. -func NewRawAlerter(webhookURL, proxyURL, additional string) *RawAlerter { +// If structured is true, sends JSON; otherwise sends plain text. +func NewRawAlerter(webhookURL, proxyURL, additional string, structured bool) *RawAlerter { return &RawAlerter{ webhookURL: webhookURL, additional: additional, + structured: structured, client: newHTTPClient(proxyURL), } } -// rawMessage is the JSON payload for raw webhook alerts. +// rawMessage is the JSON payload for structured raw webhook alerts. 
type rawMessage struct { Event string `json:"event"` WorkloadKind string `json:"workloadKind"` @@ -36,6 +40,13 @@ type rawMessage struct { } func (a *RawAlerter) Send(ctx context.Context, message AlertMessage) error { + if a.structured { + return a.sendStructured(ctx, message) + } + return a.sendPlainText(ctx, message) +} + +func (a *RawAlerter) sendStructured(ctx context.Context, message AlertMessage) error { msg := rawMessage{ Event: "reload", WorkloadKind: message.WorkloadKind, @@ -55,3 +66,25 @@ func (a *RawAlerter) Send(ctx context.Context, message AlertMessage) error { return a.client.post(ctx, a.webhookURL, body) } + +func (a *RawAlerter) sendPlainText(ctx context.Context, message AlertMessage) error { + text := a.formatMessage(message) + // Strip markdown formatting for plain text + text = strings.ReplaceAll(text, "*", "") + return a.client.postText(ctx, a.webhookURL, text) +} + +func (a *RawAlerter) formatMessage(msg AlertMessage) string { + text := fmt.Sprintf( + "Reloader triggered reload - Workload: %s/%s (%s), Resource: %s/%s (%s), Time: %s", + msg.WorkloadNamespace, msg.WorkloadName, msg.WorkloadKind, + msg.ResourceNamespace, msg.ResourceName, msg.ResourceKind, + msg.Timestamp.Format("2006-01-02 15:04:05 UTC"), + ) + + if a.additional != "" { + text = a.additional + " : " + text + } + + return text +} diff --git a/internal/pkg/alerting/slack.go b/internal/pkg/alerting/slack.go index 1b9171180..68df2ac00 100644 --- a/internal/pkg/alerting/slack.go +++ b/internal/pkg/alerting/slack.go @@ -22,13 +22,84 @@ func NewSlackAlerter(webhookURL, proxyURL, additional string) *SlackAlerter { } } +// slackMessage represents a Slack webhook message. 
type slackMessage struct { - Text string `json:"text"` + Username string `json:"username,omitempty"` + IconEmoji string `json:"icon_emoji,omitempty"` + IconURL string `json:"icon_url,omitempty"` + Channel string `json:"channel,omitempty"` + ThreadTimestamp string `json:"thread_ts,omitempty"` + Text string `json:"text,omitempty"` + Attachments []slackAttachment `json:"attachments,omitempty"` + Parse string `json:"parse,omitempty"` + ResponseType string `json:"response_type,omitempty"` + ReplaceOriginal bool `json:"replace_original,omitempty"` + DeleteOriginal bool `json:"delete_original,omitempty"` + ReplyBroadcast bool `json:"reply_broadcast,omitempty"` +} + +// slackAttachment represents a Slack message attachment. +type slackAttachment struct { + Color string `json:"color,omitempty"` + Fallback string `json:"fallback,omitempty"` + + CallbackID string `json:"callback_id,omitempty"` + ID int `json:"id,omitempty"` + + AuthorID string `json:"author_id,omitempty"` + AuthorName string `json:"author_name,omitempty"` + AuthorSubname string `json:"author_subname,omitempty"` + AuthorLink string `json:"author_link,omitempty"` + AuthorIcon string `json:"author_icon,omitempty"` + + Title string `json:"title,omitempty"` + TitleLink string `json:"title_link,omitempty"` + Pretext string `json:"pretext,omitempty"` + Text string `json:"text,omitempty"` + + ImageURL string `json:"image_url,omitempty"` + ThumbURL string `json:"thumb_url,omitempty"` + + ServiceName string `json:"service_name,omitempty"` + ServiceIcon string `json:"service_icon,omitempty"` + FromURL string `json:"from_url,omitempty"` + OriginalURL string `json:"original_url,omitempty"` + + Fields []slackField `json:"fields,omitempty"` + MarkdownIn []string `json:"mrkdwn_in,omitempty"` + + Footer string `json:"footer,omitempty"` + FooterIcon string `json:"footer_icon,omitempty"` + + Actions []slackAction `json:"actions,omitempty"` +} + +// slackField represents a field in a Slack attachment. 
+type slackField struct { + Title string `json:"title"` + Value string `json:"value"` + Short bool `json:"short"` +} + +// slackAction represents an action button in a Slack attachment. +type slackAction struct { + Type string `json:"type"` + Text string `json:"text"` + URL string `json:"url"` + Style string `json:"style"` } func (a *SlackAlerter) Send(ctx context.Context, message AlertMessage) error { text := a.formatMessage(message) - msg := slackMessage{Text: text} + msg := slackMessage{ + Attachments: []slackAttachment{ + { + Text: text, + Color: "good", + AuthorName: "Reloader", + }, + }, + } body, err := json.Marshal(msg) if err != nil { diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 0bb972d90..583b4dca0 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -84,6 +84,7 @@ type AlertingConfig struct { Sink string `json:"sink,omitempty"` Proxy string `json:"proxy,omitempty"` Additional string `json:"additional,omitempty"` + Structured bool `json:"structured,omitempty"` // For raw sink: send structured JSON instead of plain text } // LeaderElectionConfig holds configuration for leader election. 
From 2f9633c3f135d8a7c864313c6e20c081f04afeb6 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 11:31:27 +0100 Subject: [PATCH 25/35] feat: Use viper for config handling and flags --- go.mod | 8 + go.sum | 16 ++ internal/pkg/config/flags.go | 342 ++++++++++++++++-------------- internal/pkg/config/flags_test.go | 212 +++++++++++++----- 4 files changed, 362 insertions(+), 216 deletions(-) diff --git a/go.mod b/go.mod index 3ef99f1f8..39587a789 100644 --- a/go.mod +++ b/go.mod @@ -39,6 +39,7 @@ require ( github.com/go-openapi/swag/stringutils v0.25.4 // indirect github.com/go-openapi/swag/typeutils v0.25.4 // indirect github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.7.1 // indirect @@ -52,9 +53,16 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.67.4 // indirect github.com/prometheus/procfs v0.19.2 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/viper v1.21.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect diff --git a/go.sum b/go.sum index 0e81e0d94..f7e22e633 100644 --- a/go.sum +++ b/go.sum @@ -65,6 +65,8 @@ github.com/go-openapi/testify/v2 
v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6 github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -114,6 +116,8 @@ github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -132,17 +136,29 @@ github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.11.0 
h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index e30be2ffd..e37af2099 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -2,215 +2,211 @@ package config import ( "strings" + "time" "github.com/spf13/pflag" + "github.com/spf13/viper" ) -// flagValues holds intermediate string values from CLI flags -// that need further parsing into the Config struct. -type flagValues struct { - namespaceSelectors string - resourceSelectors string - ignoredResources string - ignoredWorkloads string - ignoredNamespaces string - isArgoRollouts string - reloadOnCreate string - reloadOnDelete string -} +// v is the viper instance for configuration. +var v *viper.Viper -var fv flagValues +func init() { + v = viper.New() + // Convert flag names like "alert-webhook-url" to env vars like "ALERT_WEBHOOK_URL" + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() +} // BindFlags binds configuration flags to the provided flag set. // Call this before parsing flags, then call ApplyFlags after parsing. 
func BindFlags(fs *pflag.FlagSet, cfg *Config) { // Auto reload - fs.BoolVar( - &cfg.AutoReloadAll, "auto-reload-all", cfg.AutoReloadAll, - "Automatically reload all resources when their configmaps/secrets are updated, without requiring annotations", - ) + fs.Bool("auto-reload-all", cfg.AutoReloadAll, + "Automatically reload all resources when their configmaps/secrets are updated, without requiring annotations") // Reload strategy - fs.StringVar( - (*string)(&cfg.ReloadStrategy), "reload-strategy", string(cfg.ReloadStrategy), - "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'", - ) + fs.String("reload-strategy", string(cfg.ReloadStrategy), + "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'") // Argo Rollouts - fs.StringVar( - &fv.isArgoRollouts, "is-Argo-Rollouts", "false", - "Enable Argo Rollouts support (true/false)", - ) + fs.String("is-Argo-Rollouts", "false", + "Enable Argo Rollouts support (true/false)") // Event watching - fs.StringVar( - &fv.reloadOnCreate, "reload-on-create", "false", - "Reload when configmaps/secrets are created (true/false)", - ) - fs.StringVar( - &fv.reloadOnDelete, "reload-on-delete", "false", - "Reload when configmaps/secrets are deleted (true/false)", - ) + fs.String("reload-on-create", "false", + "Reload when configmaps/secrets are created (true/false)") + fs.String("reload-on-delete", "false", + "Reload when configmaps/secrets are deleted (true/false)") // Sync after restart - fs.BoolVar( - &cfg.SyncAfterRestart, "sync-after-restart", cfg.SyncAfterRestart, - "Trigger sync operation after restart", - ) + fs.Bool("sync-after-restart", cfg.SyncAfterRestart, + "Trigger sync operation after restart") // High availability / Leader election - fs.BoolVar( - &cfg.EnableHA, "enable-ha", cfg.EnableHA, - "Enable high-availability mode with leader election", - ) - fs.StringVar( - &cfg.LeaderElection.LockName, "leader-election-id", 
cfg.LeaderElection.LockName, - "Name of the lease resource for leader election", - ) - fs.StringVar( - &cfg.LeaderElection.Namespace, "leader-election-namespace", cfg.LeaderElection.Namespace, - "Namespace for the leader election lease (defaults to pod namespace)", - ) - fs.DurationVar( - &cfg.LeaderElection.LeaseDuration, "leader-election-lease-duration", cfg.LeaderElection.LeaseDuration, - "Duration that non-leader candidates will wait before attempting to acquire leadership", - ) - fs.DurationVar( - &cfg.LeaderElection.RenewDeadline, "leader-election-renew-deadline", cfg.LeaderElection.RenewDeadline, - "Duration that the acting leader will retry refreshing leadership before giving up", - ) - fs.DurationVar( - &cfg.LeaderElection.RetryPeriod, "leader-election-retry-period", cfg.LeaderElection.RetryPeriod, - "Duration between leader election retries", - ) - fs.BoolVar( - &cfg.LeaderElection.ReleaseOnCancel, "leader-election-release-on-cancel", cfg.LeaderElection.ReleaseOnCancel, - "Release the leader lock when the manager is stopped", - ) + fs.Bool("enable-ha", cfg.EnableHA, + "Enable high-availability mode with leader election") + fs.String("leader-election-id", cfg.LeaderElection.LockName, + "Name of the lease resource for leader election") + fs.String("leader-election-namespace", cfg.LeaderElection.Namespace, + "Namespace for the leader election lease (defaults to pod namespace)") + fs.Duration("leader-election-lease-duration", cfg.LeaderElection.LeaseDuration, + "Duration that non-leader candidates will wait before attempting to acquire leadership") + fs.Duration("leader-election-renew-deadline", cfg.LeaderElection.RenewDeadline, + "Duration that the acting leader will retry refreshing leadership before giving up") + fs.Duration("leader-election-retry-period", cfg.LeaderElection.RetryPeriod, + "Duration between leader election retries") + fs.Bool("leader-election-release-on-cancel", cfg.LeaderElection.ReleaseOnCancel, + "Release the leader lock when the 
manager is stopped") // Webhook - fs.StringVar( - &cfg.WebhookURL, "webhook-url", cfg.WebhookURL, - "URL to send notification instead of triggering reload", - ) - - // Filtering - resources (use StringVar not StringSliceVar for simpler parsing) - fs.StringVar( - &fv.ignoredResources, "resources-to-ignore", "", - "Comma-separated list of resources to ignore (valid options: 'configMaps' or 'secrets')", - ) - fs.StringVar( - &fv.ignoredWorkloads, "ignored-workload-types", "", - "Comma-separated list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)", - ) - fs.StringVar( - &fv.ignoredNamespaces, "namespaces-to-ignore", "", - "Comma-separated list of namespaces to ignore", - ) + fs.String("webhook-url", cfg.WebhookURL, + "URL to send notification instead of triggering reload") + + // Filtering - resources + fs.String("resources-to-ignore", "", + "Comma-separated list of resources to ignore (valid options: 'configMaps' or 'secrets')") + fs.String("ignored-workload-types", "", + "Comma-separated list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)") + fs.String("namespaces-to-ignore", "", + "Comma-separated list of namespaces to ignore") // Filtering - selectors - fs.StringVar( - &fv.namespaceSelectors, "namespace-selector", "", - "Comma-separated list of namespace label selectors", - ) - fs.StringVar( - &fv.resourceSelectors, "resource-label-selector", "", - "Comma-separated list of resource label selectors", - ) + fs.String("namespace-selector", "", + "Comma-separated list of namespace label selectors") + fs.String("resource-label-selector", "", + "Comma-separated list of resource label selectors") // Logging - fs.StringVar( - &cfg.LogFormat, "log-format", cfg.LogFormat, - "Log format: 'json' or empty for default", - ) - fs.StringVar( - &cfg.LogLevel, "log-level", cfg.LogLevel, - "Log level: trace, debug, info, warning, error, fatal, panic", - ) + fs.String("log-format", cfg.LogFormat, + "Log format: 'json' or empty for 
default") + fs.String("log-level", cfg.LogLevel, + "Log level: trace, debug, info, warning, error, fatal, panic") // Metrics - fs.StringVar( - &cfg.MetricsAddr, "metrics-addr", cfg.MetricsAddr, - "Address to serve metrics on", - ) + fs.String("metrics-addr", cfg.MetricsAddr, + "Address to serve metrics on") // Health probes - fs.StringVar( - &cfg.HealthAddr, "health-addr", cfg.HealthAddr, - "Address to serve health probes on", - ) + fs.String("health-addr", cfg.HealthAddr, + "Address to serve health probes on") // Profiling - fs.BoolVar( - &cfg.EnablePProf, "enable-pprof", cfg.EnablePProf, - "Enable pprof profiling server", - ) - fs.StringVar( - &cfg.PProfAddr, "pprof-addr", cfg.PProfAddr, - "Address for pprof server", - ) + fs.Bool("enable-pprof", cfg.EnablePProf, + "Enable pprof profiling server") + fs.String("pprof-addr", cfg.PProfAddr, + "Address for pprof server") // Annotation customization (flag names match v1 for backward compatibility) - fs.StringVar( - &cfg.Annotations.Auto, "auto-annotation", cfg.Annotations.Auto, - "Annotation to detect changes in secrets/configmaps", - ) - fs.StringVar( - &cfg.Annotations.ConfigmapAuto, "configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, - "Annotation to detect changes in configmaps", - ) - fs.StringVar( - &cfg.Annotations.SecretAuto, "secret-auto-annotation", cfg.Annotations.SecretAuto, - "Annotation to detect changes in secrets", - ) - fs.StringVar( - &cfg.Annotations.ConfigmapReload, "configmap-annotation", cfg.Annotations.ConfigmapReload, - "Annotation to detect changes in configmaps, specified by name", - ) - fs.StringVar( - &cfg.Annotations.SecretReload, "secret-annotation", cfg.Annotations.SecretReload, - "Annotation to detect changes in secrets, specified by name", - ) - fs.StringVar( - &cfg.Annotations.Search, "auto-search-annotation", cfg.Annotations.Search, - "Annotation to detect changes in configmaps or secrets tagged with special match annotation", - ) - fs.StringVar( - &cfg.Annotations.Match, 
"search-match-annotation", cfg.Annotations.Match, - "Annotation to mark secrets or configmaps to match the search", - ) - fs.StringVar( - &cfg.Annotations.PausePeriod, "pause-deployment-annotation", cfg.Annotations.PausePeriod, - "Annotation to define the time period to pause a deployment after a configmap/secret change", - ) - fs.StringVar( - &cfg.Annotations.PausedAt, "pause-deployment-time-annotation", cfg.Annotations.PausedAt, - "Annotation to indicate when a deployment was paused by Reloader", - ) + fs.String("auto-annotation", cfg.Annotations.Auto, + "Annotation to detect changes in secrets/configmaps") + fs.String("configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, + "Annotation to detect changes in configmaps") + fs.String("secret-auto-annotation", cfg.Annotations.SecretAuto, + "Annotation to detect changes in secrets") + fs.String("configmap-annotation", cfg.Annotations.ConfigmapReload, + "Annotation to detect changes in configmaps, specified by name") + fs.String("secret-annotation", cfg.Annotations.SecretReload, + "Annotation to detect changes in secrets, specified by name") + fs.String("auto-search-annotation", cfg.Annotations.Search, + "Annotation to detect changes in configmaps or secrets tagged with special match annotation") + fs.String("search-match-annotation", cfg.Annotations.Match, + "Annotation to mark secrets or configmaps to match the search") + fs.String("pause-deployment-annotation", cfg.Annotations.PausePeriod, + "Annotation to define the time period to pause a deployment after a configmap/secret change") + fs.String("pause-deployment-time-annotation", cfg.Annotations.PausedAt, + "Annotation to indicate when a deployment was paused by Reloader") // Watched namespace (for single-namespace mode) - fs.StringVar( - &cfg.WatchedNamespace, "watch-namespace", cfg.WatchedNamespace, - "Namespace to watch (empty for all namespaces)", - ) + fs.String("watch-namespace", cfg.WatchedNamespace, + "Namespace to watch (empty for all namespaces)") + 
+ // Alerting + fs.Bool("alert-on-reload", cfg.Alerting.Enabled, + "Enable sending alerts when resources are reloaded") + fs.String("alert-webhook-url", cfg.Alerting.WebhookURL, + "Webhook URL to send alerts to") + fs.String("alert-sink", cfg.Alerting.Sink, + "Alert sink type: 'slack', 'teams', 'gchat', or 'raw' (default)") + fs.String("alert-proxy", cfg.Alerting.Proxy, + "Proxy URL for alert webhook requests") + fs.String("alert-additional-info", cfg.Alerting.Additional, + "Additional info to include in alerts (e.g., cluster name)") + fs.Bool("alert-structured", cfg.Alerting.Structured, + "For raw sink: send structured JSON instead of plain text") + + // Bind pflags to viper + _ = v.BindPFlags(fs) + + // Bind legacy env var names that don't match the automatic conversion + // (flag "alert-proxy" -> env "ALERT_PROXY", but legacy is "ALERT_WEBHOOK_PROXY") + _ = v.BindEnv("alert-proxy", "ALERT_PROXY", "ALERT_WEBHOOK_PROXY") } -// ApplyFlags applies flag values that need post-processing. +// ApplyFlags applies flag values from viper to the config struct. // Call this after parsing flags. 
func ApplyFlags(cfg *Config) error { - // Parse boolean string flags - cfg.ArgoRolloutsEnabled = parseBoolString(fv.isArgoRollouts) - cfg.ReloadOnCreate = parseBoolString(fv.reloadOnCreate) - cfg.ReloadOnDelete = parseBoolString(fv.reloadOnDelete) + // Boolean flags + cfg.AutoReloadAll = v.GetBool("auto-reload-all") + cfg.SyncAfterRestart = v.GetBool("sync-after-restart") + cfg.EnableHA = v.GetBool("enable-ha") + cfg.EnablePProf = v.GetBool("enable-pprof") + + // Boolean string flags (legacy format: "true"/"false" strings) + cfg.ArgoRolloutsEnabled = parseBoolString(v.GetString("is-Argo-Rollouts")) + cfg.ReloadOnCreate = parseBoolString(v.GetString("reload-on-create")) + cfg.ReloadOnDelete = parseBoolString(v.GetString("reload-on-delete")) + + // String flags + cfg.ReloadStrategy = ReloadStrategy(v.GetString("reload-strategy")) + cfg.WebhookURL = v.GetString("webhook-url") + cfg.LogFormat = v.GetString("log-format") + cfg.LogLevel = v.GetString("log-level") + cfg.MetricsAddr = v.GetString("metrics-addr") + cfg.HealthAddr = v.GetString("health-addr") + cfg.PProfAddr = v.GetString("pprof-addr") + cfg.WatchedNamespace = v.GetString("watch-namespace") + + // Leader election + cfg.LeaderElection.LockName = v.GetString("leader-election-id") + cfg.LeaderElection.Namespace = v.GetString("leader-election-namespace") + cfg.LeaderElection.LeaseDuration = v.GetDuration("leader-election-lease-duration") + cfg.LeaderElection.RenewDeadline = v.GetDuration("leader-election-renew-deadline") + cfg.LeaderElection.RetryPeriod = v.GetDuration("leader-election-retry-period") + cfg.LeaderElection.ReleaseOnCancel = v.GetBool("leader-election-release-on-cancel") + + // Annotations + cfg.Annotations.Auto = v.GetString("auto-annotation") + cfg.Annotations.ConfigmapAuto = v.GetString("configmap-auto-annotation") + cfg.Annotations.SecretAuto = v.GetString("secret-auto-annotation") + cfg.Annotations.ConfigmapReload = v.GetString("configmap-annotation") + cfg.Annotations.SecretReload = 
v.GetString("secret-annotation") + cfg.Annotations.Search = v.GetString("auto-search-annotation") + cfg.Annotations.Match = v.GetString("search-match-annotation") + cfg.Annotations.PausePeriod = v.GetString("pause-deployment-annotation") + cfg.Annotations.PausedAt = v.GetString("pause-deployment-time-annotation") + + // Alerting + cfg.Alerting.Enabled = v.GetBool("alert-on-reload") + cfg.Alerting.WebhookURL = v.GetString("alert-webhook-url") + cfg.Alerting.Sink = strings.ToLower(v.GetString("alert-sink")) + cfg.Alerting.Proxy = v.GetString("alert-proxy") + cfg.Alerting.Additional = v.GetString("alert-additional-info") + cfg.Alerting.Structured = v.GetBool("alert-structured") + + // Special case: if webhook URL is set, auto-enable alerting + if cfg.Alerting.WebhookURL != "" { + cfg.Alerting.Enabled = true + } // Parse comma-separated lists - cfg.IgnoredResources = splitAndTrim(fv.ignoredResources) - cfg.IgnoredWorkloads = splitAndTrim(fv.ignoredWorkloads) - cfg.IgnoredNamespaces = splitAndTrim(fv.ignoredNamespaces) + cfg.IgnoredResources = splitAndTrim(v.GetString("resources-to-ignore")) + cfg.IgnoredWorkloads = splitAndTrim(v.GetString("ignored-workload-types")) + cfg.IgnoredNamespaces = splitAndTrim(v.GetString("namespaces-to-ignore")) // Store raw selector strings - cfg.NamespaceSelectorStrings = splitAndTrim(fv.namespaceSelectors) - cfg.ResourceSelectorStrings = splitAndTrim(fv.resourceSelectors) + cfg.NamespaceSelectorStrings = splitAndTrim(v.GetString("namespace-selector")) + cfg.ResourceSelectorStrings = splitAndTrim(v.GetString("resource-label-selector")) // Parse selectors into labels.Selector var err error @@ -223,9 +219,25 @@ func ApplyFlags(cfg *Config) error { return err } + // Ensure duration defaults are preserved if not set + if cfg.LeaderElection.LeaseDuration == 0 { + cfg.LeaderElection.LeaseDuration = 15 * time.Second + } + if cfg.LeaderElection.RenewDeadline == 0 { + cfg.LeaderElection.RenewDeadline = 10 * time.Second + } + if 
cfg.LeaderElection.RetryPeriod == 0 { + cfg.LeaderElection.RetryPeriod = 2 * time.Second + } + return nil } +// GetViper returns the viper instance for testing or advanced configuration. +func GetViper() *viper.Viper { + return v +} + // parseBoolString parses a string as a boolean, defaulting to false. func parseBoolString(s string) bool { s = strings.ToLower(strings.TrimSpace(s)) diff --git a/internal/pkg/config/flags_test.go b/internal/pkg/config/flags_test.go index 06638d6bb..76b088c50 100644 --- a/internal/pkg/config/flags_test.go +++ b/internal/pkg/config/flags_test.go @@ -1,12 +1,23 @@ package config import ( + "os" + "strings" "testing" "github.com/spf13/pflag" + "github.com/spf13/viper" ) +// resetViper resets the viper instance for testing. +func resetViper() { + v = viper.New() + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() +} + func TestBindFlags(t *testing.T) { + resetViper() cfg := NewDefault() fs := pflag.NewFlagSet("test", pflag.ContinueOnError) @@ -48,6 +59,12 @@ func TestBindFlags(t *testing.T) { "pause-deployment-annotation", "pause-deployment-time-annotation", "watch-namespace", + "alert-on-reload", + "alert-webhook-url", + "alert-sink", + "alert-proxy", + "alert-additional-info", + "alert-structured", } for _, flagName := range expectedFlags { @@ -58,6 +75,7 @@ func TestBindFlags(t *testing.T) { } func TestBindFlags_DefaultValues(t *testing.T) { + resetViper() cfg := NewDefault() fs := pflag.NewFlagSet("test", pflag.ContinueOnError) @@ -67,6 +85,10 @@ func TestBindFlags_DefaultValues(t *testing.T) { t.Fatalf("Parse() error = %v", err) } + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + if cfg.ReloadStrategy != ReloadStrategyEnvVars { t.Errorf("ReloadStrategy = %v, want %v", cfg.ReloadStrategy, ReloadStrategyEnvVars) } @@ -77,6 +99,7 @@ func TestBindFlags_DefaultValues(t *testing.T) { } func TestBindFlags_CustomValues(t *testing.T) { + resetViper() cfg := NewDefault() fs := 
pflag.NewFlagSet("test", pflag.ContinueOnError) @@ -96,6 +119,10 @@ func TestBindFlags_CustomValues(t *testing.T) { t.Fatalf("Parse() error = %v", err) } + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + if !cfg.AutoReloadAll { t.Error("AutoReloadAll should be true") } @@ -143,35 +170,31 @@ func TestApplyFlags_BooleanStrings(t *testing.T) { } for _, tt := range tests { - t.Run( - tt.name, func(t *testing.T) { - fv = flagValues{} - - cfg := NewDefault() - fs := pflag.NewFlagSet("test", pflag.ContinueOnError) - BindFlags(fs, cfg) - - if err := fs.Parse(tt.args); err != nil { - t.Fatalf("Parse() error = %v", err) - } - - err := ApplyFlags(cfg) - if (err != nil) != tt.wantErr { - t.Errorf("ApplyFlags() error = %v, wantErr %v", err, tt.wantErr) - return - } - - if cfg.ArgoRolloutsEnabled != tt.want { - t.Errorf("ArgoRolloutsEnabled = %v, want %v", cfg.ArgoRolloutsEnabled, tt.want) - } - }, - ) + t.Run(tt.name, func(t *testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse(tt.args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + err := ApplyFlags(cfg) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyFlags() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if cfg.ArgoRolloutsEnabled != tt.want { + t.Errorf("ArgoRolloutsEnabled = %v, want %v", cfg.ArgoRolloutsEnabled, tt.want) + } + }) } } func TestApplyFlags_CommaSeparatedLists(t *testing.T) { - fv = flagValues{} - + resetViper() cfg := NewDefault() fs := pflag.NewFlagSet("test", pflag.ContinueOnError) BindFlags(fs, cfg) @@ -201,15 +224,13 @@ func TestApplyFlags_CommaSeparatedLists(t *testing.T) { t.Errorf("IgnoredWorkloads length = %d, want 2", len(cfg.IgnoredWorkloads)) } - // Check ignored namespaces if len(cfg.IgnoredNamespaces) != 2 { t.Errorf("IgnoredNamespaces length = %d, want 2", len(cfg.IgnoredNamespaces)) } } func TestApplyFlags_Selectors(t *testing.T) { 
- fv = flagValues{} - + resetViper() cfg := NewDefault() fs := pflag.NewFlagSet("test", pflag.ContinueOnError) BindFlags(fs, cfg) @@ -241,8 +262,7 @@ func TestApplyFlags_Selectors(t *testing.T) { } func TestApplyFlags_InvalidSelector(t *testing.T) { - fv = flagValues{} - + resetViper() cfg := NewDefault() fs := pflag.NewFlagSet("test", pflag.ContinueOnError) BindFlags(fs, cfg) @@ -261,6 +281,100 @@ func TestApplyFlags_InvalidSelector(t *testing.T) { } } +func TestApplyFlags_AlertingEnvVars(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + wantURL string + wantSink string + wantEnable bool + }{ + { + name: "ALERT_WEBHOOK_URL enables alerting", + envVars: map[string]string{ + "ALERT_WEBHOOK_URL": "https://hooks.example.com", + }, + wantURL: "https://hooks.example.com", + wantEnable: true, + }, + { + name: "all alert env vars", + envVars: map[string]string{ + "ALERT_WEBHOOK_URL": "https://hooks.example.com", + "ALERT_SINK": "slack", + "ALERT_WEBHOOK_PROXY": "http://proxy:8080", + }, + wantURL: "https://hooks.example.com", + wantSink: "slack", + wantEnable: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Clear env and reset viper + for k := range tt.envVars { + os.Unsetenv(k) + } + resetViper() + + // Set env vars + for k, val := range tt.envVars { + os.Setenv(k, val) + defer os.Unsetenv(k) + } + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse([]string{}); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if cfg.Alerting.WebhookURL != tt.wantURL { + t.Errorf("Alerting.WebhookURL = %q, want %q", cfg.Alerting.WebhookURL, tt.wantURL) + } + + if tt.wantSink != "" && cfg.Alerting.Sink != tt.wantSink { + t.Errorf("Alerting.Sink = %q, want %q", cfg.Alerting.Sink, tt.wantSink) + } + + if cfg.Alerting.Enabled != tt.wantEnable { + 
t.Errorf("Alerting.Enabled = %v, want %v", cfg.Alerting.Enabled, tt.wantEnable) + } + }) + } +} + +func TestApplyFlags_LegacyProxyEnvVar(t *testing.T) { + resetViper() + + // Set legacy env var + os.Setenv("ALERT_WEBHOOK_PROXY", "http://legacy-proxy:8080") + defer os.Unsetenv("ALERT_WEBHOOK_PROXY") + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse([]string{}); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if cfg.Alerting.Proxy != "http://legacy-proxy:8080" { + t.Errorf("Alerting.Proxy = %q, want %q", cfg.Alerting.Proxy, "http://legacy-proxy:8080") + } +} + func TestParseBoolString(t *testing.T) { tests := []struct { input string @@ -282,14 +396,12 @@ func TestParseBoolString(t *testing.T) { } for _, tt := range tests { - t.Run( - tt.input, func(t *testing.T) { - got := parseBoolString(tt.input) - if got != tt.want { - t.Errorf("parseBoolString(%q) = %v, want %v", tt.input, got, tt.want) - } - }, - ) + t.Run(tt.input, func(t *testing.T) { + got := parseBoolString(tt.input) + if got != tt.want { + t.Errorf("parseBoolString(%q) = %v, want %v", tt.input, got, tt.want) + } + }) } } @@ -308,19 +420,17 @@ func TestSplitAndTrim(t *testing.T) { } for _, tt := range tests { - t.Run( - tt.name, func(t *testing.T) { - got := splitAndTrim(tt.input) - if len(got) != len(tt.want) { - t.Errorf("splitAndTrim(%q) length = %d, want %d", tt.input, len(got), len(tt.want)) - return - } - for i := range got { - if got[i] != tt.want[i] { - t.Errorf("splitAndTrim(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i]) - } + t.Run(tt.name, func(t *testing.T) { + got := splitAndTrim(tt.input) + if len(got) != len(tt.want) { + t.Errorf("splitAndTrim(%q) length = %d, want %d", tt.input, len(got), len(tt.want)) + return + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("splitAndTrim(%q)[%d] = %q, want 
%q", tt.input, i, got[i], tt.want[i]) } - }, - ) + } + }) } } From 4db9e5974e7b902b93225ca7b3219bcd21bc6718 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 11:38:25 +0100 Subject: [PATCH 26/35] fix: Linting issues --- internal/pkg/config/flags_test.go | 155 +++++++++++++++--------------- 1 file changed, 77 insertions(+), 78 deletions(-) diff --git a/internal/pkg/config/flags_test.go b/internal/pkg/config/flags_test.go index 76b088c50..0bdb86083 100644 --- a/internal/pkg/config/flags_test.go +++ b/internal/pkg/config/flags_test.go @@ -1,7 +1,6 @@ package config import ( - "os" "strings" "testing" @@ -170,26 +169,28 @@ func TestApplyFlags_BooleanStrings(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resetViper() - cfg := NewDefault() - fs := pflag.NewFlagSet("test", pflag.ContinueOnError) - BindFlags(fs, cfg) - - if err := fs.Parse(tt.args); err != nil { - t.Fatalf("Parse() error = %v", err) - } - - err := ApplyFlags(cfg) - if (err != nil) != tt.wantErr { - t.Errorf("ApplyFlags() error = %v, wantErr %v", err, tt.wantErr) - return - } - - if cfg.ArgoRolloutsEnabled != tt.want { - t.Errorf("ArgoRolloutsEnabled = %v, want %v", cfg.ArgoRolloutsEnabled, tt.want) - } - }) + t.Run( + tt.name, func(t *testing.T) { + resetViper() + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse(tt.args); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + err := ApplyFlags(cfg) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyFlags() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if cfg.ArgoRolloutsEnabled != tt.want { + t.Errorf("ArgoRolloutsEnabled = %v, want %v", cfg.ArgoRolloutsEnabled, tt.want) + } + }, + ) } } @@ -311,52 +312,46 @@ func TestApplyFlags_AlertingEnvVars(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Clear env and reset 
viper - for k := range tt.envVars { - os.Unsetenv(k) - } - resetViper() - - // Set env vars - for k, val := range tt.envVars { - os.Setenv(k, val) - defer os.Unsetenv(k) - } - - cfg := NewDefault() - fs := pflag.NewFlagSet("test", pflag.ContinueOnError) - BindFlags(fs, cfg) - - if err := fs.Parse([]string{}); err != nil { - t.Fatalf("Parse() error = %v", err) - } - - if err := ApplyFlags(cfg); err != nil { - t.Fatalf("ApplyFlags() error = %v", err) - } - - if cfg.Alerting.WebhookURL != tt.wantURL { - t.Errorf("Alerting.WebhookURL = %q, want %q", cfg.Alerting.WebhookURL, tt.wantURL) - } - - if tt.wantSink != "" && cfg.Alerting.Sink != tt.wantSink { - t.Errorf("Alerting.Sink = %q, want %q", cfg.Alerting.Sink, tt.wantSink) - } - - if cfg.Alerting.Enabled != tt.wantEnable { - t.Errorf("Alerting.Enabled = %v, want %v", cfg.Alerting.Enabled, tt.wantEnable) - } - }) + t.Run( + tt.name, func(t *testing.T) { + resetViper() + + for k, val := range tt.envVars { + t.Setenv(k, val) + } + + cfg := NewDefault() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + BindFlags(fs, cfg) + + if err := fs.Parse([]string{}); err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if err := ApplyFlags(cfg); err != nil { + t.Fatalf("ApplyFlags() error = %v", err) + } + + if cfg.Alerting.WebhookURL != tt.wantURL { + t.Errorf("Alerting.WebhookURL = %q, want %q", cfg.Alerting.WebhookURL, tt.wantURL) + } + + if tt.wantSink != "" && cfg.Alerting.Sink != tt.wantSink { + t.Errorf("Alerting.Sink = %q, want %q", cfg.Alerting.Sink, tt.wantSink) + } + + if cfg.Alerting.Enabled != tt.wantEnable { + t.Errorf("Alerting.Enabled = %v, want %v", cfg.Alerting.Enabled, tt.wantEnable) + } + }, + ) } } func TestApplyFlags_LegacyProxyEnvVar(t *testing.T) { resetViper() - // Set legacy env var - os.Setenv("ALERT_WEBHOOK_PROXY", "http://legacy-proxy:8080") - defer os.Unsetenv("ALERT_WEBHOOK_PROXY") + t.Setenv("ALERT_WEBHOOK_PROXY", "http://legacy-proxy:8080") cfg := NewDefault() fs := 
pflag.NewFlagSet("test", pflag.ContinueOnError) @@ -396,12 +391,14 @@ func TestParseBoolString(t *testing.T) { } for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - got := parseBoolString(tt.input) - if got != tt.want { - t.Errorf("parseBoolString(%q) = %v, want %v", tt.input, got, tt.want) - } - }) + t.Run( + tt.input, func(t *testing.T) { + got := parseBoolString(tt.input) + if got != tt.want { + t.Errorf("parseBoolString(%q) = %v, want %v", tt.input, got, tt.want) + } + }, + ) } } @@ -420,17 +417,19 @@ func TestSplitAndTrim(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := splitAndTrim(tt.input) - if len(got) != len(tt.want) { - t.Errorf("splitAndTrim(%q) length = %d, want %d", tt.input, len(got), len(tt.want)) - return - } - for i := range got { - if got[i] != tt.want[i] { - t.Errorf("splitAndTrim(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i]) + t.Run( + tt.name, func(t *testing.T) { + got := splitAndTrim(tt.input) + if len(got) != len(tt.want) { + t.Errorf("splitAndTrim(%q) length = %d, want %d", tt.input, len(got), len(tt.want)) + return + } + for i := range got { + if got[i] != tt.want[i] { + t.Errorf("splitAndTrim(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i]) + } } - } - }) + }, + ) } } From 8bce8e9b38afc78319f683363a2e6e7a32c3056f Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 14:29:45 +0100 Subject: [PATCH 27/35] chore: Cleanup code --- cmd/reloader/main.go | 13 -- internal/pkg/config/flags.go | 251 +++++++++++++++++++----------- internal/pkg/config/validation.go | 16 +- 3 files changed, 170 insertions(+), 110 deletions(-) diff --git a/cmd/reloader/main.go b/cmd/reloader/main.go index 1d8c76ec0..712a9d5ad 100644 --- a/cmd/reloader/main.go +++ b/cmd/reloader/main.go @@ -51,17 +51,14 @@ func newReloaderCommand() *cobra.Command { } func run(cmd *cobra.Command, args []string) error { - // Apply 
post-parse flag processing if err := config.ApplyFlags(cfg); err != nil { return fmt.Errorf("applying flags: %w", err) } - // Validate the configuration if err := cfg.Validate(); err != nil { return fmt.Errorf("validating config: %w", err) } - // Validate HA environment variables if cfg.EnableHA { if err := validateHAEnvs(); err != nil { return err @@ -72,7 +69,6 @@ func run(cmd *cobra.Command, args []string) error { } } - // Configure logging log, err := configureLogging(cfg.LogFormat, cfg.LogLevel) if err != nil { return fmt.Errorf("configuring logging: %w", err) @@ -82,7 +78,6 @@ func run(cmd *cobra.Command, args []string) error { log.Info("Starting Reloader") - // Log configuration if ns := os.Getenv("KUBERNETES_NAMESPACE"); ns == "" { log.Info("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces") } @@ -107,10 +102,8 @@ func run(cmd *cobra.Command, args []string) error { ) } - // Setup Prometheus metrics collectors := metrics.SetupPrometheusEndpoint() - // Create the controller-runtime manager mgr, err := controller.NewManager( controller.ManagerOptions{ Config: cfg, @@ -122,23 +115,19 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("creating manager: %w", err) } - // Setup all reconcilers if err := controller.SetupReconcilers(mgr, cfg, log, &collectors); err != nil { return fmt.Errorf("setting up reconcilers: %w", err) } - // Create metadata ConfigMap if err := metadata.CreateOrUpdate(mgr.GetClient(), cfg, log); err != nil { log.Error(err, "Failed to create metadata ConfigMap") // Non-fatal, continue starting } - // Start pprof server if enabled if cfg.EnablePProf { go startPProfServer(log) } - // Setup signal handling for graceful shutdown ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -150,7 +139,6 @@ func run(cmd *cobra.Command, args []string) error { cancel() }() - // Start the manager log.Info("Starting controller manager") if err := controller.RunManager(ctx, mgr, log); err != nil { 
return fmt.Errorf("manager exited with error: %w", err) @@ -178,7 +166,6 @@ func configureLogging(logFormat, logLevel string) (logr.Logger, error) { return logr.Logger{}, fmt.Errorf("unsupported log level: %q", logLevel) } - // Configure output format var zl zerolog.Logger switch logFormat { case "json": diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index e37af2099..5de13cf37 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -22,118 +22,200 @@ func init() { // Call this before parsing flags, then call ApplyFlags after parsing. func BindFlags(fs *pflag.FlagSet, cfg *Config) { // Auto reload - fs.Bool("auto-reload-all", cfg.AutoReloadAll, - "Automatically reload all resources when their configmaps/secrets are updated, without requiring annotations") + fs.Bool( + "auto-reload-all", cfg.AutoReloadAll, + "Automatically reload all resources when their configmaps/secrets are updated, without requiring annotations", + ) // Reload strategy - fs.String("reload-strategy", string(cfg.ReloadStrategy), - "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'") + fs.String( + "reload-strategy", string(cfg.ReloadStrategy), + "Strategy for triggering workload restart: 'env-vars' (default, GitOps friendly) or 'annotations'", + ) // Argo Rollouts - fs.String("is-Argo-Rollouts", "false", - "Enable Argo Rollouts support (true/false)") + fs.String( + "is-Argo-Rollouts", "false", + "Enable Argo Rollouts support (true/false)", + ) // Event watching - fs.String("reload-on-create", "false", - "Reload when configmaps/secrets are created (true/false)") - fs.String("reload-on-delete", "false", - "Reload when configmaps/secrets are deleted (true/false)") + fs.String( + "reload-on-create", "false", + "Reload when configmaps/secrets are created (true/false)", + ) + fs.String( + "reload-on-delete", "false", + "Reload when configmaps/secrets are deleted (true/false)", + ) // Sync after restart - 
fs.Bool("sync-after-restart", cfg.SyncAfterRestart, - "Trigger sync operation after restart") + fs.Bool( + "sync-after-restart", cfg.SyncAfterRestart, + "Trigger sync operation after restart", + ) // High availability / Leader election - fs.Bool("enable-ha", cfg.EnableHA, - "Enable high-availability mode with leader election") - fs.String("leader-election-id", cfg.LeaderElection.LockName, - "Name of the lease resource for leader election") - fs.String("leader-election-namespace", cfg.LeaderElection.Namespace, - "Namespace for the leader election lease (defaults to pod namespace)") - fs.Duration("leader-election-lease-duration", cfg.LeaderElection.LeaseDuration, - "Duration that non-leader candidates will wait before attempting to acquire leadership") - fs.Duration("leader-election-renew-deadline", cfg.LeaderElection.RenewDeadline, - "Duration that the acting leader will retry refreshing leadership before giving up") - fs.Duration("leader-election-retry-period", cfg.LeaderElection.RetryPeriod, - "Duration between leader election retries") - fs.Bool("leader-election-release-on-cancel", cfg.LeaderElection.ReleaseOnCancel, - "Release the leader lock when the manager is stopped") + fs.Bool( + "enable-ha", cfg.EnableHA, + "Enable high-availability mode with leader election", + ) + fs.String( + "leader-election-id", cfg.LeaderElection.LockName, + "Name of the lease resource for leader election", + ) + fs.String( + "leader-election-namespace", cfg.LeaderElection.Namespace, + "Namespace for the leader election lease (defaults to pod namespace)", + ) + fs.Duration( + "leader-election-lease-duration", cfg.LeaderElection.LeaseDuration, + "Duration that non-leader candidates will wait before attempting to acquire leadership", + ) + fs.Duration( + "leader-election-renew-deadline", cfg.LeaderElection.RenewDeadline, + "Duration that the acting leader will retry refreshing leadership before giving up", + ) + fs.Duration( + "leader-election-retry-period", 
cfg.LeaderElection.RetryPeriod, + "Duration between leader election retries", + ) + fs.Bool( + "leader-election-release-on-cancel", cfg.LeaderElection.ReleaseOnCancel, + "Release the leader lock when the manager is stopped", + ) // Webhook - fs.String("webhook-url", cfg.WebhookURL, - "URL to send notification instead of triggering reload") + fs.String( + "webhook-url", cfg.WebhookURL, + "URL to send notification instead of triggering reload", + ) // Filtering - resources - fs.String("resources-to-ignore", "", - "Comma-separated list of resources to ignore (valid options: 'configMaps' or 'secrets')") - fs.String("ignored-workload-types", "", - "Comma-separated list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)") - fs.String("namespaces-to-ignore", "", - "Comma-separated list of namespaces to ignore") + fs.String( + "resources-to-ignore", "", + "Comma-separated list of resources to ignore (valid options: 'configMaps' or 'secrets')", + ) + fs.String( + "ignored-workload-types", "", + "Comma-separated list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)", + ) + fs.String( + "namespaces-to-ignore", "", + "Comma-separated list of namespaces to ignore", + ) // Filtering - selectors - fs.String("namespace-selector", "", - "Comma-separated list of namespace label selectors") - fs.String("resource-label-selector", "", - "Comma-separated list of resource label selectors") + fs.String( + "namespace-selector", "", + "Comma-separated list of namespace label selectors", + ) + fs.String( + "resource-label-selector", "", + "Comma-separated list of resource label selectors", + ) // Logging - fs.String("log-format", cfg.LogFormat, - "Log format: 'json' or empty for default") - fs.String("log-level", cfg.LogLevel, - "Log level: trace, debug, info, warning, error, fatal, panic") + fs.String( + "log-format", cfg.LogFormat, + "Log format: 'json' or empty for default", + ) + fs.String( + "log-level", cfg.LogLevel, + "Log level: trace, 
debug, info, warning, error, fatal, panic", + ) // Metrics - fs.String("metrics-addr", cfg.MetricsAddr, - "Address to serve metrics on") + fs.String( + "metrics-addr", cfg.MetricsAddr, + "Address to serve metrics on", + ) // Health probes - fs.String("health-addr", cfg.HealthAddr, - "Address to serve health probes on") + fs.String( + "health-addr", cfg.HealthAddr, + "Address to serve health probes on", + ) // Profiling - fs.Bool("enable-pprof", cfg.EnablePProf, - "Enable pprof profiling server") - fs.String("pprof-addr", cfg.PProfAddr, - "Address for pprof server") + fs.Bool( + "enable-pprof", cfg.EnablePProf, + "Enable pprof profiling server", + ) + fs.String( + "pprof-addr", cfg.PProfAddr, + "Address for pprof server", + ) // Annotation customization (flag names match v1 for backward compatibility) - fs.String("auto-annotation", cfg.Annotations.Auto, - "Annotation to detect changes in secrets/configmaps") - fs.String("configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, - "Annotation to detect changes in configmaps") - fs.String("secret-auto-annotation", cfg.Annotations.SecretAuto, - "Annotation to detect changes in secrets") - fs.String("configmap-annotation", cfg.Annotations.ConfigmapReload, - "Annotation to detect changes in configmaps, specified by name") - fs.String("secret-annotation", cfg.Annotations.SecretReload, - "Annotation to detect changes in secrets, specified by name") - fs.String("auto-search-annotation", cfg.Annotations.Search, - "Annotation to detect changes in configmaps or secrets tagged with special match annotation") - fs.String("search-match-annotation", cfg.Annotations.Match, - "Annotation to mark secrets or configmaps to match the search") - fs.String("pause-deployment-annotation", cfg.Annotations.PausePeriod, - "Annotation to define the time period to pause a deployment after a configmap/secret change") - fs.String("pause-deployment-time-annotation", cfg.Annotations.PausedAt, - "Annotation to indicate when a deployment was paused 
by Reloader") + fs.String( + "auto-annotation", cfg.Annotations.Auto, + "Annotation to detect changes in secrets/configmaps", + ) + fs.String( + "configmap-auto-annotation", cfg.Annotations.ConfigmapAuto, + "Annotation to detect changes in configmaps", + ) + fs.String( + "secret-auto-annotation", cfg.Annotations.SecretAuto, + "Annotation to detect changes in secrets", + ) + fs.String( + "configmap-annotation", cfg.Annotations.ConfigmapReload, + "Annotation to detect changes in configmaps, specified by name", + ) + fs.String( + "secret-annotation", cfg.Annotations.SecretReload, + "Annotation to detect changes in secrets, specified by name", + ) + fs.String( + "auto-search-annotation", cfg.Annotations.Search, + "Annotation to detect changes in configmaps or secrets tagged with special match annotation", + ) + fs.String( + "search-match-annotation", cfg.Annotations.Match, + "Annotation to mark secrets or configmaps to match the search", + ) + fs.String( + "pause-deployment-annotation", cfg.Annotations.PausePeriod, + "Annotation to define the time period to pause a deployment after a configmap/secret change", + ) + fs.String( + "pause-deployment-time-annotation", cfg.Annotations.PausedAt, + "Annotation to indicate when a deployment was paused by Reloader", + ) // Watched namespace (for single-namespace mode) - fs.String("watch-namespace", cfg.WatchedNamespace, - "Namespace to watch (empty for all namespaces)") + fs.String( + "watch-namespace", cfg.WatchedNamespace, + "Namespace to watch (empty for all namespaces)", + ) // Alerting - fs.Bool("alert-on-reload", cfg.Alerting.Enabled, - "Enable sending alerts when resources are reloaded") - fs.String("alert-webhook-url", cfg.Alerting.WebhookURL, - "Webhook URL to send alerts to") - fs.String("alert-sink", cfg.Alerting.Sink, - "Alert sink type: 'slack', 'teams', 'gchat', or 'raw' (default)") - fs.String("alert-proxy", cfg.Alerting.Proxy, - "Proxy URL for alert webhook requests") - fs.String("alert-additional-info", 
cfg.Alerting.Additional, - "Additional info to include in alerts (e.g., cluster name)") - fs.Bool("alert-structured", cfg.Alerting.Structured, - "For raw sink: send structured JSON instead of plain text") + fs.Bool( + "alert-on-reload", cfg.Alerting.Enabled, + "Enable sending alerts when resources are reloaded", + ) + fs.String( + "alert-webhook-url", cfg.Alerting.WebhookURL, + "Webhook URL to send alerts to", + ) + fs.String( + "alert-sink", cfg.Alerting.Sink, + "Alert sink type: 'slack', 'teams', 'gchat', or 'raw' (default)", + ) + fs.String( + "alert-proxy", cfg.Alerting.Proxy, + "Proxy URL for alert webhook requests", + ) + fs.String( + "alert-additional-info", cfg.Alerting.Additional, + "Additional info to include in alerts (e.g., cluster name)", + ) + fs.Bool( + "alert-structured", cfg.Alerting.Structured, + "For raw sink: send structured JSON instead of plain text", + ) // Bind pflags to viper _ = v.BindPFlags(fs) @@ -233,11 +315,6 @@ func ApplyFlags(cfg *Config) error { return nil } -// GetViper returns the viper instance for testing or advanced configuration. -func GetViper() *viper.Viper { - return v -} - // parseBoolString parses a string as a boolean, defaulting to false. 
func parseBoolString(s string) bool { s = strings.ToLower(strings.TrimSpace(s)) diff --git a/internal/pkg/config/validation.go b/internal/pkg/config/validation.go index 85aa0c770..7d559fc6d 100644 --- a/internal/pkg/config/validation.go +++ b/internal/pkg/config/validation.go @@ -42,7 +42,6 @@ func (e ValidationErrors) Error() string { func (c *Config) Validate() error { var errs ValidationErrors - // Validate ReloadStrategy switch c.ReloadStrategy { case ReloadStrategyEnvVars, ReloadStrategyAnnotations: // valid @@ -57,7 +56,6 @@ func (c *Config) Validate() error { ) } - // Validate ArgoRolloutStrategy switch c.ArgoRolloutStrategy { case ArgoRolloutStrategyRestart, ArgoRolloutStrategyRollout: // valid @@ -74,7 +72,6 @@ func (c *Config) Validate() error { ) } - // Validate LogLevel switch strings.ToLower(c.LogLevel) { case "trace", "debug", "info", "warn", "warning", "error", "fatal", "panic", "": // valid @@ -87,7 +84,6 @@ func (c *Config) Validate() error { ) } - // Validate LogFormat switch strings.ToLower(c.LogFormat) { case "json", "": // valid @@ -100,17 +96,17 @@ func (c *Config) Validate() error { ) } - // Normalize IgnoredResources to lowercase for consistent comparison c.IgnoredResources = normalizeToLower(c.IgnoredResources) - // Validate and normalize IgnoredWorkloads c.IgnoredWorkloads = normalizeToLower(c.IgnoredWorkloads) for _, w := range c.IgnoredWorkloads { if _, err := workload.KindFromString(w); err != nil { - errs = append(errs, ValidationError{ - Field: "IgnoredWorkloads", - Message: fmt.Sprintf("unknown workload type %q", w), - }) + errs = append( + errs, ValidationError{ + Field: "IgnoredWorkloads", + Message: fmt.Sprintf("unknown workload type %q", w), + }, + ) } } From 9f331cac4f76cb275557e369c9ee8c5309a99d5c Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 14:52:44 +0100 Subject: [PATCH 28/35] refactor(reloader): replace CreateOrUpdate with Runnable for 
metadata publishing and update RBAC to include watch permission --- cmd/reloader/main.go | 4 +-- .../kubernetes/chart/reloader/Chart.yaml | 2 +- .../chart/reloader/templates/clusterrole.yaml | 1 + .../chart/reloader/templates/role.yaml | 1 + internal/pkg/metadata/publisher.go | 33 ++++++++++++++----- 5 files changed, 30 insertions(+), 11 deletions(-) diff --git a/cmd/reloader/main.go b/cmd/reloader/main.go index 712a9d5ad..f89a3375a 100644 --- a/cmd/reloader/main.go +++ b/cmd/reloader/main.go @@ -119,8 +119,8 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("setting up reconcilers: %w", err) } - if err := metadata.CreateOrUpdate(mgr.GetClient(), cfg, log); err != nil { - log.Error(err, "Failed to create metadata ConfigMap") + if err := mgr.Add(metadata.Runnable(mgr.GetClient(), cfg, log)); err != nil { + log.Error(err, "Failed to add metadata publisher") // Non-fatal, continue starting } diff --git a/deployments/kubernetes/chart/reloader/Chart.yaml b/deployments/kubernetes/chart/reloader/Chart.yaml index 536fd6be5..8c4c4508f 100644 --- a/deployments/kubernetes/chart/reloader/Chart.yaml +++ b/deployments/kubernetes/chart/reloader/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: reloader description: Reloader chart that runs on kubernetes -version: 2.2.7 +version: 2.3.0 appVersion: v1.4.12 keywords: - Reloader diff --git a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml index 9f655aa91..c229c113a 100644 --- a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml +++ b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml @@ -76,6 +76,7 @@ rules: - get - update - patch + - watch {{- if .Values.reloader.ignoreCronJobs }}{{- else }} - apiGroups: - "batch" diff --git a/deployments/kubernetes/chart/reloader/templates/role.yaml b/deployments/kubernetes/chart/reloader/templates/role.yaml index 70a681571..860cf895f 100644 --- 
a/deployments/kubernetes/chart/reloader/templates/role.yaml +++ b/deployments/kubernetes/chart/reloader/templates/role.yaml @@ -67,6 +67,7 @@ rules: - get - update - patch + - watch - apiGroups: - "batch" resources: diff --git a/internal/pkg/metadata/publisher.go b/internal/pkg/metadata/publisher.go index 78bfa92ab..b92cc8c71 100644 --- a/internal/pkg/metadata/publisher.go +++ b/internal/pkg/metadata/publisher.go @@ -40,10 +40,12 @@ func (p *Publisher) Publish(ctx context.Context) error { configMap := metaInfo.ToConfigMap() existing := &corev1.ConfigMap{} - err := p.client.Get(ctx, client.ObjectKey{ - Name: ConfigMapName, - Namespace: namespace, - }, existing) + err := p.client.Get( + ctx, client.ObjectKey{ + Name: ConfigMapName, + Namespace: namespace, + }, existing, + ) if err != nil { if !errors.IsNotFound(err) { @@ -73,8 +75,23 @@ func PublishMetaInfoConfigMap(ctx context.Context, c client.Client, cfg *config. return publisher.Publish(ctx) } -// CreateOrUpdate creates or updates the metadata ConfigMap using the provided client. -func CreateOrUpdate(c client.Client, cfg *config.Config, log logr.Logger) error { - ctx := context.Background() - return PublishMetaInfoConfigMap(ctx, c, cfg, log) +// Runnable returns a controller-runtime Runnable that publishes the metadata ConfigMap +// when the manager starts. This ensures the cache is ready before accessing the API. +func Runnable(c client.Client, cfg *config.Config, log logr.Logger) RunnableFunc { + return func(ctx context.Context) error { + if err := PublishMetaInfoConfigMap(ctx, c, cfg, log); err != nil { + log.Error(err, "Failed to create metadata ConfigMap") + // Non-fatal, don't return error to avoid crashing the manager + } + <-ctx.Done() + return nil + } +} + +// RunnableFunc is a function that implements the controller-runtime Runnable interface. +type RunnableFunc func(context.Context) error + +// Start implements the Runnable interface. 
+func (r RunnableFunc) Start(ctx context.Context) error { + return r(ctx) } From fa5f1858f14ed167d2d4ca8b1f0848c8c75178a0 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 16:43:28 +0100 Subject: [PATCH 29/35] feat: DeploymentConfig support --- cmd/reloader/main.go | 12 + go.mod | 7 +- go.sum | 11 +- internal/pkg/config/config.go | 66 +- internal/pkg/config/flags.go | 21 + internal/pkg/controller/manager.go | 7 +- internal/pkg/controller/test_helpers_test.go | 10 +- internal/pkg/openshift/detect.go | 34 + internal/pkg/testutil/fixtures.go | 208 ++- internal/pkg/testutil/testutil.go | 197 ++- internal/pkg/workload/deploymentconfig.go | 143 ++ internal/pkg/workload/interface.go | 13 +- internal/pkg/workload/lister.go | 13 + internal/pkg/workload/registry.go | 56 +- internal/pkg/workload/registry_test.go | 144 +- internal/pkg/workload/workload_test.go | 1396 +++++++----------- test/e2e/e2e_test.go | 138 +- 17 files changed, 1439 insertions(+), 1037 deletions(-) create mode 100644 internal/pkg/openshift/detect.go create mode 100644 internal/pkg/workload/deploymentconfig.go diff --git a/cmd/reloader/main.go b/cmd/reloader/main.go index f89a3375a..da23618c1 100644 --- a/cmd/reloader/main.go +++ b/cmd/reloader/main.go @@ -14,12 +14,14 @@ import ( "github.com/go-logr/zerologr" "github.com/rs/zerolog" "github.com/spf13/cobra" + "k8s.io/client-go/discovery" controllerruntime "sigs.k8s.io/controller-runtime" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/controller" "github.com/stakater/Reloader/internal/pkg/metadata" "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/openshift" ) // Environment variable names for pod identity in HA mode. 
@@ -115,6 +117,16 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("creating manager: %w", err) } + if config.ShouldAutoDetectOpenShift() { + restConfig := controllerruntime.GetConfigOrDie() + discoveryClient, err := discovery.NewDiscoveryClientForConfig(restConfig) + if err != nil { + log.V(1).Info("Failed to create discovery client for DeploymentConfig detection", "error", err) + } else if openshift.HasDeploymentConfigSupport(discoveryClient, log) { + cfg.DeploymentConfigEnabled = true + } + } + if err := controller.SetupReconcilers(mgr, cfg, log, &collectors); err != nil { return fmt.Errorf("setting up reconcilers: %w", err) } diff --git a/go.mod b/go.mod index 39587a789..ece1cd8a8 100644 --- a/go.mod +++ b/go.mod @@ -6,11 +6,14 @@ require ( github.com/argoproj/argo-rollouts v1.8.3 github.com/go-logr/logr v1.4.3 github.com/go-logr/zerologr v1.2.3 + github.com/openshift/api v0.0.0-20251223163548-3f584b29ee4a + github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 + github.com/spf13/viper v1.21.0 k8s.io/api v0.35.0 k8s.io/apimachinery v0.35.0 k8s.io/client-go v0.35.0 @@ -47,7 +50,6 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kr/text v0.2.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -57,11 +59,10 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.67.4 // indirect github.com/prometheus/procfs v0.19.2 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sagikazarmark/locafero v0.12.0 // indirect 
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/viper v1.21.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect diff --git a/go.sum b/go.sum index f7e22e633..caceb32c8 100644 --- a/go.sum +++ b/go.sum @@ -8,7 +8,6 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -19,6 +18,8 @@ github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCv github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 
h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= @@ -116,6 +117,12 @@ github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/openshift/api v0.0.0-20251222154221-d4b2fef98af2 h1:q7fp/9fnJuXWCzpBzbHZne6aMLGYPKhzPy1uULfaJqA= +github.com/openshift/api v0.0.0-20251222154221-d4b2fef98af2/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/api v0.0.0-20251223163548-3f584b29ee4a h1:lz22938uOBlzTHjGpobGeVWkcxGu6fDQ7oZWheClTHE= +github.com/openshift/api v0.0.0-20251223163548-3f584b29ee4a/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc h1:nIlRaJfr/yGjPV15MNF5eVHLAGyXFjcUzO+hXeWDDk8= +github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc/go.mod h1:cs9BwTu96sm2vQvy7r9rOiltgu90M6ju2qIHFG9WU+o= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -138,6 +145,8 @@ github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= +github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 
h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 583b4dca0..ead11abcd 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -26,16 +26,17 @@ const ( // Config holds all configuration for Reloader. type Config struct { - Annotations AnnotationConfig `json:"annotations"` - AutoReloadAll bool `json:"autoReloadAll"` - ReloadStrategy ReloadStrategy `json:"reloadStrategy"` - ArgoRolloutsEnabled bool `json:"argoRolloutsEnabled"` - ArgoRolloutStrategy ArgoRolloutStrategy `json:"argoRolloutStrategy"` - ReloadOnCreate bool `json:"reloadOnCreate"` - ReloadOnDelete bool `json:"reloadOnDelete"` - SyncAfterRestart bool `json:"syncAfterRestart"` - EnableHA bool `json:"enableHA"` - WebhookURL string `json:"webhookUrl,omitempty"` + Annotations AnnotationConfig `json:"annotations"` + AutoReloadAll bool `json:"autoReloadAll"` + ReloadStrategy ReloadStrategy `json:"reloadStrategy"` + ArgoRolloutsEnabled bool `json:"argoRolloutsEnabled"` + ArgoRolloutStrategy ArgoRolloutStrategy `json:"argoRolloutStrategy"` + DeploymentConfigEnabled bool `json:"deploymentConfigEnabled"` + ReloadOnCreate bool `json:"reloadOnCreate"` + ReloadOnDelete bool `json:"reloadOnDelete"` + SyncAfterRestart bool `json:"syncAfterRestart"` + EnableHA bool `json:"enableHA"` + WebhookURL string `json:"webhookUrl,omitempty"` IgnoredResources []string `json:"ignoredResources,omitempty"` IgnoredWorkloads []string `json:"ignoredWorkloads,omitempty"` @@ -101,28 +102,29 @@ type LeaderElectionConfig struct { // NewDefault creates a Config with default values. 
func NewDefault() *Config { return &Config{ - Annotations: DefaultAnnotations(), - AutoReloadAll: false, - ReloadStrategy: ReloadStrategyEnvVars, - ArgoRolloutsEnabled: false, - ArgoRolloutStrategy: ArgoRolloutStrategyRollout, - ReloadOnCreate: false, - ReloadOnDelete: false, - SyncAfterRestart: false, - EnableHA: false, - WebhookURL: "", - IgnoredResources: []string{}, - IgnoredWorkloads: []string{}, - IgnoredNamespaces: []string{}, - NamespaceSelectors: []labels.Selector{}, - ResourceSelectors: []labels.Selector{}, - LogFormat: "", - LogLevel: "info", - MetricsAddr: ":9090", - HealthAddr: ":8081", - EnablePProf: false, - PProfAddr: ":6060", - Alerting: AlertingConfig{}, + Annotations: DefaultAnnotations(), + AutoReloadAll: false, + ReloadStrategy: ReloadStrategyEnvVars, + ArgoRolloutsEnabled: false, + ArgoRolloutStrategy: ArgoRolloutStrategyRollout, + DeploymentConfigEnabled: false, + ReloadOnCreate: false, + ReloadOnDelete: false, + SyncAfterRestart: false, + EnableHA: false, + WebhookURL: "", + IgnoredResources: []string{}, + IgnoredWorkloads: []string{}, + IgnoredNamespaces: []string{}, + NamespaceSelectors: []labels.Selector{}, + ResourceSelectors: []labels.Selector{}, + LogFormat: "", + LogLevel: "info", + MetricsAddr: ":9090", + HealthAddr: ":8081", + EnablePProf: false, + PProfAddr: ":6060", + Alerting: AlertingConfig{}, LeaderElection: LeaderElectionConfig{ LockName: "reloader-leader-election", LeaseDuration: 15 * time.Second, diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index 5de13cf37..9c2556ddb 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -39,6 +39,12 @@ func BindFlags(fs *pflag.FlagSet, cfg *Config) { "Enable Argo Rollouts support (true/false)", ) + // OpenShift DeploymentConfig + fs.String( + "is-openshift", "", + "Enable OpenShift DeploymentConfig support (true/false/auto). 
Empty or 'auto' enables auto-detection", + ) + // Event watching fs.String( "reload-on-create", "false", @@ -239,6 +245,14 @@ func ApplyFlags(cfg *Config) error { cfg.ReloadOnCreate = parseBoolString(v.GetString("reload-on-create")) cfg.ReloadOnDelete = parseBoolString(v.GetString("reload-on-delete")) + switch strings.ToLower(strings.TrimSpace(v.GetString("is-openshift"))) { + case "true": + cfg.DeploymentConfigEnabled = true + case "false": + cfg.DeploymentConfigEnabled = false + default: + } + // String flags cfg.ReloadStrategy = ReloadStrategy(v.GetString("reload-strategy")) cfg.WebhookURL = v.GetString("webhook-url") @@ -321,6 +335,13 @@ func parseBoolString(s string) bool { return s == "true" || s == "1" || s == "yes" } +// ShouldAutoDetectOpenShift returns true if OpenShift DeploymentConfig support +// should be auto-detected (i.e., the --is-openshift flag was not explicitly set). +func ShouldAutoDetectOpenShift() bool { + val := strings.ToLower(strings.TrimSpace(v.GetString("is-openshift"))) + return val == "" || val == "auto" +} + // splitAndTrim splits a comma-separated string and trims whitespace. 
func splitAndTrim(s string) []string { if s == "" { diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go index bc83ca705..07ffcfbf3 100644 --- a/internal/pkg/controller/manager.go +++ b/internal/pkg/controller/manager.go @@ -6,6 +6,7 @@ import ( argorolloutsv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/go-logr/logr" + openshiftv1 "github.com/openshift/api/apps/v1" "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/events" @@ -27,6 +28,7 @@ var runtimeScheme = runtime.NewScheme() func init() { utilruntime.Must(clientgoscheme.AddToScheme(runtimeScheme)) utilruntime.Must(argorolloutsv1alpha1.AddToScheme(runtimeScheme)) + utilruntime.Must(openshiftv1.AddToScheme(runtimeScheme)) } // ManagerOptions contains options for creating a new Manager. @@ -115,7 +117,10 @@ func NewManagerWithRestConfig(opts ManagerOptions, restConfig *rest.Config) (ctr // SetupReconcilers sets up all reconcilers with the manager. 
func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, collectors *metrics.Collectors) error { - registry := workload.NewRegistry(cfg.ArgoRolloutsEnabled) + registry := workload.NewRegistry(workload.RegistryOptions{ + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + }) reloadService := reload.NewService(cfg) eventRecorder := events.NewRecorder(mgr.GetEventRecorderFor("reloader")) pauseHandler := reload.NewPauseHandler(cfg) diff --git a/internal/pkg/controller/test_helpers_test.go b/internal/pkg/controller/test_helpers_test.go index 778abac7b..916696ab6 100644 --- a/internal/pkg/controller/test_helpers_test.go +++ b/internal/pkg/controller/test_helpers_test.go @@ -36,7 +36,10 @@ func newConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime Log: testr.New(t), Config: cfg, ReloadService: reload.NewService(cfg), - Registry: workload.NewRegistry(cfg.ArgoRolloutsEnabled), + Registry: workload.NewRegistry(workload.RegistryOptions{ + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + }), Collectors: &collectors, EventRecorder: events.NewRecorder(nil), WebhookClient: webhook.NewClient("", testr.New(t)), @@ -59,7 +62,10 @@ func newSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Ob Log: testr.New(t), Config: cfg, ReloadService: reload.NewService(cfg), - Registry: workload.NewRegistry(cfg.ArgoRolloutsEnabled), + Registry: workload.NewRegistry(workload.RegistryOptions{ + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + }), Collectors: &collectors, EventRecorder: events.NewRecorder(nil), WebhookClient: webhook.NewClient("", testr.New(t)), diff --git a/internal/pkg/openshift/detect.go b/internal/pkg/openshift/detect.go new file mode 100644 index 000000000..403c0d27f --- /dev/null +++ b/internal/pkg/openshift/detect.go @@ -0,0 +1,34 @@ +package openshift + 
+import ( + "github.com/go-logr/logr" + "k8s.io/client-go/discovery" +) + +const ( + // DeploymentConfigAPIGroup is the API group for DeploymentConfig. + DeploymentConfigAPIGroup = "apps.openshift.io" + // DeploymentConfigAPIVersion is the API version for DeploymentConfig. + DeploymentConfigAPIVersion = "v1" + // DeploymentConfigResource is the resource name for DeploymentConfig. + DeploymentConfigResource = "deploymentconfigs" +) + +// HasDeploymentConfigSupport checks if the cluster supports DeploymentConfig +func HasDeploymentConfigSupport(client discovery.DiscoveryInterface, log logr.Logger) bool { + resources, err := client.ServerResourcesForGroupVersion(DeploymentConfigAPIGroup + "/" + DeploymentConfigAPIVersion) + if err != nil { + log.V(1).Info("DeploymentConfig API not available", "error", err) + return false + } + + for _, r := range resources.APIResources { + if r.Name == DeploymentConfigResource { + log.Info("DeploymentConfig API detected, enabling support") + return true + } + } + + log.V(1).Info("DeploymentConfig resource not found in apps.openshift.io/v1") + return false +} diff --git a/internal/pkg/testutil/fixtures.go b/internal/pkg/testutil/fixtures.go index 1deb85a19..6ba8587a7 100644 --- a/internal/pkg/testutil/fixtures.go +++ b/internal/pkg/testutil/fixtures.go @@ -1,6 +1,7 @@ package testutil import ( + openshiftv1 "github.com/openshift/api/apps/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -8,12 +9,69 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// NewDeploymentConfig creates a minimal DeploymentConfig for unit testing. 
+func NewDeploymentConfig(name, namespace string, annotations map[string]string) *openshiftv1.DeploymentConfig { + replicas := int32(1) + return &openshiftv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: openshiftv1.DeploymentConfigSpec{ + Replicas: replicas, + Selector: map[string]string{"app": name}, + Template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, + }, + }, + }, + } +} + +// NewDeploymentConfigWithEnvFrom creates a DeploymentConfig with EnvFrom referencing a ConfigMap or Secret. +func NewDeploymentConfigWithEnvFrom(name, namespace string, configMapName, secretName string) *openshiftv1.DeploymentConfig { + dc := NewDeploymentConfig(name, namespace, nil) + if configMapName != "" { + dc.Spec.Template.Spec.Containers[0].EnvFrom = append( + dc.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, + }, + ) + } + if secretName != "" { + dc.Spec.Template.Spec.Containers[0].EnvFrom = append( + dc.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }, + ) + } + return dc +} + // NewScheme creates a scheme with common types for testing. 
func NewScheme() *runtime.Scheme { scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) _ = appsv1.AddToScheme(scheme) _ = batchv1.AddToScheme(scheme) + _ = openshiftv1.AddToScheme(scheme) return scheme } @@ -35,10 +93,12 @@ func NewDeployment(name, namespace string, annotations map[string]string) *appsv Annotations: map[string]string{}, }, Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, }, }, }, @@ -74,30 +134,36 @@ func NewDeploymentWithEnvFrom(name, namespace string, configMapName, secretName // NewDeploymentWithVolume creates a Deployment with a volume from ConfigMap or Secret. func NewDeploymentWithVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { d := NewDeployment(name, namespace, nil) - d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{ - Name: "config", - MountPath: "/etc/config", - }} + d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/etc/config", + }, + } if configMapName != "" { - d.Spec.Template.Spec.Volumes = []corev1.Volume{{ - Name: "config", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, }, }, - }} + } } if secretName != "" { - d.Spec.Template.Spec.Volumes = []corev1.Volume{{ - Name: "config", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: secretName, + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, 
+ }, }, }, - }} + } } return d } @@ -105,33 +171,41 @@ func NewDeploymentWithVolume(name, namespace string, configMapName, secretName s // NewDeploymentWithProjectedVolume creates a Deployment with a projected volume. func NewDeploymentWithProjectedVolume(name, namespace string, configMapName, secretName string) *appsv1.Deployment { d := NewDeployment(name, namespace, nil) - d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{ - Name: "config", - MountPath: "/etc/config", - }} + d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/etc/config", + }, + } sources := []corev1.VolumeProjection{} if configMapName != "" { - sources = append(sources, corev1.VolumeProjection{ - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + sources = append( + sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + }, }, - }) + ) } if secretName != "" { - sources = append(sources, corev1.VolumeProjection{ - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + sources = append( + sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, }, - }) + ) } - d.Spec.Template.Spec.Volumes = []corev1.Volume{{ - Name: "config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{Sources: sources}, + d.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{Sources: sources}, + }, }, - }} + } return d } @@ -153,10 +227,12 @@ func NewDaemonSet(name, namespace string, annotations map[string]string) *appsv1 Annotations: map[string]string{}, }, Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: 
"main", - Image: "nginx", - }}, + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, }, }, }, @@ -181,10 +257,12 @@ func NewStatefulSet(name, namespace string, annotations map[string]string) *apps Annotations: map[string]string{}, }, Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "main", - Image: "nginx", - }}, + Containers: []corev1.Container{ + { + Name: "main", + Image: "nginx", + }, + }, }, }, }, @@ -200,18 +278,30 @@ func NewJob(name, namespace string) *batchv1.Job { }, Spec: batchv1.JobSpec{ Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{{ - Name: "main", - Image: "busybox", - }}, + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox", + }, + }, }, }, }, } } +// NewJobWithAnnotations creates a Job with annotations. +func NewJobWithAnnotations(name, namespace string, annotations map[string]string) *batchv1.Job { + job := NewJob(name, namespace) + job.Annotations = annotations + return job +} + // NewCronJob creates a minimal CronJob for unit testing. func NewCronJob(name, namespace string) *batchv1.CronJob { return &batchv1.CronJob{ @@ -225,12 +315,17 @@ func NewCronJob(name, namespace string) *batchv1.CronJob { JobTemplate: batchv1.JobTemplateSpec{ Spec: batchv1.JobSpec{ Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{{ - Name: "main", - Image: "busybox", - }}, + Containers: []corev1.Container{ + { + Name: "main", + Image: "busybox", + }, + }, }, }, }, @@ -239,6 +334,13 @@ func NewCronJob(name, namespace string) *batchv1.CronJob { } } +// NewCronJobWithAnnotations creates a CronJob with annotations. 
+func NewCronJobWithAnnotations(name, namespace string, annotations map[string]string) *batchv1.CronJob { + cj := NewCronJob(name, namespace) + cj.Annotations = annotations + return cj +} + // NewConfigMap creates a ConfigMap for unit testing. func NewConfigMap(name, namespace string) *corev1.ConfigMap { return &corev1.ConfigMap{ diff --git a/internal/pkg/testutil/testutil.go b/internal/pkg/testutil/testutil.go index 96347e4cb..630155348 100644 --- a/internal/pkg/testutil/testutil.go +++ b/internal/pkg/testutil/testutil.go @@ -7,13 +7,15 @@ import ( "fmt" "time" - "github.com/stakater/Reloader/internal/pkg/config" + openshiftv1 "github.com/openshift/api/apps/v1" + openshiftclient "github.com/openshift/client-go/apps/clientset/versioned" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" ) const ( @@ -133,7 +135,9 @@ func DeleteSecret(client kubernetes.Interface, namespace, name string) error { } // CreateDeployment creates a Deployment that references a ConfigMap/Secret. -func CreateDeployment(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.Deployment, error) { +func CreateDeployment(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( + *appsv1.Deployment, error, +) { var deployment *appsv1.Deployment if useConfigMap { deployment = NewDeploymentWithEnvFrom(name, namespace, name, "") @@ -154,7 +158,9 @@ func DeleteDeployment(client kubernetes.Interface, namespace, name string) error } // CreateDaemonSet creates a DaemonSet that references a ConfigMap/Secret. 
-func CreateDaemonSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.DaemonSet, error) { +func CreateDaemonSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( + *appsv1.DaemonSet, error, +) { daemonset := NewDaemonSet(name, namespace, annotations) // Override image for integration tests daemonset.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" @@ -187,7 +193,9 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace, name string) error } // CreateStatefulSet creates a StatefulSet that references a ConfigMap/Secret. -func CreateStatefulSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) (*appsv1.StatefulSet, error) { +func CreateStatefulSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( + *appsv1.StatefulSet, error, +) { statefulset := NewStatefulSet(name, namespace, annotations) statefulset.Spec.ServiceName = name // Override image for integration tests @@ -266,88 +274,157 @@ func ConvertResourceToSHA(resourceType, namespace, name, data string) string { func WaitForDeploymentAnnotation(client kubernetes.Interface, namespace, name, annotation, expectedValue string, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { - deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return false, nil // Keep waiting - } - value, ok := deployment.Spec.Template.Annotations[annotation] - if !ok { - return false, nil // Keep waiting - } - return value == expectedValue, nil - }) + return wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err 
:= client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + value, ok := deployment.Spec.Template.Annotations[annotation] + if !ok { + return false, nil // Keep waiting + } + return value == expectedValue, nil + }, + ) } -// WaitForDeploymentReloadedAnnotation waits for a deployment to have any reloaded annotation. -func WaitForDeploymentReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { +// WaitForDeploymentReloadedAnnotation waits for a deployment to have the specified reloaded annotation. +func WaitForDeploymentReloadedAnnotation(client kubernetes.Interface, namespace, name, annotationName string, timeout time.Duration) ( + bool, error, +) { var found bool ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { - deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return false, nil // Keep waiting - } - // Check for the last-reloaded-from annotation in pod template - if deployment.Spec.Template.Annotations != nil { - if _, ok := deployment.Spec.Template.Annotations[cfg.Annotations.LastReloadedFrom]; ok { - found = true - return true, nil + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting } - } - return false, nil - }) + // Check for the last-reloaded-from annotation in pod template + if deployment.Spec.Template.Annotations != nil { + if _, ok := deployment.Spec.Template.Annotations[annotationName]; ok { + found = true + return true, nil + } + } + return false, nil + }, + ) if wait.Interrupted(err) { 
return found, nil } return found, err } -// WaitForDaemonSetReloadedAnnotation waits for a daemonset to have any reloaded annotation. -func WaitForDaemonSetReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { +// WaitForDaemonSetReloadedAnnotation waits for a daemonset to have the specified reloaded annotation. +func WaitForDaemonSetReloadedAnnotation(client kubernetes.Interface, namespace, name, annotationName string, timeout time.Duration) ( + bool, error, +) { var found bool ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { - daemonset, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return false, nil // Keep waiting - } - // Check for the last-reloaded-from annotation in pod template - if daemonset.Spec.Template.Annotations != nil { - if _, ok := daemonset.Spec.Template.Annotations[cfg.Annotations.LastReloadedFrom]; ok { - found = true - return true, nil + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + daemonset, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting } - } - return false, nil - }) + // Check for the last-reloaded-from annotation in pod template + if daemonset.Spec.Template.Annotations != nil { + if _, ok := daemonset.Spec.Template.Annotations[annotationName]; ok { + found = true + return true, nil + } + } + return false, nil + }, + ) if wait.Interrupted(err) { return found, nil } return found, err } -// WaitForStatefulSetReloadedAnnotation waits for a statefulset to have any reloaded annotation. 
-func WaitForStatefulSetReloadedAnnotation(client kubernetes.Interface, namespace, name string, cfg *config.Config, timeout time.Duration) (bool, error) { +// WaitForStatefulSetReloadedAnnotation waits for a statefulset to have the specified reloaded annotation. +func WaitForStatefulSetReloadedAnnotation(client kubernetes.Interface, namespace, name, annotationName string, timeout time.Duration) ( + bool, error, +) { var found bool ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { - statefulset, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return false, nil // Keep waiting - } - // Check for the last-reloaded-from annotation in pod template - if statefulset.Spec.Template.Annotations != nil { - if _, ok := statefulset.Spec.Template.Annotations[cfg.Annotations.LastReloadedFrom]; ok { - found = true - return true, nil + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + statefulset, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting } - } - return false, nil - }) + // Check for the last-reloaded-from annotation in pod template + if statefulset.Spec.Template.Annotations != nil { + if _, ok := statefulset.Spec.Template.Annotations[annotationName]; ok { + found = true + return true, nil + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// NewOpenshiftClient creates an OpenShift client from the given rest config. +func NewOpenshiftClient(restCfg *rest.Config) (openshiftclient.Interface, error) { + return openshiftclient.NewForConfig(restCfg) +} + +// CreateDeploymentConfig creates a DeploymentConfig that references a ConfigMap/Secret. 
+func CreateDeploymentConfig(client openshiftclient.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( + *openshiftv1.DeploymentConfig, error, +) { + var dc *openshiftv1.DeploymentConfig + if useConfigMap { + dc = NewDeploymentConfigWithEnvFrom(name, namespace, name, "") + } else { + dc = NewDeploymentConfigWithEnvFrom(name, namespace, "", name) + } + dc.Annotations = annotations + dc.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + dc.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} + + return client.AppsV1().DeploymentConfigs(namespace).Create(context.Background(), dc, metav1.CreateOptions{}) +} + +// DeleteDeploymentConfig deletes the DeploymentConfig with the given name. +func DeleteDeploymentConfig(client openshiftclient.Interface, namespace, name string) error { + return client.AppsV1().DeploymentConfigs(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) +} + +// WaitForDeploymentConfigReloadedAnnotation waits for a DeploymentConfig to have the specified reloaded annotation. 
+func WaitForDeploymentConfigReloadedAnnotation(client openshiftclient.Interface, namespace, name, annotationName string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + dc, err := client.AppsV1().DeploymentConfigs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + if dc.Spec.Template != nil && dc.Spec.Template.Annotations != nil { + if _, ok := dc.Spec.Template.Annotations[annotationName]; ok { + found = true + return true, nil + } + } + return false, nil + }, + ) if wait.Interrupted(err) { return found, nil } diff --git a/internal/pkg/workload/deploymentconfig.go b/internal/pkg/workload/deploymentconfig.go new file mode 100644 index 000000000..14c60469a --- /dev/null +++ b/internal/pkg/workload/deploymentconfig.go @@ -0,0 +1,143 @@ +package workload + +import ( + "context" + + openshiftv1 "github.com/openshift/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// DeploymentConfigWorkload wraps an OpenShift DeploymentConfig. +type DeploymentConfigWorkload struct { + dc *openshiftv1.DeploymentConfig +} + +// NewDeploymentConfigWorkload creates a new DeploymentConfigWorkload. +func NewDeploymentConfigWorkload(dc *openshiftv1.DeploymentConfig) *DeploymentConfigWorkload { + return &DeploymentConfigWorkload{dc: dc} +} + +// Ensure DeploymentConfigWorkload implements WorkloadAccessor. 
+var _ WorkloadAccessor = (*DeploymentConfigWorkload)(nil) + +func (w *DeploymentConfigWorkload) Kind() Kind { + return KindDeploymentConfig +} + +func (w *DeploymentConfigWorkload) GetObject() client.Object { + return w.dc +} + +func (w *DeploymentConfigWorkload) GetName() string { + return w.dc.Name +} + +func (w *DeploymentConfigWorkload) GetNamespace() string { + return w.dc.Namespace +} + +func (w *DeploymentConfigWorkload) GetAnnotations() map[string]string { + return w.dc.Annotations +} + +func (w *DeploymentConfigWorkload) GetPodTemplateAnnotations() map[string]string { + if w.dc.Spec.Template == nil { + return nil + } + if w.dc.Spec.Template.Annotations == nil { + w.dc.Spec.Template.Annotations = make(map[string]string) + } + return w.dc.Spec.Template.Annotations +} + +func (w *DeploymentConfigWorkload) SetPodTemplateAnnotation(key, value string) { + if w.dc.Spec.Template == nil { + w.dc.Spec.Template = &corev1.PodTemplateSpec{} + } + if w.dc.Spec.Template.Annotations == nil { + w.dc.Spec.Template.Annotations = make(map[string]string) + } + w.dc.Spec.Template.Annotations[key] = value +} + +func (w *DeploymentConfigWorkload) GetContainers() []corev1.Container { + if w.dc.Spec.Template == nil { + return nil + } + return w.dc.Spec.Template.Spec.Containers +} + +func (w *DeploymentConfigWorkload) SetContainers(containers []corev1.Container) { + if w.dc.Spec.Template == nil { + w.dc.Spec.Template = &corev1.PodTemplateSpec{} + } + w.dc.Spec.Template.Spec.Containers = containers +} + +func (w *DeploymentConfigWorkload) GetInitContainers() []corev1.Container { + if w.dc.Spec.Template == nil { + return nil + } + return w.dc.Spec.Template.Spec.InitContainers +} + +func (w *DeploymentConfigWorkload) SetInitContainers(containers []corev1.Container) { + if w.dc.Spec.Template == nil { + w.dc.Spec.Template = &corev1.PodTemplateSpec{} + } + w.dc.Spec.Template.Spec.InitContainers = containers +} + +func (w *DeploymentConfigWorkload) GetVolumes() []corev1.Volume { + if 
w.dc.Spec.Template == nil { + return nil + } + return w.dc.Spec.Template.Spec.Volumes +} + +func (w *DeploymentConfigWorkload) Update(ctx context.Context, c client.Client) error { + return c.Update(ctx, w.dc) +} + +func (w *DeploymentConfigWorkload) DeepCopy() Workload { + return &DeploymentConfigWorkload{dc: w.dc.DeepCopy()} +} + +func (w *DeploymentConfigWorkload) GetEnvFromSources() []corev1.EnvFromSource { + if w.dc.Spec.Template == nil { + return nil + } + var sources []corev1.EnvFromSource + for _, container := range w.dc.Spec.Template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range w.dc.Spec.Template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) + } + return sources +} + +func (w *DeploymentConfigWorkload) UsesConfigMap(name string) bool { + if w.dc.Spec.Template == nil { + return false + } + return SpecUsesConfigMap(&w.dc.Spec.Template.Spec, name) +} + +func (w *DeploymentConfigWorkload) UsesSecret(name string) bool { + if w.dc.Spec.Template == nil { + return false + } + return SpecUsesSecret(&w.dc.Spec.Template.Spec, name) +} + +func (w *DeploymentConfigWorkload) GetOwnerReferences() []metav1.OwnerReference { + return w.dc.OwnerReferences +} + +// GetDeploymentConfig returns the underlying DeploymentConfig for special handling. 
+func (w *DeploymentConfigWorkload) GetDeploymentConfig() *openshiftv1.DeploymentConfig { + return w.dc +} diff --git a/internal/pkg/workload/interface.go b/internal/pkg/workload/interface.go index 6f805af40..e1d50a18a 100644 --- a/internal/pkg/workload/interface.go +++ b/internal/pkg/workload/interface.go @@ -18,12 +18,13 @@ import ( type Kind string const ( - KindDeployment Kind = "Deployment" - KindDaemonSet Kind = "DaemonSet" - KindStatefulSet Kind = "StatefulSet" - KindArgoRollout Kind = "Rollout" - KindJob Kind = "Job" - KindCronJob Kind = "CronJob" + KindDeployment Kind = "Deployment" + KindDaemonSet Kind = "DaemonSet" + KindStatefulSet Kind = "StatefulSet" + KindArgoRollout Kind = "Rollout" + KindJob Kind = "Job" + KindCronJob Kind = "CronJob" + KindDeploymentConfig Kind = "DeploymentConfig" ) // Workload provides a uniform interface for managing Kubernetes workloads. diff --git a/internal/pkg/workload/lister.go b/internal/pkg/workload/lister.go index a30bdfcc7..07cde6155 100644 --- a/internal/pkg/workload/lister.go +++ b/internal/pkg/workload/lister.go @@ -4,6 +4,7 @@ import ( "context" argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + openshiftv1 "github.com/openshift/api/apps/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -128,3 +129,15 @@ func listRollouts(ctx context.Context, c client.Client, namespace string) ([]Wor } return result, nil } + +func listDeploymentConfigs(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { + var list openshiftv1.DeploymentConfigList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]WorkloadAccessor, len(list.Items)) + for i := range list.Items { + result[i] = NewDeploymentConfigWorkload(&list.Items[i]) + } + return result, nil +} diff --git a/internal/pkg/workload/registry.go b/internal/pkg/workload/registry.go index 
696b11c55..3516338d0 100644 --- a/internal/pkg/workload/registry.go +++ b/internal/pkg/workload/registry.go @@ -6,6 +6,7 @@ import ( "strings" argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + openshiftv1 "github.com/openshift/api/apps/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -14,16 +15,24 @@ import ( // WorkloadLister is a function that lists workloads of a specific kind. type WorkloadLister func(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) +// RegistryOptions configures the workload registry. +type RegistryOptions struct { + ArgoRolloutsEnabled bool + DeploymentConfigEnabled bool +} + // Registry provides factory methods for creating Workload instances. type Registry struct { - argoRolloutsEnabled bool - listers map[Kind]WorkloadLister + argoRolloutsEnabled bool + deploymentConfigEnabled bool + listers map[Kind]WorkloadLister } // NewRegistry creates a new workload registry. 
-func NewRegistry(argoRolloutsEnabled bool) *Registry { +func NewRegistry(opts RegistryOptions) *Registry { r := &Registry{ - argoRolloutsEnabled: argoRolloutsEnabled, + argoRolloutsEnabled: opts.ArgoRolloutsEnabled, + deploymentConfigEnabled: opts.DeploymentConfigEnabled, listers: map[Kind]WorkloadLister{ KindDeployment: listDeployments, KindDaemonSet: listDaemonSets, @@ -32,9 +41,12 @@ func NewRegistry(argoRolloutsEnabled bool) *Registry { KindCronJob: listCronJobs, }, } - if argoRolloutsEnabled { + if opts.ArgoRolloutsEnabled { r.listers[KindArgoRollout] = listRollouts } + if opts.DeploymentConfigEnabled { + r.listers[KindDeploymentConfig] = listDeploymentConfigs + } return r } @@ -55,6 +67,9 @@ func (r *Registry) SupportedKinds() []Kind { if r.argoRolloutsEnabled { kinds = append(kinds, KindArgoRollout) } + if r.deploymentConfigEnabled { + kinds = append(kinds, KindDeploymentConfig) + } return kinds } @@ -76,6 +91,11 @@ func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { return nil, fmt.Errorf("argo Rollouts support is not enabled") } return NewRolloutWorkload(o), nil + case *openshiftv1.DeploymentConfig: + if !r.deploymentConfigEnabled { + return nil, fmt.Errorf("openShift DeploymentConfig support is not enabled") + } + return NewDeploymentConfigWorkload(o), nil default: return nil, fmt.Errorf("unsupported object type: %T", obj) } @@ -84,18 +104,20 @@ func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { // kindAliases maps string representations to Kind constants. // Supports lowercase, title case, and plural forms for user convenience. 
var kindAliases = map[string]Kind{ - "deployment": KindDeployment, - "deployments": KindDeployment, - "daemonset": KindDaemonSet, - "daemonsets": KindDaemonSet, - "statefulset": KindStatefulSet, - "statefulsets": KindStatefulSet, - "rollout": KindArgoRollout, - "rollouts": KindArgoRollout, - "job": KindJob, - "jobs": KindJob, - "cronjob": KindCronJob, - "cronjobs": KindCronJob, + "deployment": KindDeployment, + "deployments": KindDeployment, + "daemonset": KindDaemonSet, + "daemonsets": KindDaemonSet, + "statefulset": KindStatefulSet, + "statefulsets": KindStatefulSet, + "rollout": KindArgoRollout, + "rollouts": KindArgoRollout, + "job": KindJob, + "jobs": KindJob, + "cronjob": KindCronJob, + "cronjobs": KindCronJob, + "deploymentconfig": KindDeploymentConfig, + "deploymentconfigs": KindDeploymentConfig, } // KindFromString converts a string to a Kind. diff --git a/internal/pkg/workload/registry_test.go b/internal/pkg/workload/registry_test.go index e681438d1..b84830fae 100644 --- a/internal/pkg/workload/registry_test.go +++ b/internal/pkg/workload/registry_test.go @@ -4,6 +4,7 @@ import ( "testing" argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + openshiftv1 "github.com/openshift/api/apps/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -11,7 +12,7 @@ import ( ) func TestNewRegistry_WithoutArgoRollouts(t *testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{ArgoRolloutsEnabled: false}) kinds := r.SupportedKinds() if len(kinds) != 5 { @@ -30,7 +31,7 @@ func TestNewRegistry_WithoutArgoRollouts(t *testing.T) { } func TestNewRegistry_WithArgoRollouts(t *testing.T) { - r := NewRegistry(true) + r := NewRegistry(RegistryOptions{ArgoRolloutsEnabled: true}) kinds := r.SupportedKinds() if len(kinds) != 6 { @@ -54,7 +55,7 @@ func TestNewRegistry_WithArgoRollouts(t *testing.T) { } func TestRegistry_ListerFor_AllKinds(t *testing.T) { - r := NewRegistry(true) + r := 
NewRegistry(RegistryOptions{ArgoRolloutsEnabled: true}) tests := []struct { kind Kind @@ -78,7 +79,7 @@ func TestRegistry_ListerFor_AllKinds(t *testing.T) { } func TestRegistry_FromObject_Deployment(t *testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{}) deploy := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -93,7 +94,7 @@ func TestRegistry_FromObject_Deployment(t *testing.T) { } func TestRegistry_FromObject_DaemonSet(t *testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{}) ds := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -108,7 +109,7 @@ func TestRegistry_FromObject_DaemonSet(t *testing.T) { } func TestRegistry_FromObject_StatefulSet(t *testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{}) sts := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -123,7 +124,7 @@ func TestRegistry_FromObject_StatefulSet(t *testing.T) { } func TestRegistry_FromObject_Job(t *testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{}) job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -138,7 +139,7 @@ func TestRegistry_FromObject_Job(t *testing.T) { } func TestRegistry_FromObject_CronJob(t *testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{}) cj := &batchv1.CronJob{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -153,7 +154,7 @@ func TestRegistry_FromObject_CronJob(t *testing.T) { } func TestRegistry_FromObject_Rollout_Enabled(t *testing.T) { - r := NewRegistry(true) + r := NewRegistry(RegistryOptions{ArgoRolloutsEnabled: true}) rollout := &argorolloutv1alpha1.Rollout{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -168,7 +169,7 @@ func TestRegistry_FromObject_Rollout_Enabled(t *testing.T) { } func TestRegistry_FromObject_Rollout_Disabled(t 
*testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{}) rollout := &argorolloutv1alpha1.Rollout{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -180,7 +181,7 @@ func TestRegistry_FromObject_Rollout_Disabled(t *testing.T) { } func TestRegistry_FromObject_UnsupportedType(t *testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{}) cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, } @@ -234,7 +235,7 @@ func TestKindFromString(t *testing.T) { } func TestNewLister(t *testing.T) { - r := NewRegistry(false) + r := NewRegistry(RegistryOptions{}) l := NewLister(nil, r, nil) if l == nil { @@ -244,3 +245,122 @@ func TestNewLister(t *testing.T) { t.Error("NewLister should set Registry") } } + +// DeploymentConfig registry tests +func TestNewRegistry_WithDeploymentConfig(t *testing.T) { + r := NewRegistry(RegistryOptions{DeploymentConfigEnabled: true}) + + kinds := r.SupportedKinds() + if len(kinds) != 6 { + t.Errorf("SupportedKinds() = %d kinds, want 6", len(kinds)) + } + + found := false + for _, k := range kinds { + if k == KindDeploymentConfig { + found = true + break + } + } + if !found { + t.Error("SupportedKinds() should include DeploymentConfig when enabled") + } + + if r.ListerFor(KindDeploymentConfig) == nil { + t.Error("ListerFor(KindDeploymentConfig) should return a function when enabled") + } +} + +func TestNewRegistry_WithoutDeploymentConfig(t *testing.T) { + r := NewRegistry(RegistryOptions{DeploymentConfigEnabled: false}) + + for _, k := range r.SupportedKinds() { + if k == KindDeploymentConfig { + t.Error("SupportedKinds() should not include DeploymentConfig when disabled") + } + } + + if r.ListerFor(KindDeploymentConfig) != nil { + t.Error("ListerFor(KindDeploymentConfig) should return nil when disabled") + } +} + +func TestNewRegistry_WithBothOptionalWorkloads(t *testing.T) { + r := NewRegistry(RegistryOptions{ + ArgoRolloutsEnabled: true, + 
DeploymentConfigEnabled: true, + }) + + kinds := r.SupportedKinds() + if len(kinds) != 7 { + t.Errorf("SupportedKinds() = %d kinds, want 7 (5 base + ArgoRollout + DeploymentConfig)", len(kinds)) + } + + foundRollout := false + foundDC := false + for _, k := range kinds { + if k == KindArgoRollout { + foundRollout = true + } + if k == KindDeploymentConfig { + foundDC = true + } + } + if !foundRollout { + t.Error("SupportedKinds() should include ArgoRollout") + } + if !foundDC { + t.Error("SupportedKinds() should include DeploymentConfig") + } +} + +func TestRegistry_FromObject_DeploymentConfig_Enabled(t *testing.T) { + r := NewRegistry(RegistryOptions{DeploymentConfigEnabled: true}) + dc := &openshiftv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + w, err := r.FromObject(dc) + if err != nil { + t.Fatalf("FromObject(DeploymentConfig) error = %v", err) + } + if w.Kind() != KindDeploymentConfig { + t.Errorf("FromObject(DeploymentConfig).Kind() = %v, want %v", w.Kind(), KindDeploymentConfig) + } +} + +func TestRegistry_FromObject_DeploymentConfig_Disabled(t *testing.T) { + r := NewRegistry(RegistryOptions{DeploymentConfigEnabled: false}) + dc := &openshiftv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + _, err := r.FromObject(dc) + if err == nil { + t.Error("FromObject(DeploymentConfig) should return error when DeploymentConfig disabled") + } +} + +func TestKindFromString_DeploymentConfig(t *testing.T) { + tests := []struct { + input string + want Kind + wantErr bool + }{ + {"deploymentconfig", KindDeploymentConfig, false}, + {"deploymentconfigs", KindDeploymentConfig, false}, + {"DeploymentConfig", KindDeploymentConfig, false}, + {"DEPLOYMENTCONFIG", KindDeploymentConfig, false}, + } + + for _, tt := range tests { + got, err := KindFromString(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("KindFromString(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr) + 
continue + } + if got != tt.want { + t.Errorf("KindFromString(%q) = %v, want %v", tt.input, got, tt.want) + } + } +} diff --git a/internal/pkg/workload/workload_test.go b/internal/pkg/workload/workload_test.go index 674f7dbc3..91139d85a 100644 --- a/internal/pkg/workload/workload_test.go +++ b/internal/pkg/workload/workload_test.go @@ -4,22 +4,43 @@ import ( "testing" argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/testutil" ) -func TestDeploymentWorkload_BasicGetters(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deploy", - Namespace: "test-ns", - Annotations: map[string]string{ - "key": "value", +// addEnvVar adds an environment variable with a ConfigMapKeyRef or SecretKeyRef to a container. +func addEnvVarConfigMapRef(containers []corev1.Container, envName, configMapName, key string) { + if len(containers) > 0 { + containers[0].Env = append(containers[0].Env, corev1.EnvVar{ + Name: envName, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, + Key: key, + }, }, - }, + }) } +} + +func addEnvVarSecretRef(containers []corev1.Container, envName, secretName, key string) { + if len(containers) > 0 { + containers[0].Env = append(containers[0].Env, corev1.EnvVar{ + Name: envName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: key, + }, + }, + }) + } +} + +func TestDeploymentWorkload_BasicGetters(t *testing.T) { + deploy := testutil.NewDeployment("test-deploy", "test-ns", map[string]string{"key": "value"}) w := NewDeploymentWorkload(deploy) @@ -41,18 +62,8 @@ func 
TestDeploymentWorkload_BasicGetters(t *testing.T) { } func TestDeploymentWorkload_PodTemplateAnnotations(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "existing": "annotation", - }, - }, - }, - }, - } + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Annotations["existing"] = "annotation" w := NewDeploymentWorkload(deploy) @@ -70,14 +81,8 @@ func TestDeploymentWorkload_PodTemplateAnnotations(t *testing.T) { } func TestDeploymentWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - // No annotations set - }, - }, - } + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Annotations = nil w := NewDeploymentWorkload(deploy) @@ -95,21 +100,8 @@ func TestDeploymentWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { } func TestDeploymentWorkload_Containers(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Name: "main", Image: "nginx"}, - }, - InitContainers: []corev1.Container{ - {Name: "init", Image: "busybox"}, - }, - }, - }, - }, - } + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{{Name: "init", Image: "busybox"}} w := NewDeploymentWorkload(deploy) @@ -141,18 +133,10 @@ func TestDeploymentWorkload_Containers(t *testing.T) { } func TestDeploymentWorkload_Volumes(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: 
corev1.PodSpec{ - Volumes: []corev1.Volume{ - {Name: "config-vol"}, - {Name: "secret-vol"}, - }, - }, - }, - }, + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Volumes = []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, } w := NewDeploymentWorkload(deploy) @@ -164,27 +148,7 @@ func TestDeploymentWorkload_Volumes(t *testing.T) { } func TestDeploymentWorkload_UsesConfigMap_Volume(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "config-vol", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "my-config", - }, - }, - }, - }, - }, - }, - }, - }, - } + deploy := testutil.NewDeploymentWithVolume("test", "default", "my-config", "") w := NewDeploymentWorkload(deploy) @@ -197,33 +161,7 @@ func TestDeploymentWorkload_UsesConfigMap_Volume(t *testing.T) { } func TestDeploymentWorkload_UsesConfigMap_ProjectedVolume(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "projected-vol", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{ - { - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "projected-config", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } + deploy := testutil.NewDeploymentWithProjectedVolume("test", "default", "projected-config", "") w := NewDeploymentWorkload(deploy) @@ -233,29 +171,7 @@ func TestDeploymentWorkload_UsesConfigMap_ProjectedVolume(t *testing.T) { } func TestDeploymentWorkload_UsesConfigMap_EnvFrom(t *testing.T) { - deploy := 
&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "env-config", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } + deploy := testutil.NewDeploymentWithEnvFrom("test", "default", "env-config", "") w := NewDeploymentWorkload(deploy) @@ -265,33 +181,8 @@ func TestDeploymentWorkload_UsesConfigMap_EnvFrom(t *testing.T) { } func TestDeploymentWorkload_UsesConfigMap_EnvVar(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Env: []corev1.EnvVar{ - { - Name: "CONFIG_VALUE", - ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "var-config", - }, - Key: "some-key", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } + deploy := testutil.NewDeployment("test", "default", nil) + addEnvVarConfigMapRef(deploy.Spec.Template.Spec.Containers, "CONFIG_VALUE", "var-config", "some-key") w := NewDeploymentWorkload(deploy) @@ -301,24 +192,14 @@ func TestDeploymentWorkload_UsesConfigMap_EnvVar(t *testing.T) { } func TestDeploymentWorkload_UsesConfigMap_InitContainer(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - InitContainers: []corev1.Container{ - { - Name: "init", - EnvFrom: []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "init-config", - }, - }, - }, - }, - }, + deploy := 
testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "init-config"}, }, }, }, @@ -333,25 +214,7 @@ func TestDeploymentWorkload_UsesConfigMap_InitContainer(t *testing.T) { } func TestDeploymentWorkload_UsesSecret_Volume(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "secret-vol", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "my-secret", - }, - }, - }, - }, - }, - }, - }, - } + deploy := testutil.NewDeploymentWithVolume("test", "default", "", "my-secret") w := NewDeploymentWorkload(deploy) @@ -364,33 +227,7 @@ func TestDeploymentWorkload_UsesSecret_Volume(t *testing.T) { } func TestDeploymentWorkload_UsesSecret_ProjectedVolume(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "projected-vol", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{ - { - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "projected-secret", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } + deploy := testutil.NewDeploymentWithProjectedVolume("test", "default", "", "projected-secret") w := NewDeploymentWorkload(deploy) @@ -400,29 +237,7 @@ func TestDeploymentWorkload_UsesSecret_ProjectedVolume(t *testing.T) { } func TestDeploymentWorkload_UsesSecret_EnvFrom(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: 
appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - { - SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "env-secret", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } + deploy := testutil.NewDeploymentWithEnvFrom("test", "default", "", "env-secret") w := NewDeploymentWorkload(deploy) @@ -432,33 +247,8 @@ func TestDeploymentWorkload_UsesSecret_EnvFrom(t *testing.T) { } func TestDeploymentWorkload_UsesSecret_EnvVar(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Env: []corev1.EnvVar{ - { - Name: "SECRET_VALUE", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "var-secret", - }, - Key: "some-key", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } + deploy := testutil.NewDeployment("test", "default", nil) + addEnvVarSecretRef(deploy.Spec.Template.Spec.Containers, "SECRET_VALUE", "var-secret", "some-key") w := NewDeploymentWorkload(deploy) @@ -468,24 +258,14 @@ func TestDeploymentWorkload_UsesSecret_EnvVar(t *testing.T) { } func TestDeploymentWorkload_UsesSecret_InitContainer(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - InitContainers: []corev1.Container{ - { - Name: "init", - EnvFrom: []corev1.EnvFromSource{ - { - SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "init-secret", - }, - }, - }, - }, - }, + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: 
"init", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "init-secret"}, }, }, }, @@ -500,35 +280,21 @@ func TestDeploymentWorkload_UsesSecret_InitContainer(t *testing.T) { } func TestDeploymentWorkload_GetEnvFromSources(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, - }, - }, - { - Name: "sidecar", - EnvFrom: []corev1.EnvFromSource{ - {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}, - }, - }, - }, - InitContainers: []corev1.Container{ - { - Name: "init", - EnvFrom: []corev1.EnvFromSource{ - {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "init-cm"}}}, - }, - }, - }, - }, - }, + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{{ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}}, + }, + { + Name: "sidecar", + EnvFrom: []corev1.EnvFromSource{{SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}}, + }, + } + deploy.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{{ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "init-cm"}}}}, }, } @@ -541,21 +307,7 @@ func TestDeploymentWorkload_GetEnvFromSources(t *testing.T) { } func TestDeploymentWorkload_DeepCopy(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Name: "main", Image: "nginx"}, - }, - }, - }, - }, - } + deploy := testutil.NewDeployment("test", "default", nil) w := NewDeploymentWorkload(deploy) copy := w.DeepCopy() @@ -571,17 +323,9 @@ func TestDeploymentWorkload_DeepCopy(t *testing.T) { } func TestDeploymentWorkload_GetOwnerReferences(t *testing.T) { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "apps/v1", - Kind: "ReplicaSet", - Name: "test-rs", - }, - }, - }, + deploy := testutil.NewDeployment("test", "default", nil) + deploy.OwnerReferences = []metav1.OwnerReference{ + {APIVersion: "apps/v1", Kind: "ReplicaSet", Name: "test-rs"}, } w := NewDeploymentWorkload(deploy) @@ -594,15 +338,7 @@ func TestDeploymentWorkload_GetOwnerReferences(t *testing.T) { // DaemonSet tests func TestDaemonSetWorkload_BasicGetters(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-ds", - Namespace: "test-ns", - Annotations: map[string]string{ - "key": "value", - }, - }, - } + ds := testutil.NewDaemonSet("test-ds", "test-ns", map[string]string{"key": "value"}) w := NewDaemonSetWorkload(ds) @@ -624,18 +360,8 @@ func TestDaemonSetWorkload_BasicGetters(t *testing.T) { } func TestDaemonSetWorkload_PodTemplateAnnotations(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "existing": "annotation", - }, - }, - }, - }, - } + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Annotations["existing"] = "annotation" w := NewDaemonSetWorkload(ds) @@ -651,12 +377,8 @@ func TestDaemonSetWorkload_PodTemplateAnnotations(t *testing.T) 
{ } func TestDaemonSetWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{}, - }, - } + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Annotations = nil w := NewDaemonSetWorkload(ds) @@ -672,21 +394,8 @@ func TestDaemonSetWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { } func TestDaemonSetWorkload_Containers(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Name: "main", Image: "nginx"}, - }, - InitContainers: []corev1.Container{ - {Name: "init", Image: "busybox"}, - }, - }, - }, - }, - } + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.InitContainers = []corev1.Container{{Name: "init", Image: "busybox"}} w := NewDaemonSetWorkload(ds) @@ -714,18 +423,10 @@ func TestDaemonSetWorkload_Containers(t *testing.T) { } func TestDaemonSetWorkload_Volumes(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - {Name: "config-vol"}, - {Name: "secret-vol"}, - }, - }, - }, - }, + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Volumes = []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, } w := NewDaemonSetWorkload(ds) @@ -737,23 +438,13 @@ func TestDaemonSetWorkload_Volumes(t *testing.T) { } func TestDaemonSetWorkload_UsesConfigMap(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "config-vol", - VolumeSource: corev1.VolumeSource{ - ConfigMap: 
&corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "ds-config", - }, - }, - }, - }, - }, + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "ds-config"}, }, }, }, @@ -770,28 +461,9 @@ func TestDaemonSetWorkload_UsesConfigMap(t *testing.T) { } func TestDaemonSetWorkload_UsesConfigMap_EnvFrom(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "ds-env-config", - }, - }, - }, - }, - }, - }, - }, - }, - }, + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "ds-env-config"}}}, } w := NewDaemonSetWorkload(ds) @@ -802,22 +474,12 @@ func TestDaemonSetWorkload_UsesConfigMap_EnvFrom(t *testing.T) { } func TestDaemonSetWorkload_UsesSecret(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "secret-vol", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "ds-secret", - }, - }, - }, - }, - }, + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{SecretName: "ds-secret"}, }, }, } @@ 
-833,29 +495,14 @@ func TestDaemonSetWorkload_UsesSecret(t *testing.T) { } func TestDaemonSetWorkload_GetEnvFromSources(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, - }, - }, - }, - InitContainers: []corev1.Container{ - { - Name: "init", - EnvFrom: []corev1.EnvFromSource{ - {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}, - }, - }, - }, - }, - }, + ds := testutil.NewDaemonSet("test", "default", nil) + ds.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + } + ds.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{{SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}}, }, } @@ -868,21 +515,7 @@ func TestDaemonSetWorkload_GetEnvFromSources(t *testing.T) { } func TestDaemonSetWorkload_DeepCopy(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Spec: appsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Name: "main", Image: "nginx"}, - }, - }, - }, - }, - } + ds := testutil.NewDaemonSet("test", "default", nil) w := NewDaemonSetWorkload(ds) copy := w.DeepCopy() @@ -896,17 +529,9 @@ func TestDaemonSetWorkload_DeepCopy(t *testing.T) { } func TestDaemonSetWorkload_GetOwnerReferences(t *testing.T) { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - OwnerReferences: []metav1.OwnerReference{ - { - 
APIVersion: "apps/v1", - Kind: "DaemonSet", - Name: "test-owner", - }, - }, - }, + ds := testutil.NewDaemonSet("test", "default", nil) + ds.OwnerReferences = []metav1.OwnerReference{ + {APIVersion: "apps/v1", Kind: "DaemonSet", Name: "test-owner"}, } w := NewDaemonSetWorkload(ds) @@ -919,15 +544,7 @@ func TestDaemonSetWorkload_GetOwnerReferences(t *testing.T) { // StatefulSet tests func TestStatefulSetWorkload_BasicGetters(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - Namespace: "test-ns", - Annotations: map[string]string{ - "key": "value", - }, - }, - } + sts := testutil.NewStatefulSet("test-sts", "test-ns", map[string]string{"key": "value"}) w := NewStatefulSetWorkload(sts) @@ -949,18 +566,8 @@ func TestStatefulSetWorkload_BasicGetters(t *testing.T) { } func TestStatefulSetWorkload_PodTemplateAnnotations(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "existing": "annotation", - }, - }, - }, - }, - } + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Annotations["existing"] = "annotation" w := NewStatefulSetWorkload(sts) @@ -976,12 +583,8 @@ func TestStatefulSetWorkload_PodTemplateAnnotations(t *testing.T) { } func TestStatefulSetWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{}, - }, - } + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Annotations = nil w := NewStatefulSetWorkload(sts) @@ -997,21 +600,8 @@ func TestStatefulSetWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { } func TestStatefulSetWorkload_Containers(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: 
appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Name: "main", Image: "nginx"}, - }, - InitContainers: []corev1.Container{ - {Name: "init", Image: "busybox"}, - }, - }, - }, - }, - } + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.InitContainers = []corev1.Container{{Name: "init", Image: "busybox"}} w := NewStatefulSetWorkload(sts) @@ -1039,18 +629,10 @@ func TestStatefulSetWorkload_Containers(t *testing.T) { } func TestStatefulSetWorkload_Volumes(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - {Name: "config-vol"}, - {Name: "secret-vol"}, - }, - }, - }, - }, + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Volumes = []corev1.Volume{ + {Name: "config-vol"}, + {Name: "secret-vol"}, } w := NewStatefulSetWorkload(sts) @@ -1062,23 +644,13 @@ func TestStatefulSetWorkload_Volumes(t *testing.T) { } func TestStatefulSetWorkload_UsesConfigMap(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "config-vol", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "sts-config", - }, - }, - }, - }, - }, + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "sts-config"}, }, }, }, @@ -1095,28 +667,9 @@ func TestStatefulSetWorkload_UsesConfigMap(t *testing.T) { } func 
TestStatefulSetWorkload_UsesConfigMap_EnvFrom(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "sts-env-config", - }, - }, - }, - }, - }, - }, - }, - }, - }, + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "sts-env-config"}}}, } w := NewStatefulSetWorkload(sts) @@ -1127,22 +680,12 @@ func TestStatefulSetWorkload_UsesConfigMap_EnvFrom(t *testing.T) { } func TestStatefulSetWorkload_UsesSecret(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "secret-vol", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "sts-secret", - }, - }, - }, - }, - }, + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{SecretName: "sts-secret"}, }, }, } @@ -1158,28 +701,9 @@ func TestStatefulSetWorkload_UsesSecret(t *testing.T) { } func TestStatefulSetWorkload_UsesSecret_EnvFrom(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - { - SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: 
corev1.LocalObjectReference{ - Name: "sts-env-secret", - }, - }, - }, - }, - }, - }, - }, - }, - }, + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "sts-env-secret"}}}, } w := NewStatefulSetWorkload(sts) @@ -1190,29 +714,14 @@ func TestStatefulSetWorkload_UsesSecret_EnvFrom(t *testing.T) { } func TestStatefulSetWorkload_GetEnvFromSources(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, - }, - }, - }, - InitContainers: []corev1.Container{ - { - Name: "init", - EnvFrom: []corev1.EnvFromSource{ - {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}, - }, - }, - }, - }, - }, + sts := testutil.NewStatefulSet("test", "default", nil) + sts.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + } + sts.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{{SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}}, }, } @@ -1225,21 +734,7 @@ func TestStatefulSetWorkload_GetEnvFromSources(t *testing.T) { } func TestStatefulSetWorkload_DeepCopy(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Spec: appsv1.StatefulSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Name: 
"main", Image: "nginx"}, - }, - }, - }, - }, - } + sts := testutil.NewStatefulSet("test", "default", nil) w := NewStatefulSetWorkload(sts) copy := w.DeepCopy() @@ -1253,17 +748,9 @@ func TestStatefulSetWorkload_DeepCopy(t *testing.T) { } func TestStatefulSetWorkload_GetOwnerReferences(t *testing.T) { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "apps/v1", - Kind: "StatefulSet", - Name: "test-owner", - }, - }, - }, + sts := testutil.NewStatefulSet("test", "default", nil) + sts.OwnerReferences = []metav1.OwnerReference{ + {APIVersion: "apps/v1", Kind: "StatefulSet", Name: "test-owner"}, } w := NewStatefulSetWorkload(sts) @@ -1505,15 +992,7 @@ func TestToRolloutStrategy(t *testing.T) { // Job tests func TestJobWorkload_BasicGetters(t *testing.T) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-job", - Namespace: "test-ns", - Annotations: map[string]string{ - "key": "value", - }, - }, - } + job := testutil.NewJobWithAnnotations("test-job", "test-ns", map[string]string{"key": "value"}) w := NewJobWorkload(job) @@ -1535,18 +1014,8 @@ func TestJobWorkload_BasicGetters(t *testing.T) { } func TestJobWorkload_PodTemplateAnnotations(t *testing.T) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "existing": "annotation", - }, - }, - }, - }, - } + job := testutil.NewJob("test", "default") + job.Spec.Template.Annotations["existing"] = "annotation" w := NewJobWorkload(job) @@ -1562,23 +1031,13 @@ func TestJobWorkload_PodTemplateAnnotations(t *testing.T) { } func TestJobWorkload_UsesConfigMap(t *testing.T) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "config-vol", 
- VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "job-config", - }, - }, - }, - }, - }, + job := testutil.NewJob("test", "default") + job.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "job-config"}, }, }, }, @@ -1595,26 +1054,11 @@ func TestJobWorkload_UsesConfigMap(t *testing.T) { } func TestJobWorkload_UsesSecret(t *testing.T) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - EnvFrom: []corev1.EnvFromSource{ - { - SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "job-secret", - }, - }, - }, - }, - }, - }, - }, + job := testutil.NewJob("test", "default") + job.Spec.Template.Spec.Containers[0].EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "job-secret"}, }, }, } @@ -1627,18 +1071,8 @@ func TestJobWorkload_UsesSecret(t *testing.T) { } func TestJobWorkload_DeepCopy(t *testing.T) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "original": "value", - }, - }, - }, - }, - } + job := testutil.NewJob("test", "default") + job.Spec.Template.Annotations["original"] = "value" w := NewJobWorkload(job) copy := w.DeepCopy() @@ -1653,15 +1087,7 @@ func TestJobWorkload_DeepCopy(t *testing.T) { // CronJob tests func TestCronJobWorkload_BasicGetters(t *testing.T) { - cj := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cronjob", - Namespace: "test-ns", - Annotations: 
map[string]string{ - "key": "value", - }, - }, - } + cj := testutil.NewCronJobWithAnnotations("test-cronjob", "test-ns", map[string]string{"key": "value"}) w := NewCronJobWorkload(cj) @@ -1683,22 +1109,8 @@ func TestCronJobWorkload_BasicGetters(t *testing.T) { } func TestCronJobWorkload_PodTemplateAnnotations(t *testing.T) { - cj := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: batchv1.CronJobSpec{ - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "existing": "annotation", - }, - }, - }, - }, - }, - }, - } + cj := testutil.NewCronJob("test", "default") + cj.Spec.JobTemplate.Spec.Template.Annotations["existing"] = "annotation" w := NewCronJobWorkload(cj) @@ -1714,27 +1126,13 @@ func TestCronJobWorkload_PodTemplateAnnotations(t *testing.T) { } func TestCronJobWorkload_UsesConfigMap(t *testing.T) { - cj := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: batchv1.CronJobSpec{ - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "config-vol", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cronjob-config", - }, - }, - }, - }, - }, - }, - }, + cj := testutil.NewCronJob("test", "default") + cj.Spec.JobTemplate.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "cronjob-config"}, }, }, }, @@ -1751,31 +1149,229 @@ func TestCronJobWorkload_UsesConfigMap(t *testing.T) { } func TestCronJobWorkload_UsesSecret(t *testing.T) { - cj := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: batchv1.CronJobSpec{ - JobTemplate: 
batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "main", - Env: []corev1.EnvVar{ - { - Name: "SECRET_VALUE", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cronjob-secret", - }, - Key: "key", - }, - }, - }, - }, - }, - }, + cj := testutil.NewCronJob("test", "default") + addEnvVarSecretRef(cj.Spec.JobTemplate.Spec.Template.Spec.Containers, "SECRET_VALUE", "cronjob-secret", "key") + + w := NewCronJobWorkload(cj) + + if !w.UsesSecret("cronjob-secret") { + t.Error("CronJob UsesSecret should return true for Secret envVar") + } +} + +func TestCronJobWorkload_DeepCopy(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + cj.Spec.JobTemplate.Spec.Template.Annotations["original"] = "value" + + w := NewCronJobWorkload(cj) + copy := w.DeepCopy() + + w.SetPodTemplateAnnotation("modified", "true") + + copyAnnotations := copy.GetPodTemplateAnnotations() + if copyAnnotations["modified"] == "true" { + t.Error("DeepCopy should create independent copy") + } +} + +// Test that Job and CronJob implement the interface +func TestJobCronJobWorkloadInterface(t *testing.T) { + var _ WorkloadAccessor = (*JobWorkload)(nil) + var _ WorkloadAccessor = (*CronJobWorkload)(nil) +} + +// DeploymentConfig tests +func TestDeploymentConfigWorkload_BasicGetters(t *testing.T) { + dc := testutil.NewDeploymentConfig("test-dc", "test-ns", map[string]string{"key": "value"}) + + w := NewDeploymentConfigWorkload(dc) + + if w.Kind() != KindDeploymentConfig { + t.Errorf("Kind() = %v, want %v", w.Kind(), KindDeploymentConfig) + } + if w.GetName() != "test-dc" { + t.Errorf("GetName() = %v, want test-dc", w.GetName()) + } + if w.GetNamespace() != "test-ns" { + t.Errorf("GetNamespace() = %v, want test-ns", w.GetNamespace()) + } + if w.GetAnnotations()["key"] != "value" { + t.Errorf("GetAnnotations()[key] = 
%v, want value", w.GetAnnotations()["key"]) + } + if w.GetObject() != dc { + t.Error("GetObject() should return the underlying deploymentconfig") + } +} + +func TestDeploymentConfigWorkload_PodTemplateAnnotations(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Annotations = map[string]string{"existing": "annotation"} + + w := NewDeploymentConfigWorkload(dc) + + annotations := w.GetPodTemplateAnnotations() + if annotations["existing"] != "annotation" { + t.Errorf("GetPodTemplateAnnotations()[existing] = %v, want annotation", annotations["existing"]) + } + + w.SetPodTemplateAnnotation("new-key", "new-value") + if w.GetPodTemplateAnnotations()["new-key"] != "new-value" { + t.Error("SetPodTemplateAnnotation should add new annotation") + } +} + +func TestDeploymentConfigWorkload_PodTemplateAnnotations_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + // Should handle nil template gracefully + annotations := w.GetPodTemplateAnnotations() + if annotations != nil { + t.Error("GetPodTemplateAnnotations should return nil for nil template") + } + + // SetPodTemplateAnnotation should initialize template + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil template") + } +} + +func TestDeploymentConfigWorkload_PodTemplateAnnotations_NilInit(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Annotations = nil + + w := NewDeploymentConfigWorkload(dc) + + // Should initialize nil map + annotations := w.GetPodTemplateAnnotations() + if annotations == nil { + t.Error("GetPodTemplateAnnotations should initialize nil map") + } + + w.SetPodTemplateAnnotation("key", "value") + if w.GetPodTemplateAnnotations()["key"] != "value" { + t.Error("SetPodTemplateAnnotation should work with nil 
initial map") + } +} + +func TestDeploymentConfigWorkload_Containers(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Containers = []corev1.Container{ + {Name: "main", Image: "nginx"}, + } + dc.Spec.Template.Spec.InitContainers = []corev1.Container{ + {Name: "init", Image: "busybox"}, + } + + w := NewDeploymentConfigWorkload(dc) + + containers := w.GetContainers() + if len(containers) != 1 || containers[0].Name != "main" { + t.Errorf("GetContainers() = %v, want [main]", containers) + } + + initContainers := w.GetInitContainers() + if len(initContainers) != 1 || initContainers[0].Name != "init" { + t.Errorf("GetInitContainers() = %v, want [init]", initContainers) + } + + newContainers := []corev1.Container{{Name: "new-main", Image: "alpine"}} + w.SetContainers(newContainers) + if w.GetContainers()[0].Name != "new-main" { + t.Error("SetContainers should update containers") + } + + newInitContainers := []corev1.Container{{Name: "new-init", Image: "alpine"}} + w.SetInitContainers(newInitContainers) + if w.GetInitContainers()[0].Name != "new-init" { + t.Error("SetInitContainers should update init containers") + } +} + +func TestDeploymentConfigWorkload_Containers_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.GetContainers() != nil { + t.Error("GetContainers should return nil for nil template") + } + if w.GetInitContainers() != nil { + t.Error("GetInitContainers should return nil for nil template") + } + + // SetContainers should initialize template + w.SetContainers([]corev1.Container{{Name: "main"}}) + if len(w.GetContainers()) != 1 { + t.Error("SetContainers should work with nil template") + } +} + +func TestDeploymentConfigWorkload_Volumes(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Volumes = []corev1.Volume{ + {Name: "config-vol"}, + {Name: 
"secret-vol"}, + } + + w := NewDeploymentConfigWorkload(dc) + + volumes := w.GetVolumes() + if len(volumes) != 2 { + t.Errorf("GetVolumes() length = %d, want 2", len(volumes)) + } +} + +func TestDeploymentConfigWorkload_Volumes_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.GetVolumes() != nil { + t.Error("GetVolumes should return nil for nil template") + } +} + +func TestDeploymentConfigWorkload_UsesConfigMap_Volume(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "dc-config", + }, + }, + }, + }, + } + + w := NewDeploymentConfigWorkload(dc) + + if !w.UsesConfigMap("dc-config") { + t.Error("DeploymentConfig UsesConfigMap should return true for ConfigMap volume") + } + if w.UsesConfigMap("other-config") { + t.Error("UsesConfigMap should return false for non-existent ConfigMap") + } +} + +func TestDeploymentConfigWorkload_UsesConfigMap_EnvFrom(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "dc-env-config", }, }, }, @@ -1783,24 +1379,57 @@ func TestCronJobWorkload_UsesSecret(t *testing.T) { }, } - w := NewCronJobWorkload(cj) + w := NewDeploymentConfigWorkload(dc) - if !w.UsesSecret("cronjob-secret") { - t.Error("CronJob UsesSecret should return true for Secret envVar") + if !w.UsesConfigMap("dc-env-config") { + t.Error("DeploymentConfig UsesConfigMap should return true for envFrom ConfigMap") } } -func TestCronJobWorkload_DeepCopy(t *testing.T) { - cj := 
&batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: batchv1.CronJobSpec{ - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "original": "value", - }, +func TestDeploymentConfigWorkload_UsesConfigMap_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.UsesConfigMap("any-config") { + t.Error("UsesConfigMap should return false for nil template") + } +} + +func TestDeploymentConfigWorkload_UsesSecret_Volume(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: "secret-vol", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "dc-secret", + }, + }, + }, + } + + w := NewDeploymentConfigWorkload(dc) + + if !w.UsesSecret("dc-secret") { + t.Error("DeploymentConfig UsesSecret should return true for Secret volume") + } + if w.UsesSecret("other-secret") { + t.Error("UsesSecret should return false for non-existent Secret") + } +} + +func TestDeploymentConfigWorkload_UsesSecret_EnvFrom(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "dc-env-secret", }, }, }, @@ -1808,7 +1437,67 @@ func TestCronJobWorkload_DeepCopy(t *testing.T) { }, } - w := NewCronJobWorkload(cj) + w := NewDeploymentConfigWorkload(dc) + + if !w.UsesSecret("dc-env-secret") { + t.Error("DeploymentConfig UsesSecret should return true for envFrom Secret") + } +} + +func TestDeploymentConfigWorkload_UsesSecret_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + 
dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.UsesSecret("any-secret") { + t.Error("UsesSecret should return false for nil template") + } +} + +func TestDeploymentConfigWorkload_GetEnvFromSources(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: "main", + EnvFrom: []corev1.EnvFromSource{ + {ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "cm1"}}}, + }, + }, + } + dc.Spec.Template.Spec.InitContainers = []corev1.Container{ + { + Name: "init", + EnvFrom: []corev1.EnvFromSource{ + {SecretRef: &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secret1"}}}, + }, + }, + } + + w := NewDeploymentConfigWorkload(dc) + + sources := w.GetEnvFromSources() + if len(sources) != 2 { + t.Errorf("GetEnvFromSources() returned %d sources, want 2", len(sources)) + } +} + +func TestDeploymentConfigWorkload_GetEnvFromSources_NilTemplate(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template = nil + + w := NewDeploymentConfigWorkload(dc) + + if w.GetEnvFromSources() != nil { + t.Error("GetEnvFromSources should return nil for nil template") + } +} + +func TestDeploymentConfigWorkload_DeepCopy(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + dc.Spec.Template.Annotations = map[string]string{"original": "value"} + + w := NewDeploymentConfigWorkload(dc) copy := w.DeepCopy() w.SetPodTemplateAnnotation("modified", "true") @@ -1819,8 +1508,35 @@ func TestCronJobWorkload_DeepCopy(t *testing.T) { } } -// Test that Job and CronJob implement the interface -func TestJobCronJobWorkloadInterface(t *testing.T) { - var _ WorkloadAccessor = (*JobWorkload)(nil) - var _ WorkloadAccessor = (*CronJobWorkload)(nil) +func TestDeploymentConfigWorkload_GetOwnerReferences(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) 
+ dc.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: "apps.openshift.io/v1", + Kind: "DeploymentConfig", + Name: "test-owner", + }, + } + + w := NewDeploymentConfigWorkload(dc) + + refs := w.GetOwnerReferences() + if len(refs) != 1 || refs[0].Name != "test-owner" { + t.Errorf("GetOwnerReferences() = %v, want owner ref to test-owner", refs) + } +} + +func TestDeploymentConfigWorkload_GetDeploymentConfig(t *testing.T) { + dc := testutil.NewDeploymentConfig("test", "default", nil) + + w := NewDeploymentConfigWorkload(dc) + + if w.GetDeploymentConfig() != dc { + t.Error("GetDeploymentConfig should return the underlying DeploymentConfig") + } +} + +// Test that DeploymentConfig implements the interface +func TestDeploymentConfigWorkloadInterface(t *testing.T) { + var _ WorkloadAccessor = (*DeploymentConfigWorkload)(nil) } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 6d3dcc4c5..b8303d9d9 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -10,12 +10,15 @@ import ( "time" "github.com/go-logr/zerologr" + openshiftclient "github.com/openshift/client-go/apps/clientset/versioned" "github.com/rs/zerolog" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/controller" "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/openshift" "github.com/stakater/Reloader/internal/pkg/testutil" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -32,12 +35,14 @@ const ( ) var ( - k8sClient kubernetes.Interface - cfg *config.Config - namespace string - skipE2ETests bool - cancelManager context.CancelFunc - restCfg *rest.Config + k8sClient kubernetes.Interface + osClient openshiftclient.Interface + cfg *config.Config + namespace string + skipE2ETests bool + skipDeploymentConfigTests bool + cancelManager context.CancelFunc + restCfg *rest.Config ) // 
testFixture provides a clean way to set up and tear down test resources. @@ -155,7 +160,7 @@ func (f *testFixture) assertDeploymentReloaded(name string, testCfg *config.Conf if testCfg == nil { testCfg = cfg } - updated, err := testutil.WaitForDeploymentReloadedAnnotation(k8sClient, namespace, name, testCfg, waitTimeout) + updated, err := testutil.WaitForDeploymentReloadedAnnotation(k8sClient, namespace, name, testCfg.Annotations.LastReloadedFrom, waitTimeout) if err != nil { f.t.Fatalf("Error waiting for deployment %s update: %v", name, err) } @@ -171,7 +176,7 @@ func (f *testFixture) assertDeploymentNotReloaded(name string, testCfg *config.C testCfg = cfg } time.Sleep(negativeTestTimeout) - updated, _ := testutil.WaitForDeploymentReloadedAnnotation(k8sClient, namespace, name, testCfg, negativeTestTimeout) + updated, _ := testutil.WaitForDeploymentReloadedAnnotation(k8sClient, namespace, name, testCfg.Annotations.LastReloadedFrom, negativeTestTimeout) if updated { f.t.Errorf("Deployment %s should not have been updated", name) } @@ -180,7 +185,7 @@ func (f *testFixture) assertDeploymentNotReloaded(name string, testCfg *config.C // assertDaemonSetReloaded asserts that a daemonset was reloaded. func (f *testFixture) assertDaemonSetReloaded(name string) { f.t.Helper() - updated, err := testutil.WaitForDaemonSetReloadedAnnotation(k8sClient, namespace, name, cfg, waitTimeout) + updated, err := testutil.WaitForDaemonSetReloadedAnnotation(k8sClient, namespace, name, cfg.Annotations.LastReloadedFrom, waitTimeout) if err != nil { f.t.Fatalf("Error waiting for daemonset %s update: %v", name, err) } @@ -192,7 +197,7 @@ func (f *testFixture) assertDaemonSetReloaded(name string) { // assertStatefulSetReloaded asserts that a statefulset was reloaded. 
func (f *testFixture) assertStatefulSetReloaded(name string) { f.t.Helper() - updated, err := testutil.WaitForStatefulSetReloadedAnnotation(k8sClient, namespace, name, cfg, waitTimeout) + updated, err := testutil.WaitForStatefulSetReloadedAnnotation(k8sClient, namespace, name, cfg.Annotations.LastReloadedFrom, waitTimeout) if err != nil { f.t.Fatalf("Error waiting for statefulset %s update: %v", name, err) } @@ -201,6 +206,28 @@ func (f *testFixture) assertStatefulSetReloaded(name string) { } } +// createDeploymentConfig creates a DeploymentConfig and registers it for cleanup. +func (f *testFixture) createDeploymentConfig(name string, useConfigMap bool, annotations map[string]string) { + f.t.Helper() + _, err := testutil.CreateDeploymentConfig(osClient, name, namespace, useConfigMap, annotations) + if err != nil { + f.t.Fatalf("Failed to create DeploymentConfig %s: %v", name, err) + } + f.workloads = append(f.workloads, workloadInfo{name: name, kind: "deploymentconfig"}) +} + +// assertDeploymentConfigReloaded asserts that a DeploymentConfig was reloaded. +func (f *testFixture) assertDeploymentConfigReloaded(name string) { + f.t.Helper() + updated, err := testutil.WaitForDeploymentConfigReloadedAnnotation(osClient, namespace, name, cfg.Annotations.LastReloadedFrom, waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for DeploymentConfig %s update: %v", name, err) + } + if !updated { + f.t.Errorf("DeploymentConfig %s was not updated after resource change", name) + } +} + // cleanup removes all created resources. 
func (f *testFixture) cleanup() { for _, w := range f.workloads { @@ -211,6 +238,10 @@ func (f *testFixture) cleanup() { _ = testutil.DeleteDaemonSet(k8sClient, namespace, w.name) case "statefulset": _ = testutil.DeleteStatefulSet(k8sClient, namespace, w.name) + case "deploymentconfig": + if osClient != nil { + _ = testutil.DeleteDeploymentConfig(osClient, namespace, w.name) + } } } for _, name := range f.configMaps { @@ -266,6 +297,25 @@ func TestMain(m *testing.M) { cfg = config.NewDefault() cfg.AutoReloadAll = false + // Check if cluster supports DeploymentConfig + discoveryClient, err := discovery.NewDiscoveryClientForConfig(restCfg) + if err != nil { + skipDeploymentConfigTests = true + } else { + // Use a nop logger for detection + nopLog := ctrl.Log.WithName("dc-detection") + if openshift.HasDeploymentConfigSupport(discoveryClient, nopLog) { + cfg.DeploymentConfigEnabled = true + // Create OpenShift client for DeploymentConfig tests + osClient, err = testutil.NewOpenshiftClient(restCfg) + if err != nil { + skipDeploymentConfigTests = true + } + } else { + skipDeploymentConfigTests = true + } + } + _, cancelManager = startManagerWithConfig(cfg, restCfg) code := m.Run() @@ -285,6 +335,13 @@ func skipIfNoCluster(t *testing.T) { } } +func skipIfNoDeploymentConfig(t *testing.T) { + skipIfNoCluster(t) + if skipDeploymentConfigTests { + t.Skip("Skipping DeploymentConfig test: cluster does not support DeploymentConfig API") + } +} + // TestConfigMapUpdate tests that updating a ConfigMap triggers a workload reload. func TestConfigMapUpdate(t *testing.T) { f := newFixture(t, "cm-update") @@ -497,6 +554,67 @@ func TestAutoWithBothExplicitAndReferencedChange(t *testing.T) { f.assertDeploymentReloaded(referencedCM, nil) } +// newFixtureForDeploymentConfig creates a new test fixture for DeploymentConfig tests. 
+func newFixtureForDeploymentConfig(t *testing.T, prefix string) *testFixture { + t.Helper() + skipIfNoDeploymentConfig(t) + return &testFixture{ + t: t, + name: prefix + "-" + testutil.RandSeq(5), + } +} + +// TestDeploymentConfigReloadConfigMap tests that updating a ConfigMap triggers a DeploymentConfig reload. +func TestDeploymentConfigReloadConfigMap(t *testing.T) { + f := newFixtureForDeploymentConfig(t, "dc-cm-reload") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeploymentConfig( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentConfigReloaded(f.name) +} + +// TestDeploymentConfigReloadSecret tests that updating a Secret triggers a DeploymentConfig reload. +func TestDeploymentConfigReloadSecret(t *testing.T) { + f := newFixtureForDeploymentConfig(t, "dc-secret-reload") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createDeploymentConfig( + f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret") + f.assertDeploymentConfigReloaded(f.name) +} + +// TestDeploymentConfigAutoReload tests the auto-reload annotation on DeploymentConfig. +func TestDeploymentConfigAutoReload(t *testing.T) { + f := newFixtureForDeploymentConfig(t, "dc-auto-reload") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeploymentConfig( + f.name, true, map[string]string{ + cfg.Annotations.Auto: "true", + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentConfigReloaded(f.name) +} + // startManagerWithConfig creates and starts a controller-runtime manager for e2e testing. 
func startManagerWithConfig(cfg *config.Config, restConfig *rest.Config) (manager.Manager, context.CancelFunc) { collectors := metrics.NewCollectors() From b55e597a2f2be17f1ab4d76951b9899c5a0f0cf6 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 21:26:08 +0100 Subject: [PATCH 30/35] fix: Issues with paused rollouts and test cases --- .../pkg/controller/configmap_reconciler.go | 2 + .../pkg/controller/deployment_reconciler.go | 13 +- internal/pkg/controller/handler.go | 2 + internal/pkg/controller/manager.go | 2 + internal/pkg/controller/retry.go | 146 +++++++-- internal/pkg/controller/retry_test.go | 283 ++++++++++++++++++ internal/pkg/controller/secret_reconciler.go | 2 + internal/pkg/testutil/testutil.go | 48 +++ test/e2e/e2e_test.go | 111 +++++++ 9 files changed, 576 insertions(+), 33 deletions(-) diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index ef60e9fca..6736a518e 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -30,6 +30,7 @@ type ConfigMapReconciler struct { EventRecorder *events.Recorder WebhookClient *webhook.Client Alerter alerting.Alerter + PauseHandler *reload.PauseHandler handler *ReloadHandler initialized bool @@ -100,6 +101,7 @@ func (r *ConfigMapReconciler) reloadHandler() *ReloadHandler { Collectors: r.Collectors, EventRecorder: r.EventRecorder, Alerter: r.Alerter, + PauseHandler: r.PauseHandler, } } return r.handler diff --git a/internal/pkg/controller/deployment_reconciler.go b/internal/pkg/controller/deployment_reconciler.go index b0acb643f..ff81e8121 100644 --- a/internal/pkg/controller/deployment_reconciler.go +++ b/internal/pkg/controller/deployment_reconciler.go @@ -54,11 +54,18 @@ func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{RequeueAfter: remainingTime}, nil } - // Pause 
period has expired - unpause the deployment log.Info("Unpausing deployment after pause period expired") - r.PauseHandler.ClearPause(&deploy) + err = UpdateObjectWithRetry( + ctx, r.Client, &deploy, func() (bool, error) { + if !r.PauseHandler.IsPausedByReloader(&deploy) { + return false, nil + } + r.PauseHandler.ClearPause(&deploy) + return true, nil + }, + ) - if err := r.Update(ctx, &deploy, client.FieldOwner(FieldManager)); err != nil { + if err != nil { log.Error(err, "Failed to unpause deployment") return ctrl.Result{}, err } diff --git a/internal/pkg/controller/handler.go b/internal/pkg/controller/handler.go index 00b7218a3..423f2432f 100644 --- a/internal/pkg/controller/handler.go +++ b/internal/pkg/controller/handler.go @@ -24,6 +24,7 @@ type ReloadHandler struct { Collectors *metrics.Collectors EventRecorder *events.Recorder Alerter alerting.Alerter + PauseHandler *reload.PauseHandler } // Process handles the reload workflow: list workloads, get decisions, webhook or apply. @@ -112,6 +113,7 @@ func (h *ReloadHandler) applyReloads( ctx, h.Client, h.ReloadService, + h.PauseHandler, decision.Workload, resourceName, resourceType, diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go index 07ffcfbf3..e6a0bcd73 100644 --- a/internal/pkg/controller/manager.go +++ b/internal/pkg/controller/manager.go @@ -150,6 +150,7 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col EventRecorder: eventRecorder, WebhookClient: webhookClient, Alerter: alerter, + PauseHandler: pauseHandler, }).SetupWithManager(mgr); err != nil { return fmt.Errorf("setting up configmap reconciler: %w", err) } @@ -167,6 +168,7 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col EventRecorder: eventRecorder, WebhookClient: webhookClient, Alerter: alerter, + PauseHandler: pauseHandler, }).SetupWithManager(mgr); err != nil { return fmt.Errorf("setting up secret reconciler: %w", err) } diff --git 
a/internal/pkg/controller/retry.go b/internal/pkg/controller/retry.go index b91064908..a7f20810b 100644 --- a/internal/pkg/controller/retry.go +++ b/internal/pkg/controller/retry.go @@ -13,6 +13,39 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +// UpdateObjectWithRetry updates a Kubernetes object with retry on conflict. +// It re-fetches the object on each retry attempt and calls modifyFn to apply changes. +// The modifyFn receives the latest version of the object and should modify it in place. +// If modifyFn returns false, the update is skipped (e.g., if the condition no longer applies). +func UpdateObjectWithRetry( + ctx context.Context, + c client.Client, + obj client.Object, + modifyFn func() (shouldUpdate bool, err error), +) error { + return retry.RetryOnConflict( + retry.DefaultBackoff, func() error { + if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + + shouldUpdate, err := modifyFn() + if err != nil { + return err + } + + if !shouldUpdate { + return nil + } + + return c.Update(ctx, obj, client.FieldOwner(FieldManager)) + }, + ) +} + // UpdateWorkloadWithRetry updates a workload with exponential backoff on conflict. // On conflict, it re-fetches the object, re-applies the reload changes, and retries. 
// For Jobs and CronJobs, special handling is applied: @@ -23,6 +56,7 @@ func UpdateWorkloadWithRetry( ctx context.Context, c client.Client, reloadService *reload.Service, + pauseHandler *reload.PauseHandler, wl workload.WorkloadAccessor, resourceName string, resourceType reload.ResourceType, @@ -38,6 +72,8 @@ func UpdateWorkloadWithRetry( return updateCronJobWithNewJob(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) case workload.KindArgoRollout: return updateArgoRollout(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) + case workload.KindDeployment: + return updateDeploymentWithPause(ctx, c, reloadService, pauseHandler, wl, resourceName, resourceType, namespace, hash, autoReload) default: return updateStandardWorkload(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) } @@ -60,36 +96,38 @@ func retryWithReload( var updated bool isFirstAttempt := true - err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if !isFirstAttempt { - obj := wl.GetObject() - key := client.ObjectKeyFromObject(obj) - if err := c.Get(ctx, key, obj); err != nil { - if errors.IsNotFound(err) { - return nil + err := retry.RetryOnConflict( + retry.DefaultBackoff, func() error { + if !isFirstAttempt { + obj := wl.GetObject() + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if errors.IsNotFound(err) { + return nil + } + return err } - return err } - } - isFirstAttempt = false + isFirstAttempt = false - var applyErr error - updated, applyErr = reloadService.ApplyReload(ctx, wl, resourceName, resourceType, namespace, hash, autoReload) - if applyErr != nil { - return applyErr - } + var applyErr error + updated, applyErr = reloadService.ApplyReload(ctx, wl, resourceName, resourceType, namespace, hash, autoReload) + if applyErr != nil { + return applyErr + } - if !updated { - return nil - } + if !updated { + return nil + } - return updateFn() - 
}) + return updateFn() + }, + ) return updated, err } -// updateStandardWorkload updates Deployments, DaemonSets, StatefulSets, etc. +// updateStandardWorkload updates DaemonSets, StatefulSets, etc. func updateStandardWorkload( ctx context.Context, c client.Client, @@ -101,10 +139,40 @@ func updateStandardWorkload( hash string, autoReload bool, ) (bool, error) { - return retryWithReload(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, + return retryWithReload( + ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, + func() error { + return c.Update(ctx, wl.GetObject(), client.FieldOwner(FieldManager)) + }, + ) +} + +// updateDeploymentWithPause updates a Deployment and applies pause if configured. +func updateDeploymentWithPause( + ctx context.Context, + c client.Client, + reloadService *reload.Service, + pauseHandler *reload.PauseHandler, + wl workload.WorkloadAccessor, + resourceName string, + resourceType reload.ResourceType, + namespace string, + hash string, + autoReload bool, +) (bool, error) { + shouldPause := pauseHandler != nil && pauseHandler.ShouldPause(wl) + + return retryWithReload( + ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, func() error { + if shouldPause { + if err := pauseHandler.ApplyPause(wl); err != nil { + return err + } + } return c.Update(ctx, wl.GetObject(), client.FieldOwner(FieldManager)) - }) + }, + ) } // updateJobWithRecreate deletes the Job and recreates it with the updated spec. 
@@ -148,9 +216,11 @@ func updateJobWithRecreate( // Delete the old job with background propagation policy := metav1.DeletePropagationBackground - if err := c.Delete(ctx, oldJob, &client.DeleteOptions{ - PropagationPolicy: &policy, - }); err != nil { + if err := c.Delete( + ctx, oldJob, &client.DeleteOptions{ + PropagationPolicy: &policy, + }, + ); err != nil { if !errors.IsNotFound(err) { return false, err } @@ -217,12 +287,10 @@ func updateCronJobWithNewJob( cronJob := cronJobWl.GetCronJob() - // Build annotations for the new Job annotations := make(map[string]string) annotations["cronjob.kubernetes.io/instantiate"] = "manual" maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations) - // Create a new Job from the CronJob template job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ GenerateName: cronJob.Name + "-", @@ -240,6 +308,22 @@ func updateCronJobWithNewJob( return false, err } + savedAnnotations := maps.Clone(cronJob.Spec.JobTemplate.Spec.Template.Annotations) + + err = UpdateObjectWithRetry( + ctx, c, cronJob, func() (bool, error) { + if cronJob.Spec.JobTemplate.Spec.Template.Annotations == nil { + cronJob.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) + } + maps.Copy(cronJob.Spec.JobTemplate.Spec.Template.Annotations, savedAnnotations) + return true, nil + }, + ) + + if err != nil { + return false, err + } + return true, nil } @@ -262,8 +346,10 @@ func updateArgoRollout( return false, nil } - return retryWithReload(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, + return retryWithReload( + ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, func() error { return rolloutWl.Update(ctx, c) - }) + }, + ) } diff --git a/internal/pkg/controller/retry_test.go b/internal/pkg/controller/retry_test.go index b62bc7a4f..97271f85e 100644 --- a/internal/pkg/controller/retry_test.go +++ b/internal/pkg/controller/retry_test.go @@ -133,6 +133,7 @@ func 
TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { context.Background(), fakeClient, reloadService, + nil, // no pause handler wl, "test-resource", tt.resourceType, @@ -214,6 +215,7 @@ func TestUpdateWorkloadWithRetry_Strategies(t *testing.T) { context.Background(), fakeClient, reloadService, + nil, // no pause handler for this test wl, "test-cm", reload.ResourceTypeConfigMap, @@ -265,6 +267,7 @@ func TestUpdateWorkloadWithRetry_NoUpdate(t *testing.T) { context.Background(), fakeClient, reloadService, + nil, // no pause handler wl, "test-cm", reload.ResourceTypeConfigMap, @@ -300,3 +303,283 @@ func TestResourceTypeKind(t *testing.T) { ) } } + +func TestUpdateWorkloadWithRetry_PauseDeployment(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + "deployment.reloader.stakater.com/pause-period": "5m", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", + true, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + + if !result.Spec.Paused { + t.Error("Expected deployment to be paused (spec.Paused=true)") + } + + pausedAt := result.Annotations[cfg.Annotations.PausedAt] + if pausedAt == "" { + t.Error("Expected paused-at annotation to be set") + } +} + +// TestUpdateWorkloadWithRetry_PauseWithExplicitAnnotation tests pause with explicit configmap annotation (no auto). +func TestUpdateWorkloadWithRetry_PauseWithExplicitAnnotation(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + cfg.Annotations.ConfigmapReload: "test-cm", // explicit, not auto + cfg.Annotations.PausePeriod: "5m", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", + false, // NOT auto reload + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if result.Spec.Template.Annotations == nil { + t.Fatal("Expected pod template annotations to be set") + } + + if !result.Spec.Paused { + t.Error("Expected deployment to be paused (spec.Paused=true)") + } + + pausedAt := result.Annotations[cfg.Annotations.PausedAt] + if pausedAt == "" { + t.Error("Expected paused-at annotation to be set") + } +} + +// TestUpdateWorkloadWithRetry_PauseWithSecretReload tests pause with Secret-triggered reload. +func TestUpdateWorkloadWithRetry_PauseWithSecretReload(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + cfg.Annotations.SecretReload: "test-secret", // explicit secret, not auto + cfg.Annotations.PausePeriod: "5m", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-secret", + reload.ResourceTypeSecret, + "default", + "abc123", + false, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if !result.Spec.Paused { + t.Error("Expected deployment to be paused (spec.Paused=true)") + } + + pausedAt := result.Annotations[cfg.Annotations.PausedAt] + if pausedAt == "" { + t.Error("Expected paused-at annotation to be set") + } +} + +// TestUpdateWorkloadWithRetry_PauseWithAutoSecret tests pause with auto annotation + Secret change. +func TestUpdateWorkloadWithRetry_PauseWithAutoSecret(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.PausePeriod: "5m", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-secret", + reload.ResourceTypeSecret, + "default", + "abc123", + true, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if !result.Spec.Paused { + t.Error("Expected deployment to be paused (spec.Paused=true)") + } +} + +func TestUpdateWorkloadWithRetry_NoPauseWithoutAnnotation(t *testing.T) { + cfg := config.NewDefault() + reloadService := reload.NewService(cfg) + pauseHandler := reload.NewPauseHandler(cfg) + + deployment := testutil.NewDeployment( + "test-deployment", "default", map[string]string{ + "reloader.stakater.com/auto": "true", + }, + ) + + fakeClient := fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). + WithObjects(deployment). 
+ Build() + + wl := workload.NewDeploymentWorkload(deployment) + + updated, err := controller.UpdateWorkloadWithRetry( + context.Background(), + fakeClient, + reloadService, + pauseHandler, + wl, + "test-cm", + reload.ResourceTypeConfigMap, + "default", + "abc123", + true, + ) + + if err != nil { + t.Fatalf("UpdateWorkloadWithRetry failed: %v", err) + } + if !updated { + t.Error("Expected workload to be updated") + } + + var result appsv1.Deployment + if err := fakeClient.Get( + context.Background(), types.NamespacedName{Name: "test-deployment", Namespace: "default"}, &result, + ); err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + + if result.Spec.Paused { + t.Error("Expected deployment NOT to be paused (no pause-period annotation)") + } +} diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index c28f8e38b..46723803f 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ b/internal/pkg/controller/secret_reconciler.go @@ -30,6 +30,7 @@ type SecretReconciler struct { EventRecorder *events.Recorder WebhookClient *webhook.Client Alerter alerting.Alerter + PauseHandler *reload.PauseHandler handler *ReloadHandler initialized bool @@ -97,6 +98,7 @@ func (r *SecretReconciler) reloadHandler() *ReloadHandler { Collectors: r.Collectors, EventRecorder: r.EventRecorder, Alerter: r.Alerter, + PauseHandler: r.PauseHandler, } } return r.handler diff --git a/internal/pkg/testutil/testutil.go b/internal/pkg/testutil/testutil.go index 630155348..1a574789b 100644 --- a/internal/pkg/testutil/testutil.go +++ b/internal/pkg/testutil/testutil.go @@ -430,3 +430,51 @@ func WaitForDeploymentConfigReloadedAnnotation(client openshiftclient.Interface, } return found, err } + +// WaitForDeploymentPaused waits for a deployment to be paused (spec.Paused=true). 
+func WaitForDeploymentPaused(client kubernetes.Interface, namespace, name string, timeout time.Duration) (bool, error) { + var paused bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + if deployment.Spec.Paused { + paused = true + return true, nil + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return paused, nil + } + return paused, err +} + +// WaitForDeploymentUnpaused waits for a deployment to be unpaused (spec.Paused=false). +func WaitForDeploymentUnpaused(client kubernetes.Interface, namespace, name string, timeout time.Duration) (bool, error) { + var unpaused bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep waiting + } + if !deployment.Spec.Paused { + unpaused = true + return true, nil + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return unpaused, nil + } + return unpaused, err +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index b8303d9d9..d6351b1d1 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -228,6 +228,46 @@ func (f *testFixture) assertDeploymentConfigReloaded(name string) { } } +// assertDeploymentPaused asserts that a deployment is paused (spec.Paused=true). 
+func (f *testFixture) assertDeploymentPaused(name string) { + f.t.Helper() + paused, err := testutil.WaitForDeploymentPaused(k8sClient, namespace, name, waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for deployment %s to be paused: %v", name, err) + } + if !paused { + f.t.Errorf("Deployment %s was not paused after reload", name) + } +} + +// assertDeploymentUnpaused asserts that a deployment is unpaused (spec.Paused=false). +func (f *testFixture) assertDeploymentUnpaused(name string, timeout time.Duration) { + f.t.Helper() + unpaused, err := testutil.WaitForDeploymentUnpaused(k8sClient, namespace, name, timeout) + if err != nil { + f.t.Fatalf("Error waiting for deployment %s to be unpaused: %v", name, err) + } + if !unpaused { + f.t.Errorf("Deployment %s was not unpaused after pause period", name) + } +} + +// assertDeploymentHasPausedAtAnnotation asserts that a deployment has the paused-at annotation. +func (f *testFixture) assertDeploymentHasPausedAtAnnotation(name string) { + f.t.Helper() + deploy, err := k8sClient.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + f.t.Fatalf("Failed to get deployment %s: %v", name, err) + } + if deploy.Annotations == nil { + f.t.Errorf("Deployment %s has no annotations", name) + return + } + if _, ok := deploy.Annotations[cfg.Annotations.PausedAt]; !ok { + f.t.Errorf("Deployment %s does not have paused-at annotation", name) + } +} + // cleanup removes all created resources. func (f *testFixture) cleanup() { for _, w := range f.workloads { @@ -615,6 +655,77 @@ func TestDeploymentConfigAutoReload(t *testing.T) { f.assertDeploymentConfigReloaded(f.name) } +// TestDeploymentPausePeriod tests the pause-period annotation on Deployment. +// It verifies that after a reload, the deployment is paused and then unpaused after the period expires. 
+func TestDeploymentPausePeriod(t *testing.T) { + f := newFixture(t, "pause-period") + defer f.cleanup() + + pausePeriod := "10s" + + f.createConfigMap(f.name, "initial-data") + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + cfg.Annotations.PausePeriod: pausePeriod, + }, + ) + f.waitForReady() + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentReloaded(f.name, nil) + f.assertDeploymentPaused(f.name) + f.assertDeploymentHasPausedAtAnnotation(f.name) + t.Log("Waiting for pause period to expire...") + f.assertDeploymentUnpaused(f.name, 20*time.Second) +} + +// TestDeploymentPausePeriodWithAutoReload tests pause-period with auto reload annotation. +func TestDeploymentPausePeriodWithAutoReload(t *testing.T) { + f := newFixture(t, "pause-auto") + defer f.cleanup() + + pausePeriod := "10s" + + f.createConfigMap(f.name, "initial-data") + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.Auto: "true", + cfg.Annotations.PausePeriod: pausePeriod, + }, + ) + f.waitForReady() + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentReloaded(f.name, nil) + f.assertDeploymentPaused(f.name) + t.Log("Waiting for pause period to expire...") + f.assertDeploymentUnpaused(f.name, 20*time.Second) +} + +// TestDeploymentNoPauseWithoutAnnotation tests that deployments without pause-period are not paused. 
+func TestDeploymentNoPauseWithoutAnnotation(t *testing.T) { + f := newFixture(t, "no-pause") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentReloaded(f.name, nil) + + time.Sleep(3 * time.Second) + deploy, err := k8sClient.AppsV1().Deployments(namespace).Get(context.Background(), f.name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get deployment: %v", err) + } + if deploy.Spec.Paused { + t.Errorf("Deployment should NOT be paused without pause-period annotation") + } +} + // startManagerWithConfig creates and starts a controller-runtime manager for e2e testing. func startManagerWithConfig(cfg *config.Config, restConfig *rest.Config) (manager.Manager, context.CancelFunc) { collectors := metrics.NewCollectors() From da6f33c059a76ba777d2165abe6642a9ad72d3c3 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 28 Dec 2025 21:53:14 +0100 Subject: [PATCH 31/35] feat: Test cases for envvar strategy and more --- internal/pkg/testutil/testutil.go | 152 +++++++- test/e2e/{ => annotations}/e2e_test.go | 373 ++++++++++++++++++- test/e2e/envvars/e2e_test.go | 474 +++++++++++++++++++++++++ 3 files changed, 992 insertions(+), 7 deletions(-) rename test/e2e/{ => annotations}/e2e_test.go (68%) create mode 100644 test/e2e/envvars/e2e_test.go diff --git a/internal/pkg/testutil/testutil.go b/internal/pkg/testutil/testutil.go index 1a574789b..c15aacb41 100644 --- a/internal/pkg/testutil/testutil.go +++ b/internal/pkg/testutil/testutil.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "encoding/base64" "fmt" + "strings" "time" openshiftv1 "github.com/openshift/api/apps/v1" @@ -55,6 +56,23 @@ func CreateConfigMap(client kubernetes.Interface, namespace, name, data string) return 
client.CoreV1().ConfigMaps(namespace).Create(context.Background(), cm, metav1.CreateOptions{}) } +// CreateConfigMapWithAnnotations creates a ConfigMap with the given name, data, and annotations. +func CreateConfigMapWithAnnotations(client kubernetes.Interface, namespace, name, data string, annotations map[string]string) ( + *corev1.ConfigMap, error, +) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Data: map[string]string{ + "url": data, + }, + } + return client.CoreV1().ConfigMaps(namespace).Create(context.Background(), cm, metav1.CreateOptions{}) +} + // UpdateConfigMap updates the ConfigMap with new label and/or data. func UpdateConfigMap(cm *corev1.ConfigMap, namespace, name, label, data string) error { if label != "" { @@ -66,7 +84,6 @@ func UpdateConfigMap(cm *corev1.ConfigMap, namespace, name, label, data string) if data != "" { cm.Data["url"] = data } - // Note: caller must have a client to update return nil } @@ -157,6 +174,19 @@ func DeleteDeployment(client kubernetes.Interface, namespace, name string) error return client.AppsV1().Deployments(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) } +// CreateDeploymentWithBoth creates a Deployment that references both a ConfigMap and a Secret. 
+func CreateDeploymentWithBoth(client kubernetes.Interface, name, namespace, configMapName, secretName string, annotations map[string]string) ( + *appsv1.Deployment, error, +) { + deployment := NewDeploymentWithEnvFrom(name, namespace, configMapName, secretName) + deployment.Annotations = annotations + // Override image for integration tests + deployment.Spec.Template.Spec.Containers[0].Image = "busybox:1.36" + deployment.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "while true; do sleep 3600; done"} + + return client.AppsV1().Deployments(namespace).Create(context.Background(), deployment, metav1.CreateOptions{}) +} + // CreateDaemonSet creates a DaemonSet that references a ConfigMap/Secret. func CreateDaemonSet(client kubernetes.Interface, name, namespace string, useConfigMap bool, annotations map[string]string) ( *appsv1.DaemonSet, error, @@ -478,3 +508,123 @@ func WaitForDeploymentUnpaused(client kubernetes.Interface, namespace, name stri } return unpaused, err } + +// WaitForCronJobTriggeredJob waits for a Job to be created by a CronJob (triggered by Reloader). 
+func WaitForCronJobTriggeredJob(client kubernetes.Interface, namespace, cronJobName string, timeout time.Duration) (bool, error) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + jobs, err := client.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, nil // Keep waiting + } + for _, job := range jobs.Items { + if strings.HasPrefix(job.Name, cronJobName+"-") { + if job.Annotations != nil { + if _, ok := job.Annotations["cronjob.kubernetes.io/instantiate"]; ok { + found = true + return true, nil + } + } + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForDeploymentEnvVar waits for a deployment's containers to have the specified env var with a non-empty value. +func WaitForDeploymentEnvVar(client kubernetes.Interface, namespace, name, envVarPrefix string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + for _, container := range deployment.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if strings.HasPrefix(env.Name, envVarPrefix) && env.Value != "" { + found = true + return true, nil + } + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForDaemonSetEnvVar waits for a daemonset's containers to have the specified env var with a non-empty value. 
+func WaitForDaemonSetEnvVar(client kubernetes.Interface, namespace, name, envVarPrefix string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + daemonset, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + for _, container := range daemonset.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if strings.HasPrefix(env.Name, envVarPrefix) && env.Value != "" { + found = true + return true, nil + } + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} + +// WaitForStatefulSetEnvVar waits for a statefulset's containers to have the specified env var with a non-empty value. +func WaitForStatefulSetEnvVar(client kubernetes.Interface, namespace, name, envVarPrefix string, timeout time.Duration) ( + bool, error, +) { + var found bool + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := wait.PollUntilContextTimeout( + ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { + statefulset, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + for _, container := range statefulset.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if strings.HasPrefix(env.Name, envVarPrefix) && env.Value != "" { + found = true + return true, nil + } + } + } + return false, nil + }, + ) + if wait.Interrupted(err) { + return found, nil + } + return found, err +} diff --git a/test/e2e/e2e_test.go b/test/e2e/annotations/e2e_test.go similarity index 68% rename from test/e2e/e2e_test.go rename to test/e2e/annotations/e2e_test.go index d6351b1d1..cdb88b6c2 100644 --- 
a/test/e2e/e2e_test.go +++ b/test/e2e/annotations/e2e_test.go @@ -1,5 +1,5 @@ -// Package e2e contains end-to-end tests for Reloader. -package e2e +// Package annotations contains end-to-end tests for Reloader's Annotations Reload Strategy. +package annotations import ( "context" @@ -56,7 +56,7 @@ type testFixture struct { type workloadInfo struct { name string - kind string // "deployment", "daemonset", "statefulset" + kind string // "deployment", "daemonset", "statefulset", "cronjob" } // newFixture creates a new test fixture with a unique name prefix. @@ -154,6 +154,24 @@ func (f *testFixture) updateSecret(name, data string) { } } +// updateSecretLabel updates only a Secret's label (not data). +func (f *testFixture) updateSecretLabel(name, label string) { + f.t.Helper() + secret, err := k8sClient.CoreV1().Secrets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + f.t.Fatalf("Failed to get Secret %s: %v", name, err) + } + var data string + if secret.Data != nil { + if d, ok := secret.Data["test"]; ok { + data = string(d) + } + } + if err := testutil.UpdateSecretWithClient(k8sClient, namespace, name, label, data); err != nil { + f.t.Fatalf("Failed to update Secret label %s: %v", name, err) + } +} + // assertDeploymentReloaded asserts that a deployment was reloaded. func (f *testFixture) assertDeploymentReloaded(name string, testCfg *config.Config) { f.t.Helper() @@ -194,6 +212,16 @@ func (f *testFixture) assertDaemonSetReloaded(name string) { } } +// assertDaemonSetNotReloaded asserts that a daemonset was NOT reloaded. +func (f *testFixture) assertDaemonSetNotReloaded(name string) { + f.t.Helper() + time.Sleep(negativeTestTimeout) + updated, _ := testutil.WaitForDaemonSetReloadedAnnotation(k8sClient, namespace, name, cfg.Annotations.LastReloadedFrom, negativeTestTimeout) + if updated { + f.t.Errorf("DaemonSet %s should not have been updated", name) + } +} + // assertStatefulSetReloaded asserts that a statefulset was reloaded. 
func (f *testFixture) assertStatefulSetReloaded(name string) { f.t.Helper() @@ -206,6 +234,16 @@ func (f *testFixture) assertStatefulSetReloaded(name string) { } } +// assertStatefulSetNotReloaded asserts that a statefulset was NOT reloaded. +func (f *testFixture) assertStatefulSetNotReloaded(name string) { + f.t.Helper() + time.Sleep(negativeTestTimeout) + updated, _ := testutil.WaitForStatefulSetReloadedAnnotation(k8sClient, namespace, name, cfg.Annotations.LastReloadedFrom, negativeTestTimeout) + if updated { + f.t.Errorf("StatefulSet %s should not have been updated", name) + } +} + // createDeploymentConfig creates a DeploymentConfig and registers it for cleanup. func (f *testFixture) createDeploymentConfig(name string, useConfigMap bool, annotations map[string]string) { f.t.Helper() @@ -282,6 +320,8 @@ func (f *testFixture) cleanup() { if osClient != nil { _ = testutil.DeleteDeploymentConfig(osClient, namespace, w.name) } + case "cronjob": + _ = testutil.DeleteCronJob(k8sClient, namespace, w.name) } } for _, name := range f.configMaps { @@ -337,16 +377,13 @@ func TestMain(m *testing.M) { cfg = config.NewDefault() cfg.AutoReloadAll = false - // Check if cluster supports DeploymentConfig discoveryClient, err := discovery.NewDiscoveryClientForConfig(restCfg) if err != nil { skipDeploymentConfigTests = true } else { - // Use a nop logger for detection nopLog := ctrl.Log.WithName("dc-detection") if openshift.HasDeploymentConfigSupport(discoveryClient, nopLog) { cfg.DeploymentConfigEnabled = true - // Create OpenShift client for DeploymentConfig tests osClient, err = testutil.NewOpenshiftClient(restCfg) if err != nil { skipDeploymentConfigTests = true @@ -726,6 +763,330 @@ func TestDeploymentNoPauseWithoutAnnotation(t *testing.T) { } } +// TestDaemonSetSecretReload tests that DaemonSets are reloaded when Secrets change. 
+func TestDaemonSetSecretReload(t *testing.T) { + f := newFixture(t, "ds-secret-reload") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createDaemonSet( + f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret") + f.assertDaemonSetReloaded(f.name) +} + +// TestStatefulSetConfigMapReload tests that StatefulSets are reloaded when ConfigMaps change. +func TestStatefulSetConfigMapReload(t *testing.T) { + f := newFixture(t, "sts-cm-reload") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createStatefulSet( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertStatefulSetReloaded(f.name) +} + +// TestSecretLabelOnlyChange tests that Secret label-only changes don't trigger reloads. +func TestSecretLabelOnlyChange(t *testing.T) { + f := newFixture(t, "secret-label-only") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createDeployment( + f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecretLabel(f.name, "new-label") + f.assertDeploymentNotReloaded(f.name, nil) +} + +// TestDaemonSetLabelOnlyChange tests that ConfigMap label-only changes don't trigger DaemonSet reloads. +func TestDaemonSetLabelOnlyChange(t *testing.T) { + f := newFixture(t, "ds-label-only") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDaemonSet( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMapLabel(f.name, "new-label") + f.assertDaemonSetNotReloaded(f.name) +} + +// TestStatefulSetLabelOnlyChange tests that Secret label-only changes don't trigger StatefulSet reloads. 
+func TestStatefulSetLabelOnlyChange(t *testing.T) { + f := newFixture(t, "sts-label-only") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createStatefulSet( + f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecretLabel(f.name, "new-label") + f.assertStatefulSetNotReloaded(f.name) +} + +// TestMultipleConfigMapUpdates tests that multiple updates to a ConfigMap all trigger reloads correctly. +func TestMultipleConfigMapUpdates(t *testing.T) { + f := newFixture(t, "multi-update") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data-1") + f.assertDeploymentReloaded(f.name, nil) + + time.Sleep(2 * time.Second) + + f.updateConfigMap(f.name, "updated-data-2") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestMultipleSecretUpdates tests that multiple updates to a Secret all trigger reloads correctly. +func TestMultipleSecretUpdates(t *testing.T) { + f := newFixture(t, "multi-secret-update") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createDeployment( + f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret-1") + f.assertDeploymentReloaded(f.name, nil) + + time.Sleep(2 * time.Second) + + f.updateSecret(f.name, "updated-secret-2") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestSecretOnlyAuto tests the secret-only auto annotation (secret.reloader.stakater.com/auto). 
+func TestSecretOnlyAuto(t *testing.T) { + f := newFixture(t, "secret-auto") + defer f.cleanup() + + secretName := f.name + "-secret" + cmName := f.name + "-cm" + + f.createSecret(secretName, "initial-secret") + f.createConfigMap(cmName, "initial-data") + + _, err := testutil.CreateDeploymentWithBoth( + k8sClient, f.name, namespace, cmName, secretName, map[string]string{ + cfg.Annotations.SecretAuto: "true", + }, + ) + if err != nil { + t.Fatalf("Failed to create deployment: %v", err) + } + f.workloads = append(f.workloads, workloadInfo{name: f.name, kind: "deployment"}) + f.waitForReady() + + f.updateSecret(secretName, "updated-secret") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestConfigMapOnlyAuto tests the configmap-only auto annotation (configmap.reloader.stakater.com/auto). +func TestConfigMapOnlyAuto(t *testing.T) { + f := newFixture(t, "cm-auto") + defer f.cleanup() + + secretName := f.name + "-secret" + cmName := f.name + "-cm" + + f.createSecret(secretName, "initial-secret") + f.createConfigMap(cmName, "initial-data") + + _, err := testutil.CreateDeploymentWithBoth( + k8sClient, f.name, namespace, cmName, secretName, map[string]string{ + cfg.Annotations.ConfigmapAuto: "true", + }, + ) + if err != nil { + t.Fatalf("Failed to create deployment: %v", err) + } + f.workloads = append(f.workloads, workloadInfo{name: f.name, kind: "deployment"}) + f.waitForReady() + + f.updateConfigMap(cmName, "updated-data") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestSearchMatchAnnotations tests the search + match annotation pattern. 
+func TestSearchMatchAnnotations(t *testing.T) { + f := newFixture(t, "search-match") + defer f.cleanup() + + cm, err := testutil.CreateConfigMapWithAnnotations( + k8sClient, namespace, f.name, "initial-data", map[string]string{ + cfg.Annotations.Match: "true", + }, + ) + if err != nil { + t.Fatalf("Failed to create ConfigMap: %v", err) + } + f.configMaps = append(f.configMaps, cm.Name) + + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.Search: "true", + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentReloaded(f.name, nil) +} + +// TestSearchWithoutMatch tests that search annotation without match doesn't trigger reload. +func TestSearchWithoutMatch(t *testing.T) { + f := newFixture(t, "search-no-match") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.Search: "true", + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentNotReloaded(f.name, nil) +} + +// TestResourceIgnore tests the ignore annotation on ConfigMap/Secret. +func TestResourceIgnore(t *testing.T) { + f := newFixture(t, "ignore") + defer f.cleanup() + + cm, err := testutil.CreateConfigMapWithAnnotations( + k8sClient, namespace, f.name, "initial-data", map[string]string{ + cfg.Annotations.Ignore: "true", + }, + ) + if err != nil { + t.Fatalf("Failed to create ConfigMap: %v", err) + } + f.configMaps = append(f.configMaps, cm.Name) + + f.createDeployment( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentNotReloaded(f.name, nil) +} + +// createCronJob creates a CronJob and registers it for cleanup. 
+func (f *testFixture) createCronJob(name string, useConfigMap bool, annotations map[string]string) { + f.t.Helper() + _, err := testutil.CreateCronJob(k8sClient, name, namespace, useConfigMap, annotations) + if err != nil { + f.t.Fatalf("Failed to create CronJob %s: %v", name, err) + } + f.workloads = append(f.workloads, workloadInfo{name: name, kind: "cronjob"}) +} + +// assertCronJobTriggeredJob asserts that a CronJob triggered a new Job. +func (f *testFixture) assertCronJobTriggeredJob(name string) { + f.t.Helper() + triggered, err := testutil.WaitForCronJobTriggeredJob(k8sClient, namespace, name, waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for CronJob %s to trigger Job: %v", name, err) + } + if !triggered { + f.t.Errorf("CronJob %s did not trigger a Job after resource change", name) + } +} + +// TestCronJobConfigMapReload tests that updating a ConfigMap triggers a CronJob to create a new Job. +func TestCronJobConfigMapReload(t *testing.T) { + f := newFixture(t, "cj-cm") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createCronJob( + f.name, true, map[string]string{ + cfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertCronJobTriggeredJob(f.name) +} + +// TestCronJobSecretReload tests that updating a Secret triggers a CronJob to create a new Job. +func TestCronJobSecretReload(t *testing.T) { + f := newFixture(t, "cj-secret") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createCronJob( + f.name, false, map[string]string{ + cfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret") + f.assertCronJobTriggeredJob(f.name) +} + +// TestCronJobAutoReload tests that CronJob with auto annotation triggers a Job on ConfigMap update. 
+func TestCronJobAutoReload(t *testing.T) { + f := newFixture(t, "cj-auto") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createCronJob( + f.name, true, map[string]string{ + cfg.Annotations.Auto: "true", + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertCronJobTriggeredJob(f.name) +} + // startManagerWithConfig creates and starts a controller-runtime manager for e2e testing. func startManagerWithConfig(cfg *config.Config, restConfig *rest.Config) (manager.Manager, context.CancelFunc) { collectors := metrics.NewCollectors() diff --git a/test/e2e/envvars/e2e_test.go b/test/e2e/envvars/e2e_test.go new file mode 100644 index 000000000..2c1c7b1c7 --- /dev/null +++ b/test/e2e/envvars/e2e_test.go @@ -0,0 +1,474 @@ +// Package envvars contains end-to-end tests for Reloader's EnvVars Reload Strategy. +package envvars + +import ( + "context" + "flag" + "log" + "os" + "testing" + "time" + + "github.com/go-logr/zerologr" + "github.com/rs/zerolog" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/testutil" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + ctrl "sigs.k8s.io/controller-runtime" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" +) + +const ( + testNamespacePrefix = "test-reloader-envvars-" + waitTimeout = 30 * time.Second + setupDelay = 2 * time.Second + negativeTestTimeout = 5 * time.Second + envVarPrefix = "STAKATER_" +) + +var ( + k8sClient kubernetes.Interface + envVarsCfg *config.Config + namespace string + skipE2ETests bool + cancelManager context.CancelFunc + restCfg *rest.Config +) + +// envVarsFixture provides test setup/teardown for EnvVars strategy tests. 
+type envVarsFixture struct { + t *testing.T + name string + configMaps []string + secrets []string + workloads []workloadInfo +} + +type workloadInfo struct { + name string + kind string +} + +func newEnvVarsFixture(t *testing.T, prefix string) *envVarsFixture { + t.Helper() + skipIfNoCluster(t) + return &envVarsFixture{ + t: t, + name: prefix + "-" + testutil.RandSeq(5), + } +} + +func (f *envVarsFixture) createConfigMap(name, data string) { + f.t.Helper() + _, err := testutil.CreateConfigMap(k8sClient, namespace, name, data) + if err != nil { + f.t.Fatalf("Failed to create ConfigMap %s: %v", name, err) + } + f.configMaps = append(f.configMaps, name) +} + +func (f *envVarsFixture) createSecret(name, data string) { + f.t.Helper() + _, err := testutil.CreateSecret(k8sClient, namespace, name, data) + if err != nil { + f.t.Fatalf("Failed to create Secret %s: %v", name, err) + } + f.secrets = append(f.secrets, name) +} + +func (f *envVarsFixture) createDeployment(name string, useConfigMap bool, annotations map[string]string) { + f.t.Helper() + _, err := testutil.CreateDeployment(k8sClient, name, namespace, useConfigMap, annotations) + if err != nil { + f.t.Fatalf("Failed to create Deployment %s: %v", name, err) + } + f.workloads = append(f.workloads, workloadInfo{name: name, kind: "deployment"}) +} + +func (f *envVarsFixture) createDaemonSet(name string, useConfigMap bool, annotations map[string]string) { + f.t.Helper() + _, err := testutil.CreateDaemonSet(k8sClient, name, namespace, useConfigMap, annotations) + if err != nil { + f.t.Fatalf("Failed to create DaemonSet %s: %v", name, err) + } + f.workloads = append(f.workloads, workloadInfo{name: name, kind: "daemonset"}) +} + +func (f *envVarsFixture) createStatefulSet(name string, useConfigMap bool, annotations map[string]string) { + f.t.Helper() + _, err := testutil.CreateStatefulSet(k8sClient, name, namespace, useConfigMap, annotations) + if err != nil { + f.t.Fatalf("Failed to create StatefulSet %s: %v", name, 
err) + } + f.workloads = append(f.workloads, workloadInfo{name: name, kind: "statefulset"}) +} + +func (f *envVarsFixture) waitForReady() { + time.Sleep(setupDelay) +} + +func (f *envVarsFixture) updateConfigMap(name, data string) { + f.t.Helper() + if err := testutil.UpdateConfigMapWithClient(k8sClient, namespace, name, "", data); err != nil { + f.t.Fatalf("Failed to update ConfigMap %s: %v", name, err) + } +} + +func (f *envVarsFixture) updateConfigMapLabel(name, label string) { + f.t.Helper() + cm, err := k8sClient.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + f.t.Fatalf("Failed to get ConfigMap %s: %v", name, err) + } + data := cm.Data["url"] + if err := testutil.UpdateConfigMapWithClient(k8sClient, namespace, name, label, data); err != nil { + f.t.Fatalf("Failed to update ConfigMap label %s: %v", name, err) + } +} + +func (f *envVarsFixture) updateSecret(name, data string) { + f.t.Helper() + if err := testutil.UpdateSecretWithClient(k8sClient, namespace, name, "", data); err != nil { + f.t.Fatalf("Failed to update Secret %s: %v", name, err) + } +} + +func (f *envVarsFixture) assertDeploymentHasEnvVar(name string) { + f.t.Helper() + updated, err := testutil.WaitForDeploymentEnvVar(k8sClient, namespace, name, envVarPrefix, waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for deployment %s env var: %v", name, err) + } + if !updated { + f.t.Errorf("Deployment %s does not have Reloader env var", name) + } +} + +func (f *envVarsFixture) assertDeploymentNoEnvVar(name string) { + f.t.Helper() + time.Sleep(negativeTestTimeout) + updated, _ := testutil.WaitForDeploymentEnvVar(k8sClient, namespace, name, envVarPrefix, negativeTestTimeout) + if updated { + f.t.Errorf("Deployment %s should not have Reloader env var", name) + } +} + +func (f *envVarsFixture) assertDaemonSetHasEnvVar(name string) { + f.t.Helper() + updated, err := testutil.WaitForDaemonSetEnvVar(k8sClient, namespace, name, envVarPrefix, 
waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for daemonset %s env var: %v", name, err) + } + if !updated { + f.t.Errorf("DaemonSet %s does not have Reloader env var", name) + } +} + +func (f *envVarsFixture) assertStatefulSetHasEnvVar(name string) { + f.t.Helper() + updated, err := testutil.WaitForStatefulSetEnvVar(k8sClient, namespace, name, envVarPrefix, waitTimeout) + if err != nil { + f.t.Fatalf("Error waiting for statefulset %s env var: %v", name, err) + } + if !updated { + f.t.Errorf("StatefulSet %s does not have Reloader env var", name) + } +} + +func (f *envVarsFixture) cleanup() { + for _, w := range f.workloads { + switch w.kind { + case "deployment": + _ = testutil.DeleteDeployment(k8sClient, namespace, w.name) + case "daemonset": + _ = testutil.DeleteDaemonSet(k8sClient, namespace, w.name) + case "statefulset": + _ = testutil.DeleteStatefulSet(k8sClient, namespace, w.name) + } + } + for _, name := range f.configMaps { + _ = testutil.DeleteConfigMap(k8sClient, namespace, name) + } + for _, name := range f.secrets { + _ = testutil.DeleteSecret(k8sClient, namespace, name) + } +} + +func TestMain(m *testing.M) { + flag.Parse() + + if testing.Short() { + os.Exit(0) + } + + zl := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}). + Level(zerolog.WarnLevel). + With(). + Timestamp(). 
+ Logger() + ctrllog.SetLogger(zerologr.New(&zl)) + + kubeconfig := os.Getenv("KUBECONFIG") + if kubeconfig == "" { + kubeconfig = os.Getenv("HOME") + "/.kube/config" + } + + var err error + restCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + skipE2ETests = true + os.Exit(0) + } + + k8sClient, err = kubernetes.NewForConfig(restCfg) + if err != nil { + skipE2ETests = true + os.Exit(0) + } + + if _, err = k8sClient.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}); err != nil { + skipE2ETests = true + os.Exit(0) + } + + namespace = testNamespacePrefix + testutil.RandSeq(5) + if err := testutil.CreateNamespace(namespace, k8sClient); err != nil { + panic(err) + } + + envVarsCfg = config.NewDefault() + envVarsCfg.ReloadStrategy = config.ReloadStrategyEnvVars + envVarsCfg.AutoReloadAll = false + + collectors := metrics.NewCollectors() + mgr, err := controller.NewManagerWithRestConfig( + controller.ManagerOptions{ + Config: envVarsCfg, + Log: ctrl.Log.WithName("envvars-test-manager"), + Collectors: &collectors, + }, restCfg, + ) + if err != nil { + panic("Failed to create EnvVars manager: " + err.Error()) + } + + if err := controller.SetupReconcilers(mgr, envVarsCfg, ctrl.Log.WithName("envvars-test-reconcilers"), &collectors); err != nil { + panic("Failed to setup EnvVars reconcilers: " + err.Error()) + } + + ctx, cancel := context.WithCancel(context.Background()) + cancelManager = cancel + + go func() { + if err := controller.RunManager(ctx, mgr, ctrl.Log.WithName("envvars-test-runner")); err != nil { + log.Printf("Manager exited: %v", err) + } + }() + + time.Sleep(3 * time.Second) + + code := m.Run() + + if cancelManager != nil { + cancelManager() + time.Sleep(2 * time.Second) + } + + _ = testutil.DeleteNamespace(namespace, k8sClient) + os.Exit(code) +} + +func skipIfNoCluster(t *testing.T) { + if skipE2ETests { + t.Skip("Skipping e2e test: no Kubernetes cluster available") + } +} + +// TestEnvVarsConfigMapUpdate tests 
that updating a ConfigMap triggers env var update in deployment. +func TestEnvVarsConfigMapUpdate(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-cm") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment( + f.name, true, map[string]string{ + envVarsCfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentHasEnvVar(f.name) +} + +// TestEnvVarsSecretUpdate tests that updating a Secret triggers env var update in deployment. +func TestEnvVarsSecretUpdate(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-secret") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createDeployment( + f.name, false, map[string]string{ + envVarsCfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret") + f.assertDeploymentHasEnvVar(f.name) +} + +// TestEnvVarsAutoReload tests auto-reload with EnvVars strategy. +func TestEnvVarsAutoReload(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-auto") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment( + f.name, true, map[string]string{ + envVarsCfg.Annotations.Auto: "true", + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDeploymentHasEnvVar(f.name) +} + +// TestEnvVarsDaemonSetConfigMap tests that DaemonSets get env var on ConfigMap change. +func TestEnvVarsDaemonSetConfigMap(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-ds-cm") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDaemonSet( + f.name, true, map[string]string{ + envVarsCfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertDaemonSetHasEnvVar(f.name) +} + +// TestEnvVarsDaemonSetSecret tests that DaemonSets get env var on Secret change. 
+func TestEnvVarsDaemonSetSecret(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-ds-secret") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createDaemonSet( + f.name, false, map[string]string{ + envVarsCfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret") + f.assertDaemonSetHasEnvVar(f.name) +} + +// TestEnvVarsStatefulSetConfigMap tests that StatefulSets get env var on ConfigMap change. +func TestEnvVarsStatefulSetConfigMap(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-sts-cm") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createStatefulSet( + f.name, true, map[string]string{ + envVarsCfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data") + f.assertStatefulSetHasEnvVar(f.name) +} + +// TestEnvVarsStatefulSetSecret tests that StatefulSets get env var on Secret change. +func TestEnvVarsStatefulSetSecret(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-sts-secret") + defer f.cleanup() + + f.createSecret(f.name, "initial-secret") + f.createStatefulSet( + f.name, false, map[string]string{ + envVarsCfg.Annotations.SecretReload: f.name, + }, + ) + f.waitForReady() + + f.updateSecret(f.name, "updated-secret") + f.assertStatefulSetHasEnvVar(f.name) +} + +// TestEnvVarsLabelOnlyChange tests that label-only changes don't trigger env var updates. +func TestEnvVarsLabelOnlyChange(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-label") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment( + f.name, true, map[string]string{ + envVarsCfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMapLabel(f.name, "new-label") + f.assertDeploymentNoEnvVar(f.name) +} + +// TestEnvVarsMultipleUpdates tests multiple updates with EnvVars strategy. 
+func TestEnvVarsMultipleUpdates(t *testing.T) { + f := newEnvVarsFixture(t, "envvars-multi") + defer f.cleanup() + + f.createConfigMap(f.name, "initial-data") + f.createDeployment( + f.name, true, map[string]string{ + envVarsCfg.Annotations.ConfigmapReload: f.name, + }, + ) + f.waitForReady() + + f.updateConfigMap(f.name, "updated-data-1") + f.assertDeploymentHasEnvVar(f.name) + + deploy1, _ := k8sClient.AppsV1().Deployments(namespace).Get(context.Background(), f.name, metav1.GetOptions{}) + var envValue1 string + for _, container := range deploy1.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if len(env.Name) > len(envVarPrefix) && env.Name[:len(envVarPrefix)] == envVarPrefix { + envValue1 = env.Value + break + } + } + } + + time.Sleep(2 * time.Second) + + f.updateConfigMap(f.name, "updated-data-2") + time.Sleep(5 * time.Second) + + deploy2, _ := k8sClient.AppsV1().Deployments(namespace).Get(context.Background(), f.name, metav1.GetOptions{}) + var envValue2 string + for _, container := range deploy2.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if len(env.Name) > len(envVarPrefix) && env.Name[:len(envVarPrefix)] == envVarPrefix { + envValue2 = env.Value + break + } + } + } + + if envValue1 == envValue2 { + t.Errorf("Env var value should have changed after second update, got same value: %s", envValue1) + } +} From c785067a44b0d69df128c550972c85c36a5a5f60 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Sun, 4 Jan 2026 14:01:14 +0100 Subject: [PATCH 32/35] feat: Use strategic merge to patch workloads and add metrics for load testing --- cmd/reloader/main.go | 22 +- go.mod | 1 - go.sum | 6 - .../pkg/controller/configmap_reconciler.go | 31 +- internal/pkg/controller/handler.go | 77 ++++- internal/pkg/controller/manager.go | 22 +- internal/pkg/controller/retry.go | 11 +- internal/pkg/controller/secret_reconciler.go | 28 +- 
internal/pkg/metrics/prometheus.go | 316 +++++++++++++++++- internal/pkg/workload/cronjob.go | 4 + internal/pkg/workload/daemonset.go | 17 +- internal/pkg/workload/deployment.go | 17 +- internal/pkg/workload/deploymentconfig.go | 19 +- internal/pkg/workload/interface.go | 9 + internal/pkg/workload/job.go | 4 + internal/pkg/workload/rollout.go | 22 +- internal/pkg/workload/statefulset.go | 17 +- 17 files changed, 537 insertions(+), 86 deletions(-) diff --git a/cmd/reloader/main.go b/cmd/reloader/main.go index da23618c1..b603df654 100644 --- a/cmd/reloader/main.go +++ b/cmd/reloader/main.go @@ -106,6 +106,18 @@ func run(cmd *cobra.Command, args []string) error { collectors := metrics.SetupPrometheusEndpoint() + if config.ShouldAutoDetectOpenShift() { + restConfig := controllerruntime.GetConfigOrDie() + discoveryClient, err := discovery.NewDiscoveryClientForConfig(restConfig) + if err != nil { + log.V(1).Info("Failed to create discovery client for DeploymentConfig detection", "error", err) + } else if openshift.HasDeploymentConfigSupport(discoveryClient, log) { + cfg.DeploymentConfigEnabled = true + } + } + + controller.AddOptionalSchemes(cfg.ArgoRolloutsEnabled, cfg.DeploymentConfigEnabled) + mgr, err := controller.NewManager( controller.ManagerOptions{ Config: cfg, @@ -117,16 +129,6 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("creating manager: %w", err) } - if config.ShouldAutoDetectOpenShift() { - restConfig := controllerruntime.GetConfigOrDie() - discoveryClient, err := discovery.NewDiscoveryClientForConfig(restConfig) - if err != nil { - log.V(1).Info("Failed to create discovery client for DeploymentConfig detection", "error", err) - } else if openshift.HasDeploymentConfigSupport(discoveryClient, log) { - cfg.DeploymentConfigEnabled = true - } - } - if err := controller.SetupReconcilers(mgr, cfg, log, &collectors); err != nil { return fmt.Errorf("setting up reconcilers: %w", err) } diff --git a/go.mod b/go.mod index 
ece1cd8a8..ad21ed637 100644 --- a/go.mod +++ b/go.mod @@ -60,7 +60,6 @@ require ( github.com/prometheus/common v0.67.4 // indirect github.com/prometheus/procfs v0.19.2 // indirect github.com/sagikazarmark/locafero v0.12.0 // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect diff --git a/go.sum b/go.sum index caceb32c8..07c360254 100644 --- a/go.sum +++ b/go.sum @@ -117,8 +117,6 @@ github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= -github.com/openshift/api v0.0.0-20251222154221-d4b2fef98af2 h1:q7fp/9fnJuXWCzpBzbHZne6aMLGYPKhzPy1uULfaJqA= -github.com/openshift/api v0.0.0-20251222154221-d4b2fef98af2/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/api v0.0.0-20251223163548-3f584b29ee4a h1:lz22938uOBlzTHjGpobGeVWkcxGu6fDQ7oZWheClTHE= github.com/openshift/api v0.0.0-20251223163548-3f584b29ee4a/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc h1:nIlRaJfr/yGjPV15MNF5eVHLAGyXFjcUzO+hXeWDDk8= @@ -143,12 +141,8 @@ github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= 
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index 6736a518e..e14c14bf6 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -3,6 +3,7 @@ package controller import ( "context" "sync" + "time" "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/alerting" @@ -39,6 +40,7 @@ type ConfigMapReconciler struct { // Reconcile handles ConfigMap events and triggers workload reloads as needed. 
func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + startTime := time.Now() log := r.Log.WithValues("configmap", req.NamespacedName) r.initOnce.Do(func() { @@ -46,34 +48,53 @@ func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( log.Info("ConfigMap controller initialized") }) + r.Collectors.RecordEventReceived("reconcile", "configmap") + var cm corev1.ConfigMap if err := r.Get(ctx, req.NamespacedName, &cm); err != nil { if errors.IsNotFound(err) { if r.Config.ReloadOnDelete { - return r.handleDelete(ctx, req, log) + r.Collectors.RecordEventReceived("delete", "configmap") + result, err := r.handleDelete(ctx, req, log) + if err != nil { + r.Collectors.RecordReconcile("error", time.Since(startTime)) + } else { + r.Collectors.RecordReconcile("success", time.Since(startTime)) + } + return result, err } + r.Collectors.RecordSkipped("not_found") + r.Collectors.RecordReconcile("success", time.Since(startTime)) return ctrl.Result{}, nil } log.Error(err, "failed to get ConfigMap") + r.Collectors.RecordError("get_configmap") + r.Collectors.RecordReconcile("error", time.Since(startTime)) return ctrl.Result{}, err } if r.Config.IsNamespaceIgnored(cm.Namespace) { log.V(1).Info("skipping ConfigMap in ignored namespace") + r.Collectors.RecordSkipped("ignored_namespace") + r.Collectors.RecordReconcile("success", time.Since(startTime)) return ctrl.Result{}, nil } - return r.reloadHandler().Process(ctx, cm.Namespace, cm.Name, reload.ResourceTypeConfigMap, + result, err := r.reloadHandler().Process(ctx, cm.Namespace, cm.Name, reload.ResourceTypeConfigMap, func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { return r.ReloadService.Process(reload.ConfigMapChange{ ConfigMap: &cm, EventType: reload.EventTypeUpdate, }, workloads) }, log) -} -// FieldManager is the field manager name used for server-side apply. 
-const FieldManager = "reloader" + if err != nil { + r.Collectors.RecordReconcile("error", time.Since(startTime)) + } else { + r.Collectors.RecordReconcile("success", time.Since(startTime)) + } + return result, err +} func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { log.Info("handling ConfigMap deletion") diff --git a/internal/pkg/controller/handler.go b/internal/pkg/controller/handler.go index 423f2432f..f4065ce81 100644 --- a/internal/pkg/controller/handler.go +++ b/internal/pkg/controller/handler.go @@ -38,11 +38,32 @@ func (h *ReloadHandler) Process( workloads, err := h.Lister.List(ctx, namespace) if err != nil { log.Error(err, "failed to list workloads") + h.Collectors.RecordError("list_workloads") return ctrl.Result{}, err } + workloadsByKind := make(map[string]int) + for _, w := range workloads { + workloadsByKind[string(w.Kind())]++ + } + for kind, count := range workloadsByKind { + h.Collectors.RecordWorkloadsScanned(kind, count) + } + decisions := reload.FilterDecisions(getDecisions(workloads)) + matchedByKind := make(map[string]int) + for _, d := range decisions { + matchedByKind[string(d.Workload.Kind())]++ + } + for kind, count := range matchedByKind { + h.Collectors.RecordWorkloadsMatched(kind, count) + } + + if len(decisions) == 0 { + h.Collectors.RecordSkipped("no_match") + } + if h.WebhookClient.IsConfigured() && len(decisions) > 0 { return h.sendWebhook(ctx, resourceName, namespace, resourceType, decisions, log) } @@ -61,11 +82,13 @@ func (h *ReloadHandler) sendWebhook( var workloads []webhook.WorkloadInfo var hash string for _, d := range decisions { - workloads = append(workloads, webhook.WorkloadInfo{ - Kind: string(d.Workload.Kind()), - Name: d.Workload.GetName(), - Namespace: d.Workload.GetNamespace(), - }) + workloads = append( + workloads, webhook.WorkloadInfo{ + Kind: string(d.Workload.Kind()), + Name: d.Workload.GetName(), + Namespace: d.Workload.GetNamespace(), + }, 
+ ) if hash == "" { hash = d.Hash } @@ -81,17 +104,22 @@ func (h *ReloadHandler) sendWebhook( Workloads: workloads, } + actionStartTime := time.Now() if err := h.WebhookClient.Send(ctx, payload); err != nil { log.Error(err, "failed to send webhook notification") h.Collectors.RecordReload(false, namespace) + h.Collectors.RecordAction("webhook", "error", time.Since(actionStartTime)) + h.Collectors.RecordError("webhook_send") return ctrl.Result{}, err } - log.Info("webhook notification sent", + log.Info( + "webhook notification sent", "resource", resourceName, "workloadCount", len(workloads), ) h.Collectors.RecordReload(true, namespace) + h.Collectors.RecordAction("webhook", "success", time.Since(actionStartTime)) return ctrl.Result{}, nil } @@ -103,12 +131,14 @@ func (h *ReloadHandler) applyReloads( log logr.Logger, ) { for _, decision := range decisions { - log.Info("reloading workload", + log.Info( + "reloading workload", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), "reason", decision.Reason, ) + actionStartTime := time.Now() updated, err := UpdateWorkloadWithRetry( ctx, h.Client, @@ -121,35 +151,46 @@ func (h *ReloadHandler) applyReloads( decision.Hash, decision.AutoReload, ) + actionLatency := time.Since(actionStartTime) + if err != nil { - log.Error(err, "failed to update workload", + log.Error( + err, "failed to update workload", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) h.EventRecorder.ReloadFailed(decision.Workload.GetObject(), resourceType.Kind(), resourceName, err) h.Collectors.RecordReload(false, resourceNamespace) + h.Collectors.RecordAction(string(decision.Workload.Kind()), "error", actionLatency) + h.Collectors.RecordError("update_workload") continue } if updated { h.EventRecorder.ReloadSuccess(decision.Workload.GetObject(), resourceType.Kind(), resourceName) h.Collectors.RecordReload(true, resourceNamespace) - log.Info("workload reloaded successfully", + 
h.Collectors.RecordAction(string(decision.Workload.Kind()), "success", actionLatency) + log.Info( + "workload reloaded successfully", "workload", decision.Workload.GetName(), "kind", decision.Workload.Kind(), ) - if err := h.Alerter.Send(ctx, alerting.AlertMessage{ - WorkloadKind: string(decision.Workload.Kind()), - WorkloadName: decision.Workload.GetName(), - WorkloadNamespace: decision.Workload.GetNamespace(), - ResourceKind: resourceType.Kind(), - ResourceName: resourceName, - ResourceNamespace: resourceNamespace, - Timestamp: time.Now(), - }); err != nil { + if err := h.Alerter.Send( + ctx, alerting.AlertMessage{ + WorkloadKind: string(decision.Workload.Kind()), + WorkloadName: decision.Workload.GetName(), + WorkloadNamespace: decision.Workload.GetNamespace(), + ResourceKind: resourceType.Kind(), + ResourceName: resourceName, + ResourceNamespace: resourceNamespace, + Timestamp: time.Now(), + }, + ); err != nil { log.Error(err, "failed to send alert") } + } else { + h.Collectors.RecordAction(string(decision.Workload.Kind()), "no_change", actionLatency) } } } diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go index e6a0bcd73..aa1ab6cd8 100644 --- a/internal/pkg/controller/manager.go +++ b/internal/pkg/controller/manager.go @@ -27,8 +27,16 @@ var runtimeScheme = runtime.NewScheme() func init() { utilruntime.Must(clientgoscheme.AddToScheme(runtimeScheme)) - utilruntime.Must(argorolloutsv1alpha1.AddToScheme(runtimeScheme)) - utilruntime.Must(openshiftv1.AddToScheme(runtimeScheme)) +} + +// AddOptionalSchemes adds optional workload type schemes if enabled. +func AddOptionalSchemes(argoRolloutsEnabled, deploymentConfigEnabled bool) { + if argoRolloutsEnabled { + utilruntime.Must(argorolloutsv1alpha1.AddToScheme(runtimeScheme)) + } + if deploymentConfigEnabled { + utilruntime.Must(openshiftv1.AddToScheme(runtimeScheme)) + } } // ManagerOptions contains options for creating a new Manager. 
@@ -117,10 +125,12 @@ func NewManagerWithRestConfig(opts ManagerOptions, restConfig *rest.Config) (ctr // SetupReconcilers sets up all reconcilers with the manager. func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, collectors *metrics.Collectors) error { - registry := workload.NewRegistry(workload.RegistryOptions{ - ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, - DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, - }) + registry := workload.NewRegistry( + workload.RegistryOptions{ + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + }, + ) reloadService := reload.NewService(cfg) eventRecorder := events.NewRecorder(mgr.GetEventRecorderFor("reloader")) pauseHandler := reload.NewPauseHandler(cfg) diff --git a/internal/pkg/controller/retry.go b/internal/pkg/controller/retry.go index a7f20810b..fec0daacf 100644 --- a/internal/pkg/controller/retry.go +++ b/internal/pkg/controller/retry.go @@ -41,7 +41,7 @@ func UpdateObjectWithRetry( return nil } - return c.Update(ctx, obj, client.FieldOwner(FieldManager)) + return c.Update(ctx, obj, client.FieldOwner(workload.FieldManager)) }, ) } @@ -107,6 +107,7 @@ func retryWithReload( } return err } + wl.ResetOriginal() } isFirstAttempt = false @@ -142,7 +143,7 @@ func updateStandardWorkload( return retryWithReload( ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, func() error { - return c.Update(ctx, wl.GetObject(), client.FieldOwner(FieldManager)) + return wl.Update(ctx, c) }, ) } @@ -170,7 +171,7 @@ func updateDeploymentWithPause( return err } } - return c.Update(ctx, wl.GetObject(), client.FieldOwner(FieldManager)) + return wl.Update(ctx, c) }, ) } @@ -242,7 +243,7 @@ func updateJobWithRecreate( newJob.Spec.Selector = nil // Create the new job with same spec - if err := c.Create(ctx, newJob, client.FieldOwner(FieldManager)); err != nil { + if err := c.Create(ctx, newJob, client.FieldOwner(workload.FieldManager)); 
err != nil { return false, err } @@ -304,7 +305,7 @@ func updateCronJobWithNewJob( Spec: cronJob.Spec.JobTemplate.Spec, } - if err := c.Create(ctx, job, client.FieldOwner(FieldManager)); err != nil { + if err := c.Create(ctx, job, client.FieldOwner(workload.FieldManager)); err != nil { return false, err } diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index 46723803f..e7b2481bb 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ b/internal/pkg/controller/secret_reconciler.go @@ -3,6 +3,7 @@ package controller import ( "context" "sync" + "time" "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/alerting" @@ -39,6 +40,7 @@ type SecretReconciler struct { // Reconcile handles Secret events and triggers workload reloads as needed. func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + startTime := time.Now() log := r.Log.WithValues("secret", req.NamespacedName) r.initOnce.Do(func() { @@ -46,30 +48,52 @@ func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr log.Info("Secret controller initialized") }) + r.Collectors.RecordEventReceived("reconcile", "secret") + var secret corev1.Secret if err := r.Get(ctx, req.NamespacedName, &secret); err != nil { if errors.IsNotFound(err) { if r.Config.ReloadOnDelete { - return r.handleDelete(ctx, req, log) + r.Collectors.RecordEventReceived("delete", "secret") + result, err := r.handleDelete(ctx, req, log) + if err != nil { + r.Collectors.RecordReconcile("error", time.Since(startTime)) + } else { + r.Collectors.RecordReconcile("success", time.Since(startTime)) + } + return result, err } + r.Collectors.RecordSkipped("not_found") + r.Collectors.RecordReconcile("success", time.Since(startTime)) return ctrl.Result{}, nil } log.Error(err, "failed to get Secret") + r.Collectors.RecordError("get_secret") + r.Collectors.RecordReconcile("error", time.Since(startTime)) return 
ctrl.Result{}, err } if r.Config.IsNamespaceIgnored(secret.Namespace) { log.V(1).Info("skipping Secret in ignored namespace") + r.Collectors.RecordSkipped("ignored_namespace") + r.Collectors.RecordReconcile("success", time.Since(startTime)) return ctrl.Result{}, nil } - return r.reloadHandler().Process(ctx, secret.Namespace, secret.Name, reload.ResourceTypeSecret, + result, err := r.reloadHandler().Process(ctx, secret.Namespace, secret.Name, reload.ResourceTypeSecret, func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { return r.ReloadService.Process(reload.SecretChange{ Secret: &secret, EventType: reload.EventTypeUpdate, }, workloads) }, log) + + if err != nil { + r.Collectors.RecordReconcile("error", time.Since(startTime)) + } else { + r.Collectors.RecordReconcile("success", time.Since(startTime)) + } + return result, err } func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { diff --git a/internal/pkg/metrics/prometheus.go b/internal/pkg/metrics/prometheus.go index 87f9148be..b95731c88 100644 --- a/internal/pkg/metrics/prometheus.go +++ b/internal/pkg/metrics/prometheus.go @@ -3,16 +3,48 @@ package metrics import ( "net/http" "os" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" ) -// Collectors holds Prometheus metrics collectors for Reloader. +// Collectors holds all Prometheus metrics collectors for Reloader. 
type Collectors struct { Reloaded *prometheus.CounterVec ReloadedByNamespace *prometheus.CounterVec countByNamespace bool + + // === Comprehensive metrics for load testing === + + // Reconcile/Handler metrics + ReconcileTotal *prometheus.CounterVec // Total reconcile calls by result + ReconcileDuration *prometheus.HistogramVec // Time spent in reconcile/handler + + // Action metrics + ActionTotal *prometheus.CounterVec // Total actions by workload kind and result + ActionLatency *prometheus.HistogramVec // Time from event to action applied + + // Skip metrics + SkippedTotal *prometheus.CounterVec // Skipped operations by reason + + // Queue metrics (controller-runtime exposes some automatically, but we add custom ones) + QueueDepth prometheus.Gauge // Current queue depth + QueueAdds prometheus.Counter // Total items added to queue + QueueLatency *prometheus.HistogramVec // Time spent in queue + + // Error and retry metrics + ErrorsTotal *prometheus.CounterVec // Errors by type + RetriesTotal prometheus.Counter // Total retries + + // Event processing metrics + EventsReceived *prometheus.CounterVec // Events received by type (add/update/delete) + EventsProcessed *prometheus.CounterVec // Events processed by type and result + + // Resource discovery metrics + WorkloadsScanned *prometheus.CounterVec // Workloads scanned by kind + WorkloadsMatched *prometheus.CounterVec // Workloads matched for reload by kind } // RecordReload records a reload event with the given success status and namespace. @@ -29,11 +61,111 @@ func (c *Collectors) RecordReload(success bool, namespace string) { c.Reloaded.With(prometheus.Labels{"success": successLabel}).Inc() if c.countByNamespace { - c.ReloadedByNamespace.With(prometheus.Labels{ - "success": successLabel, - "namespace": namespace, - }).Inc() + c.ReloadedByNamespace.With( + prometheus.Labels{ + "success": successLabel, + "namespace": namespace, + }, + ).Inc() + } +} + +// RecordReconcile records a reconcile/handler invocation. 
+func (c *Collectors) RecordReconcile(result string, duration time.Duration) { + if c == nil { + return + } + c.ReconcileTotal.With(prometheus.Labels{"result": result}).Inc() + c.ReconcileDuration.With(prometheus.Labels{"result": result}).Observe(duration.Seconds()) +} + +// RecordAction records a reload action on a workload. +func (c *Collectors) RecordAction(workloadKind string, result string, latency time.Duration) { + if c == nil { + return + } + c.ActionTotal.With(prometheus.Labels{"workload_kind": workloadKind, "result": result}).Inc() + c.ActionLatency.With(prometheus.Labels{"workload_kind": workloadKind}).Observe(latency.Seconds()) +} + +// RecordSkipped records a skipped operation with reason. +func (c *Collectors) RecordSkipped(reason string) { + if c == nil { + return + } + c.SkippedTotal.With(prometheus.Labels{"reason": reason}).Inc() +} + +// RecordQueueAdd records an item being added to the queue. +func (c *Collectors) RecordQueueAdd() { + if c == nil { + return + } + c.QueueAdds.Inc() +} + +// SetQueueDepth sets the current queue depth. +func (c *Collectors) SetQueueDepth(depth int) { + if c == nil { + return + } + c.QueueDepth.Set(float64(depth)) +} + +// RecordQueueLatency records how long an item spent in the queue. +func (c *Collectors) RecordQueueLatency(latency time.Duration) { + if c == nil { + return + } + c.QueueLatency.With(prometheus.Labels{}).Observe(latency.Seconds()) +} + +// RecordError records an error by type. +func (c *Collectors) RecordError(errorType string) { + if c == nil { + return + } + c.ErrorsTotal.With(prometheus.Labels{"type": errorType}).Inc() +} + +// RecordRetry records a retry attempt. +func (c *Collectors) RecordRetry() { + if c == nil { + return + } + c.RetriesTotal.Inc() +} + +// RecordEventReceived records an event being received. 
+func (c *Collectors) RecordEventReceived(eventType string, resourceType string) { + if c == nil { + return } + c.EventsReceived.With(prometheus.Labels{"event_type": eventType, "resource_type": resourceType}).Inc() +} + +// RecordEventProcessed records an event being processed. +func (c *Collectors) RecordEventProcessed(eventType string, resourceType string, result string) { + if c == nil { + return + } + c.EventsProcessed.With(prometheus.Labels{"event_type": eventType, "resource_type": resourceType, "result": result}).Inc() +} + +// RecordWorkloadsScanned records workloads scanned during a reconcile. +func (c *Collectors) RecordWorkloadsScanned(kind string, count int) { + if c == nil { + return + } + c.WorkloadsScanned.With(prometheus.Labels{"kind": kind}).Add(float64(count)) +} + +// RecordWorkloadsMatched records workloads matched for reload. +func (c *Collectors) RecordWorkloadsMatched(kind string, count int) { + if c == nil { + return + } + c.WorkloadsMatched.With(prometheus.Labels{"kind": kind}).Add(float64(count)) } func NewCollectors() Collectors { @@ -43,11 +175,8 @@ func NewCollectors() Collectors { Name: "reload_executed_total", Help: "Counter of reloads executed by Reloader.", }, - []string{ - "success", - }, + []string{"success"}, ) - reloaded.With(prometheus.Labels{"success": "true"}).Add(0) reloaded.With(prometheus.Labels{"success": "false"}).Add(0) @@ -57,26 +186,185 @@ func NewCollectors() Collectors { Name: "reload_executed_total_by_namespace", Help: "Counter of reloads executed by Reloader by namespace.", }, - []string{ - "success", - "namespace", + []string{"success", "namespace"}, + ) + + // === Comprehensive metrics === + + reconcileTotal := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "reconcile_total", + Help: "Total number of reconcile/handler invocations by result.", + }, + []string{"result"}, + ) + + reconcileDuration := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: 
"reloader", + Name: "reconcile_duration_seconds", + Help: "Time spent in reconcile/handler in seconds.", + Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}, + }, + []string{"result"}, + ) + + actionTotal := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "action_total", + Help: "Total number of reload actions by workload kind and result.", + }, + []string{"workload_kind", "result"}, + ) + + actionLatency := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "reloader", + Name: "action_latency_seconds", + Help: "Time from event received to action applied in seconds.", + Buckets: []float64{0.01, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60}, }, + []string{"workload_kind"}, ) + + skippedTotal := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "skipped_total", + Help: "Total number of skipped operations by reason.", + }, + []string{"reason"}, + ) + + queueDepth := prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "reloader", + Name: "workqueue_depth", + Help: "Current depth of the work queue.", + }, + ) + + queueAdds := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "workqueue_adds_total", + Help: "Total number of items added to the work queue.", + }, + ) + + queueLatency := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "reloader", + Name: "workqueue_latency_seconds", + Help: "Time spent in the work queue in seconds.", + Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5}, + }, + []string{}, + ) + + errorsTotal := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "errors_total", + Help: "Total number of errors by type.", + }, + []string{"type"}, + ) + + retriesTotal := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "retries_total", + Help: "Total number of retry attempts.", + }, + ) + + 
eventsReceived := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "events_received_total", + Help: "Total number of events received by type and resource.", + }, + []string{"event_type", "resource_type"}, + ) + + eventsProcessed := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "events_processed_total", + Help: "Total number of events processed by type, resource, and result.", + }, + []string{"event_type", "resource_type", "result"}, + ) + + workloadsScanned := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "workloads_scanned_total", + Help: "Total number of workloads scanned by kind.", + }, + []string{"kind"}, + ) + + workloadsMatched := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "reloader", + Name: "workloads_matched_total", + Help: "Total number of workloads matched for reload by kind.", + }, + []string{"kind"}, + ) + return Collectors{ Reloaded: reloaded, ReloadedByNamespace: reloadedByNamespace, countByNamespace: os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled", + + ReconcileTotal: reconcileTotal, + ReconcileDuration: reconcileDuration, + ActionTotal: actionTotal, + ActionLatency: actionLatency, + SkippedTotal: skippedTotal, + QueueDepth: queueDepth, + QueueAdds: queueAdds, + QueueLatency: queueLatency, + ErrorsTotal: errorsTotal, + RetriesTotal: retriesTotal, + EventsReceived: eventsReceived, + EventsProcessed: eventsProcessed, + WorkloadsScanned: workloadsScanned, + WorkloadsMatched: workloadsMatched, } } func SetupPrometheusEndpoint() Collectors { collectors := NewCollectors() - prometheus.MustRegister(collectors.Reloaded) + + ctrlmetrics.Registry.MustRegister(collectors.Reloaded) + ctrlmetrics.Registry.MustRegister(collectors.ReconcileTotal) + ctrlmetrics.Registry.MustRegister(collectors.ReconcileDuration) + ctrlmetrics.Registry.MustRegister(collectors.ActionTotal) + 
ctrlmetrics.Registry.MustRegister(collectors.ActionLatency) + ctrlmetrics.Registry.MustRegister(collectors.SkippedTotal) + ctrlmetrics.Registry.MustRegister(collectors.QueueDepth) + ctrlmetrics.Registry.MustRegister(collectors.QueueAdds) + ctrlmetrics.Registry.MustRegister(collectors.QueueLatency) + ctrlmetrics.Registry.MustRegister(collectors.ErrorsTotal) + ctrlmetrics.Registry.MustRegister(collectors.RetriesTotal) + ctrlmetrics.Registry.MustRegister(collectors.EventsReceived) + ctrlmetrics.Registry.MustRegister(collectors.EventsProcessed) + ctrlmetrics.Registry.MustRegister(collectors.WorkloadsScanned) + ctrlmetrics.Registry.MustRegister(collectors.WorkloadsMatched) if os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled" { - prometheus.MustRegister(collectors.ReloadedByNamespace) + ctrlmetrics.Registry.MustRegister(collectors.ReloadedByNamespace) } + // Note: For controller-runtime based Reloader, the metrics are served + // by controller-runtime's metrics server. This http.Handle is kept for + // the legacy informer-based Reloader which uses its own HTTP server. http.Handle("/metrics", promhttp.Handler()) return collectors diff --git a/internal/pkg/workload/cronjob.go b/internal/pkg/workload/cronjob.go index 80d672e33..9f61b019b 100644 --- a/internal/pkg/workload/cronjob.go +++ b/internal/pkg/workload/cronjob.go @@ -91,6 +91,10 @@ func (w *CronJobWorkload) DeepCopy() Workload { return &CronJobWorkload{cronjob: w.cronjob.DeepCopy()} } +// ResetOriginal is a no-op for CronJobs since they don't use strategic merge patch. +// CronJobs create new Jobs instead of being patched. 
+func (w *CronJobWorkload) ResetOriginal() {} + func (w *CronJobWorkload) GetEnvFromSources() []corev1.EnvFromSource { var sources []corev1.EnvFromSource for _, container := range w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers { diff --git a/internal/pkg/workload/daemonset.go b/internal/pkg/workload/daemonset.go index 85ac7b053..c2294a4c3 100644 --- a/internal/pkg/workload/daemonset.go +++ b/internal/pkg/workload/daemonset.go @@ -12,11 +12,15 @@ import ( // DaemonSetWorkload wraps a Kubernetes DaemonSet. type DaemonSetWorkload struct { daemonset *appsv1.DaemonSet + original *appsv1.DaemonSet } // NewDaemonSetWorkload creates a new DaemonSetWorkload. func NewDaemonSetWorkload(d *appsv1.DaemonSet) *DaemonSetWorkload { - return &DaemonSetWorkload{daemonset: d} + return &DaemonSetWorkload{ + daemonset: d, + original: d.DeepCopy(), + } } // Ensure DaemonSetWorkload implements WorkloadAccessor. @@ -77,11 +81,18 @@ func (w *DaemonSetWorkload) GetVolumes() []corev1.Volume { } func (w *DaemonSetWorkload) Update(ctx context.Context, c client.Client) error { - return c.Update(ctx, w.daemonset) + return c.Patch(ctx, w.daemonset, client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) } func (w *DaemonSetWorkload) DeepCopy() Workload { - return &DaemonSetWorkload{daemonset: w.daemonset.DeepCopy()} + return &DaemonSetWorkload{ + daemonset: w.daemonset.DeepCopy(), + original: w.original.DeepCopy(), + } +} + +func (w *DaemonSetWorkload) ResetOriginal() { + w.original = w.daemonset.DeepCopy() } func (w *DaemonSetWorkload) GetEnvFromSources() []corev1.EnvFromSource { diff --git a/internal/pkg/workload/deployment.go b/internal/pkg/workload/deployment.go index e4ebefb53..747e99452 100644 --- a/internal/pkg/workload/deployment.go +++ b/internal/pkg/workload/deployment.go @@ -12,11 +12,15 @@ import ( // DeploymentWorkload wraps a Kubernetes Deployment. 
type DeploymentWorkload struct { deployment *appsv1.Deployment + original *appsv1.Deployment } // NewDeploymentWorkload creates a new DeploymentWorkload. func NewDeploymentWorkload(d *appsv1.Deployment) *DeploymentWorkload { - return &DeploymentWorkload{deployment: d} + return &DeploymentWorkload{ + deployment: d, + original: d.DeepCopy(), + } } // Ensure DeploymentWorkload implements WorkloadAccessor. @@ -77,11 +81,18 @@ func (w *DeploymentWorkload) GetVolumes() []corev1.Volume { } func (w *DeploymentWorkload) Update(ctx context.Context, c client.Client) error { - return c.Update(ctx, w.deployment) + return c.Patch(ctx, w.deployment, client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) } func (w *DeploymentWorkload) DeepCopy() Workload { - return &DeploymentWorkload{deployment: w.deployment.DeepCopy()} + return &DeploymentWorkload{ + deployment: w.deployment.DeepCopy(), + original: w.original.DeepCopy(), + } +} + +func (w *DeploymentWorkload) ResetOriginal() { + w.original = w.deployment.DeepCopy() } func (w *DeploymentWorkload) GetEnvFromSources() []corev1.EnvFromSource { diff --git a/internal/pkg/workload/deploymentconfig.go b/internal/pkg/workload/deploymentconfig.go index 14c60469a..680a78b63 100644 --- a/internal/pkg/workload/deploymentconfig.go +++ b/internal/pkg/workload/deploymentconfig.go @@ -11,12 +11,16 @@ import ( // DeploymentConfigWorkload wraps an OpenShift DeploymentConfig. type DeploymentConfigWorkload struct { - dc *openshiftv1.DeploymentConfig + dc *openshiftv1.DeploymentConfig + original *openshiftv1.DeploymentConfig } // NewDeploymentConfigWorkload creates a new DeploymentConfigWorkload. func NewDeploymentConfigWorkload(dc *openshiftv1.DeploymentConfig) *DeploymentConfigWorkload { - return &DeploymentConfigWorkload{dc: dc} + return &DeploymentConfigWorkload{ + dc: dc, + original: dc.DeepCopy(), + } } // Ensure DeploymentConfigWorkload implements WorkloadAccessor. 
@@ -98,11 +102,18 @@ func (w *DeploymentConfigWorkload) GetVolumes() []corev1.Volume { } func (w *DeploymentConfigWorkload) Update(ctx context.Context, c client.Client) error { - return c.Update(ctx, w.dc) + return c.Patch(ctx, w.dc, client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) } func (w *DeploymentConfigWorkload) DeepCopy() Workload { - return &DeploymentConfigWorkload{dc: w.dc.DeepCopy()} + return &DeploymentConfigWorkload{ + dc: w.dc.DeepCopy(), + original: w.original.DeepCopy(), + } +} + +func (w *DeploymentConfigWorkload) ResetOriginal() { + w.original = w.dc.DeepCopy() } func (w *DeploymentConfigWorkload) GetEnvFromSources() []corev1.EnvFromSource { diff --git a/internal/pkg/workload/interface.go b/internal/pkg/workload/interface.go index e1d50a18a..40249edb4 100644 --- a/internal/pkg/workload/interface.go +++ b/internal/pkg/workload/interface.go @@ -14,6 +14,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +// FieldManager is the field manager name used for server-side apply and patch operations. +// This identifies Reloader as the actor making changes to workload resources. +const FieldManager = "reloader" + // Kind represents the type of workload. type Kind string @@ -69,6 +73,11 @@ type Workload interface { // Update persists changes to the workload. Update(ctx context.Context, c client.Client) error + // ResetOriginal resets the original state to the current object state. + // This should be called after re-fetching the object (e.g., after a conflict) + // to ensure strategic merge patch diffs are calculated correctly. + ResetOriginal() + // DeepCopy returns a deep copy of the workload. 
DeepCopy() Workload } diff --git a/internal/pkg/workload/job.go b/internal/pkg/workload/job.go index 4e6c9fc67..291249f89 100644 --- a/internal/pkg/workload/job.go +++ b/internal/pkg/workload/job.go @@ -90,6 +90,10 @@ func (w *JobWorkload) DeepCopy() Workload { return &JobWorkload{job: w.job.DeepCopy()} } +// ResetOriginal is a no-op for Jobs since they don't use strategic merge patch. +// Jobs are deleted and recreated instead of being patched. +func (w *JobWorkload) ResetOriginal() {} + func (w *JobWorkload) GetEnvFromSources() []corev1.EnvFromSource { var sources []corev1.EnvFromSource for _, container := range w.job.Spec.Template.Spec.Containers { diff --git a/internal/pkg/workload/rollout.go b/internal/pkg/workload/rollout.go index f19c17132..8e78d3e9e 100644 --- a/internal/pkg/workload/rollout.go +++ b/internal/pkg/workload/rollout.go @@ -27,12 +27,16 @@ const RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy" // RolloutWorkload wraps an Argo Rollout. type RolloutWorkload struct { - rollout *argorolloutv1alpha1.Rollout + rollout *argorolloutv1alpha1.Rollout + original *argorolloutv1alpha1.Rollout } // NewRolloutWorkload creates a new RolloutWorkload. func NewRolloutWorkload(r *argorolloutv1alpha1.Rollout) *RolloutWorkload { - return &RolloutWorkload{rollout: r} + return &RolloutWorkload{ + rollout: r, + original: r.DeepCopy(), + } } // Ensure RolloutWorkload implements WorkloadAccessor. 
@@ -98,12 +102,11 @@ func (w *RolloutWorkload) Update(ctx context.Context, c client.Client) error { strategy := w.getStrategy() switch strategy { case RolloutStrategyRestart: - // Use merge patch to set restartAt field + // Set restartAt field to trigger a restart restartAt := metav1.NewTime(time.Now()) w.rollout.Spec.RestartAt = &restartAt } - // For both strategies, we update the rollout (annotations have already been set) - return c.Update(ctx, w.rollout) + return c.Patch(ctx, w.rollout, client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) } // getStrategy returns the rollout strategy from the annotation. @@ -122,7 +125,14 @@ func (w *RolloutWorkload) getStrategy() RolloutStrategy { } func (w *RolloutWorkload) DeepCopy() Workload { - return &RolloutWorkload{rollout: w.rollout.DeepCopy()} + return &RolloutWorkload{ + rollout: w.rollout.DeepCopy(), + original: w.original.DeepCopy(), + } +} + +func (w *RolloutWorkload) ResetOriginal() { + w.original = w.rollout.DeepCopy() } func (w *RolloutWorkload) GetEnvFromSources() []corev1.EnvFromSource { diff --git a/internal/pkg/workload/statefulset.go b/internal/pkg/workload/statefulset.go index 31dddeb29..ebec4a007 100644 --- a/internal/pkg/workload/statefulset.go +++ b/internal/pkg/workload/statefulset.go @@ -12,11 +12,15 @@ import ( // StatefulSetWorkload wraps a Kubernetes StatefulSet. type StatefulSetWorkload struct { statefulset *appsv1.StatefulSet + original *appsv1.StatefulSet } // NewStatefulSetWorkload creates a new StatefulSetWorkload. func NewStatefulSetWorkload(s *appsv1.StatefulSet) *StatefulSetWorkload { - return &StatefulSetWorkload{statefulset: s} + return &StatefulSetWorkload{ + statefulset: s, + original: s.DeepCopy(), + } } // Ensure StatefulSetWorkload implements WorkloadAccessor. 
@@ -77,11 +81,18 @@ func (w *StatefulSetWorkload) GetVolumes() []corev1.Volume { } func (w *StatefulSetWorkload) Update(ctx context.Context, c client.Client) error { - return c.Update(ctx, w.statefulset) + return c.Patch(ctx, w.statefulset, client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) } func (w *StatefulSetWorkload) DeepCopy() Workload { - return &StatefulSetWorkload{statefulset: w.statefulset.DeepCopy()} + return &StatefulSetWorkload{ + statefulset: w.statefulset.DeepCopy(), + original: w.original.DeepCopy(), + } +} + +func (w *StatefulSetWorkload) ResetOriginal() { + w.original = w.statefulset.DeepCopy() } func (w *StatefulSetWorkload) GetEnvFromSources() []corev1.EnvFromSource { From 5548ce559a4600a15e43150dd0889f730351c836 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Mon, 5 Jan 2026 00:59:57 +0100 Subject: [PATCH 33/35] feat: Introduce a Generic ResourceReconciler and a generic BaseWorkload to de-duplicate a lot of code --- internal/pkg/alerting/http.go | 20 +- .../pkg/controller/configmap_reconciler.go | 162 +++-------- internal/pkg/controller/handler.go | 2 +- internal/pkg/controller/manager.go | 57 ++-- .../pkg/controller/resource_reconciler.go | 186 +++++++++++++ internal/pkg/controller/retry.go | 193 ++----------- internal/pkg/controller/retry_test.go | 29 +- internal/pkg/controller/secret_reconciler.go | 162 +++-------- internal/pkg/controller/test_helpers_test.go | 104 ++++--- internal/pkg/http/client.go | 69 +++++ internal/pkg/http/client_test.go | 142 ++++++++++ internal/pkg/metadata/metadata.go | 2 - internal/pkg/metadata/publisher.go | 5 +- internal/pkg/reload/decision.go | 2 +- internal/pkg/reload/pause.go | 6 +- internal/pkg/reload/pause_test.go | 4 +- internal/pkg/reload/service.go | 42 +-- internal/pkg/reload/service_test.go | 77 +++--- internal/pkg/webhook/webhook.go | 9 +- internal/pkg/workload/base.go | 188 +++++++++++++ 
internal/pkg/workload/cronjob.go | 134 ++++----- internal/pkg/workload/daemonset.go | 115 ++------ internal/pkg/workload/deployment.go | 116 ++------ internal/pkg/workload/deploymentconfig.go | 155 +++-------- internal/pkg/workload/interface.go | 121 +++++---- internal/pkg/workload/job.go | 142 +++++----- internal/pkg/workload/lister.go | 43 +-- internal/pkg/workload/registry.go | 39 ++- internal/pkg/workload/rollout.go | 126 ++------- internal/pkg/workload/statefulset.go | 115 ++------ internal/pkg/workload/workload_test.go | 256 +++++++++++++++++- 31 files changed, 1479 insertions(+), 1344 deletions(-) create mode 100644 internal/pkg/controller/resource_reconciler.go create mode 100644 internal/pkg/http/client.go create mode 100644 internal/pkg/http/client_test.go create mode 100644 internal/pkg/workload/base.go diff --git a/internal/pkg/alerting/http.go b/internal/pkg/alerting/http.go index e5bb3890c..2501e695f 100644 --- a/internal/pkg/alerting/http.go +++ b/internal/pkg/alerting/http.go @@ -6,8 +6,8 @@ import ( "fmt" "io" "net/http" - "net/url" - "time" + + httputil "github.com/stakater/Reloader/internal/pkg/http" ) // httpClient wraps http.Client with common configuration. @@ -17,20 +17,12 @@ type httpClient struct { // newHTTPClient creates a new httpClient with optional proxy support. 
func newHTTPClient(proxyURL string) *httpClient { - transport := &http.Transport{} - - if proxyURL != "" { - proxy, err := url.Parse(proxyURL) - if err == nil { - transport.Proxy = http.ProxyURL(proxy) - } - } + cfg := httputil.DefaultConfig() + cfg.Timeout = httputil.AlertingTimeout + cfg.ProxyURL = proxyURL return &httpClient{ - client: &http.Client{ - Transport: transport, - Timeout: 10 * time.Second, - }, + client: httputil.NewClient(cfg), } } diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index e14c14bf6..bfa40622b 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -1,10 +1,6 @@ package controller import ( - "context" - "sync" - "time" - "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" @@ -14,129 +10,57 @@ import ( "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // ConfigMapReconciler watches ConfigMaps and triggers workload reloads. -type ConfigMapReconciler struct { - client.Client - Log logr.Logger - Config *config.Config - ReloadService *reload.Service - Registry *workload.Registry - Collectors *metrics.Collectors - EventRecorder *events.Recorder - WebhookClient *webhook.Client - Alerter alerting.Alerter - PauseHandler *reload.PauseHandler - - handler *ReloadHandler - initialized bool - initOnce sync.Once -} - -// Reconcile handles ConfigMap events and triggers workload reloads as needed. 
-func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - startTime := time.Now() - log := r.Log.WithValues("configmap", req.NamespacedName) - - r.initOnce.Do(func() { - r.initialized = true - log.Info("ConfigMap controller initialized") - }) - - r.Collectors.RecordEventReceived("reconcile", "configmap") - - var cm corev1.ConfigMap - if err := r.Get(ctx, req.NamespacedName, &cm); err != nil { - if errors.IsNotFound(err) { - if r.Config.ReloadOnDelete { - r.Collectors.RecordEventReceived("delete", "configmap") - result, err := r.handleDelete(ctx, req, log) - if err != nil { - r.Collectors.RecordReconcile("error", time.Since(startTime)) - } else { - r.Collectors.RecordReconcile("success", time.Since(startTime)) - } - return result, err - } - r.Collectors.RecordSkipped("not_found") - r.Collectors.RecordReconcile("success", time.Since(startTime)) - return ctrl.Result{}, nil - } - log.Error(err, "failed to get ConfigMap") - r.Collectors.RecordError("get_configmap") - r.Collectors.RecordReconcile("error", time.Since(startTime)) - return ctrl.Result{}, err - } - - if r.Config.IsNamespaceIgnored(cm.Namespace) { - log.V(1).Info("skipping ConfigMap in ignored namespace") - r.Collectors.RecordSkipped("ignored_namespace") - r.Collectors.RecordReconcile("success", time.Since(startTime)) - return ctrl.Result{}, nil - } - - result, err := r.reloadHandler().Process(ctx, cm.Namespace, cm.Name, reload.ResourceTypeConfigMap, - func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { - return r.ReloadService.Process(reload.ConfigMapChange{ - ConfigMap: &cm, - EventType: reload.EventTypeUpdate, - }, workloads) - }, log) - - if err != nil { - r.Collectors.RecordReconcile("error", time.Since(startTime)) - } else { - r.Collectors.RecordReconcile("success", time.Since(startTime)) - } - return result, err -} - -func (r *ConfigMapReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { 
- log.Info("handling ConfigMap deletion") - - cm := &corev1.ConfigMap{} - cm.Name = req.Name - cm.Namespace = req.Namespace - - return r.reloadHandler().Process(ctx, req.Namespace, req.Name, reload.ResourceTypeConfigMap, - func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { - return r.ReloadService.Process(reload.ConfigMapChange{ - ConfigMap: cm, - EventType: reload.EventTypeDelete, - }, workloads) - }, log) -} - -func (r *ConfigMapReconciler) reloadHandler() *ReloadHandler { - if r.handler == nil { - r.handler = &ReloadHandler{ - Client: r.Client, - Lister: workload.NewLister(r.Client, r.Registry, r.Config), - ReloadService: r.ReloadService, - WebhookClient: r.WebhookClient, - Collectors: r.Collectors, - EventRecorder: r.EventRecorder, - Alerter: r.Alerter, - PauseHandler: r.PauseHandler, - } - } - return r.handler +type ConfigMapReconciler = ResourceReconciler[*corev1.ConfigMap] + +// NewConfigMapReconciler creates a new ConfigMapReconciler with the given dependencies. 
+func NewConfigMapReconciler( + c client.Client, + log logr.Logger, + cfg *config.Config, + reloadService *reload.Service, + registry *workload.Registry, + collectors *metrics.Collectors, + eventRecorder *events.Recorder, + webhookClient *webhook.Client, + alerter alerting.Alerter, + pauseHandler *reload.PauseHandler, +) *ConfigMapReconciler { + return NewResourceReconciler( + ResourceReconcilerDeps{ + Client: c, + Log: log, + Config: cfg, + ReloadService: reloadService, + Registry: registry, + Collectors: collectors, + EventRecorder: eventRecorder, + WebhookClient: webhookClient, + Alerter: alerter, + PauseHandler: pauseHandler, + }, + ResourceConfig[*corev1.ConfigMap]{ + ResourceType: reload.ResourceTypeConfigMap, + NewResource: func() *corev1.ConfigMap { return &corev1.ConfigMap{} }, + CreateChange: func(cm *corev1.ConfigMap, eventType reload.EventType) reload.ResourceChange { + return reload.ConfigMapChange{ConfigMap: cm, EventType: eventType} + }, + CreatePredicates: func(cfg *config.Config, hasher *reload.Hasher) predicate.Predicate { + return reload.ConfigMapPredicates(cfg, hasher) + }, + }, + ) } -// SetupWithManager sets up the controller with the Manager. -func (r *ConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&corev1.ConfigMap{}). - WithEventFilter(BuildEventFilter( - reload.ConfigMapPredicates(r.Config, r.ReloadService.Hasher()), - r.Config, &r.initialized, - )). - Complete(r) +// SetupConfigMapReconciler sets up a ConfigMap reconciler with the manager. 
+func SetupConfigMapReconciler(mgr ctrl.Manager, r *ConfigMapReconciler) error { + return r.SetupWithManager(mgr, &corev1.ConfigMap{}) } var _ reconcile.Reconciler = &ConfigMapReconciler{} diff --git a/internal/pkg/controller/handler.go b/internal/pkg/controller/handler.go index f4065ce81..46905e81e 100644 --- a/internal/pkg/controller/handler.go +++ b/internal/pkg/controller/handler.go @@ -32,7 +32,7 @@ func (h *ReloadHandler) Process( ctx context.Context, namespace, resourceName string, resourceType reload.ResourceType, - getDecisions func([]workload.WorkloadAccessor) []reload.ReloadDecision, + getDecisions func([]workload.Workload) []reload.ReloadDecision, log logr.Logger, ) (ctrl.Result, error) { workloads, err := h.Lister.List(ctx, namespace) diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go index aa1ab6cd8..6994c8811 100644 --- a/internal/pkg/controller/manager.go +++ b/internal/pkg/controller/manager.go @@ -127,11 +127,12 @@ func NewManagerWithRestConfig(opts ManagerOptions, restConfig *rest.Config) (ctr func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, collectors *metrics.Collectors) error { registry := workload.NewRegistry( workload.RegistryOptions{ - ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, - DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + RolloutStrategyAnnotation: cfg.Annotations.RolloutStrategy, }, ) - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, log.WithName("reload")) eventRecorder := events.NewRecorder(mgr.GetEventRecorderFor("reloader")) pauseHandler := reload.NewPauseHandler(cfg) @@ -150,36 +151,38 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col // Setup ConfigMap reconciler if !cfg.IsResourceIgnored("configmaps") { - if err := (&ConfigMapReconciler{ - Client: mgr.GetClient(), - Log: 
log.WithName("configmap-reconciler"), - Config: cfg, - ReloadService: reloadService, - Registry: registry, - Collectors: collectors, - EventRecorder: eventRecorder, - WebhookClient: webhookClient, - Alerter: alerter, - PauseHandler: pauseHandler, - }).SetupWithManager(mgr); err != nil { + cmReconciler := NewConfigMapReconciler( + mgr.GetClient(), + log.WithName("configmap-reconciler"), + cfg, + reloadService, + registry, + collectors, + eventRecorder, + webhookClient, + alerter, + pauseHandler, + ) + if err := SetupConfigMapReconciler(mgr, cmReconciler); err != nil { return fmt.Errorf("setting up configmap reconciler: %w", err) } } // Setup Secret reconciler if !cfg.IsResourceIgnored("secrets") { - if err := (&SecretReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("secret-reconciler"), - Config: cfg, - ReloadService: reloadService, - Registry: registry, - Collectors: collectors, - EventRecorder: eventRecorder, - WebhookClient: webhookClient, - Alerter: alerter, - PauseHandler: pauseHandler, - }).SetupWithManager(mgr); err != nil { + secretReconciler := NewSecretReconciler( + mgr.GetClient(), + log.WithName("secret-reconciler"), + cfg, + reloadService, + registry, + collectors, + eventRecorder, + webhookClient, + alerter, + pauseHandler, + ) + if err := SetupSecretReconciler(mgr, secretReconciler); err != nil { return fmt.Errorf("setting up secret reconciler: %w", err) } } diff --git a/internal/pkg/controller/resource_reconciler.go b/internal/pkg/controller/resource_reconciler.go new file mode 100644 index 000000000..0bd694dc2 --- /dev/null +++ b/internal/pkg/controller/resource_reconciler.go @@ -0,0 +1,186 @@ +package controller + +import ( + "context" + "sync" + "time" + + "github.com/go-logr/logr" + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + 
"github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// ResourceReconcilerDeps holds shared dependencies for resource reconcilers. +type ResourceReconcilerDeps struct { + Client client.Client + Log logr.Logger + Config *config.Config + ReloadService *reload.Service + Registry *workload.Registry + Collectors *metrics.Collectors + EventRecorder *events.Recorder + WebhookClient *webhook.Client + Alerter alerting.Alerter + PauseHandler *reload.PauseHandler +} + +// ResourceConfig provides type-specific configuration for a resource reconciler. +type ResourceConfig[T client.Object] struct { + // ResourceType identifies the type of resource (configmap or secret). + ResourceType reload.ResourceType + + // NewResource creates a new instance of the resource type. + NewResource func() T + + // CreateChange creates a change event for the resource. + CreateChange func(resource T, eventType reload.EventType) reload.ResourceChange + + // CreatePredicates creates the predicates for this resource type. + CreatePredicates func(cfg *config.Config, hasher *reload.Hasher) predicate.Predicate +} + +// ResourceReconciler is a generic reconciler for ConfigMaps and Secrets. +type ResourceReconciler[T client.Object] struct { + ResourceReconcilerDeps + ResourceConfig[T] + + handler *ReloadHandler + initialized bool + initOnce sync.Once +} + +// NewResourceReconciler creates a new generic resource reconciler. +func NewResourceReconciler[T client.Object]( + deps ResourceReconcilerDeps, + cfg ResourceConfig[T], +) *ResourceReconciler[T] { + return &ResourceReconciler[T]{ + ResourceReconcilerDeps: deps, + ResourceConfig: cfg, + } +} + +// Reconcile handles resource events and triggers workload reloads as needed. 
+func (r *ResourceReconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + startTime := time.Now() + resourceType := string(r.ResourceType) + log := r.Log.WithValues(resourceType, req.NamespacedName) + + r.initOnce.Do(func() { + r.initialized = true + log.Info(resourceType + " controller initialized") + }) + + r.Collectors.RecordEventReceived("reconcile", resourceType) + + resource := r.NewResource() + if err := r.Client.Get(ctx, req.NamespacedName, resource); err != nil { + if errors.IsNotFound(err) { + return r.handleNotFound(ctx, req, log, startTime) + } + log.Error(err, "failed to get "+resourceType) + r.Collectors.RecordError("get_" + resourceType) + r.Collectors.RecordReconcile("error", time.Since(startTime)) + return ctrl.Result{}, err + } + + namespace := resource.GetNamespace() + if r.Config.IsNamespaceIgnored(namespace) { + log.V(1).Info("skipping " + resourceType + " in ignored namespace") + r.Collectors.RecordSkipped("ignored_namespace") + r.Collectors.RecordReconcile("success", time.Since(startTime)) + return ctrl.Result{}, nil + } + + result, err := r.reloadHandler().Process(ctx, req.Namespace, req.Name, r.ResourceType, + func(workloads []workload.Workload) []reload.ReloadDecision { + return r.ReloadService.Process(r.CreateChange(resource, reload.EventTypeUpdate), workloads) + }, log) + + r.recordReconcile(startTime, err) + return result, err +} + +func (r *ResourceReconciler[T]) handleNotFound( + ctx context.Context, + req ctrl.Request, + log logr.Logger, + startTime time.Time, +) (ctrl.Result, error) { + if r.Config.ReloadOnDelete { + r.Collectors.RecordEventReceived("delete", string(r.ResourceType)) + result, err := r.handleDelete(ctx, req, log) + r.recordReconcile(startTime, err) + return result, err + } + r.Collectors.RecordSkipped("not_found") + r.Collectors.RecordReconcile("success", time.Since(startTime)) + return ctrl.Result{}, nil +} + +func (r *ResourceReconciler[T]) handleDelete( + ctx context.Context, + req 
ctrl.Request, + log logr.Logger, +) (ctrl.Result, error) { + log.Info("handling " + string(r.ResourceType) + " deletion") + + // Create a minimal resource with just name/namespace for the delete event + resource := r.NewResource() + resource.SetName(req.Name) + resource.SetNamespace(req.Namespace) + + return r.reloadHandler().Process(ctx, req.Namespace, req.Name, r.ResourceType, + func(workloads []workload.Workload) []reload.ReloadDecision { + return r.ReloadService.Process(r.CreateChange(resource, reload.EventTypeDelete), workloads) + }, log) +} + +func (r *ResourceReconciler[T]) recordReconcile(startTime time.Time, err error) { + if err != nil { + r.Collectors.RecordReconcile("error", time.Since(startTime)) + } else { + r.Collectors.RecordReconcile("success", time.Since(startTime)) + } +} + +func (r *ResourceReconciler[T]) reloadHandler() *ReloadHandler { + if r.handler == nil { + r.handler = &ReloadHandler{ + Client: r.Client, + Lister: workload.NewLister(r.Client, r.Registry, r.Config), + ReloadService: r.ReloadService, + WebhookClient: r.WebhookClient, + Collectors: r.Collectors, + EventRecorder: r.EventRecorder, + Alerter: r.Alerter, + PauseHandler: r.PauseHandler, + } + } + return r.handler +} + +// Initialized returns whether the reconciler has been initialized. +func (r *ResourceReconciler[T]) Initialized() *bool { + return &r.initialized +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ResourceReconciler[T]) SetupWithManager(mgr ctrl.Manager, forObject T) error { + return ctrl.NewControllerManagedBy(mgr). + For(forObject). + WithEventFilter(BuildEventFilter( + r.CreatePredicates(r.Config, r.ReloadService.Hasher()), + r.Config, r.Initialized(), + )). 
+ Complete(r) +} diff --git a/internal/pkg/controller/retry.go b/internal/pkg/controller/retry.go index fec0daacf..e71dfa7f8 100644 --- a/internal/pkg/controller/retry.go +++ b/internal/pkg/controller/retry.go @@ -2,13 +2,10 @@ package controller import ( "context" - "maps" "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/workload" - batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -48,33 +45,31 @@ func UpdateObjectWithRetry( // UpdateWorkloadWithRetry updates a workload with exponential backoff on conflict. // On conflict, it re-fetches the object, re-applies the reload changes, and retries. -// For Jobs and CronJobs, special handling is applied: -// - Jobs are deleted and recreated with the same spec -// - CronJobs create a new Job from their template -// For Argo Rollouts, special handling is applied based on the rollout strategy annotation. +// Workloads use their UpdateStrategy to determine how they're updated: +// - UpdateStrategyPatch: uses strategic merge patch with retry (most workloads) +// - UpdateStrategyRecreate: deletes and recreates (Jobs) +// - UpdateStrategyCreateNew: creates a new resource from template (CronJobs) +// Deployments have additional pause handling for paused rollouts. 
func UpdateWorkloadWithRetry( ctx context.Context, c client.Client, reloadService *reload.Service, pauseHandler *reload.PauseHandler, - wl workload.WorkloadAccessor, + wl workload.Workload, resourceName string, resourceType reload.ResourceType, namespace string, hash string, autoReload bool, ) (bool, error) { - // Handle special workload types - switch wl.Kind() { - case workload.KindJob: - return updateJobWithRecreate(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) - case workload.KindCronJob: - return updateCronJobWithNewJob(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) - case workload.KindArgoRollout: - return updateArgoRollout(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) - case workload.KindDeployment: - return updateDeploymentWithPause(ctx, c, reloadService, pauseHandler, wl, resourceName, resourceType, namespace, hash, autoReload) + switch wl.UpdateStrategy() { + case workload.UpdateStrategyRecreate, workload.UpdateStrategyCreateNew: + return updateWithSpecialStrategy(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) default: + // UpdateStrategyPatch: use standard retry logic with special handling for Deployments + if wl.Kind() == workload.KindDeployment { + return updateDeploymentWithPause(ctx, c, reloadService, pauseHandler, wl, resourceName, resourceType, namespace, hash, autoReload) + } return updateStandardWorkload(ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload) } } @@ -85,7 +80,7 @@ func retryWithReload( ctx context.Context, c client.Client, reloadService *reload.Service, - wl workload.WorkloadAccessor, + wl workload.Workload, resourceName string, resourceType reload.ResourceType, namespace string, @@ -133,7 +128,7 @@ func updateStandardWorkload( ctx context.Context, c client.Client, reloadService *reload.Service, - wl workload.WorkloadAccessor, + wl workload.Workload, 
resourceName string, resourceType reload.ResourceType, namespace string, @@ -154,7 +149,7 @@ func updateDeploymentWithPause( c client.Client, reloadService *reload.Service, pauseHandler *reload.PauseHandler, - wl workload.WorkloadAccessor, + wl workload.Workload, resourceName string, resourceType reload.ResourceType, namespace string, @@ -176,99 +171,19 @@ func updateDeploymentWithPause( ) } -// updateJobWithRecreate deletes the Job and recreates it with the updated spec. -// Jobs are immutable after creation, so we must delete and recreate. -func updateJobWithRecreate( - ctx context.Context, - c client.Client, - reloadService *reload.Service, - wl workload.WorkloadAccessor, - resourceName string, - resourceType reload.ResourceType, - namespace string, - hash string, - autoReload bool, -) (bool, error) { - jobWl, ok := wl.(*workload.JobWorkload) - if !ok { - return false, nil - } - - // Apply reload changes to the workload - updated, err := reloadService.ApplyReload( - ctx, - wl, - resourceName, - resourceType, - namespace, - hash, - autoReload, - ) - if err != nil { - return false, err - } - - if !updated { - return false, nil - } - - oldJob := jobWl.GetJob() - newJob := oldJob.DeepCopy() - - // Delete the old job with background propagation - policy := metav1.DeletePropagationBackground - if err := c.Delete( - ctx, oldJob, &client.DeleteOptions{ - PropagationPolicy: &policy, - }, - ); err != nil { - if !errors.IsNotFound(err) { - return false, err - } - } - - // Clear fields that should not be specified when creating a new Job - newJob.ResourceVersion = "" - newJob.UID = "" - newJob.CreationTimestamp = metav1.Time{} - newJob.Status = batchv1.JobStatus{} - - // Remove problematic labels that are auto-generated - delete(newJob.Spec.Template.Labels, "controller-uid") - delete(newJob.Spec.Template.Labels, batchv1.ControllerUidLabel) - delete(newJob.Spec.Template.Labels, batchv1.JobNameLabel) - delete(newJob.Spec.Template.Labels, "job-name") - - // Remove the selector 
to allow it to be auto-generated - newJob.Spec.Selector = nil - - // Create the new job with same spec - if err := c.Create(ctx, newJob, client.FieldOwner(workload.FieldManager)); err != nil { - return false, err - } - - return true, nil -} - -// updateCronJobWithNewJob creates a new Job from the CronJob's template. -// CronJobs don't get updated directly; instead, a new Job is triggered. -func updateCronJobWithNewJob( +// updateWithSpecialStrategy handles workloads that don't use standard patch. +// It applies reload changes, then delegates to the workload's PerformSpecialUpdate. +func updateWithSpecialStrategy( ctx context.Context, c client.Client, reloadService *reload.Service, - wl workload.WorkloadAccessor, + wl workload.Workload, resourceName string, resourceType reload.ResourceType, namespace string, hash string, autoReload bool, ) (bool, error) { - cronJobWl, ok := wl.(*workload.CronJobWorkload) - if !ok { - return false, nil - } - - // Apply reload changes to get the updated spec updated, err := reloadService.ApplyReload( ctx, wl, @@ -286,71 +201,5 @@ func updateCronJobWithNewJob( return false, nil } - cronJob := cronJobWl.GetCronJob() - - annotations := make(map[string]string) - annotations["cronjob.kubernetes.io/instantiate"] = "manual" - maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations) - - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: cronJob.Name + "-", - Namespace: cronJob.Namespace, - Annotations: annotations, - Labels: cronJob.Spec.JobTemplate.Labels, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob")), - }, - }, - Spec: cronJob.Spec.JobTemplate.Spec, - } - - if err := c.Create(ctx, job, client.FieldOwner(workload.FieldManager)); err != nil { - return false, err - } - - savedAnnotations := maps.Clone(cronJob.Spec.JobTemplate.Spec.Template.Annotations) - - err = UpdateObjectWithRetry( - ctx, c, cronJob, func() (bool, error) { - if 
cronJob.Spec.JobTemplate.Spec.Template.Annotations == nil { - cronJob.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) - } - maps.Copy(cronJob.Spec.JobTemplate.Spec.Template.Annotations, savedAnnotations) - return true, nil - }, - ) - - if err != nil { - return false, err - } - - return true, nil -} - -// updateArgoRollout updates an Argo Rollout using its custom Update method. -// This handles the rollout strategy annotation to determine whether to do -// a standard rollout or set the restartAt field. -func updateArgoRollout( - ctx context.Context, - c client.Client, - reloadService *reload.Service, - wl workload.WorkloadAccessor, - resourceName string, - resourceType reload.ResourceType, - namespace string, - hash string, - autoReload bool, -) (bool, error) { - rolloutWl, ok := wl.(*workload.RolloutWorkload) - if !ok { - return false, nil - } - - return retryWithReload( - ctx, c, reloadService, wl, resourceName, resourceType, namespace, hash, autoReload, - func() error { - return rolloutWl.Update(ctx, c) - }, - ) + return wl.PerformSpecialUpdate(ctx, c) } diff --git a/internal/pkg/controller/retry_test.go b/internal/pkg/controller/retry_test.go index 97271f85e..8e43e0e21 100644 --- a/internal/pkg/controller/retry_test.go +++ b/internal/pkg/controller/retry_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr/testr" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/controller" "github.com/stakater/Reloader/internal/pkg/reload" @@ -22,14 +23,14 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { tests := []struct { name string object runtime.Object - workload func(runtime.Object) workload.WorkloadAccessor + workload func(runtime.Object) workload.Workload resourceType reload.ResourceType verify func(t *testing.T, c client.Client) }{ { name: "Deployment", object: testutil.NewDeployment("test-deployment", "default", nil), - workload: func(o runtime.Object) 
workload.WorkloadAccessor { + workload: func(o runtime.Object) workload.Workload { return workload.NewDeploymentWorkload(o.(*appsv1.Deployment)) }, resourceType: reload.ResourceTypeConfigMap, @@ -46,7 +47,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { { name: "DaemonSet", object: testutil.NewDaemonSet("test-daemonset", "default", nil), - workload: func(o runtime.Object) workload.WorkloadAccessor { + workload: func(o runtime.Object) workload.Workload { return workload.NewDaemonSetWorkload(o.(*appsv1.DaemonSet)) }, resourceType: reload.ResourceTypeSecret, @@ -63,7 +64,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { { name: "StatefulSet", object: testutil.NewStatefulSet("test-statefulset", "default", nil), - workload: func(o runtime.Object) workload.WorkloadAccessor { + workload: func(o runtime.Object) workload.Workload { return workload.NewStatefulSetWorkload(o.(*appsv1.StatefulSet)) }, resourceType: reload.ResourceTypeConfigMap, @@ -80,7 +81,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { { name: "Job", object: testutil.NewJob("test-job", "default"), - workload: func(o runtime.Object) workload.WorkloadAccessor { + workload: func(o runtime.Object) workload.Workload { return workload.NewJobWorkload(o.(*batchv1.Job)) }, resourceType: reload.ResourceTypeConfigMap, @@ -97,7 +98,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { { name: "CronJob", object: testutil.NewCronJob("test-cronjob", "default"), - workload: func(o runtime.Object) workload.WorkloadAccessor { + workload: func(o runtime.Object) workload.Workload { return workload.NewCronJobWorkload(o.(*batchv1.CronJob)) }, resourceType: reload.ResourceTypeSecret, @@ -120,7 +121,7 @@ func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { t.Run( tt.name, func(t *testing.T) { cfg := config.NewDefault() - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, testr.New(t)) fakeClient := fake.NewClientBuilder(). 
WithScheme(testutil.NewScheme()). @@ -201,7 +202,7 @@ func TestUpdateWorkloadWithRetry_Strategies(t *testing.T) { tt.name, func(t *testing.T) { cfg := config.NewDefault() cfg.ReloadStrategy = tt.strategy - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, testr.New(t)) deployment := testutil.NewDeployment("test-deployment", "default", nil) fakeClient := fake.NewClientBuilder(). @@ -246,7 +247,7 @@ func TestUpdateWorkloadWithRetry_Strategies(t *testing.T) { func TestUpdateWorkloadWithRetry_NoUpdate(t *testing.T) { cfg := config.NewDefault() - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, testr.New(t)) deployment := testutil.NewDeployment("test-deployment", "default", nil) deployment.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ @@ -306,7 +307,7 @@ func TestResourceTypeKind(t *testing.T) { func TestUpdateWorkloadWithRetry_PauseDeployment(t *testing.T) { cfg := config.NewDefault() - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, testr.New(t)) pauseHandler := reload.NewPauseHandler(cfg) deployment := testutil.NewDeployment( @@ -367,7 +368,7 @@ func TestUpdateWorkloadWithRetry_PauseDeployment(t *testing.T) { // TestUpdateWorkloadWithRetry_PauseWithExplicitAnnotation tests pause with explicit configmap annotation (no auto). func TestUpdateWorkloadWithRetry_PauseWithExplicitAnnotation(t *testing.T) { cfg := config.NewDefault() - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, testr.New(t)) pauseHandler := reload.NewPauseHandler(cfg) deployment := testutil.NewDeployment( @@ -428,7 +429,7 @@ func TestUpdateWorkloadWithRetry_PauseWithExplicitAnnotation(t *testing.T) { // TestUpdateWorkloadWithRetry_PauseWithSecretReload tests pause with Secret-triggered reload. 
func TestUpdateWorkloadWithRetry_PauseWithSecretReload(t *testing.T) { cfg := config.NewDefault() - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, testr.New(t)) pauseHandler := reload.NewPauseHandler(cfg) deployment := testutil.NewDeployment( @@ -485,7 +486,7 @@ func TestUpdateWorkloadWithRetry_PauseWithSecretReload(t *testing.T) { // TestUpdateWorkloadWithRetry_PauseWithAutoSecret tests pause with auto annotation + Secret change. func TestUpdateWorkloadWithRetry_PauseWithAutoSecret(t *testing.T) { cfg := config.NewDefault() - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, testr.New(t)) pauseHandler := reload.NewPauseHandler(cfg) deployment := testutil.NewDeployment( @@ -536,7 +537,7 @@ func TestUpdateWorkloadWithRetry_PauseWithAutoSecret(t *testing.T) { func TestUpdateWorkloadWithRetry_NoPauseWithoutAnnotation(t *testing.T) { cfg := config.NewDefault() - reloadService := reload.NewService(cfg) + reloadService := reload.NewService(cfg, testr.New(t)) pauseHandler := reload.NewPauseHandler(cfg) deployment := testutil.NewDeployment( diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index e7b2481bb..f20f25a4a 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ b/internal/pkg/controller/secret_reconciler.go @@ -1,10 +1,6 @@ package controller import ( - "context" - "sync" - "time" - "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" @@ -14,129 +10,57 @@ import ( "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // SecretReconciler watches Secrets and triggers 
workload reloads. -type SecretReconciler struct { - client.Client - Log logr.Logger - Config *config.Config - ReloadService *reload.Service - Registry *workload.Registry - Collectors *metrics.Collectors - EventRecorder *events.Recorder - WebhookClient *webhook.Client - Alerter alerting.Alerter - PauseHandler *reload.PauseHandler - - handler *ReloadHandler - initialized bool - initOnce sync.Once -} - -// Reconcile handles Secret events and triggers workload reloads as needed. -func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - startTime := time.Now() - log := r.Log.WithValues("secret", req.NamespacedName) - - r.initOnce.Do(func() { - r.initialized = true - log.Info("Secret controller initialized") - }) - - r.Collectors.RecordEventReceived("reconcile", "secret") - - var secret corev1.Secret - if err := r.Get(ctx, req.NamespacedName, &secret); err != nil { - if errors.IsNotFound(err) { - if r.Config.ReloadOnDelete { - r.Collectors.RecordEventReceived("delete", "secret") - result, err := r.handleDelete(ctx, req, log) - if err != nil { - r.Collectors.RecordReconcile("error", time.Since(startTime)) - } else { - r.Collectors.RecordReconcile("success", time.Since(startTime)) - } - return result, err - } - r.Collectors.RecordSkipped("not_found") - r.Collectors.RecordReconcile("success", time.Since(startTime)) - return ctrl.Result{}, nil - } - log.Error(err, "failed to get Secret") - r.Collectors.RecordError("get_secret") - r.Collectors.RecordReconcile("error", time.Since(startTime)) - return ctrl.Result{}, err - } - - if r.Config.IsNamespaceIgnored(secret.Namespace) { - log.V(1).Info("skipping Secret in ignored namespace") - r.Collectors.RecordSkipped("ignored_namespace") - r.Collectors.RecordReconcile("success", time.Since(startTime)) - return ctrl.Result{}, nil - } - - result, err := r.reloadHandler().Process(ctx, secret.Namespace, secret.Name, reload.ResourceTypeSecret, - func(workloads []workload.WorkloadAccessor) 
[]reload.ReloadDecision { - return r.ReloadService.Process(reload.SecretChange{ - Secret: &secret, - EventType: reload.EventTypeUpdate, - }, workloads) - }, log) - - if err != nil { - r.Collectors.RecordReconcile("error", time.Since(startTime)) - } else { - r.Collectors.RecordReconcile("success", time.Since(startTime)) - } - return result, err -} - -func (r *SecretReconciler) handleDelete(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) { - log.Info("handling Secret deletion") - - secret := &corev1.Secret{} - secret.Name = req.Name - secret.Namespace = req.Namespace - - return r.reloadHandler().Process(ctx, req.Namespace, req.Name, reload.ResourceTypeSecret, - func(workloads []workload.WorkloadAccessor) []reload.ReloadDecision { - return r.ReloadService.Process(reload.SecretChange{ - Secret: secret, - EventType: reload.EventTypeDelete, - }, workloads) - }, log) -} - -func (r *SecretReconciler) reloadHandler() *ReloadHandler { - if r.handler == nil { - r.handler = &ReloadHandler{ - Client: r.Client, - Lister: workload.NewLister(r.Client, r.Registry, r.Config), - ReloadService: r.ReloadService, - WebhookClient: r.WebhookClient, - Collectors: r.Collectors, - EventRecorder: r.EventRecorder, - Alerter: r.Alerter, - PauseHandler: r.PauseHandler, - } - } - return r.handler +type SecretReconciler = ResourceReconciler[*corev1.Secret] + +// NewSecretReconciler creates a new SecretReconciler with the given dependencies. 
+func NewSecretReconciler( + c client.Client, + log logr.Logger, + cfg *config.Config, + reloadService *reload.Service, + registry *workload.Registry, + collectors *metrics.Collectors, + eventRecorder *events.Recorder, + webhookClient *webhook.Client, + alerter alerting.Alerter, + pauseHandler *reload.PauseHandler, +) *SecretReconciler { + return NewResourceReconciler( + ResourceReconcilerDeps{ + Client: c, + Log: log, + Config: cfg, + ReloadService: reloadService, + Registry: registry, + Collectors: collectors, + EventRecorder: eventRecorder, + WebhookClient: webhookClient, + Alerter: alerter, + PauseHandler: pauseHandler, + }, + ResourceConfig[*corev1.Secret]{ + ResourceType: reload.ResourceTypeSecret, + NewResource: func() *corev1.Secret { return &corev1.Secret{} }, + CreateChange: func(s *corev1.Secret, eventType reload.EventType) reload.ResourceChange { + return reload.SecretChange{Secret: s, EventType: eventType} + }, + CreatePredicates: func(cfg *config.Config, hasher *reload.Hasher) predicate.Predicate { + return reload.SecretPredicates(cfg, hasher) + }, + }, + ) } -// SetupWithManager sets up the controller with the Manager. -func (r *SecretReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&corev1.Secret{}). - WithEventFilter(BuildEventFilter( - reload.SecretPredicates(r.Config, r.ReloadService.Hasher()), - r.Config, &r.initialized, - )). - Complete(r) +// SetupSecretReconciler sets up a Secret reconciler with the manager. 
+func SetupSecretReconciler(mgr ctrl.Manager, r *SecretReconciler) error { + return r.SetupWithManager(mgr, &corev1.Secret{}) } var _ reconcile.Reconciler = &SecretReconciler{} diff --git a/internal/pkg/controller/test_helpers_test.go b/internal/pkg/controller/test_helpers_test.go index 916696ab6..019be7898 100644 --- a/internal/pkg/controller/test_helpers_test.go +++ b/internal/pkg/controller/test_helpers_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" "github.com/go-logr/logr/testr" "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" @@ -21,56 +22,77 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -// newConfigMapReconciler creates a ConfigMapReconciler for testing. -func newConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.ConfigMapReconciler { - t.Helper() - fakeClient := fake.NewClientBuilder(). - WithScheme(testutil.NewScheme()). - WithRuntimeObjects(objects...). - Build() +// testDeps holds shared test dependencies. +type testDeps struct { + client *fake.ClientBuilder + log logr.Logger + cfg *config.Config + reloadService *reload.Service + registry *workload.Registry + collectors *metrics.Collectors + eventRecorder *events.Recorder + webhookClient *webhook.Client + alerter alerting.Alerter +} +// newTestDeps creates shared test dependencies for reconciler tests. +func newTestDeps(t *testing.T, cfg *config.Config, objects ...runtime.Object) testDeps { + t.Helper() + log := testr.New(t) collectors := metrics.NewCollectors() - - return &controller.ConfigMapReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - ReloadService: reload.NewService(cfg), - Registry: workload.NewRegistry(workload.RegistryOptions{ - ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, - DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + return testDeps{ + client: fake.NewClientBuilder(). + WithScheme(testutil.NewScheme()). 
+ WithRuntimeObjects(objects...), + log: log, + cfg: cfg, + reloadService: reload.NewService(cfg, log), + registry: workload.NewRegistry(workload.RegistryOptions{ + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + RolloutStrategyAnnotation: cfg.Annotations.RolloutStrategy, }), - Collectors: &collectors, - EventRecorder: events.NewRecorder(nil), - WebhookClient: webhook.NewClient("", testr.New(t)), - Alerter: &alerting.NoOpAlerter{}, + collectors: &collectors, + eventRecorder: events.NewRecorder(nil), + webhookClient: webhook.NewClient("", log), + alerter: &alerting.NoOpAlerter{}, } } +// newConfigMapReconciler creates a ConfigMapReconciler for testing. +func newConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.ConfigMapReconciler { + t.Helper() + deps := newTestDeps(t, cfg, objects...) + return controller.NewConfigMapReconciler( + deps.client.Build(), + deps.log, + deps.cfg, + deps.reloadService, + deps.registry, + deps.collectors, + deps.eventRecorder, + deps.webhookClient, + deps.alerter, + nil, + ) +} + // newSecretReconciler creates a SecretReconciler for testing. func newSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Object) *controller.SecretReconciler { t.Helper() - fakeClient := fake.NewClientBuilder(). - WithScheme(testutil.NewScheme()). - WithRuntimeObjects(objects...). - Build() - - collectors := metrics.NewCollectors() - - return &controller.SecretReconciler{ - Client: fakeClient, - Log: testr.New(t), - Config: cfg, - ReloadService: reload.NewService(cfg), - Registry: workload.NewRegistry(workload.RegistryOptions{ - ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, - DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, - }), - Collectors: &collectors, - EventRecorder: events.NewRecorder(nil), - WebhookClient: webhook.NewClient("", testr.New(t)), - Alerter: &alerting.NoOpAlerter{}, - } + deps := newTestDeps(t, cfg, objects...) 
+ return controller.NewSecretReconciler( + deps.client.Build(), + deps.log, + deps.cfg, + deps.reloadService, + deps.registry, + deps.collectors, + deps.eventRecorder, + deps.webhookClient, + deps.alerter, + nil, + ) } // newNamespaceReconciler creates a NamespaceReconciler for testing. diff --git a/internal/pkg/http/client.go b/internal/pkg/http/client.go new file mode 100644 index 000000000..c1ca613df --- /dev/null +++ b/internal/pkg/http/client.go @@ -0,0 +1,69 @@ +// Package http provides shared HTTP client functionality. +package http + +import ( + "net/http" + "net/url" + "time" +) + +const ( + // DefaultTimeout is the default HTTP client timeout. + DefaultTimeout = 30 * time.Second + + // AlertingTimeout is the shorter timeout used for alerting. + AlertingTimeout = 10 * time.Second +) + +// ClientConfig configures an HTTP client. +type ClientConfig struct { + // Timeout for HTTP requests. + Timeout time.Duration + + // ProxyURL is an optional proxy URL. + ProxyURL string + + // MaxIdleConns controls the maximum number of idle connections. + MaxIdleConns int + + // MaxIdleConnsPerHost controls the maximum idle connections per host. + MaxIdleConnsPerHost int + + // IdleConnTimeout is the maximum time an idle connection remains open. + IdleConnTimeout time.Duration +} + +// DefaultConfig returns the default HTTP client configuration. +func DefaultConfig() ClientConfig { + return ClientConfig{ + Timeout: DefaultTimeout, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + } +} + +// NewClient creates a new HTTP client with the given configuration. 
+func NewClient(cfg ClientConfig) *http.Client { + transport := &http.Transport{ + MaxIdleConns: cfg.MaxIdleConns, + MaxIdleConnsPerHost: cfg.MaxIdleConnsPerHost, + IdleConnTimeout: cfg.IdleConnTimeout, + } + + if cfg.ProxyURL != "" { + if proxy, err := url.Parse(cfg.ProxyURL); err == nil { + transport.Proxy = http.ProxyURL(proxy) + } + } + + return &http.Client{ + Transport: transport, + Timeout: cfg.Timeout, + } +} + +// NewDefaultClient creates an HTTP client with default configuration. +func NewDefaultClient() *http.Client { + return NewClient(DefaultConfig()) +} diff --git a/internal/pkg/http/client_test.go b/internal/pkg/http/client_test.go new file mode 100644 index 000000000..2b937b192 --- /dev/null +++ b/internal/pkg/http/client_test.go @@ -0,0 +1,142 @@ +package http + +import ( + "net/http" + "testing" + "time" +) + +func TestDefaultConfig(t *testing.T) { + cfg := DefaultConfig() + + if cfg.Timeout != DefaultTimeout { + t.Errorf("expected timeout %v, got %v", DefaultTimeout, cfg.Timeout) + } + if cfg.MaxIdleConns != 100 { + t.Errorf("expected MaxIdleConns 100, got %d", cfg.MaxIdleConns) + } + if cfg.MaxIdleConnsPerHost != 10 { + t.Errorf("expected MaxIdleConnsPerHost 10, got %d", cfg.MaxIdleConnsPerHost) + } + if cfg.IdleConnTimeout != 90*time.Second { + t.Errorf("expected IdleConnTimeout 90s, got %v", cfg.IdleConnTimeout) + } +} + +func TestNewClient(t *testing.T) { + tests := []struct { + name string + cfg ClientConfig + wantNil bool + }{ + { + name: "default config", + cfg: DefaultConfig(), + wantNil: false, + }, + { + name: "custom timeout", + cfg: ClientConfig{ + Timeout: 5 * time.Second, + MaxIdleConns: 50, + MaxIdleConnsPerHost: 5, + IdleConnTimeout: 30 * time.Second, + }, + wantNil: false, + }, + { + name: "with proxy", + cfg: ClientConfig{ + Timeout: DefaultTimeout, + ProxyURL: "http://proxy.example.com:8080", + MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + }, + wantNil: false, + }, + { + name: "with 
invalid proxy URL", + cfg: ClientConfig{ + Timeout: DefaultTimeout, + ProxyURL: "://invalid", + MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + }, + wantNil: false, + }, + { + name: "zero values", + cfg: ClientConfig{ + Timeout: 0, + }, + wantNil: false, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + client := NewClient(tt.cfg) + + if tt.wantNil && client != nil { + t.Error("expected nil client") + } + if !tt.wantNil && client == nil { + t.Error("expected non-nil client") + } + + if client != nil { + if client.Timeout != tt.cfg.Timeout { + t.Errorf("expected timeout %v, got %v", tt.cfg.Timeout, client.Timeout) + } + + transport, ok := client.Transport.(*http.Transport) + if !ok { + t.Fatal("expected *http.Transport") + } + if transport.MaxIdleConns != tt.cfg.MaxIdleConns { + t.Errorf("expected MaxIdleConns %d, got %d", tt.cfg.MaxIdleConns, transport.MaxIdleConns) + } + if transport.MaxIdleConnsPerHost != tt.cfg.MaxIdleConnsPerHost { + t.Errorf("expected MaxIdleConnsPerHost %d, got %d", tt.cfg.MaxIdleConnsPerHost, transport.MaxIdleConnsPerHost) + } + } + }, + ) + } +} + +func TestNewDefaultClient(t *testing.T) { + client := NewDefaultClient() + + if client == nil { + t.Fatal("expected non-nil client") + } + + if client.Timeout != DefaultTimeout { + t.Errorf("expected timeout %v, got %v", DefaultTimeout, client.Timeout) + } + + transport, ok := client.Transport.(*http.Transport) + if !ok { + t.Fatal("expected *http.Transport") + } + + if transport.MaxIdleConns != 100 { + t.Errorf("expected MaxIdleConns 100, got %d", transport.MaxIdleConns) + } + if transport.MaxIdleConnsPerHost != 10 { + t.Errorf("expected MaxIdleConnsPerHost 10, got %d", transport.MaxIdleConnsPerHost) + } +} + +func TestConstants(t *testing.T) { + if DefaultTimeout != 30*time.Second { + t.Errorf("expected DefaultTimeout 30s, got %v", DefaultTimeout) + } + if AlertingTimeout != 10*time.Second { + t.Errorf("expected 
AlertingTimeout 10s, got %v", AlertingTimeout) + } +} diff --git a/internal/pkg/metadata/metadata.go b/internal/pkg/metadata/metadata.go index 616d987a5..0e1327618 100644 --- a/internal/pkg/metadata/metadata.go +++ b/internal/pkg/metadata/metadata.go @@ -20,8 +20,6 @@ const ( ConfigMapLabelKey = "reloader.stakater.com/meta-info" // ConfigMapLabelValue is the label value for the metadata ConfigMap. ConfigMapLabelValue = "reloader-oss" - // FieldManager is the field manager name for server-side apply. - FieldManager = "reloader" // Environment variables for deployment info. EnvReloaderNamespace = "RELOADER_NAMESPACE" diff --git a/internal/pkg/metadata/publisher.go b/internal/pkg/metadata/publisher.go index b92cc8c71..385dd270f 100644 --- a/internal/pkg/metadata/publisher.go +++ b/internal/pkg/metadata/publisher.go @@ -7,6 +7,7 @@ import ( "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" @@ -52,7 +53,7 @@ func (p *Publisher) Publish(ctx context.Context) error { return fmt.Errorf("failed to get existing meta info configmap: %w", err) } p.log.Info("Creating meta info configmap") - if err := p.client.Create(ctx, configMap, client.FieldOwner(FieldManager)); err != nil { + if err := p.client.Create(ctx, configMap, client.FieldOwner(workload.FieldManager)); err != nil { return fmt.Errorf("failed to create meta info configmap: %w", err) } p.log.Info("Meta info configmap created successfully") @@ -62,7 +63,7 @@ func (p *Publisher) Publish(ctx context.Context) error { p.log.Info("Meta info configmap already exists, updating it") existing.Data = configMap.Data existing.Labels = configMap.Labels - if err := p.client.Update(ctx, existing, client.FieldOwner(FieldManager)); err != nil { + if err := p.client.Update(ctx, existing, client.FieldOwner(workload.FieldManager)); err != nil { return 
fmt.Errorf("failed to update meta info configmap: %w", err) } p.log.Info("Meta info configmap updated successfully") diff --git a/internal/pkg/reload/decision.go b/internal/pkg/reload/decision.go index 6002b3b26..625925828 100644 --- a/internal/pkg/reload/decision.go +++ b/internal/pkg/reload/decision.go @@ -7,7 +7,7 @@ import ( // ReloadDecision contains the result of evaluating whether to reload a workload. type ReloadDecision struct { // Workload is the workload accessor. - Workload workload.WorkloadAccessor + Workload workload.Workload // ShouldReload indicates whether the workload should be reloaded. ShouldReload bool // AutoReload indicates if this is an auto-reload. diff --git a/internal/pkg/reload/pause.go b/internal/pkg/reload/pause.go index e6e33366a..78194e781 100644 --- a/internal/pkg/reload/pause.go +++ b/internal/pkg/reload/pause.go @@ -20,7 +20,7 @@ func NewPauseHandler(cfg *config.Config) *PauseHandler { } // ShouldPause checks if a deployment should be paused after reload. -func (h *PauseHandler) ShouldPause(wl workload.WorkloadAccessor) bool { +func (h *PauseHandler) ShouldPause(wl workload.Workload) bool { if wl.Kind() != workload.KindDeployment { return false } @@ -35,7 +35,7 @@ func (h *PauseHandler) ShouldPause(wl workload.WorkloadAccessor) bool { } // GetPausePeriod returns the configured pause period for a workload. -func (h *PauseHandler) GetPausePeriod(wl workload.WorkloadAccessor) (time.Duration, error) { +func (h *PauseHandler) GetPausePeriod(wl workload.Workload) (time.Duration, error) { annotations := wl.GetAnnotations() if annotations == nil { return 0, fmt.Errorf("no annotations on workload") @@ -50,7 +50,7 @@ func (h *PauseHandler) GetPausePeriod(wl workload.WorkloadAccessor) (time.Durati } // ApplyPause pauses a deployment and sets the paused-at annotation. 
-func (h *PauseHandler) ApplyPause(wl workload.WorkloadAccessor) error { +func (h *PauseHandler) ApplyPause(wl workload.Workload) error { deployWl, ok := wl.(*workload.DeploymentWorkload) if !ok { return fmt.Errorf("workload is not a deployment") diff --git a/internal/pkg/reload/pause_test.go b/internal/pkg/reload/pause_test.go index 74e8162b2..49fea4a51 100644 --- a/internal/pkg/reload/pause_test.go +++ b/internal/pkg/reload/pause_test.go @@ -16,7 +16,7 @@ func TestPauseHandler_ShouldPause(t *testing.T) { tests := []struct { name string - workload workload.WorkloadAccessor + workload workload.Workload want bool }{ { @@ -66,7 +66,7 @@ func TestPauseHandler_GetPausePeriod(t *testing.T) { tests := []struct { name string - workload workload.WorkloadAccessor + workload workload.Workload wantPeriod time.Duration wantErr bool }{ diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go index 964608973..ae2e85f26 100644 --- a/internal/pkg/reload/service.go +++ b/internal/pkg/reload/service.go @@ -3,8 +3,10 @@ package reload import ( "context" "encoding/json" + "fmt" "time" + "github.com/go-logr/logr" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/workload" corev1 "k8s.io/api/core/v1" @@ -13,15 +15,17 @@ import ( // Service orchestrates the reload logic for ConfigMaps and Secrets. type Service struct { cfg *config.Config + log logr.Logger hasher *Hasher matcher *Matcher strategy Strategy } // NewService creates a new reload Service with the given configuration. -func NewService(cfg *config.Config) *Service { +func NewService(cfg *config.Config, log logr.Logger) *Service { return &Service{ cfg: cfg, + log: log, hasher: NewHasher(), matcher: NewMatcher(cfg), strategy: NewStrategy(cfg), @@ -29,7 +33,7 @@ func NewService(cfg *config.Config) *Service { } // Process evaluates all workloads to determine which should be reloaded. 
-func (s *Service) Process(change ResourceChange, workloads []workload.WorkloadAccessor) []ReloadDecision { +func (s *Service) Process(change ResourceChange, workloads []workload.Workload) []ReloadDecision { if change.IsNil() { return nil } @@ -59,7 +63,7 @@ func (s *Service) processResource( resourceAnnotations map[string]string, resourceType ResourceType, hash string, - workloads []workload.WorkloadAccessor, + workloads []workload.Workload, ) []ReloadDecision { var decisions []ReloadDecision @@ -96,13 +100,15 @@ func (s *Service) processResource( shouldReload = false } - decisions = append(decisions, ReloadDecision{ - Workload: wl, - ShouldReload: shouldReload, - AutoReload: matchResult.AutoReload, - Reason: matchResult.Reason, - Hash: hash, - }) + decisions = append( + decisions, ReloadDecision{ + Workload: wl, + ShouldReload: shouldReload, + AutoReload: matchResult.AutoReload, + Reason: matchResult.Reason, + Hash: hash, + }, + ) } return decisions @@ -124,7 +130,7 @@ func (s *Service) shouldProcessEvent(eventType EventType) bool { // ApplyReload applies the reload strategy to a workload. 
func (s *Service) ApplyReload( ctx context.Context, - wl workload.WorkloadAccessor, + wl workload.Workload, resourceName string, resourceType ResourceType, namespace string, @@ -149,20 +155,23 @@ func (s *Service) ApplyReload( } if updated { - s.setAttributionAnnotation(wl, resourceName, resourceType, namespace, hash, container) + // Attribution annotation is informational; log errors but don't fail reloads + if err := s.setAttributionAnnotation(wl, resourceName, resourceType, namespace, hash, container); err != nil { + s.log.V(1).Info("failed to set attribution annotation", "error", err, "workload", wl.GetName()) + } } return updated, nil } func (s *Service) setAttributionAnnotation( - wl workload.WorkloadAccessor, + wl workload.Workload, resourceName string, resourceType ResourceType, namespace string, hash string, container *corev1.Container, -) { +) error { containerName := "" if container != nil { containerName = container.Name @@ -179,14 +188,15 @@ func (s *Service) setAttributionAnnotation( sourceJSON, err := json.Marshal(source) if err != nil { - return + return fmt.Errorf("failed to marshal reload source: %w", err) } wl.SetPodTemplateAnnotation(s.cfg.Annotations.LastReloadedFrom, string(sourceJSON)) + return nil } func (s *Service) findTargetContainer( - wl workload.WorkloadAccessor, + wl workload.Workload, resourceName string, resourceType ResourceType, autoReload bool, diff --git a/internal/pkg/reload/service_test.go b/internal/pkg/reload/service_test.go index dae653f72..5a13f0252 100644 --- a/internal/pkg/reload/service_test.go +++ b/internal/pkg/reload/service_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr/testr" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/testutil" "github.com/stakater/Reloader/internal/pkg/workload" @@ -13,7 +14,7 @@ import ( func TestService_ProcessConfigMap_AutoReload(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := 
NewService(cfg, testr.New(t)) // Create a deployment with auto annotation that uses the configmap deploy := testutil.NewDeployment( @@ -34,7 +35,7 @@ func TestService_ProcessConfigMap_AutoReload(t *testing.T) { }, } - workloads := []workload.WorkloadAccessor{ + workloads := []workload.Workload{ workload.NewDeploymentWorkload(deploy), } @@ -74,7 +75,7 @@ func TestService_ProcessConfigMap_AutoReload(t *testing.T) { func TestService_ProcessConfigMap_ExplicitAnnotation(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment( "test-deploy", "default", map[string]string{ @@ -82,7 +83,7 @@ func TestService_ProcessConfigMap_ExplicitAnnotation(t *testing.T) { }, ) - workloads := []workload.WorkloadAccessor{ + workloads := []workload.Workload{ workload.NewDeploymentWorkload(deploy), } @@ -118,7 +119,7 @@ func TestService_ProcessConfigMap_ExplicitAnnotation(t *testing.T) { func TestService_ProcessConfigMap_IgnoredResource(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Create a deployment with auto annotation deploy := testutil.NewDeployment( @@ -139,7 +140,7 @@ func TestService_ProcessConfigMap_IgnoredResource(t *testing.T) { }, } - workloads := []workload.WorkloadAccessor{ + workloads := []workload.Workload{ workload.NewDeploymentWorkload(deploy), } @@ -174,7 +175,7 @@ func TestService_ProcessConfigMap_IgnoredResource(t *testing.T) { func TestService_ProcessSecret_AutoReload(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Create a deployment with auto annotation that uses the secret deploy := testutil.NewDeployment( @@ -193,7 +194,7 @@ func TestService_ProcessSecret_AutoReload(t *testing.T) { }, } - workloads := []workload.WorkloadAccessor{ + workloads := []workload.Workload{ workload.NewDeploymentWorkload(deploy), } @@ -230,7 +231,7 @@ func 
TestService_ProcessSecret_AutoReload(t *testing.T) { func TestService_ProcessConfigMap_DeleteEvent(t *testing.T) { cfg := config.NewDefault() cfg.ReloadOnDelete = true - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Create a deployment with explicit configmap annotation deploy := testutil.NewDeployment( @@ -239,7 +240,7 @@ func TestService_ProcessConfigMap_DeleteEvent(t *testing.T) { }, ) - workloads := []workload.WorkloadAccessor{ + workloads := []workload.Workload{ workload.NewDeploymentWorkload(deploy), } @@ -274,7 +275,7 @@ func TestService_ProcessConfigMap_DeleteEvent(t *testing.T) { func TestService_ProcessConfigMap_DeleteEventDisabled(t *testing.T) { cfg := config.NewDefault() cfg.ReloadOnDelete = false // Disabled by default - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment( "test-deploy", "default", map[string]string{ @@ -282,7 +283,7 @@ func TestService_ProcessConfigMap_DeleteEventDisabled(t *testing.T) { }, ) - workloads := []workload.WorkloadAccessor{ + workloads := []workload.Workload{ workload.NewDeploymentWorkload(deploy), } @@ -309,7 +310,7 @@ func TestService_ProcessConfigMap_DeleteEventDisabled(t *testing.T) { func TestService_ApplyReload_EnvVarStrategy(t *testing.T) { cfg := config.NewDefault() cfg.ReloadStrategy = config.ReloadStrategyEnvVars - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment("test-deploy", "default", nil) accessor := workload.NewDeploymentWorkload(deploy) @@ -353,7 +354,7 @@ func TestService_ApplyReload_EnvVarStrategy(t *testing.T) { func TestService_ApplyReload_AnnotationStrategy(t *testing.T) { cfg := config.NewDefault() cfg.ReloadStrategy = config.ReloadStrategyAnnotations - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment("test-deploy", "default", nil) accessor := workload.NewDeploymentWorkload(deploy) @@ -379,7 +380,7 @@ func 
TestService_ApplyReload_AnnotationStrategy(t *testing.T) { func TestService_ApplyReload_EnvVarDeletion(t *testing.T) { cfg := config.NewDefault() cfg.ReloadStrategy = config.ReloadStrategyEnvVars - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment("test-deploy", "default", nil) // Pre-add an env var @@ -425,7 +426,7 @@ func TestService_ApplyReload_EnvVarDeletion(t *testing.T) { func TestService_ApplyReload_NoChangeIfSameHash(t *testing.T) { cfg := config.NewDefault() cfg.ReloadStrategy = config.ReloadStrategyEnvVars - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment("test-deploy", "default", nil) // Pre-add env var with same hash @@ -448,7 +449,7 @@ func TestService_ApplyReload_NoChangeIfSameHash(t *testing.T) { func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Create multiple workloads deploy1 := testutil.NewDeployment( @@ -494,7 +495,7 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { }, ) - workloads := []workload.WorkloadAccessor{ + workloads := []workload.Workload{ workload.NewDeploymentWorkload(deploy1), workload.NewDeploymentWorkload(deploy2), workload.NewDeploymentWorkload(deploy3), @@ -535,7 +536,7 @@ func TestService_ProcessConfigMap_MultipleWorkloads(t *testing.T) { func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Create deployments in different namespaces deploy1 := testutil.NewDeployment( @@ -574,7 +575,7 @@ func TestService_ProcessConfigMap_DifferentNamespaces(t *testing.T) { }, } - workloads := []workload.WorkloadAccessor{ + workloads := []workload.Workload{ workload.NewDeploymentWorkload(deploy1), workload.NewDeploymentWorkload(deploy2), } @@ -610,7 +611,7 @@ func TestService_ProcessConfigMap_DifferentNamespaces(t 
*testing.T) { func TestService_Hasher(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) hasher := svc.Hasher() if hasher == nil { @@ -650,7 +651,7 @@ func TestService_shouldProcessEvent(t *testing.T) { cfg := config.NewDefault() cfg.ReloadOnCreate = tt.reloadOnCreate cfg.ReloadOnDelete = tt.reloadOnDelete - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) result := svc.shouldProcessEvent(tt.eventType) if result != tt.expected { @@ -663,7 +664,7 @@ func TestService_shouldProcessEvent(t *testing.T) { func TestService_findVolumeUsingResource_ConfigMap(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) tests := []struct { name string @@ -749,7 +750,7 @@ func TestService_findVolumeUsingResource_ConfigMap(t *testing.T) { func TestService_findVolumeUsingResource_Secret(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) tests := []struct { name string @@ -824,7 +825,7 @@ func TestService_findVolumeUsingResource_Secret(t *testing.T) { func TestService_findContainerWithVolumeMount(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) tests := []struct { name string @@ -908,7 +909,7 @@ func TestService_findContainerWithVolumeMount(t *testing.T) { func TestService_findContainerWithEnvRef_ConfigMap(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) tests := []struct { name string @@ -1010,7 +1011,7 @@ func TestService_findContainerWithEnvRef_ConfigMap(t *testing.T) { func TestService_findContainerWithEnvRef_Secret(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) tests := []struct { name string @@ -1099,7 +1100,7 @@ func TestService_findContainerWithEnvRef_Secret(t *testing.T) { func TestService_findTargetContainer_AutoReload(t 
*testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Test with autoReload=true and volume mount deploy := testutil.NewDeployment("test", "default", nil) @@ -1135,7 +1136,7 @@ func TestService_findTargetContainer_AutoReload(t *testing.T) { func TestService_findTargetContainer_AutoReload_EnvRef(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Test with autoReload=true and env ref (no volume) deploy := testutil.NewDeployment("test", "default", nil) @@ -1173,7 +1174,7 @@ func TestService_findTargetContainer_AutoReload_EnvRef(t *testing.T) { func TestService_findTargetContainer_AutoReload_InitContainer(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Test with autoReload=true where init container uses the volume deploy := testutil.NewDeployment("test", "default", nil) @@ -1216,7 +1217,7 @@ func TestService_findTargetContainer_AutoReload_InitContainer(t *testing.T) { func TestService_findTargetContainer_AutoReload_InitContainerEnvRef(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // Test with autoReload=true where init container has env ref deploy := testutil.NewDeployment("test", "default", nil) @@ -1257,7 +1258,7 @@ func TestService_findTargetContainer_AutoReload_InitContainerEnvRef(t *testing.T func TestService_findTargetContainer_NoContainers(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.Containers = []corev1.Container{} @@ -1271,7 +1272,7 @@ func TestService_findTargetContainer_NoContainers(t *testing.T) { func TestService_findTargetContainer_NonAutoReload(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := 
testutil.NewDeployment("test", "default", nil) deploy.Spec.Template.Spec.Containers = []corev1.Container{ @@ -1292,7 +1293,7 @@ func TestService_findTargetContainer_NonAutoReload(t *testing.T) { func TestService_findTargetContainer_AutoReload_FallbackToFirst(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) // autoReload=true but no matching volume or env ref - should fallback to first container deploy := testutil.NewDeployment("test", "default", nil) @@ -1313,10 +1314,10 @@ func TestService_findTargetContainer_AutoReload_FallbackToFirst(t *testing.T) { func TestService_ProcessNilChange(t *testing.T) { cfg := config.NewDefault() - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment("test", "default", nil) - workloads := []workload.WorkloadAccessor{workload.NewDeploymentWorkload(deploy)} + workloads := []workload.Workload{workload.NewDeploymentWorkload(deploy)} // Test with nil ConfigMap change := ConfigMapChange{ @@ -1333,14 +1334,14 @@ func TestService_ProcessNilChange(t *testing.T) { func TestService_ProcessCreateEventDisabled(t *testing.T) { cfg := config.NewDefault() cfg.ReloadOnCreate = false - svc := NewService(cfg) + svc := NewService(cfg, testr.New(t)) deploy := testutil.NewDeployment( "test", "default", map[string]string{ "reloader.stakater.com/auto": "true", }, ) - workloads := []workload.WorkloadAccessor{workload.NewDeploymentWorkload(deploy)} + workloads := []workload.Workload{workload.NewDeploymentWorkload(deploy)} cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "default"}, diff --git a/internal/pkg/webhook/webhook.go b/internal/pkg/webhook/webhook.go index d5b3c4cd7..ea2507325 100644 --- a/internal/pkg/webhook/webhook.go +++ b/internal/pkg/webhook/webhook.go @@ -11,6 +11,7 @@ import ( "time" "github.com/go-logr/logr" + httputil "github.com/stakater/Reloader/internal/pkg/http" ) // Payload represents the data sent to 
the webhook endpoint. @@ -43,11 +44,9 @@ type Client struct { // NewClient creates a new webhook client. func NewClient(url string, log logr.Logger) *Client { return &Client{ - httpClient: &http.Client{ - Timeout: 30 * time.Second, - }, - url: url, - log: log, + httpClient: httputil.NewDefaultClient(), + url: url, + log: log, } } diff --git a/internal/pkg/workload/base.go b/internal/pkg/workload/base.go new file mode 100644 index 000000000..71576b0ca --- /dev/null +++ b/internal/pkg/workload/base.go @@ -0,0 +1,188 @@ +package workload + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// PodTemplateAccessor provides access to a workload's pod template. +// Each workload type implements this to provide access to its specific template location. +type PodTemplateAccessor interface { + // GetPodTemplateSpec returns a pointer to the pod template spec. + // Returns nil if the workload doesn't have a pod template + GetPodTemplateSpec() *corev1.PodTemplateSpec + + // GetObjectMeta returns the workload's object metadata. + GetObjectMeta() *metav1.ObjectMeta +} + +// BaseWorkload provides common functionality for all workload types. +// It uses composition with a PodTemplateAccessor to access type-specific fields. +type BaseWorkload[T client.Object] struct { + object T + original T + accessor PodTemplateAccessor + kind Kind +} + +// NewBaseWorkload creates a new BaseWorkload with the given object and accessor. 
+func NewBaseWorkload[T client.Object](obj T, original T, accessor PodTemplateAccessor, kind Kind) *BaseWorkload[T] { + return &BaseWorkload[T]{ + object: obj, + original: original, + accessor: accessor, + kind: kind, + } +} + +func (b *BaseWorkload[T]) Kind() Kind { + return b.kind +} + +func (b *BaseWorkload[T]) GetObject() client.Object { + return b.object +} + +func (b *BaseWorkload[T]) GetName() string { + return b.accessor.GetObjectMeta().Name +} + +func (b *BaseWorkload[T]) GetNamespace() string { + return b.accessor.GetObjectMeta().Namespace +} + +func (b *BaseWorkload[T]) GetAnnotations() map[string]string { + return b.accessor.GetObjectMeta().Annotations +} + +func (b *BaseWorkload[T]) GetPodTemplateAnnotations() map[string]string { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + if template.Annotations == nil { + template.Annotations = make(map[string]string) + } + return template.Annotations +} + +func (b *BaseWorkload[T]) SetPodTemplateAnnotation(key, value string) { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return + } + if template.Annotations == nil { + template.Annotations = make(map[string]string) + } + template.Annotations[key] = value +} + +func (b *BaseWorkload[T]) GetContainers() []corev1.Container { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + return template.Spec.Containers +} + +func (b *BaseWorkload[T]) SetContainers(containers []corev1.Container) { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return + } + template.Spec.Containers = containers +} + +func (b *BaseWorkload[T]) GetInitContainers() []corev1.Container { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + return template.Spec.InitContainers +} + +func (b *BaseWorkload[T]) SetInitContainers(containers []corev1.Container) { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return + } + 
template.Spec.InitContainers = containers +} + +func (b *BaseWorkload[T]) GetVolumes() []corev1.Volume { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + return template.Spec.Volumes +} + +func (b *BaseWorkload[T]) GetEnvFromSources() []corev1.EnvFromSource { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return nil + } + var sources []corev1.EnvFromSource + for _, container := range template.Spec.Containers { + sources = append(sources, container.EnvFrom...) + } + for _, container := range template.Spec.InitContainers { + sources = append(sources, container.EnvFrom...) + } + return sources +} + +func (b *BaseWorkload[T]) UsesConfigMap(name string) bool { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return false + } + return SpecUsesConfigMap(&template.Spec, name) +} + +func (b *BaseWorkload[T]) UsesSecret(name string) bool { + template := b.accessor.GetPodTemplateSpec() + if template == nil { + return false + } + return SpecUsesSecret(&template.Spec, name) +} + +func (b *BaseWorkload[T]) GetOwnerReferences() []metav1.OwnerReference { + return b.accessor.GetObjectMeta().OwnerReferences +} + +// Update performs a strategic merge patch update. +func (b *BaseWorkload[T]) Update(ctx context.Context, c client.Client) error { + return c.Patch(ctx, b.object, client.StrategicMergeFrom(b.original), client.FieldOwner(FieldManager)) +} + +// ResetOriginal resets the original state to the current object state. +func (b *BaseWorkload[T]) ResetOriginal() { + b.original = b.object.DeepCopyObject().(T) +} + +// UpdateStrategy returns the default patch strategy. +// Workloads with special update logic should override this. +func (b *BaseWorkload[T]) UpdateStrategy() UpdateStrategy { + return UpdateStrategyPatch +} + +// PerformSpecialUpdate returns false for standard workloads. +// Workloads with special update logic should override this. 
+func (b *BaseWorkload[T]) PerformSpecialUpdate(ctx context.Context, c client.Client) (bool, error) { + return false, nil +} + +// Object returns the underlying Kubernetes object. +func (b *BaseWorkload[T]) Object() T { + return b.object +} + +// Original returns the original state of the object. +func (b *BaseWorkload[T]) Original() T { + return b.original +} diff --git a/internal/pkg/workload/cronjob.go b/internal/pkg/workload/cronjob.go index 9f61b019b..222d4c61e 100644 --- a/internal/pkg/workload/cronjob.go +++ b/internal/pkg/workload/cronjob.go @@ -2,6 +2,7 @@ package workload import ( "context" + "maps" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -9,116 +10,89 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// CronJobWorkload wraps a Kubernetes CronJob. -// Note: CronJobs have a special update mechanism - instead of updating the CronJob itself, -// Reloader creates a new Job from the CronJob's template. -type CronJobWorkload struct { +// cronJobAccessor implements PodTemplateAccessor for CronJob. +type cronJobAccessor struct { cronjob *batchv1.CronJob } -// NewCronJobWorkload creates a new CronJobWorkload. -func NewCronJobWorkload(c *batchv1.CronJob) *CronJobWorkload { - return &CronJobWorkload{cronjob: c} -} - -// Ensure CronJobWorkload implements WorkloadAccessor. 
-var _ WorkloadAccessor = (*CronJobWorkload)(nil) - -func (w *CronJobWorkload) Kind() Kind { - return KindCronJob -} - -func (w *CronJobWorkload) GetObject() client.Object { - return w.cronjob -} - -func (w *CronJobWorkload) GetName() string { - return w.cronjob.Name -} - -func (w *CronJobWorkload) GetNamespace() string { - return w.cronjob.Namespace +func (a *cronJobAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + // CronJob has the pod template nested under JobTemplate.Spec.Template + return &a.cronjob.Spec.JobTemplate.Spec.Template } -func (w *CronJobWorkload) GetAnnotations() map[string]string { - return w.cronjob.Annotations +func (a *cronJobAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.cronjob.ObjectMeta } -// GetPodTemplateAnnotations returns annotations from the JobTemplate's pod template. -func (w *CronJobWorkload) GetPodTemplateAnnotations() map[string]string { - if w.cronjob.Spec.JobTemplate.Spec.Template.Annotations == nil { - w.cronjob.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) - } - return w.cronjob.Spec.JobTemplate.Spec.Template.Annotations +// CronJobWorkload wraps a Kubernetes CronJob. +// Note: CronJobs have a special update mechanism - instead of updating the CronJob itself, +// Reloader creates a new Job from the CronJob's template. +type CronJobWorkload struct { + *BaseWorkload[*batchv1.CronJob] } -func (w *CronJobWorkload) SetPodTemplateAnnotation(key, value string) { - if w.cronjob.Spec.JobTemplate.Spec.Template.Annotations == nil { - w.cronjob.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) +// NewCronJobWorkload creates a new CronJobWorkload. 
+func NewCronJobWorkload(c *batchv1.CronJob) *CronJobWorkload { + original := c.DeepCopy() + accessor := &cronJobAccessor{cronjob: c} + return &CronJobWorkload{ + BaseWorkload: NewBaseWorkload(c, original, accessor, KindCronJob), } - w.cronjob.Spec.JobTemplate.Spec.Template.Annotations[key] = value -} - -func (w *CronJobWorkload) GetContainers() []corev1.Container { - return w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers -} - -func (w *CronJobWorkload) SetContainers(containers []corev1.Container) { - w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers = containers } -func (w *CronJobWorkload) GetInitContainers() []corev1.Container { - return w.cronjob.Spec.JobTemplate.Spec.Template.Spec.InitContainers -} +// Ensure CronJobWorkload implements Workload. +var _ Workload = (*CronJobWorkload)(nil) -func (w *CronJobWorkload) SetInitContainers(containers []corev1.Container) { - w.cronjob.Spec.JobTemplate.Spec.Template.Spec.InitContainers = containers -} - -func (w *CronJobWorkload) GetVolumes() []corev1.Volume { - return w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Volumes -} - -// Update for CronJob is a no-op - use CreateJobFromCronJob instead. +// Update for CronJob is a no-op - use PerformSpecialUpdate instead. // CronJobs trigger reloads by creating a new Job from their template. func (w *CronJobWorkload) Update(ctx context.Context, c client.Client) error { // CronJobs don't get updated directly - a new Job is created instead - // This is handled by the reload package's special CronJob logic + // This is handled by PerformSpecialUpdate return nil } -func (w *CronJobWorkload) DeepCopy() Workload { - return &CronJobWorkload{cronjob: w.cronjob.DeepCopy()} -} - // ResetOriginal is a no-op for CronJobs since they don't use strategic merge patch. // CronJobs create new Jobs instead of being patched. 
func (w *CronJobWorkload) ResetOriginal() {} -func (w *CronJobWorkload) GetEnvFromSources() []corev1.EnvFromSource { - var sources []corev1.EnvFromSource - for _, container := range w.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers { - sources = append(sources, container.EnvFrom...) - } - for _, container := range w.cronjob.Spec.JobTemplate.Spec.Template.Spec.InitContainers { - sources = append(sources, container.EnvFrom...) +func (w *CronJobWorkload) UpdateStrategy() UpdateStrategy { + return UpdateStrategyCreateNew +} + +// PerformSpecialUpdate creates a new Job from the CronJob's template. +// This triggers an immediate execution of the CronJob with updated config. +func (w *CronJobWorkload) PerformSpecialUpdate(ctx context.Context, c client.Client) (bool, error) { + cronJob := w.Object() + + annotations := make(map[string]string) + annotations["cronjob.kubernetes.io/instantiate"] = "manual" + maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations) + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: cronJob.Name + "-", + Namespace: cronJob.Namespace, + Annotations: annotations, + Labels: cronJob.Spec.JobTemplate.Labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob")), + }, + }, + Spec: cronJob.Spec.JobTemplate.Spec, } - return sources -} -func (w *CronJobWorkload) UsesConfigMap(name string) bool { - return SpecUsesConfigMap(&w.cronjob.Spec.JobTemplate.Spec.Template.Spec, name) -} + if err := c.Create(ctx, job, client.FieldOwner(FieldManager)); err != nil { + return false, err + } -func (w *CronJobWorkload) UsesSecret(name string) bool { - return SpecUsesSecret(&w.cronjob.Spec.JobTemplate.Spec.Template.Spec, name) + return true, nil } -func (w *CronJobWorkload) GetOwnerReferences() []metav1.OwnerReference { - return w.cronjob.OwnerReferences +func (w *CronJobWorkload) DeepCopy() Workload { + return NewCronJobWorkload(w.Object().DeepCopy()) } // 
GetCronJob returns the underlying CronJob for special handling. func (w *CronJobWorkload) GetCronJob() *batchv1.CronJob { - return w.cronjob + return w.Object() } diff --git a/internal/pkg/workload/daemonset.go b/internal/pkg/workload/daemonset.go index c2294a4c3..ee6b121e7 100644 --- a/internal/pkg/workload/daemonset.go +++ b/internal/pkg/workload/daemonset.go @@ -1,119 +1,46 @@ package workload import ( - "context" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) -// DaemonSetWorkload wraps a Kubernetes DaemonSet. -type DaemonSetWorkload struct { +// daemonSetAccessor implements PodTemplateAccessor for DaemonSet. +type daemonSetAccessor struct { daemonset *appsv1.DaemonSet - original *appsv1.DaemonSet -} - -// NewDaemonSetWorkload creates a new DaemonSetWorkload. -func NewDaemonSetWorkload(d *appsv1.DaemonSet) *DaemonSetWorkload { - return &DaemonSetWorkload{ - daemonset: d, - original: d.DeepCopy(), - } -} - -// Ensure DaemonSetWorkload implements WorkloadAccessor. 
-var _ WorkloadAccessor = (*DaemonSetWorkload)(nil) - -func (w *DaemonSetWorkload) Kind() Kind { - return KindDaemonSet } -func (w *DaemonSetWorkload) GetObject() client.Object { - return w.daemonset +func (a *daemonSetAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.daemonset.Spec.Template } -func (w *DaemonSetWorkload) GetName() string { - return w.daemonset.Name +func (a *daemonSetAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.daemonset.ObjectMeta } -func (w *DaemonSetWorkload) GetNamespace() string { - return w.daemonset.Namespace -} - -func (w *DaemonSetWorkload) GetAnnotations() map[string]string { - return w.daemonset.Annotations -} - -func (w *DaemonSetWorkload) GetPodTemplateAnnotations() map[string]string { - if w.daemonset.Spec.Template.Annotations == nil { - w.daemonset.Spec.Template.Annotations = make(map[string]string) - } - return w.daemonset.Spec.Template.Annotations -} - -func (w *DaemonSetWorkload) SetPodTemplateAnnotation(key, value string) { - if w.daemonset.Spec.Template.Annotations == nil { - w.daemonset.Spec.Template.Annotations = make(map[string]string) - } - w.daemonset.Spec.Template.Annotations[key] = value -} - -func (w *DaemonSetWorkload) GetContainers() []corev1.Container { - return w.daemonset.Spec.Template.Spec.Containers -} - -func (w *DaemonSetWorkload) SetContainers(containers []corev1.Container) { - w.daemonset.Spec.Template.Spec.Containers = containers -} - -func (w *DaemonSetWorkload) GetInitContainers() []corev1.Container { - return w.daemonset.Spec.Template.Spec.InitContainers -} - -func (w *DaemonSetWorkload) SetInitContainers(containers []corev1.Container) { - w.daemonset.Spec.Template.Spec.InitContainers = containers -} - -func (w *DaemonSetWorkload) GetVolumes() []corev1.Volume { - return w.daemonset.Spec.Template.Spec.Volumes -} - -func (w *DaemonSetWorkload) Update(ctx context.Context, c client.Client) error { - return c.Patch(ctx, w.daemonset, client.StrategicMergeFrom(w.original), 
client.FieldOwner(FieldManager)) +// DaemonSetWorkload wraps a Kubernetes DaemonSet. +type DaemonSetWorkload struct { + *BaseWorkload[*appsv1.DaemonSet] } -func (w *DaemonSetWorkload) DeepCopy() Workload { +// NewDaemonSetWorkload creates a new DaemonSetWorkload. +func NewDaemonSetWorkload(d *appsv1.DaemonSet) *DaemonSetWorkload { + original := d.DeepCopy() + accessor := &daemonSetAccessor{daemonset: d} return &DaemonSetWorkload{ - daemonset: w.daemonset.DeepCopy(), - original: w.original.DeepCopy(), + BaseWorkload: NewBaseWorkload(d, original, accessor, KindDaemonSet), } } -func (w *DaemonSetWorkload) ResetOriginal() { - w.original = w.daemonset.DeepCopy() -} +// Ensure DaemonSetWorkload implements Workload. +var _ Workload = (*DaemonSetWorkload)(nil) -func (w *DaemonSetWorkload) GetEnvFromSources() []corev1.EnvFromSource { - var sources []corev1.EnvFromSource - for _, container := range w.daemonset.Spec.Template.Spec.Containers { - sources = append(sources, container.EnvFrom...) - } - for _, container := range w.daemonset.Spec.Template.Spec.InitContainers { - sources = append(sources, container.EnvFrom...) - } - return sources -} - -func (w *DaemonSetWorkload) UsesConfigMap(name string) bool { - return SpecUsesConfigMap(&w.daemonset.Spec.Template.Spec, name) -} - -func (w *DaemonSetWorkload) UsesSecret(name string) bool { - return SpecUsesSecret(&w.daemonset.Spec.Template.Spec, name) +func (w *DaemonSetWorkload) DeepCopy() Workload { + return NewDaemonSetWorkload(w.Object().DeepCopy()) } -func (w *DaemonSetWorkload) GetOwnerReferences() []metav1.OwnerReference { - return w.daemonset.OwnerReferences +// GetDaemonSet returns the underlying DaemonSet for special handling. 
+func (w *DaemonSetWorkload) GetDaemonSet() *appsv1.DaemonSet { + return w.Object() } diff --git a/internal/pkg/workload/deployment.go b/internal/pkg/workload/deployment.go index 747e99452..ddb621cf3 100644 --- a/internal/pkg/workload/deployment.go +++ b/internal/pkg/workload/deployment.go @@ -1,124 +1,46 @@ package workload import ( - "context" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) -// DeploymentWorkload wraps a Kubernetes Deployment. -type DeploymentWorkload struct { +// deploymentAccessor implements PodTemplateAccessor for Deployment. +type deploymentAccessor struct { deployment *appsv1.Deployment - original *appsv1.Deployment -} - -// NewDeploymentWorkload creates a new DeploymentWorkload. -func NewDeploymentWorkload(d *appsv1.Deployment) *DeploymentWorkload { - return &DeploymentWorkload{ - deployment: d, - original: d.DeepCopy(), - } -} - -// Ensure DeploymentWorkload implements WorkloadAccessor. 
-var _ WorkloadAccessor = (*DeploymentWorkload)(nil) - -func (w *DeploymentWorkload) Kind() Kind { - return KindDeployment } -func (w *DeploymentWorkload) GetObject() client.Object { - return w.deployment -} - -func (w *DeploymentWorkload) GetName() string { - return w.deployment.Name -} - -func (w *DeploymentWorkload) GetNamespace() string { - return w.deployment.Namespace -} - -func (w *DeploymentWorkload) GetAnnotations() map[string]string { - return w.deployment.Annotations -} - -func (w *DeploymentWorkload) GetPodTemplateAnnotations() map[string]string { - if w.deployment.Spec.Template.Annotations == nil { - w.deployment.Spec.Template.Annotations = make(map[string]string) - } - return w.deployment.Spec.Template.Annotations -} - -func (w *DeploymentWorkload) SetPodTemplateAnnotation(key, value string) { - if w.deployment.Spec.Template.Annotations == nil { - w.deployment.Spec.Template.Annotations = make(map[string]string) - } - w.deployment.Spec.Template.Annotations[key] = value +func (a *deploymentAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.deployment.Spec.Template } -func (w *DeploymentWorkload) GetContainers() []corev1.Container { - return w.deployment.Spec.Template.Spec.Containers +func (a *deploymentAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.deployment.ObjectMeta } -func (w *DeploymentWorkload) SetContainers(containers []corev1.Container) { - w.deployment.Spec.Template.Spec.Containers = containers -} - -func (w *DeploymentWorkload) GetInitContainers() []corev1.Container { - return w.deployment.Spec.Template.Spec.InitContainers -} - -func (w *DeploymentWorkload) SetInitContainers(containers []corev1.Container) { - w.deployment.Spec.Template.Spec.InitContainers = containers -} - -func (w *DeploymentWorkload) GetVolumes() []corev1.Volume { - return w.deployment.Spec.Template.Spec.Volumes -} - -func (w *DeploymentWorkload) Update(ctx context.Context, c client.Client) error { - return c.Patch(ctx, w.deployment, 
client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) +// DeploymentWorkload wraps a Kubernetes Deployment. +type DeploymentWorkload struct { + *BaseWorkload[*appsv1.Deployment] } -func (w *DeploymentWorkload) DeepCopy() Workload { +// NewDeploymentWorkload creates a new DeploymentWorkload. +func NewDeploymentWorkload(d *appsv1.Deployment) *DeploymentWorkload { + original := d.DeepCopy() + accessor := &deploymentAccessor{deployment: d} return &DeploymentWorkload{ - deployment: w.deployment.DeepCopy(), - original: w.original.DeepCopy(), + BaseWorkload: NewBaseWorkload(d, original, accessor, KindDeployment), } } -func (w *DeploymentWorkload) ResetOriginal() { - w.original = w.deployment.DeepCopy() -} - -func (w *DeploymentWorkload) GetEnvFromSources() []corev1.EnvFromSource { - var sources []corev1.EnvFromSource - for _, container := range w.deployment.Spec.Template.Spec.Containers { - sources = append(sources, container.EnvFrom...) - } - for _, container := range w.deployment.Spec.Template.Spec.InitContainers { - sources = append(sources, container.EnvFrom...) - } - return sources -} +// Ensure DeploymentWorkload implements Workload. +var _ Workload = (*DeploymentWorkload)(nil) -func (w *DeploymentWorkload) UsesConfigMap(name string) bool { - return SpecUsesConfigMap(&w.deployment.Spec.Template.Spec, name) -} - -func (w *DeploymentWorkload) UsesSecret(name string) bool { - return SpecUsesSecret(&w.deployment.Spec.Template.Spec, name) -} - -func (w *DeploymentWorkload) GetOwnerReferences() []metav1.OwnerReference { - return w.deployment.OwnerReferences +func (w *DeploymentWorkload) DeepCopy() Workload { + return NewDeploymentWorkload(w.Object().DeepCopy()) } // GetDeployment returns the underlying Deployment for special handling. 
func (w *DeploymentWorkload) GetDeployment() *appsv1.Deployment { - return w.deployment + return w.Object() } diff --git a/internal/pkg/workload/deploymentconfig.go b/internal/pkg/workload/deploymentconfig.go index 680a78b63..736a486ed 100644 --- a/internal/pkg/workload/deploymentconfig.go +++ b/internal/pkg/workload/deploymentconfig.go @@ -1,154 +1,77 @@ package workload import ( - "context" - openshiftv1 "github.com/openshift/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) -// DeploymentConfigWorkload wraps an OpenShift DeploymentConfig. -type DeploymentConfigWorkload struct { - dc *openshiftv1.DeploymentConfig - original *openshiftv1.DeploymentConfig +// deploymentConfigAccessor implements PodTemplateAccessor for DeploymentConfig. +type deploymentConfigAccessor struct { + dc *openshiftv1.DeploymentConfig } -// NewDeploymentConfigWorkload creates a new DeploymentConfigWorkload. -func NewDeploymentConfigWorkload(dc *openshiftv1.DeploymentConfig) *DeploymentConfigWorkload { - return &DeploymentConfigWorkload{ - dc: dc, - original: dc.DeepCopy(), - } +func (a *deploymentConfigAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + // DeploymentConfig has a pointer to PodTemplateSpec which may be nil + return a.dc.Spec.Template } -// Ensure DeploymentConfigWorkload implements WorkloadAccessor. 
-var _ WorkloadAccessor = (*DeploymentConfigWorkload)(nil) - -func (w *DeploymentConfigWorkload) Kind() Kind { - return KindDeploymentConfig -} - -func (w *DeploymentConfigWorkload) GetObject() client.Object { - return w.dc +func (a *deploymentConfigAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.dc.ObjectMeta } -func (w *DeploymentConfigWorkload) GetName() string { - return w.dc.Name -} - -func (w *DeploymentConfigWorkload) GetNamespace() string { - return w.dc.Namespace -} - -func (w *DeploymentConfigWorkload) GetAnnotations() map[string]string { - return w.dc.Annotations +// DeploymentConfigWorkload wraps an OpenShift DeploymentConfig. +type DeploymentConfigWorkload struct { + *BaseWorkload[*openshiftv1.DeploymentConfig] } -func (w *DeploymentConfigWorkload) GetPodTemplateAnnotations() map[string]string { - if w.dc.Spec.Template == nil { - return nil - } - if w.dc.Spec.Template.Annotations == nil { - w.dc.Spec.Template.Annotations = make(map[string]string) +// NewDeploymentConfigWorkload creates a new DeploymentConfigWorkload. +func NewDeploymentConfigWorkload(dc *openshiftv1.DeploymentConfig) *DeploymentConfigWorkload { + original := dc.DeepCopy() + accessor := &deploymentConfigAccessor{dc: dc} + return &DeploymentConfigWorkload{ + BaseWorkload: NewBaseWorkload(dc, original, accessor, KindDeploymentConfig), } - return w.dc.Spec.Template.Annotations } +// Ensure DeploymentConfigWorkload implements Workload. +var _ Workload = (*DeploymentConfigWorkload)(nil) + +// SetPodTemplateAnnotation overrides the base to ensure Template is initialized. 
func (w *DeploymentConfigWorkload) SetPodTemplateAnnotation(key, value string) { - if w.dc.Spec.Template == nil { - w.dc.Spec.Template = &corev1.PodTemplateSpec{} - } - if w.dc.Spec.Template.Annotations == nil { - w.dc.Spec.Template.Annotations = make(map[string]string) + dc := w.Object() + if dc.Spec.Template == nil { + dc.Spec.Template = &corev1.PodTemplateSpec{} } - w.dc.Spec.Template.Annotations[key] = value -} - -func (w *DeploymentConfigWorkload) GetContainers() []corev1.Container { - if w.dc.Spec.Template == nil { - return nil + if dc.Spec.Template.Annotations == nil { + dc.Spec.Template.Annotations = make(map[string]string) } - return w.dc.Spec.Template.Spec.Containers + dc.Spec.Template.Annotations[key] = value } +// SetContainers overrides the base to ensure Template is initialized. func (w *DeploymentConfigWorkload) SetContainers(containers []corev1.Container) { - if w.dc.Spec.Template == nil { - w.dc.Spec.Template = &corev1.PodTemplateSpec{} + dc := w.Object() + if dc.Spec.Template == nil { + dc.Spec.Template = &corev1.PodTemplateSpec{} } - w.dc.Spec.Template.Spec.Containers = containers -} - -func (w *DeploymentConfigWorkload) GetInitContainers() []corev1.Container { - if w.dc.Spec.Template == nil { - return nil - } - return w.dc.Spec.Template.Spec.InitContainers + dc.Spec.Template.Spec.Containers = containers } +// SetInitContainers overrides the base to ensure Template is initialized. 
func (w *DeploymentConfigWorkload) SetInitContainers(containers []corev1.Container) { - if w.dc.Spec.Template == nil { - w.dc.Spec.Template = &corev1.PodTemplateSpec{} - } - w.dc.Spec.Template.Spec.InitContainers = containers -} - -func (w *DeploymentConfigWorkload) GetVolumes() []corev1.Volume { - if w.dc.Spec.Template == nil { - return nil + dc := w.Object() + if dc.Spec.Template == nil { + dc.Spec.Template = &corev1.PodTemplateSpec{} } - return w.dc.Spec.Template.Spec.Volumes -} - -func (w *DeploymentConfigWorkload) Update(ctx context.Context, c client.Client) error { - return c.Patch(ctx, w.dc, client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) + dc.Spec.Template.Spec.InitContainers = containers } func (w *DeploymentConfigWorkload) DeepCopy() Workload { - return &DeploymentConfigWorkload{ - dc: w.dc.DeepCopy(), - original: w.original.DeepCopy(), - } -} - -func (w *DeploymentConfigWorkload) ResetOriginal() { - w.original = w.dc.DeepCopy() -} - -func (w *DeploymentConfigWorkload) GetEnvFromSources() []corev1.EnvFromSource { - if w.dc.Spec.Template == nil { - return nil - } - var sources []corev1.EnvFromSource - for _, container := range w.dc.Spec.Template.Spec.Containers { - sources = append(sources, container.EnvFrom...) - } - for _, container := range w.dc.Spec.Template.Spec.InitContainers { - sources = append(sources, container.EnvFrom...) 
- } - return sources -} - -func (w *DeploymentConfigWorkload) UsesConfigMap(name string) bool { - if w.dc.Spec.Template == nil { - return false - } - return SpecUsesConfigMap(&w.dc.Spec.Template.Spec, name) -} - -func (w *DeploymentConfigWorkload) UsesSecret(name string) bool { - if w.dc.Spec.Template == nil { - return false - } - return SpecUsesSecret(&w.dc.Spec.Template.Spec, name) -} - -func (w *DeploymentConfigWorkload) GetOwnerReferences() []metav1.OwnerReference { - return w.dc.OwnerReferences + return NewDeploymentConfigWorkload(w.Object().DeepCopy()) } // GetDeploymentConfig returns the underlying DeploymentConfig for special handling. func (w *DeploymentConfigWorkload) GetDeploymentConfig() *openshiftv1.DeploymentConfig { - return w.dc + return w.Object() } diff --git a/internal/pkg/workload/interface.go b/internal/pkg/workload/interface.go index 40249edb4..6b4ccf7d4 100644 --- a/internal/pkg/workload/interface.go +++ b/internal/pkg/workload/interface.go @@ -31,9 +31,20 @@ const ( KindDeploymentConfig Kind = "DeploymentConfig" ) -// Workload provides a uniform interface for managing Kubernetes workloads. -// All implementations must be safe for concurrent use. -type Workload interface { +// UpdateStrategy defines how a workload should be updated. +type UpdateStrategy int + +const ( + // UpdateStrategyPatch uses strategic merge patch (default for most workloads). + UpdateStrategyPatch UpdateStrategy = iota + // UpdateStrategyRecreate deletes and recreates the workload (Jobs). + UpdateStrategyRecreate + // UpdateStrategyCreateNew creates a new resource from template (CronJobs). + UpdateStrategyCreateNew +) + +// WorkloadIdentity provides basic identification for a workload. +type WorkloadIdentity interface { // Kind returns the workload type. Kind() Kind @@ -45,6 +56,11 @@ type Workload interface { // GetNamespace returns the workload namespace. GetNamespace() string +} + +// WorkloadReader provides read-only access to workload state. 
+type WorkloadReader interface { + WorkloadIdentity // GetAnnotations returns the workload's annotations. GetAnnotations() map[string]string @@ -52,79 +68,74 @@ type Workload interface { // GetPodTemplateAnnotations returns annotations from the pod template spec. GetPodTemplateAnnotations() map[string]string - // SetPodTemplateAnnotation sets an annotation on the pod template. - SetPodTemplateAnnotation(key, value string) - // GetContainers returns all containers (including init containers). GetContainers() []corev1.Container - // SetContainers updates the containers. - SetContainers(containers []corev1.Container) - // GetInitContainers returns all init containers. GetInitContainers() []corev1.Container - // SetInitContainers updates the init containers. - SetInitContainers(containers []corev1.Container) - // GetVolumes returns the pod template volumes. GetVolumes() []corev1.Volume - // Update persists changes to the workload. - Update(ctx context.Context, c client.Client) error - - // ResetOriginal resets the original state to the current object state. - // This should be called after re-fetching the object (e.g., after a conflict) - // to ensure strategic merge patch diffs are calculated correctly. - ResetOriginal() + // GetEnvFromSources returns all envFrom sources from all containers. + GetEnvFromSources() []corev1.EnvFromSource - // DeepCopy returns a deep copy of the workload. - DeepCopy() Workload + // GetOwnerReferences returns the owner references of the workload. + GetOwnerReferences() []metav1.OwnerReference } -// Accessor provides read-only access to workload configuration. -// Use this interface when you only need to inspect workload state. -type Accessor interface { - // Kind returns the workload type. - Kind() Kind - - // GetName returns the workload name. - GetName() string - - // GetNamespace returns the workload namespace. - GetNamespace() string +// WorkloadMatcher provides methods for checking resource usage. 
+type WorkloadMatcher interface { + // UsesConfigMap checks if the workload uses a specific ConfigMap. + UsesConfigMap(name string) bool - // GetAnnotations returns the workload's annotations. - GetAnnotations() map[string]string + // UsesSecret checks if the workload uses a specific Secret. + UsesSecret(name string) bool +} - // GetPodTemplateAnnotations returns annotations from the pod template spec. - GetPodTemplateAnnotations() map[string]string +// WorkloadMutator provides methods for modifying workload state. +type WorkloadMutator interface { + // SetPodTemplateAnnotation sets an annotation on the pod template. + SetPodTemplateAnnotation(key, value string) - // GetContainers returns all containers (including init containers). - GetContainers() []corev1.Container + // SetContainers updates the containers. + SetContainers(containers []corev1.Container) - // GetInitContainers returns all init containers. - GetInitContainers() []corev1.Container + // SetInitContainers updates the init containers. + SetInitContainers(containers []corev1.Container) +} - // GetVolumes returns the pod template volumes. - GetVolumes() []corev1.Volume +// WorkloadUpdater provides methods for persisting workload changes. +type WorkloadUpdater interface { + // Update persists changes to the workload. + Update(ctx context.Context, c client.Client) error - // GetEnvFromSources returns all envFrom sources from all containers. - GetEnvFromSources() []corev1.EnvFromSource + // UpdateStrategy returns how this workload should be updated. + // Most workloads use UpdateStrategyPatch (strategic merge patch). + // Jobs use UpdateStrategyRecreate (delete and recreate). + // CronJobs use UpdateStrategyCreateNew (create a new Job from template). + UpdateStrategy() UpdateStrategy - // UsesConfigMap checks if the workload uses a specific ConfigMap. - UsesConfigMap(name string) bool + // PerformSpecialUpdate handles non-standard update logic. 
+ // This is called when UpdateStrategy() != UpdateStrategyPatch. + // For UpdateStrategyPatch workloads, this returns (false, nil). + PerformSpecialUpdate(ctx context.Context, c client.Client) (updated bool, err error) - // UsesSecret checks if the workload uses a specific Secret. - UsesSecret(name string) bool + // ResetOriginal resets the original state to the current object state. + // This should be called after re-fetching the object (e.g., after a conflict) + // to ensure strategic merge patch diffs are calculated correctly. + ResetOriginal() - // GetOwnerReferences returns the owner references of the workload. - GetOwnerReferences() []metav1.OwnerReference + // DeepCopy returns a deep copy of the workload. + DeepCopy() Workload } -// WorkloadAccessor provides both Workload and Accessor interfaces. -// This is the primary type returned by the registry. -type WorkloadAccessor interface { - Workload - Accessor +// Workload combines all workload interfaces for full workload access. +// Use specific interfaces (WorkloadReader, WorkloadMatcher, etc.) when possible +// to limit scope and improve testability. +type Workload interface { + WorkloadReader + WorkloadMatcher + WorkloadMutator + WorkloadUpdater } diff --git a/internal/pkg/workload/job.go b/internal/pkg/workload/job.go index 291249f89..557c8f6c4 100644 --- a/internal/pkg/workload/job.go +++ b/internal/pkg/workload/job.go @@ -5,119 +5,103 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) -// JobWorkload wraps a Kubernetes Job. -// Note: Jobs have a special update mechanism - instead of updating the Job, -// Reloader deletes and recreates it with the same spec. -type JobWorkload struct { +// jobAccessor implements PodTemplateAccessor for Job. +type jobAccessor struct { job *batchv1.Job } -// NewJobWorkload creates a new JobWorkload. 
-func NewJobWorkload(j *batchv1.Job) *JobWorkload { - return &JobWorkload{job: j} -} - -// Ensure JobWorkload implements WorkloadAccessor. -var _ WorkloadAccessor = (*JobWorkload)(nil) - -func (w *JobWorkload) Kind() Kind { - return KindJob -} - -func (w *JobWorkload) GetObject() client.Object { - return w.job +func (a *jobAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.job.Spec.Template } -func (w *JobWorkload) GetName() string { - return w.job.Name +func (a *jobAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.job.ObjectMeta } -func (w *JobWorkload) GetNamespace() string { - return w.job.Namespace -} - -func (w *JobWorkload) GetAnnotations() map[string]string { - return w.job.Annotations -} - -func (w *JobWorkload) GetPodTemplateAnnotations() map[string]string { - if w.job.Spec.Template.Annotations == nil { - w.job.Spec.Template.Annotations = make(map[string]string) - } - return w.job.Spec.Template.Annotations +// JobWorkload wraps a Kubernetes Job. +// Note: Jobs have a special update mechanism - instead of updating the Job, +// Reloader deletes and recreates it with the same spec. +type JobWorkload struct { + *BaseWorkload[*batchv1.Job] } -func (w *JobWorkload) SetPodTemplateAnnotation(key, value string) { - if w.job.Spec.Template.Annotations == nil { - w.job.Spec.Template.Annotations = make(map[string]string) +// NewJobWorkload creates a new JobWorkload. +func NewJobWorkload(j *batchv1.Job) *JobWorkload { + original := j.DeepCopy() + accessor := &jobAccessor{job: j} + return &JobWorkload{ + BaseWorkload: NewBaseWorkload(j, original, accessor, KindJob), } - w.job.Spec.Template.Annotations[key] = value } -func (w *JobWorkload) GetContainers() []corev1.Container { - return w.job.Spec.Template.Spec.Containers -} +// Ensure JobWorkload implements Workload. 
+var _ Workload = (*JobWorkload)(nil) -func (w *JobWorkload) SetContainers(containers []corev1.Container) { - w.job.Spec.Template.Spec.Containers = containers -} - -func (w *JobWorkload) GetInitContainers() []corev1.Container { - return w.job.Spec.Template.Spec.InitContainers -} - -func (w *JobWorkload) SetInitContainers(containers []corev1.Container) { - w.job.Spec.Template.Spec.InitContainers = containers -} - -func (w *JobWorkload) GetVolumes() []corev1.Volume { - return w.job.Spec.Template.Spec.Volumes -} - -// Update for Job is a no-op - use RecreateJob instead. +// Update for Job is a no-op - use PerformSpecialUpdate instead. // Jobs trigger reloads by being deleted and recreated. func (w *JobWorkload) Update(ctx context.Context, c client.Client) error { // Jobs don't get updated directly - they are deleted and recreated - // This is handled by the reload package's special Job logic + // This is handled by PerformSpecialUpdate return nil } -func (w *JobWorkload) DeepCopy() Workload { - return &JobWorkload{job: w.job.DeepCopy()} -} - // ResetOriginal is a no-op for Jobs since they don't use strategic merge patch. // Jobs are deleted and recreated instead of being patched. func (w *JobWorkload) ResetOriginal() {} -func (w *JobWorkload) GetEnvFromSources() []corev1.EnvFromSource { - var sources []corev1.EnvFromSource - for _, container := range w.job.Spec.Template.Spec.Containers { - sources = append(sources, container.EnvFrom...) - } - for _, container := range w.job.Spec.Template.Spec.InitContainers { - sources = append(sources, container.EnvFrom...) - } - return sources +func (w *JobWorkload) UpdateStrategy() UpdateStrategy { + return UpdateStrategyRecreate } -func (w *JobWorkload) UsesConfigMap(name string) bool { - return SpecUsesConfigMap(&w.job.Spec.Template.Spec, name) -} +// PerformSpecialUpdate deletes the Job and recreates it with the updated spec. +// This is necessary because Jobs are immutable after creation. 
+func (w *JobWorkload) PerformSpecialUpdate(ctx context.Context, c client.Client) (bool, error) { + oldJob := w.Object() + newJob := oldJob.DeepCopy() + + // Delete the old job with background propagation + policy := metav1.DeletePropagationBackground + if err := c.Delete(ctx, oldJob, &client.DeleteOptions{ + PropagationPolicy: &policy, + }); err != nil { + if !errors.IsNotFound(err) { + return false, err + } + } + + // Clear fields that should not be specified when creating a new Job + newJob.ResourceVersion = "" + newJob.UID = "" + newJob.CreationTimestamp = metav1.Time{} + newJob.Status = batchv1.JobStatus{} -func (w *JobWorkload) UsesSecret(name string) bool { - return SpecUsesSecret(&w.job.Spec.Template.Spec, name) + // Remove problematic labels that are auto-generated + delete(newJob.Spec.Template.Labels, "controller-uid") + delete(newJob.Spec.Template.Labels, batchv1.ControllerUidLabel) + delete(newJob.Spec.Template.Labels, batchv1.JobNameLabel) + delete(newJob.Spec.Template.Labels, "job-name") + + // Remove the selector to allow it to be auto-generated + newJob.Spec.Selector = nil + + // Create the new job with same spec + if err := c.Create(ctx, newJob, client.FieldOwner(FieldManager)); err != nil { + return false, err + } + + return true, nil } -func (w *JobWorkload) GetOwnerReferences() []metav1.OwnerReference { - return w.job.OwnerReferences +func (w *JobWorkload) DeepCopy() Workload { + return NewJobWorkload(w.Object().DeepCopy()) } // GetJob returns the underlying Job for special handling. 
func (w *JobWorkload) GetJob() *batchv1.Job { - return w.job + return w.Object() } diff --git a/internal/pkg/workload/lister.go b/internal/pkg/workload/lister.go index 07cde6155..1b982fead 100644 --- a/internal/pkg/workload/lister.go +++ b/internal/pkg/workload/lister.go @@ -3,7 +3,6 @@ package workload import ( "context" - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" openshiftv1 "github.com/openshift/api/apps/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -32,8 +31,8 @@ func NewLister(c client.Client, registry *Registry, checker IgnoreChecker) *List } // List returns all workloads in the given namespace. -func (l *Lister) List(ctx context.Context, namespace string) ([]WorkloadAccessor, error) { - var result []WorkloadAccessor +func (l *Lister) List(ctx context.Context, namespace string) ([]Workload, error) { + var result []Workload for _, kind := range l.Registry.SupportedKinds() { if l.Checker != nil && l.Checker.IsWorkloadIgnored(string(kind)) { @@ -50,7 +49,7 @@ func (l *Lister) List(ctx context.Context, namespace string) ([]WorkloadAccessor return result, nil } -func (l *Lister) listByKind(ctx context.Context, namespace string, kind Kind) ([]WorkloadAccessor, error) { +func (l *Lister) listByKind(ctx context.Context, namespace string, kind Kind) ([]Workload, error) { lister := l.Registry.ListerFor(kind) if lister == nil { return nil, nil @@ -58,84 +57,72 @@ func (l *Lister) listByKind(ctx context.Context, namespace string, kind Kind) ([ return lister(ctx, l.Client, namespace) } -func listDeployments(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { +func listDeployments(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { var list appsv1.DeploymentList if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { return nil, err } - result := make([]WorkloadAccessor, len(list.Items)) + result := make([]Workload, len(list.Items)) for 
i := range list.Items { result[i] = NewDeploymentWorkload(&list.Items[i]) } return result, nil } -func listDaemonSets(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { +func listDaemonSets(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { var list appsv1.DaemonSetList if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { return nil, err } - result := make([]WorkloadAccessor, len(list.Items)) + result := make([]Workload, len(list.Items)) for i := range list.Items { result[i] = NewDaemonSetWorkload(&list.Items[i]) } return result, nil } -func listStatefulSets(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { +func listStatefulSets(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { var list appsv1.StatefulSetList if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { return nil, err } - result := make([]WorkloadAccessor, len(list.Items)) + result := make([]Workload, len(list.Items)) for i := range list.Items { result[i] = NewStatefulSetWorkload(&list.Items[i]) } return result, nil } -func listJobs(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { +func listJobs(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { var list batchv1.JobList if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { return nil, err } - result := make([]WorkloadAccessor, len(list.Items)) + result := make([]Workload, len(list.Items)) for i := range list.Items { result[i] = NewJobWorkload(&list.Items[i]) } return result, nil } -func listCronJobs(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { +func listCronJobs(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { var list batchv1.CronJobList if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { return nil, err } - result 
:= make([]WorkloadAccessor, len(list.Items)) + result := make([]Workload, len(list.Items)) for i := range list.Items { result[i] = NewCronJobWorkload(&list.Items[i]) } return result, nil } -func listRollouts(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { - var list argorolloutv1alpha1.RolloutList - if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { - return nil, err - } - result := make([]WorkloadAccessor, len(list.Items)) - for i := range list.Items { - result[i] = NewRolloutWorkload(&list.Items[i]) - } - return result, nil -} - -func listDeploymentConfigs(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) { +func listDeploymentConfigs(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { var list openshiftv1.DeploymentConfigList if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { return nil, err } - result := make([]WorkloadAccessor, len(list.Items)) + result := make([]Workload, len(list.Items)) for i := range list.Items { result[i] = NewDeploymentConfigWorkload(&list.Items[i]) } diff --git a/internal/pkg/workload/registry.go b/internal/pkg/workload/registry.go index 3516338d0..5392eca1b 100644 --- a/internal/pkg/workload/registry.go +++ b/internal/pkg/workload/registry.go @@ -13,26 +13,29 @@ import ( ) // WorkloadLister is a function that lists workloads of a specific kind. -type WorkloadLister func(ctx context.Context, c client.Client, namespace string) ([]WorkloadAccessor, error) +type WorkloadLister func(ctx context.Context, c client.Client, namespace string) ([]Workload, error) // RegistryOptions configures the workload registry. type RegistryOptions struct { - ArgoRolloutsEnabled bool - DeploymentConfigEnabled bool + ArgoRolloutsEnabled bool + DeploymentConfigEnabled bool + RolloutStrategyAnnotation string } // Registry provides factory methods for creating Workload instances. 
type Registry struct { - argoRolloutsEnabled bool - deploymentConfigEnabled bool - listers map[Kind]WorkloadLister + argoRolloutsEnabled bool + deploymentConfigEnabled bool + rolloutStrategyAnnotation string + listers map[Kind]WorkloadLister } // NewRegistry creates a new workload registry. func NewRegistry(opts RegistryOptions) *Registry { r := &Registry{ - argoRolloutsEnabled: opts.ArgoRolloutsEnabled, - deploymentConfigEnabled: opts.DeploymentConfigEnabled, + argoRolloutsEnabled: opts.ArgoRolloutsEnabled, + deploymentConfigEnabled: opts.DeploymentConfigEnabled, + rolloutStrategyAnnotation: opts.RolloutStrategyAnnotation, listers: map[Kind]WorkloadLister{ KindDeployment: listDeployments, KindDaemonSet: listDaemonSets, @@ -42,7 +45,19 @@ func NewRegistry(opts RegistryOptions) *Registry { }, } if opts.ArgoRolloutsEnabled { - r.listers[KindArgoRollout] = listRollouts + // Use closure to capture the strategy annotation + strategyAnnotation := opts.RolloutStrategyAnnotation + r.listers[KindArgoRollout] = func(ctx context.Context, c client.Client, namespace string) ([]Workload, error) { + var list argorolloutv1alpha1.RolloutList + if err := c.List(ctx, &list, client.InNamespace(namespace)); err != nil { + return nil, err + } + result := make([]Workload, len(list.Items)) + for i := range list.Items { + result[i] = NewRolloutWorkload(&list.Items[i], strategyAnnotation) + } + return result, nil + } } if opts.DeploymentConfigEnabled { r.listers[KindDeploymentConfig] = listDeploymentConfigs @@ -73,8 +88,8 @@ func (r *Registry) SupportedKinds() []Kind { return kinds } -// FromObject creates a WorkloadAccessor from a Kubernetes object. -func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { +// FromObject creates a Workload from a Kubernetes object. 
+func (r *Registry) FromObject(obj client.Object) (Workload, error) { switch o := obj.(type) { case *appsv1.Deployment: return NewDeploymentWorkload(o), nil @@ -90,7 +105,7 @@ func (r *Registry) FromObject(obj client.Object) (WorkloadAccessor, error) { if !r.argoRolloutsEnabled { return nil, fmt.Errorf("argo Rollouts support is not enabled") } - return NewRolloutWorkload(o), nil + return NewRolloutWorkload(o, r.rolloutStrategyAnnotation), nil case *openshiftv1.DeploymentConfig: if !r.deploymentConfigEnabled { return nil, fmt.Errorf("openShift DeploymentConfig support is not enabled") diff --git a/internal/pkg/workload/rollout.go b/internal/pkg/workload/rollout.go index 8e78d3e9e..39d70fc73 100644 --- a/internal/pkg/workload/rollout.go +++ b/internal/pkg/workload/rollout.go @@ -22,79 +22,39 @@ const ( RolloutStrategyRestart RolloutStrategy = "restart" ) -// RolloutStrategyAnnotation is the annotation key for specifying the rollout strategy. -const RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy" - -// RolloutWorkload wraps an Argo Rollout. -type RolloutWorkload struct { - rollout *argorolloutv1alpha1.Rollout - original *argorolloutv1alpha1.Rollout -} - -// NewRolloutWorkload creates a new RolloutWorkload. -func NewRolloutWorkload(r *argorolloutv1alpha1.Rollout) *RolloutWorkload { - return &RolloutWorkload{ - rollout: r, - original: r.DeepCopy(), - } -} - -// Ensure RolloutWorkload implements WorkloadAccessor. -var _ WorkloadAccessor = (*RolloutWorkload)(nil) - -func (w *RolloutWorkload) Kind() Kind { - return KindArgoRollout +// rolloutAccessor implements PodTemplateAccessor for Rollout. 
+type rolloutAccessor struct { + rollout *argorolloutv1alpha1.Rollout } -func (w *RolloutWorkload) GetObject() client.Object { - return w.rollout +func (a *rolloutAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.rollout.Spec.Template } -func (w *RolloutWorkload) GetName() string { - return w.rollout.Name +func (a *rolloutAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.rollout.ObjectMeta } -func (w *RolloutWorkload) GetNamespace() string { - return w.rollout.Namespace -} - -func (w *RolloutWorkload) GetAnnotations() map[string]string { - return w.rollout.Annotations -} - -func (w *RolloutWorkload) GetPodTemplateAnnotations() map[string]string { - if w.rollout.Spec.Template.Annotations == nil { - w.rollout.Spec.Template.Annotations = make(map[string]string) - } - return w.rollout.Spec.Template.Annotations +// RolloutWorkload wraps an Argo Rollout. +type RolloutWorkload struct { + *BaseWorkload[*argorolloutv1alpha1.Rollout] + strategyAnnotation string } -func (w *RolloutWorkload) SetPodTemplateAnnotation(key, value string) { - if w.rollout.Spec.Template.Annotations == nil { - w.rollout.Spec.Template.Annotations = make(map[string]string) +// NewRolloutWorkload creates a new RolloutWorkload. +// The strategyAnnotation parameter specifies the annotation key used to determine +// the rollout strategy (from config.Annotations.RolloutStrategy). 
+func NewRolloutWorkload(r *argorolloutv1alpha1.Rollout, strategyAnnotation string) *RolloutWorkload { + original := r.DeepCopy() + accessor := &rolloutAccessor{rollout: r} + return &RolloutWorkload{ + BaseWorkload: NewBaseWorkload(r, original, accessor, KindArgoRollout), + strategyAnnotation: strategyAnnotation, } - w.rollout.Spec.Template.Annotations[key] = value } -func (w *RolloutWorkload) GetContainers() []corev1.Container { - return w.rollout.Spec.Template.Spec.Containers -} - -func (w *RolloutWorkload) SetContainers(containers []corev1.Container) { - w.rollout.Spec.Template.Spec.Containers = containers -} - -func (w *RolloutWorkload) GetInitContainers() []corev1.Container { - return w.rollout.Spec.Template.Spec.InitContainers -} - -func (w *RolloutWorkload) SetInitContainers(containers []corev1.Container) { - w.rollout.Spec.Template.Spec.InitContainers = containers -} - -func (w *RolloutWorkload) GetVolumes() []corev1.Volume { - return w.rollout.Spec.Template.Spec.Volumes -} +// Ensure RolloutWorkload implements Workload. +var _ Workload = (*RolloutWorkload)(nil) // Update updates the Rollout. It uses the rollout strategy annotation to determine // whether to do a standard rollout or set the restartAt field. @@ -104,18 +64,18 @@ func (w *RolloutWorkload) Update(ctx context.Context, c client.Client) error { case RolloutStrategyRestart: // Set restartAt field to trigger a restart restartAt := metav1.NewTime(time.Now()) - w.rollout.Spec.RestartAt = &restartAt + w.Object().Spec.RestartAt = &restartAt } - return c.Patch(ctx, w.rollout, client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) + return c.Patch(ctx, w.Object(), client.StrategicMergeFrom(w.Original()), client.FieldOwner(FieldManager)) } // getStrategy returns the rollout strategy from the annotation. 
func (w *RolloutWorkload) getStrategy() RolloutStrategy { - annotations := w.rollout.GetAnnotations() + annotations := w.Object().GetAnnotations() if annotations == nil { return RolloutStrategyRollout } - strategy := annotations[RolloutStrategyAnnotation] + strategy := annotations[w.strategyAnnotation] switch RolloutStrategy(strategy) { case RolloutStrategyRestart: return RolloutStrategyRestart @@ -125,42 +85,12 @@ func (w *RolloutWorkload) getStrategy() RolloutStrategy { } func (w *RolloutWorkload) DeepCopy() Workload { - return &RolloutWorkload{ - rollout: w.rollout.DeepCopy(), - original: w.original.DeepCopy(), - } -} - -func (w *RolloutWorkload) ResetOriginal() { - w.original = w.rollout.DeepCopy() -} - -func (w *RolloutWorkload) GetEnvFromSources() []corev1.EnvFromSource { - var sources []corev1.EnvFromSource - for _, container := range w.rollout.Spec.Template.Spec.Containers { - sources = append(sources, container.EnvFrom...) - } - for _, container := range w.rollout.Spec.Template.Spec.InitContainers { - sources = append(sources, container.EnvFrom...) - } - return sources -} - -func (w *RolloutWorkload) UsesConfigMap(name string) bool { - return SpecUsesConfigMap(&w.rollout.Spec.Template.Spec, name) -} - -func (w *RolloutWorkload) UsesSecret(name string) bool { - return SpecUsesSecret(&w.rollout.Spec.Template.Spec, name) -} - -func (w *RolloutWorkload) GetOwnerReferences() []metav1.OwnerReference { - return w.rollout.OwnerReferences + return NewRolloutWorkload(w.Object().DeepCopy(), w.strategyAnnotation) } // GetRollout returns the underlying Rollout for special handling. func (w *RolloutWorkload) GetRollout() *argorolloutv1alpha1.Rollout { - return w.rollout + return w.Object() } // GetStrategy returns the configured rollout strategy. 
diff --git a/internal/pkg/workload/statefulset.go b/internal/pkg/workload/statefulset.go index ebec4a007..8e9d1e48c 100644 --- a/internal/pkg/workload/statefulset.go +++ b/internal/pkg/workload/statefulset.go @@ -1,119 +1,46 @@ package workload import ( - "context" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) -// StatefulSetWorkload wraps a Kubernetes StatefulSet. -type StatefulSetWorkload struct { +// statefulSetAccessor implements PodTemplateAccessor for StatefulSet. +type statefulSetAccessor struct { statefulset *appsv1.StatefulSet - original *appsv1.StatefulSet -} - -// NewStatefulSetWorkload creates a new StatefulSetWorkload. -func NewStatefulSetWorkload(s *appsv1.StatefulSet) *StatefulSetWorkload { - return &StatefulSetWorkload{ - statefulset: s, - original: s.DeepCopy(), - } -} - -// Ensure StatefulSetWorkload implements WorkloadAccessor. -var _ WorkloadAccessor = (*StatefulSetWorkload)(nil) - -func (w *StatefulSetWorkload) Kind() Kind { - return KindStatefulSet } -func (w *StatefulSetWorkload) GetObject() client.Object { - return w.statefulset +func (a *statefulSetAccessor) GetPodTemplateSpec() *corev1.PodTemplateSpec { + return &a.statefulset.Spec.Template } -func (w *StatefulSetWorkload) GetName() string { - return w.statefulset.Name +func (a *statefulSetAccessor) GetObjectMeta() *metav1.ObjectMeta { + return &a.statefulset.ObjectMeta } -func (w *StatefulSetWorkload) GetNamespace() string { - return w.statefulset.Namespace -} - -func (w *StatefulSetWorkload) GetAnnotations() map[string]string { - return w.statefulset.Annotations -} - -func (w *StatefulSetWorkload) GetPodTemplateAnnotations() map[string]string { - if w.statefulset.Spec.Template.Annotations == nil { - w.statefulset.Spec.Template.Annotations = make(map[string]string) - } - return w.statefulset.Spec.Template.Annotations -} - -func (w *StatefulSetWorkload) SetPodTemplateAnnotation(key, 
value string) { - if w.statefulset.Spec.Template.Annotations == nil { - w.statefulset.Spec.Template.Annotations = make(map[string]string) - } - w.statefulset.Spec.Template.Annotations[key] = value -} - -func (w *StatefulSetWorkload) GetContainers() []corev1.Container { - return w.statefulset.Spec.Template.Spec.Containers -} - -func (w *StatefulSetWorkload) SetContainers(containers []corev1.Container) { - w.statefulset.Spec.Template.Spec.Containers = containers -} - -func (w *StatefulSetWorkload) GetInitContainers() []corev1.Container { - return w.statefulset.Spec.Template.Spec.InitContainers -} - -func (w *StatefulSetWorkload) SetInitContainers(containers []corev1.Container) { - w.statefulset.Spec.Template.Spec.InitContainers = containers -} - -func (w *StatefulSetWorkload) GetVolumes() []corev1.Volume { - return w.statefulset.Spec.Template.Spec.Volumes -} - -func (w *StatefulSetWorkload) Update(ctx context.Context, c client.Client) error { - return c.Patch(ctx, w.statefulset, client.StrategicMergeFrom(w.original), client.FieldOwner(FieldManager)) +// StatefulSetWorkload wraps a Kubernetes StatefulSet. +type StatefulSetWorkload struct { + *BaseWorkload[*appsv1.StatefulSet] } -func (w *StatefulSetWorkload) DeepCopy() Workload { +// NewStatefulSetWorkload creates a new StatefulSetWorkload. +func NewStatefulSetWorkload(s *appsv1.StatefulSet) *StatefulSetWorkload { + original := s.DeepCopy() + accessor := &statefulSetAccessor{statefulset: s} return &StatefulSetWorkload{ - statefulset: w.statefulset.DeepCopy(), - original: w.original.DeepCopy(), + BaseWorkload: NewBaseWorkload(s, original, accessor, KindStatefulSet), } } -func (w *StatefulSetWorkload) ResetOriginal() { - w.original = w.statefulset.DeepCopy() -} +// Ensure StatefulSetWorkload implements Workload. 
+var _ Workload = (*StatefulSetWorkload)(nil) -func (w *StatefulSetWorkload) GetEnvFromSources() []corev1.EnvFromSource { - var sources []corev1.EnvFromSource - for _, container := range w.statefulset.Spec.Template.Spec.Containers { - sources = append(sources, container.EnvFrom...) - } - for _, container := range w.statefulset.Spec.Template.Spec.InitContainers { - sources = append(sources, container.EnvFrom...) - } - return sources -} - -func (w *StatefulSetWorkload) UsesConfigMap(name string) bool { - return SpecUsesConfigMap(&w.statefulset.Spec.Template.Spec, name) -} - -func (w *StatefulSetWorkload) UsesSecret(name string) bool { - return SpecUsesSecret(&w.statefulset.Spec.Template.Spec, name) +func (w *StatefulSetWorkload) DeepCopy() Workload { + return NewStatefulSetWorkload(w.Object().DeepCopy()) } -func (w *StatefulSetWorkload) GetOwnerReferences() []metav1.OwnerReference { - return w.statefulset.OwnerReferences +// GetStatefulSet returns the underlying StatefulSet for special handling. +func (w *StatefulSetWorkload) GetStatefulSet() *appsv1.StatefulSet { + return w.Object() } diff --git a/internal/pkg/workload/workload_test.go b/internal/pkg/workload/workload_test.go index 91139d85a..084eb1e57 100644 --- a/internal/pkg/workload/workload_test.go +++ b/internal/pkg/workload/workload_test.go @@ -10,6 +10,9 @@ import ( "github.com/stakater/Reloader/internal/pkg/testutil" ) +// testRolloutStrategyAnnotation is the annotation key used in tests for rollout strategy. +const testRolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy" + // addEnvVar adds an environment variable with a ConfigMapKeyRef or SecretKeyRef to a container. 
func addEnvVarConfigMapRef(containers []corev1.Container, envName, configMapName, key string) { if len(containers) > 0 { @@ -763,10 +766,10 @@ func TestStatefulSetWorkload_GetOwnerReferences(t *testing.T) { // Test that workloads implement the interface func TestWorkloadInterface(t *testing.T) { - var _ WorkloadAccessor = (*DeploymentWorkload)(nil) - var _ WorkloadAccessor = (*DaemonSetWorkload)(nil) - var _ WorkloadAccessor = (*StatefulSetWorkload)(nil) - var _ WorkloadAccessor = (*RolloutWorkload)(nil) + var _ Workload = (*DeploymentWorkload)(nil) + var _ Workload = (*DaemonSetWorkload)(nil) + var _ Workload = (*StatefulSetWorkload)(nil) + var _ Workload = (*RolloutWorkload)(nil) } // RolloutWorkload tests @@ -781,7 +784,7 @@ func TestRolloutWorkload_BasicGetters(t *testing.T) { }, } - w := NewRolloutWorkload(rollout) + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) if w.Kind() != KindArgoRollout { t.Errorf("Kind() = %v, want %v", w.Kind(), KindArgoRollout) @@ -814,7 +817,7 @@ func TestRolloutWorkload_PodTemplateAnnotations(t *testing.T) { }, } - w := NewRolloutWorkload(rollout) + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) // Test get annotations := w.GetPodTemplateAnnotations() @@ -834,7 +837,7 @@ func TestRolloutWorkload_GetStrategy_Default(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test"}, } - w := NewRolloutWorkload(rollout) + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) if w.GetStrategy() != RolloutStrategyRollout { t.Errorf("GetStrategy() = %v, want %v (default)", w.GetStrategy(), RolloutStrategyRollout) @@ -846,12 +849,12 @@ func TestRolloutWorkload_GetStrategy_Restart(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Annotations: map[string]string{ - RolloutStrategyAnnotation: "restart", + testRolloutStrategyAnnotation: "restart", }, }, } - w := NewRolloutWorkload(rollout) + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) if w.GetStrategy() != 
RolloutStrategyRestart { t.Errorf("GetStrategy() = %v, want %v", w.GetStrategy(), RolloutStrategyRestart) @@ -881,7 +884,7 @@ func TestRolloutWorkload_UsesConfigMap_Volume(t *testing.T) { }, } - w := NewRolloutWorkload(rollout) + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) if !w.UsesConfigMap("rollout-config") { t.Error("Rollout UsesConfigMap should return true for ConfigMap volume") @@ -916,7 +919,7 @@ func TestRolloutWorkload_UsesSecret_EnvFrom(t *testing.T) { }, } - w := NewRolloutWorkload(rollout) + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) if !w.UsesSecret("rollout-secret") { t.Error("Rollout UsesSecret should return true for Secret envFrom") @@ -940,7 +943,7 @@ func TestRolloutWorkload_DeepCopy(t *testing.T) { }, } - w := NewRolloutWorkload(rollout) + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) copy := w.DeepCopy() // Verify copy is independent @@ -1176,8 +1179,8 @@ func TestCronJobWorkload_DeepCopy(t *testing.T) { // Test that Job and CronJob implement the interface func TestJobCronJobWorkloadInterface(t *testing.T) { - var _ WorkloadAccessor = (*JobWorkload)(nil) - var _ WorkloadAccessor = (*CronJobWorkload)(nil) + var _ Workload = (*JobWorkload)(nil) + var _ Workload = (*CronJobWorkload)(nil) } // DeploymentConfig tests @@ -1538,5 +1541,228 @@ func TestDeploymentConfigWorkload_GetDeploymentConfig(t *testing.T) { // Test that DeploymentConfig implements the interface func TestDeploymentConfigWorkloadInterface(t *testing.T) { - var _ WorkloadAccessor = (*DeploymentConfigWorkload)(nil) + var _ Workload = (*DeploymentConfigWorkload)(nil) +} + +// Tests for UpdateStrategy +func TestWorkload_UpdateStrategy(t *testing.T) { + tests := []struct { + name string + workload Workload + expected UpdateStrategy + }{ + { + name: "Deployment uses Patch strategy", + workload: NewDeploymentWorkload(testutil.NewDeployment("test", "default", nil)), + expected: UpdateStrategyPatch, + }, + { + name: "DaemonSet uses 
Patch strategy", + workload: NewDaemonSetWorkload(testutil.NewDaemonSet("test", "default", nil)), + expected: UpdateStrategyPatch, + }, + { + name: "StatefulSet uses Patch strategy", + workload: NewStatefulSetWorkload(testutil.NewStatefulSet("test", "default", nil)), + expected: UpdateStrategyPatch, + }, + { + name: "Job uses Recreate strategy", + workload: NewJobWorkload(testutil.NewJob("test", "default")), + expected: UpdateStrategyRecreate, + }, + { + name: "CronJob uses CreateNew strategy", + workload: NewCronJobWorkload(testutil.NewCronJob("test", "default")), + expected: UpdateStrategyCreateNew, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.workload.UpdateStrategy(); got != tt.expected { + t.Errorf("UpdateStrategy() = %v, want %v", got, tt.expected) + } + }) + } +} + +// Tests for ResetOriginal +func TestDeploymentWorkload_ResetOriginal(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + w := NewDeploymentWorkload(deploy) + + // Modify the workload + w.SetPodTemplateAnnotation("modified", "true") + + // Original should still not have the annotation + originalAnnotations := w.Original().Spec.Template.Annotations + if originalAnnotations != nil && originalAnnotations["modified"] == "true" { + t.Error("Original should not be modified yet") + } + + // Reset original + w.ResetOriginal() + + // Now original should have the annotation + if w.Original().Spec.Template.Annotations["modified"] != "true" { + t.Error("ResetOriginal should update original to match current state") + } +} + +func TestJobWorkload_ResetOriginal(t *testing.T) { + job := testutil.NewJob("test", "default") + w := NewJobWorkload(job) + + // ResetOriginal should be a no-op for Jobs (they don't use strategic merge patch) + w.SetPodTemplateAnnotation("modified", "true") + w.ResetOriginal() // Should not panic or error +} + +func TestCronJobWorkload_ResetOriginal(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + w 
:= NewCronJobWorkload(cj) + + // ResetOriginal should be a no-op for CronJobs + w.SetPodTemplateAnnotation("modified", "true") + w.ResetOriginal() // Should not panic or error +} + +// Tests for BaseWorkload.Original() +func TestDeploymentWorkload_Original(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + deploy.Spec.Template.Annotations = map[string]string{"initial": "value"} + + w := NewDeploymentWorkload(deploy) + + // Modify the current object + w.SetPodTemplateAnnotation("new", "annotation") + + // Original should still have only the initial annotation + original := w.Original() + if original.Spec.Template.Annotations["new"] == "annotation" { + t.Error("Original should not reflect changes to current object") + } + if original.Spec.Template.Annotations["initial"] != "value" { + t.Error("Original should retain initial state") + } +} + +// Tests for PerformSpecialUpdate returning false for standard workloads +func TestDeploymentWorkload_PerformSpecialUpdate(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + w := NewDeploymentWorkload(deploy) + + updated, err := w.PerformSpecialUpdate(t.Context(), nil) + if err != nil { + t.Errorf("PerformSpecialUpdate() error = %v", err) + } + if updated { + t.Error("PerformSpecialUpdate() should return false for Deployment") + } +} + +func TestDaemonSetWorkload_PerformSpecialUpdate(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + w := NewDaemonSetWorkload(ds) + + updated, err := w.PerformSpecialUpdate(t.Context(), nil) + if err != nil { + t.Errorf("PerformSpecialUpdate() error = %v", err) + } + if updated { + t.Error("PerformSpecialUpdate() should return false for DaemonSet") + } +} + +func TestStatefulSetWorkload_PerformSpecialUpdate(t *testing.T) { + ss := testutil.NewStatefulSet("test", "default", nil) + w := NewStatefulSetWorkload(ss) + + updated, err := w.PerformSpecialUpdate(t.Context(), nil) + if err != nil { + t.Errorf("PerformSpecialUpdate() 
error = %v", err) + } + if updated { + t.Error("PerformSpecialUpdate() should return false for StatefulSet") + } +} + +// Test Update returns nil for Job (no-op, uses PerformSpecialUpdate instead) +func TestJobWorkload_Update(t *testing.T) { + job := testutil.NewJob("test", "default") + w := NewJobWorkload(job) + + err := w.Update(t.Context(), nil) + if err != nil { + t.Errorf("Update() should return nil for Job, got %v", err) + } +} + +// Test Update returns nil for CronJob (no-op, uses PerformSpecialUpdate instead) +func TestCronJobWorkload_Update(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + w := NewCronJobWorkload(cj) + + err := w.Update(t.Context(), nil) + if err != nil { + t.Errorf("Update() should return nil for CronJob, got %v", err) + } +} + +// Test GetJob and GetCronJob accessors +func TestJobWorkload_GetJob(t *testing.T) { + job := testutil.NewJob("test", "default") + w := NewJobWorkload(job) + + if w.GetJob() != job { + t.Error("GetJob should return the underlying Job") + } +} + +func TestCronJobWorkload_GetCronJob(t *testing.T) { + cj := testutil.NewCronJob("test", "default") + w := NewCronJobWorkload(cj) + + if w.GetCronJob() != cj { + t.Error("GetCronJob should return the underlying CronJob") + } +} + +func TestDeploymentWorkload_GetDeployment(t *testing.T) { + deploy := testutil.NewDeployment("test", "default", nil) + w := NewDeploymentWorkload(deploy) + + if w.GetDeployment() != deploy { + t.Error("GetDeployment should return the underlying Deployment") + } +} + +func TestDaemonSetWorkload_GetDaemonSet(t *testing.T) { + ds := testutil.NewDaemonSet("test", "default", nil) + w := NewDaemonSetWorkload(ds) + + if w.GetDaemonSet() != ds { + t.Error("GetDaemonSet should return the underlying DaemonSet") + } +} + +func TestStatefulSetWorkload_GetStatefulSet(t *testing.T) { + ss := testutil.NewStatefulSet("test", "default", nil) + w := NewStatefulSetWorkload(ss) + + if w.GetStatefulSet() != ss { + t.Error("GetStatefulSet should return 
the underlying StatefulSet") + } +} + +func TestRolloutWorkload_GetRollout(t *testing.T) { + rollout := &argorolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + w := NewRolloutWorkload(rollout, testRolloutStrategyAnnotation) + + if w.GetRollout() != rollout { + t.Error("GetRollout should return the underlying Rollout") + } } From bd030fe795525ae9c11add1808cc9700be84101e Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Wed, 7 Jan 2026 17:32:44 +0100 Subject: [PATCH 34/35] fix: A bunch of issues surfaced up via e2e tests and remove old e2e ones --- .gitignore | 3 +- cmd/reloader/main.go | 17 +- internal/pkg/config/config.go | 2 +- internal/pkg/config/config_test.go | 4 +- internal/pkg/config/flags.go | 52 +- internal/pkg/config/flags_test.go | 4 +- internal/pkg/config/validation.go | 8 +- internal/pkg/config/validation_test.go | 3 +- .../pkg/controller/configmap_reconciler.go | 22 +- internal/pkg/controller/manager.go | 50 +- .../pkg/controller/resource_reconciler.go | 60 +- internal/pkg/controller/secret_reconciler.go | 22 +- internal/pkg/controller/test_helpers_test.go | 14 +- internal/pkg/workload/rollout.go | 2 +- test/e2e/annotations/e2e_test.go | 1118 ----------------- test/e2e/envvars/e2e_test.go | 474 ------- 16 files changed, 171 insertions(+), 1684 deletions(-) delete mode 100644 test/e2e/annotations/e2e_test.go delete mode 100644 test/e2e/envvars/e2e_test.go diff --git a/.gitignore b/.gitignore index defc67d2d..5beaa6281 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,5 @@ styles/ site/ /mkdocs.yml yq -bin \ No newline at end of file +bin +*.test diff --git a/cmd/reloader/main.go b/cmd/reloader/main.go index b603df654..83b78c4dc 100644 --- a/cmd/reloader/main.go +++ b/cmd/reloader/main.go @@ -80,8 +80,10 @@ func run(cmd *cobra.Command, args []string) error { log.Info("Starting Reloader") - if ns := os.Getenv("KUBERNETES_NAMESPACE"); ns == "" 
{ - log.Info("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces") + if cfg.WatchedNamespace != "" { + log.Info("watching single namespace", "namespace", cfg.WatchedNamespace) + } else { + log.Info("watching all namespaces") } if len(cfg.NamespaceSelectors) > 0 { @@ -133,9 +135,14 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("setting up reconcilers: %w", err) } - if err := mgr.Add(metadata.Runnable(mgr.GetClient(), cfg, log)); err != nil { - log.Error(err, "Failed to add metadata publisher") - // Non-fatal, continue starting + // Skip metadata publisher when ConfigMaps are ignored (no RBAC permissions) + if !cfg.IsResourceIgnored("configmaps") { + if err := mgr.Add(metadata.Runnable(mgr.GetClient(), cfg, log)); err != nil { + log.Error(err, "Failed to add metadata publisher") + // Non-fatal, continue starting + } + } else { + log.Info("skipping metadata publisher (configmaps ignored)") } if cfg.EnablePProf { diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index ead11abcd..c33b78adc 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -121,7 +121,7 @@ func NewDefault() *Config { LogFormat: "", LogLevel: "info", MetricsAddr: ":9090", - HealthAddr: ":8081", + HealthAddr: ":8080", EnablePProf: false, PProfAddr: ":6060", Alerting: AlertingConfig{}, diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 5ec7f7584..f117ad609 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -48,8 +48,8 @@ func TestNewDefault(t *testing.T) { t.Errorf("MetricsAddr = %q, want %q", cfg.MetricsAddr, ":9090") } - if cfg.HealthAddr != ":8081" { - t.Errorf("HealthAddr = %q, want %q", cfg.HealthAddr, ":8081") + if cfg.HealthAddr != ":8080" { + t.Errorf("HealthAddr = %q, want %q", cfg.HealthAddr, ":8080") } if cfg.PProfAddr != ":6060" { diff --git a/internal/pkg/config/flags.go b/internal/pkg/config/flags.go index 
9c2556ddb..195b84efa 100644 --- a/internal/pkg/config/flags.go +++ b/internal/pkg/config/flags.go @@ -1,11 +1,13 @@ package config import ( + "fmt" "strings" "time" "github.com/spf13/pflag" "github.com/spf13/viper" + "k8s.io/apimachinery/pkg/labels" ) // v is the viper instance for configuration. @@ -112,13 +114,13 @@ func BindFlags(fs *pflag.FlagSet, cfg *Config) { ) // Filtering - selectors - fs.String( - "namespace-selector", "", - "Comma-separated list of namespace label selectors", + fs.StringSlice( + "namespace-selector", nil, + "Namespace label selectors (can be specified multiple times)", ) - fs.String( - "resource-label-selector", "", - "Comma-separated list of resource label selectors", + fs.StringSlice( + "resource-label-selector", nil, + "Resource label selectors (can be specified multiple times)", ) // Logging @@ -262,6 +264,9 @@ func ApplyFlags(cfg *Config) error { cfg.HealthAddr = v.GetString("health-addr") cfg.PProfAddr = v.GetString("pprof-addr") cfg.WatchedNamespace = v.GetString("watch-namespace") + if cfg.WatchedNamespace == "" { + cfg.WatchedNamespace = v.GetString("KUBERNETES_NAMESPACE") + } // Leader election cfg.LeaderElection.LockName = v.GetString("leader-election-id") @@ -300,19 +305,32 @@ func ApplyFlags(cfg *Config) error { cfg.IgnoredWorkloads = splitAndTrim(v.GetString("ignored-workload-types")) cfg.IgnoredNamespaces = splitAndTrim(v.GetString("namespaces-to-ignore")) - // Store raw selector strings - cfg.NamespaceSelectorStrings = splitAndTrim(v.GetString("namespace-selector")) - cfg.ResourceSelectorStrings = splitAndTrim(v.GetString("resource-label-selector")) + // Get selector slices and join with comma + nsSelectors := v.GetStringSlice("namespace-selector") + resSelectors := v.GetStringSlice("resource-label-selector") + + if len(nsSelectors) > 0 { + cfg.NamespaceSelectorStrings = nsSelectors + } + if len(resSelectors) > 0 { + cfg.ResourceSelectorStrings = resSelectors + } - // Parse selectors into labels.Selector - var err error - 
cfg.NamespaceSelectors, err = ParseSelectors(cfg.NamespaceSelectorStrings) - if err != nil { - return err + if len(nsSelectors) > 0 { + joinedNS := strings.Join(nsSelectors, ",") + selector, err := labels.Parse(joinedNS) + if err != nil { + return fmt.Errorf("invalid selector %q: %w", joinedNS, err) + } + cfg.NamespaceSelectors = []labels.Selector{selector} } - cfg.ResourceSelectors, err = ParseSelectors(cfg.ResourceSelectorStrings) - if err != nil { - return err + if len(resSelectors) > 0 { + joinedRes := strings.Join(resSelectors, ",") + selector, err := labels.Parse(joinedRes) + if err != nil { + return fmt.Errorf("invalid selector %q: %w", joinedRes, err) + } + cfg.ResourceSelectors = []labels.Selector{selector} } // Ensure duration defaults are preserved if not set diff --git a/internal/pkg/config/flags_test.go b/internal/pkg/config/flags_test.go index 0bdb86083..6a5b3fb26 100644 --- a/internal/pkg/config/flags_test.go +++ b/internal/pkg/config/flags_test.go @@ -249,8 +249,8 @@ func TestApplyFlags_Selectors(t *testing.T) { t.Fatalf("ApplyFlags() error = %v", err) } - if len(cfg.NamespaceSelectors) != 2 { - t.Errorf("NamespaceSelectors length = %d, want 2", len(cfg.NamespaceSelectors)) + if len(cfg.NamespaceSelectors) != 1 { + t.Errorf("NamespaceSelectors length = %d, want 1", len(cfg.NamespaceSelectors)) } if len(cfg.ResourceSelectors) != 1 { diff --git a/internal/pkg/config/validation.go b/internal/pkg/config/validation.go index 7d559fc6d..3b89d2924 100644 --- a/internal/pkg/config/validation.go +++ b/internal/pkg/config/validation.go @@ -98,17 +98,23 @@ func (c *Config) Validate() error { c.IgnoredResources = normalizeToLower(c.IgnoredResources) + // Normalize ignored workloads to canonical Kind values (e.g., "cronjobs" -> "CronJob") c.IgnoredWorkloads = normalizeToLower(c.IgnoredWorkloads) + normalizedWorkloads := make([]string, 0, len(c.IgnoredWorkloads)) for _, w := range c.IgnoredWorkloads { - if _, err := workload.KindFromString(w); err != nil { + kind, 
err := workload.KindFromString(w) + if err != nil { errs = append( errs, ValidationError{ Field: "IgnoredWorkloads", Message: fmt.Sprintf("unknown workload type %q", w), }, ) + } else { + normalizedWorkloads = append(normalizedWorkloads, string(kind)) } } + c.IgnoredWorkloads = normalizedWorkloads if len(errs) > 0 { return errs diff --git a/internal/pkg/config/validation_test.go b/internal/pkg/config/validation_test.go index 45eafb732..ae495276a 100644 --- a/internal/pkg/config/validation_test.go +++ b/internal/pkg/config/validation_test.go @@ -166,7 +166,8 @@ func TestConfig_Validate_NormalizesIgnoredWorkloads(t *testing.T) { t.Fatalf("Validate() error = %v", err) } - expected := []string{"jobs", "cronjobs"} + // Should be normalized to canonical Kind values (e.g., "CronJob" not "cronjobs") + expected := []string{"Job", "CronJob"} if len(cfg.IgnoredWorkloads) != len(expected) { t.Fatalf("IgnoredWorkloads length = %d, want %d", len(cfg.IgnoredWorkloads), len(expected)) } diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index bfa40622b..8aa19d5e9 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -31,19 +31,21 @@ func NewConfigMapReconciler( webhookClient *webhook.Client, alerter alerting.Alerter, pauseHandler *reload.PauseHandler, + nsCache *NamespaceCache, ) *ConfigMapReconciler { return NewResourceReconciler( ResourceReconcilerDeps{ - Client: c, - Log: log, - Config: cfg, - ReloadService: reloadService, - Registry: registry, - Collectors: collectors, - EventRecorder: eventRecorder, - WebhookClient: webhookClient, - Alerter: alerter, - PauseHandler: pauseHandler, + Client: c, + Log: log, + Config: cfg, + ReloadService: reloadService, + Registry: registry, + Collectors: collectors, + EventRecorder: eventRecorder, + WebhookClient: webhookClient, + Alerter: alerter, + PauseHandler: pauseHandler, + NamespaceCache: nsCache, }, 
ResourceConfig[*corev1.ConfigMap]{ ResourceType: reload.ResourceTypeConfigMap, diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go index 6994c8811..785da3dd8 100644 --- a/internal/pkg/controller/manager.go +++ b/internal/pkg/controller/manager.go @@ -19,6 +19,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/healthz" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) @@ -73,6 +74,15 @@ func NewManager(opts ManagerOptions) (ctrl.Manager, error) { RetryPeriod: &le.RetryPeriod, } + if cfg.WatchedNamespace != "" { + mgrOpts.Cache = cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + cfg.WatchedNamespace: {}, + }, + } + opts.Log.Info("namespace filtering enabled", "namespace", cfg.WatchedNamespace) + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), mgrOpts) if err != nil { return nil, fmt.Errorf("creating manager: %w", err) @@ -115,6 +125,14 @@ func NewManagerWithRestConfig(opts ManagerOptions, restConfig *rest.Config) (ctr RetryPeriod: &le.RetryPeriod, } + if cfg.WatchedNamespace != "" { + mgrOpts.Cache = cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + cfg.WatchedNamespace: {}, + }, + } + } + mgr, err := ctrl.NewManager(restConfig, mgrOpts) if err != nil { return nil, fmt.Errorf("creating manager: %w", err) @@ -149,6 +167,22 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col log.Info("webhook mode enabled", "url", cfg.WebhookURL) } + // Create namespace cache if namespace selectors are configured. + // This cache is shared between the namespace reconciler and resource reconcilers. 
+ var nsCache *NamespaceCache + if len(cfg.NamespaceSelectors) > 0 { + nsCache = NewNamespaceCache(true) + if err := (&NamespaceReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("namespace-reconciler"), + Config: cfg, + Cache: nsCache, + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("setting up namespace reconciler: %w", err) + } + log.Info("namespace reconciler enabled for label selector filtering") + } + // Setup ConfigMap reconciler if !cfg.IsResourceIgnored("configmaps") { cmReconciler := NewConfigMapReconciler( @@ -162,6 +196,7 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col webhookClient, alerter, pauseHandler, + nsCache, ) if err := SetupConfigMapReconciler(mgr, cmReconciler); err != nil { return fmt.Errorf("setting up configmap reconciler: %w", err) @@ -181,26 +216,13 @@ func SetupReconcilers(mgr ctrl.Manager, cfg *config.Config, log logr.Logger, col webhookClient, alerter, pauseHandler, + nsCache, ) if err := SetupSecretReconciler(mgr, secretReconciler); err != nil { return fmt.Errorf("setting up secret reconciler: %w", err) } } - // Setup Namespace reconciler if namespace selectors are configured - if len(cfg.NamespaceSelectors) > 0 { - nsCache := NewNamespaceCache(true) - if err := (&NamespaceReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("namespace-reconciler"), - Config: cfg, - Cache: nsCache, - }).SetupWithManager(mgr); err != nil { - return fmt.Errorf("setting up namespace reconciler: %w", err) - } - log.Info("namespace reconciler enabled for label selector filtering") - } - // Setup Deployment reconciler for pause handling if err := (&DeploymentReconciler{ Client: mgr.GetClient(), diff --git a/internal/pkg/controller/resource_reconciler.go b/internal/pkg/controller/resource_reconciler.go index 0bd694dc2..6dd42c1f2 100644 --- a/internal/pkg/controller/resource_reconciler.go +++ b/internal/pkg/controller/resource_reconciler.go @@ -21,16 +21,17 @@ import ( // ResourceReconcilerDeps 
holds shared dependencies for resource reconcilers. type ResourceReconcilerDeps struct { - Client client.Client - Log logr.Logger - Config *config.Config - ReloadService *reload.Service - Registry *workload.Registry - Collectors *metrics.Collectors - EventRecorder *events.Recorder - WebhookClient *webhook.Client - Alerter alerting.Alerter - PauseHandler *reload.PauseHandler + Client client.Client + Log logr.Logger + Config *config.Config + ReloadService *reload.Service + Registry *workload.Registry + Collectors *metrics.Collectors + EventRecorder *events.Recorder + WebhookClient *webhook.Client + Alerter alerting.Alerter + PauseHandler *reload.PauseHandler + NamespaceCache *NamespaceCache } // ResourceConfig provides type-specific configuration for a resource reconciler. @@ -75,10 +76,12 @@ func (r *ResourceReconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) resourceType := string(r.ResourceType) log := r.Log.WithValues(resourceType, req.NamespacedName) - r.initOnce.Do(func() { - r.initialized = true - log.Info(resourceType + " controller initialized") - }) + r.initOnce.Do( + func() { + r.initialized = true + log.Info(resourceType + " controller initialized") + }, + ) r.Collectors.RecordEventReceived("reconcile", resourceType) @@ -101,10 +104,19 @@ func (r *ResourceReconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } - result, err := r.reloadHandler().Process(ctx, req.Namespace, req.Name, r.ResourceType, + if r.NamespaceCache != nil && r.NamespaceCache.IsEnabled() && !r.NamespaceCache.Contains(namespace) { + log.V(1).Info("skipping "+resourceType+" in namespace not matching selector", "namespace", namespace) + r.Collectors.RecordSkipped("namespace_selector") + r.Collectors.RecordReconcile("success", time.Since(startTime)) + return ctrl.Result{}, nil + } + + result, err := r.reloadHandler().Process( + ctx, req.Namespace, req.Name, r.ResourceType, func(workloads []workload.Workload) []reload.ReloadDecision { return 
r.ReloadService.Process(r.CreateChange(resource, reload.EventTypeUpdate), workloads) - }, log) + }, log, + ) r.recordReconcile(startTime, err) return result, err @@ -139,10 +151,12 @@ func (r *ResourceReconciler[T]) handleDelete( resource.SetName(req.Name) resource.SetNamespace(req.Namespace) - return r.reloadHandler().Process(ctx, req.Namespace, req.Name, r.ResourceType, + return r.reloadHandler().Process( + ctx, req.Namespace, req.Name, r.ResourceType, func(workloads []workload.Workload) []reload.ReloadDecision { return r.ReloadService.Process(r.CreateChange(resource, reload.EventTypeDelete), workloads) - }, log) + }, log, + ) } func (r *ResourceReconciler[T]) recordReconcile(startTime time.Time, err error) { @@ -178,9 +192,11 @@ func (r *ResourceReconciler[T]) Initialized() *bool { func (r *ResourceReconciler[T]) SetupWithManager(mgr ctrl.Manager, forObject T) error { return ctrl.NewControllerManagedBy(mgr). For(forObject). - WithEventFilter(BuildEventFilter( - r.CreatePredicates(r.Config, r.ReloadService.Hasher()), - r.Config, r.Initialized(), - )). + WithEventFilter( + BuildEventFilter( + r.CreatePredicates(r.Config, r.ReloadService.Hasher()), + r.Config, r.Initialized(), + ), + ). 
Complete(r) } diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index f20f25a4a..ddc8f328b 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ b/internal/pkg/controller/secret_reconciler.go @@ -31,19 +31,21 @@ func NewSecretReconciler( webhookClient *webhook.Client, alerter alerting.Alerter, pauseHandler *reload.PauseHandler, + nsCache *NamespaceCache, ) *SecretReconciler { return NewResourceReconciler( ResourceReconcilerDeps{ - Client: c, - Log: log, - Config: cfg, - ReloadService: reloadService, - Registry: registry, - Collectors: collectors, - EventRecorder: eventRecorder, - WebhookClient: webhookClient, - Alerter: alerter, - PauseHandler: pauseHandler, + Client: c, + Log: log, + Config: cfg, + ReloadService: reloadService, + Registry: registry, + Collectors: collectors, + EventRecorder: eventRecorder, + WebhookClient: webhookClient, + Alerter: alerter, + PauseHandler: pauseHandler, + NamespaceCache: nsCache, }, ResourceConfig[*corev1.Secret]{ ResourceType: reload.ResourceTypeSecret, diff --git a/internal/pkg/controller/test_helpers_test.go b/internal/pkg/controller/test_helpers_test.go index 019be7898..c34f34329 100644 --- a/internal/pkg/controller/test_helpers_test.go +++ b/internal/pkg/controller/test_helpers_test.go @@ -47,11 +47,13 @@ func newTestDeps(t *testing.T, cfg *config.Config, objects ...runtime.Object) te log: log, cfg: cfg, reloadService: reload.NewService(cfg, log), - registry: workload.NewRegistry(workload.RegistryOptions{ - ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, - DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, - RolloutStrategyAnnotation: cfg.Annotations.RolloutStrategy, - }), + registry: workload.NewRegistry( + workload.RegistryOptions{ + ArgoRolloutsEnabled: cfg.ArgoRolloutsEnabled, + DeploymentConfigEnabled: cfg.DeploymentConfigEnabled, + RolloutStrategyAnnotation: cfg.Annotations.RolloutStrategy, + }, + ), collectors: &collectors, eventRecorder: 
events.NewRecorder(nil), webhookClient: webhook.NewClient("", log), @@ -74,6 +76,7 @@ func newConfigMapReconciler(t *testing.T, cfg *config.Config, objects ...runtime deps.webhookClient, deps.alerter, nil, + nil, ) } @@ -92,6 +95,7 @@ func newSecretReconciler(t *testing.T, cfg *config.Config, objects ...runtime.Ob deps.webhookClient, deps.alerter, nil, + nil, ) } diff --git a/internal/pkg/workload/rollout.go b/internal/pkg/workload/rollout.go index 39d70fc73..ad8d8989f 100644 --- a/internal/pkg/workload/rollout.go +++ b/internal/pkg/workload/rollout.go @@ -66,7 +66,7 @@ func (w *RolloutWorkload) Update(ctx context.Context, c client.Client) error { restartAt := metav1.NewTime(time.Now()) w.Object().Spec.RestartAt = &restartAt } - return c.Patch(ctx, w.Object(), client.StrategicMergeFrom(w.Original()), client.FieldOwner(FieldManager)) + return c.Patch(ctx, w.Object(), client.MergeFrom(w.Original()), client.FieldOwner(FieldManager)) } // getStrategy returns the rollout strategy from the annotation. diff --git a/test/e2e/annotations/e2e_test.go b/test/e2e/annotations/e2e_test.go deleted file mode 100644 index cdb88b6c2..000000000 --- a/test/e2e/annotations/e2e_test.go +++ /dev/null @@ -1,1118 +0,0 @@ -// Package annotations contains end-to-end tests for Reloader's Annotations Reload Strategy. 
-package annotations - -import ( - "context" - "flag" - "log" - "os" - "testing" - "time" - - "github.com/go-logr/zerologr" - openshiftclient "github.com/openshift/client-go/apps/clientset/versioned" - "github.com/rs/zerolog" - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/openshift" - "github.com/stakater/Reloader/internal/pkg/testutil" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/discovery" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - ctrl "sigs.k8s.io/controller-runtime" - ctrllog "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -const ( - testNamespacePrefix = "test-reloader-e2e-" - waitTimeout = 30 * time.Second - setupDelay = 2 * time.Second - negativeTestTimeout = 5 * time.Second -) - -var ( - k8sClient kubernetes.Interface - osClient openshiftclient.Interface - cfg *config.Config - namespace string - skipE2ETests bool - skipDeploymentConfigTests bool - cancelManager context.CancelFunc - restCfg *rest.Config -) - -// testFixture provides a clean way to set up and tear down test resources. -type testFixture struct { - t *testing.T - name string - configMaps []string - secrets []string - workloads []workloadInfo -} - -type workloadInfo struct { - name string - kind string // "deployment", "daemonset", "statefulset", "cronjob" -} - -// newFixture creates a new test fixture with a unique name prefix. -func newFixture(t *testing.T, prefix string) *testFixture { - t.Helper() - skipIfNoCluster(t) - return &testFixture{ - t: t, - name: prefix + "-" + testutil.RandSeq(5), - } -} - -// createConfigMap creates a ConfigMap and registers it for cleanup. 
-func (f *testFixture) createConfigMap(name, data string) { - f.t.Helper() - _, err := testutil.CreateConfigMap(k8sClient, namespace, name, data) - if err != nil { - f.t.Fatalf("Failed to create ConfigMap %s: %v", name, err) - } - f.configMaps = append(f.configMaps, name) -} - -// createSecret creates a Secret and registers it for cleanup. -func (f *testFixture) createSecret(name, data string) { - f.t.Helper() - _, err := testutil.CreateSecret(k8sClient, namespace, name, data) - if err != nil { - f.t.Fatalf("Failed to create Secret %s: %v", name, err) - } - f.secrets = append(f.secrets, name) -} - -// createDeployment creates a Deployment and registers it for cleanup. -func (f *testFixture) createDeployment(name string, useConfigMap bool, annotations map[string]string) { - f.t.Helper() - _, err := testutil.CreateDeployment(k8sClient, name, namespace, useConfigMap, annotations) - if err != nil { - f.t.Fatalf("Failed to create Deployment %s: %v", name, err) - } - f.workloads = append(f.workloads, workloadInfo{name: name, kind: "deployment"}) -} - -// createDaemonSet creates a DaemonSet and registers it for cleanup. -func (f *testFixture) createDaemonSet(name string, useConfigMap bool, annotations map[string]string) { - f.t.Helper() - _, err := testutil.CreateDaemonSet(k8sClient, name, namespace, useConfigMap, annotations) - if err != nil { - f.t.Fatalf("Failed to create DaemonSet %s: %v", name, err) - } - f.workloads = append(f.workloads, workloadInfo{name: name, kind: "daemonset"}) -} - -// createStatefulSet creates a StatefulSet and registers it for cleanup. 
-func (f *testFixture) createStatefulSet(name string, useConfigMap bool, annotations map[string]string) { - f.t.Helper() - _, err := testutil.CreateStatefulSet(k8sClient, name, namespace, useConfigMap, annotations) - if err != nil { - f.t.Fatalf("Failed to create StatefulSet %s: %v", name, err) - } - f.workloads = append(f.workloads, workloadInfo{name: name, kind: "statefulset"}) -} - -// waitForReady waits for all workloads to be ready. -func (f *testFixture) waitForReady() { - time.Sleep(setupDelay) -} - -// updateConfigMap updates a ConfigMap's data. -func (f *testFixture) updateConfigMap(name, data string) { - f.t.Helper() - if err := testutil.UpdateConfigMapWithClient(k8sClient, namespace, name, "", data); err != nil { - f.t.Fatalf("Failed to update ConfigMap %s: %v", name, err) - } -} - -// updateConfigMapLabel updates only a ConfigMap's label (not data). -func (f *testFixture) updateConfigMapLabel(name, label string) { - f.t.Helper() - // Get current data first - cm, err := k8sClient.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - f.t.Fatalf("Failed to get ConfigMap %s: %v", name, err) - } - data := cm.Data["url"] - if err := testutil.UpdateConfigMapWithClient(k8sClient, namespace, name, label, data); err != nil { - f.t.Fatalf("Failed to update ConfigMap label %s: %v", name, err) - } -} - -// updateSecret updates a Secret's data. -func (f *testFixture) updateSecret(name, data string) { - f.t.Helper() - if err := testutil.UpdateSecretWithClient(k8sClient, namespace, name, "", data); err != nil { - f.t.Fatalf("Failed to update Secret %s: %v", name, err) - } -} - -// updateSecretLabel updates only a Secret's label (not data). 
-func (f *testFixture) updateSecretLabel(name, label string) { - f.t.Helper() - secret, err := k8sClient.CoreV1().Secrets(namespace).Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - f.t.Fatalf("Failed to get Secret %s: %v", name, err) - } - var data string - if secret.Data != nil { - if d, ok := secret.Data["test"]; ok { - data = string(d) - } - } - if err := testutil.UpdateSecretWithClient(k8sClient, namespace, name, label, data); err != nil { - f.t.Fatalf("Failed to update Secret label %s: %v", name, err) - } -} - -// assertDeploymentReloaded asserts that a deployment was reloaded. -func (f *testFixture) assertDeploymentReloaded(name string, testCfg *config.Config) { - f.t.Helper() - if testCfg == nil { - testCfg = cfg - } - updated, err := testutil.WaitForDeploymentReloadedAnnotation(k8sClient, namespace, name, testCfg.Annotations.LastReloadedFrom, waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for deployment %s update: %v", name, err) - } - if !updated { - f.t.Errorf("Deployment %s was not updated after resource change", name) - } -} - -// assertDeploymentNotReloaded asserts that a deployment was NOT reloaded. -func (f *testFixture) assertDeploymentNotReloaded(name string, testCfg *config.Config) { - f.t.Helper() - if testCfg == nil { - testCfg = cfg - } - time.Sleep(negativeTestTimeout) - updated, _ := testutil.WaitForDeploymentReloadedAnnotation(k8sClient, namespace, name, testCfg.Annotations.LastReloadedFrom, negativeTestTimeout) - if updated { - f.t.Errorf("Deployment %s should not have been updated", name) - } -} - -// assertDaemonSetReloaded asserts that a daemonset was reloaded. 
-func (f *testFixture) assertDaemonSetReloaded(name string) { - f.t.Helper() - updated, err := testutil.WaitForDaemonSetReloadedAnnotation(k8sClient, namespace, name, cfg.Annotations.LastReloadedFrom, waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for daemonset %s update: %v", name, err) - } - if !updated { - f.t.Errorf("DaemonSet %s was not updated after resource change", name) - } -} - -// assertDaemonSetNotReloaded asserts that a daemonset was NOT reloaded. -func (f *testFixture) assertDaemonSetNotReloaded(name string) { - f.t.Helper() - time.Sleep(negativeTestTimeout) - updated, _ := testutil.WaitForDaemonSetReloadedAnnotation(k8sClient, namespace, name, cfg.Annotations.LastReloadedFrom, negativeTestTimeout) - if updated { - f.t.Errorf("DaemonSet %s should not have been updated", name) - } -} - -// assertStatefulSetReloaded asserts that a statefulset was reloaded. -func (f *testFixture) assertStatefulSetReloaded(name string) { - f.t.Helper() - updated, err := testutil.WaitForStatefulSetReloadedAnnotation(k8sClient, namespace, name, cfg.Annotations.LastReloadedFrom, waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for statefulset %s update: %v", name, err) - } - if !updated { - f.t.Errorf("StatefulSet %s was not updated after resource change", name) - } -} - -// assertStatefulSetNotReloaded asserts that a statefulset was NOT reloaded. -func (f *testFixture) assertStatefulSetNotReloaded(name string) { - f.t.Helper() - time.Sleep(negativeTestTimeout) - updated, _ := testutil.WaitForStatefulSetReloadedAnnotation(k8sClient, namespace, name, cfg.Annotations.LastReloadedFrom, negativeTestTimeout) - if updated { - f.t.Errorf("StatefulSet %s should not have been updated", name) - } -} - -// createDeploymentConfig creates a DeploymentConfig and registers it for cleanup. 
-func (f *testFixture) createDeploymentConfig(name string, useConfigMap bool, annotations map[string]string) { - f.t.Helper() - _, err := testutil.CreateDeploymentConfig(osClient, name, namespace, useConfigMap, annotations) - if err != nil { - f.t.Fatalf("Failed to create DeploymentConfig %s: %v", name, err) - } - f.workloads = append(f.workloads, workloadInfo{name: name, kind: "deploymentconfig"}) -} - -// assertDeploymentConfigReloaded asserts that a DeploymentConfig was reloaded. -func (f *testFixture) assertDeploymentConfigReloaded(name string) { - f.t.Helper() - updated, err := testutil.WaitForDeploymentConfigReloadedAnnotation(osClient, namespace, name, cfg.Annotations.LastReloadedFrom, waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for DeploymentConfig %s update: %v", name, err) - } - if !updated { - f.t.Errorf("DeploymentConfig %s was not updated after resource change", name) - } -} - -// assertDeploymentPaused asserts that a deployment is paused (spec.Paused=true). -func (f *testFixture) assertDeploymentPaused(name string) { - f.t.Helper() - paused, err := testutil.WaitForDeploymentPaused(k8sClient, namespace, name, waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for deployment %s to be paused: %v", name, err) - } - if !paused { - f.t.Errorf("Deployment %s was not paused after reload", name) - } -} - -// assertDeploymentUnpaused asserts that a deployment is unpaused (spec.Paused=false). -func (f *testFixture) assertDeploymentUnpaused(name string, timeout time.Duration) { - f.t.Helper() - unpaused, err := testutil.WaitForDeploymentUnpaused(k8sClient, namespace, name, timeout) - if err != nil { - f.t.Fatalf("Error waiting for deployment %s to be unpaused: %v", name, err) - } - if !unpaused { - f.t.Errorf("Deployment %s was not unpaused after pause period", name) - } -} - -// assertDeploymentHasPausedAtAnnotation asserts that a deployment has the paused-at annotation. 
-func (f *testFixture) assertDeploymentHasPausedAtAnnotation(name string) { - f.t.Helper() - deploy, err := k8sClient.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - f.t.Fatalf("Failed to get deployment %s: %v", name, err) - } - if deploy.Annotations == nil { - f.t.Errorf("Deployment %s has no annotations", name) - return - } - if _, ok := deploy.Annotations[cfg.Annotations.PausedAt]; !ok { - f.t.Errorf("Deployment %s does not have paused-at annotation", name) - } -} - -// cleanup removes all created resources. -func (f *testFixture) cleanup() { - for _, w := range f.workloads { - switch w.kind { - case "deployment": - _ = testutil.DeleteDeployment(k8sClient, namespace, w.name) - case "daemonset": - _ = testutil.DeleteDaemonSet(k8sClient, namespace, w.name) - case "statefulset": - _ = testutil.DeleteStatefulSet(k8sClient, namespace, w.name) - case "deploymentconfig": - if osClient != nil { - _ = testutil.DeleteDeploymentConfig(osClient, namespace, w.name) - } - case "cronjob": - _ = testutil.DeleteCronJob(k8sClient, namespace, w.name) - } - } - for _, name := range f.configMaps { - _ = testutil.DeleteConfigMap(k8sClient, namespace, name) - } - for _, name := range f.secrets { - _ = testutil.DeleteSecret(k8sClient, namespace, name) - } -} - -func TestMain(m *testing.M) { - flag.Parse() - - if testing.Short() { - os.Exit(0) - } - - zl := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}). - Level(zerolog.WarnLevel). - With(). - Timestamp(). 
- Logger() - ctrllog.SetLogger(zerologr.New(&zl)) - - kubeconfig := os.Getenv("KUBECONFIG") - if kubeconfig == "" { - kubeconfig = os.Getenv("HOME") + "/.kube/config" - } - - var err error - restCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - skipE2ETests = true - os.Exit(0) - } - - k8sClient, err = kubernetes.NewForConfig(restCfg) - if err != nil { - skipE2ETests = true - os.Exit(0) - } - - if _, err = k8sClient.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}); err != nil { - skipE2ETests = true - os.Exit(0) - } - - namespace = testNamespacePrefix + testutil.RandSeq(5) - if err := testutil.CreateNamespace(namespace, k8sClient); err != nil { - panic(err) - } - - cfg = config.NewDefault() - cfg.AutoReloadAll = false - - discoveryClient, err := discovery.NewDiscoveryClientForConfig(restCfg) - if err != nil { - skipDeploymentConfigTests = true - } else { - nopLog := ctrl.Log.WithName("dc-detection") - if openshift.HasDeploymentConfigSupport(discoveryClient, nopLog) { - cfg.DeploymentConfigEnabled = true - osClient, err = testutil.NewOpenshiftClient(restCfg) - if err != nil { - skipDeploymentConfigTests = true - } - } else { - skipDeploymentConfigTests = true - } - } - - _, cancelManager = startManagerWithConfig(cfg, restCfg) - - code := m.Run() - - if cancelManager != nil { - cancelManager() - time.Sleep(2 * time.Second) - } - - _ = testutil.DeleteNamespace(namespace, k8sClient) - os.Exit(code) -} - -func skipIfNoCluster(t *testing.T) { - if skipE2ETests { - t.Skip("Skipping e2e test: no Kubernetes cluster available") - } -} - -func skipIfNoDeploymentConfig(t *testing.T) { - skipIfNoCluster(t) - if skipDeploymentConfigTests { - t.Skip("Skipping DeploymentConfig test: cluster does not support DeploymentConfig API") - } -} - -// TestConfigMapUpdate tests that updating a ConfigMap triggers a workload reload. 
-func TestConfigMapUpdate(t *testing.T) { - f := newFixture(t, "cm-update") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestSecretUpdate tests that updating a Secret triggers a workload reload. -func TestSecretUpdate(t *testing.T) { - f := newFixture(t, "secret-update") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createDeployment( - f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestAutoReloadAll tests the auto-reload-all feature. -func TestAutoReloadAll(t *testing.T) { - f := newFixture(t, "auto-reload") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.Auto: "true", - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestDaemonSetReload tests that DaemonSets are reloaded when ConfigMaps change. -func TestDaemonSetReload(t *testing.T) { - f := newFixture(t, "ds-reload") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDaemonSet( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDaemonSetReloaded(f.name) -} - -// TestStatefulSetReload tests that StatefulSets are reloaded when Secrets change. 
-func TestStatefulSetReload(t *testing.T) { - f := newFixture(t, "sts-reload") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createStatefulSet( - f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret") - f.assertStatefulSetReloaded(f.name) -} - -// TestLabelOnlyChange tests that label-only changes don't trigger reloads. -func TestLabelOnlyChange(t *testing.T) { - f := newFixture(t, "label-only") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMapLabel(f.name, "new-label") - f.assertDeploymentNotReloaded(f.name, nil) -} - -// TestMultipleConfigMaps tests watching multiple ConfigMaps in a single annotation. -func TestMultipleConfigMaps(t *testing.T) { - f := newFixture(t, "multi-cm") - defer f.cleanup() - - cm1 := f.name + "-a" - cm2 := f.name + "-b" - - f.createConfigMap(cm1, "data-a") - f.createConfigMap(cm2, "data-b") - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: cm1 + "," + cm2, - }, - ) - f.waitForReady() - - f.updateConfigMap(cm1, "updated-data-a") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestAutoAnnotationDisabled tests that auto: "false" disables auto-reload. 
-func TestAutoAnnotationDisabled(t *testing.T) { - f := newFixture(t, "auto-disabled") - defer f.cleanup() - - testCfg := config.NewDefault() - testCfg.AutoReloadAll = true - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - testCfg.Annotations.Auto: "false", - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentNotReloaded(f.name, testCfg) -} - -// TestAutoWithExplicitConfigMapAnnotation tests that a deployment with auto=true -// also reloads when an explicitly annotated (non-referenced) ConfigMap changes. -func TestAutoWithExplicitConfigMapAnnotation(t *testing.T) { - f := newFixture(t, "auto-explicit-cm") - defer f.cleanup() - - referencedCM := f.name + "-ref" - explicitCM := f.name + "-explicit" - - f.createConfigMap(referencedCM, "referenced-data") - f.createConfigMap(explicitCM, "explicit-data") - f.createDeployment( - referencedCM, true, map[string]string{ - cfg.Annotations.Auto: "true", - cfg.Annotations.ConfigmapReload: explicitCM, - }, - ) - f.waitForReady() - - f.updateConfigMap(explicitCM, "updated-explicit-data") - f.assertDeploymentReloaded(referencedCM, nil) -} - -// TestAutoWithExplicitSecretAnnotation tests that a deployment with auto=true -// also reloads when an explicitly annotated (non-referenced) Secret changes. 
-func TestAutoWithExplicitSecretAnnotation(t *testing.T) { - f := newFixture(t, "auto-explicit-secret") - defer f.cleanup() - - referencedSecret := f.name + "-ref" - explicitSecret := f.name + "-explicit" - - f.createSecret(referencedSecret, "referenced-secret") - f.createSecret(explicitSecret, "explicit-secret") - f.createDeployment( - referencedSecret, false, map[string]string{ - cfg.Annotations.Auto: "true", - cfg.Annotations.SecretReload: explicitSecret, - }, - ) - f.waitForReady() - - f.updateSecret(explicitSecret, "updated-explicit-secret") - f.assertDeploymentReloaded(referencedSecret, nil) -} - -// TestAutoWithBothExplicitAndReferencedChange tests that auto + explicit annotations -// work correctly when the referenced resource changes. -func TestAutoWithBothExplicitAndReferencedChange(t *testing.T) { - f := newFixture(t, "auto-both") - defer f.cleanup() - - referencedCM := f.name + "-ref" - explicitCM := f.name + "-explicit" - - f.createConfigMap(referencedCM, "referenced-data") - f.createConfigMap(explicitCM, "explicit-data") - f.createDeployment( - referencedCM, true, map[string]string{ - cfg.Annotations.Auto: "true", - cfg.Annotations.ConfigmapReload: explicitCM, - }, - ) - f.waitForReady() - - f.updateConfigMap(referencedCM, "updated-referenced-data") - f.assertDeploymentReloaded(referencedCM, nil) -} - -// newFixtureForDeploymentConfig creates a new test fixture for DeploymentConfig tests. -func newFixtureForDeploymentConfig(t *testing.T, prefix string) *testFixture { - t.Helper() - skipIfNoDeploymentConfig(t) - return &testFixture{ - t: t, - name: prefix + "-" + testutil.RandSeq(5), - } -} - -// TestDeploymentConfigReloadConfigMap tests that updating a ConfigMap triggers a DeploymentConfig reload. 
-func TestDeploymentConfigReloadConfigMap(t *testing.T) { - f := newFixtureForDeploymentConfig(t, "dc-cm-reload") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeploymentConfig( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentConfigReloaded(f.name) -} - -// TestDeploymentConfigReloadSecret tests that updating a Secret triggers a DeploymentConfig reload. -func TestDeploymentConfigReloadSecret(t *testing.T) { - f := newFixtureForDeploymentConfig(t, "dc-secret-reload") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createDeploymentConfig( - f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret") - f.assertDeploymentConfigReloaded(f.name) -} - -// TestDeploymentConfigAutoReload tests the auto-reload annotation on DeploymentConfig. -func TestDeploymentConfigAutoReload(t *testing.T) { - f := newFixtureForDeploymentConfig(t, "dc-auto-reload") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeploymentConfig( - f.name, true, map[string]string{ - cfg.Annotations.Auto: "true", - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentConfigReloaded(f.name) -} - -// TestDeploymentPausePeriod tests the pause-period annotation on Deployment. -// It verifies that after a reload, the deployment is paused and then unpaused after the period expires. 
-func TestDeploymentPausePeriod(t *testing.T) { - f := newFixture(t, "pause-period") - defer f.cleanup() - - pausePeriod := "10s" - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - cfg.Annotations.PausePeriod: pausePeriod, - }, - ) - f.waitForReady() - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentReloaded(f.name, nil) - f.assertDeploymentPaused(f.name) - f.assertDeploymentHasPausedAtAnnotation(f.name) - t.Log("Waiting for pause period to expire...") - f.assertDeploymentUnpaused(f.name, 20*time.Second) -} - -// TestDeploymentPausePeriodWithAutoReload tests pause-period with auto reload annotation. -func TestDeploymentPausePeriodWithAutoReload(t *testing.T) { - f := newFixture(t, "pause-auto") - defer f.cleanup() - - pausePeriod := "10s" - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.Auto: "true", - cfg.Annotations.PausePeriod: pausePeriod, - }, - ) - f.waitForReady() - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentReloaded(f.name, nil) - f.assertDeploymentPaused(f.name) - t.Log("Waiting for pause period to expire...") - f.assertDeploymentUnpaused(f.name, 20*time.Second) -} - -// TestDeploymentNoPauseWithoutAnnotation tests that deployments without pause-period are not paused. 
-func TestDeploymentNoPauseWithoutAnnotation(t *testing.T) { - f := newFixture(t, "no-pause") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentReloaded(f.name, nil) - - time.Sleep(3 * time.Second) - deploy, err := k8sClient.AppsV1().Deployments(namespace).Get(context.Background(), f.name, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get deployment: %v", err) - } - if deploy.Spec.Paused { - t.Errorf("Deployment should NOT be paused without pause-period annotation") - } -} - -// TestDaemonSetSecretReload tests that DaemonSets are reloaded when Secrets change. -func TestDaemonSetSecretReload(t *testing.T) { - f := newFixture(t, "ds-secret-reload") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createDaemonSet( - f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret") - f.assertDaemonSetReloaded(f.name) -} - -// TestStatefulSetConfigMapReload tests that StatefulSets are reloaded when ConfigMaps change. -func TestStatefulSetConfigMapReload(t *testing.T) { - f := newFixture(t, "sts-cm-reload") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createStatefulSet( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertStatefulSetReloaded(f.name) -} - -// TestSecretLabelOnlyChange tests that Secret label-only changes don't trigger reloads. 
-func TestSecretLabelOnlyChange(t *testing.T) { - f := newFixture(t, "secret-label-only") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createDeployment( - f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecretLabel(f.name, "new-label") - f.assertDeploymentNotReloaded(f.name, nil) -} - -// TestDaemonSetLabelOnlyChange tests that ConfigMap label-only changes don't trigger DaemonSet reloads. -func TestDaemonSetLabelOnlyChange(t *testing.T) { - f := newFixture(t, "ds-label-only") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDaemonSet( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMapLabel(f.name, "new-label") - f.assertDaemonSetNotReloaded(f.name) -} - -// TestStatefulSetLabelOnlyChange tests that Secret label-only changes don't trigger StatefulSet reloads. -func TestStatefulSetLabelOnlyChange(t *testing.T) { - f := newFixture(t, "sts-label-only") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createStatefulSet( - f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecretLabel(f.name, "new-label") - f.assertStatefulSetNotReloaded(f.name) -} - -// TestMultipleConfigMapUpdates tests that multiple updates to a ConfigMap all trigger reloads correctly. 
-func TestMultipleConfigMapUpdates(t *testing.T) { - f := newFixture(t, "multi-update") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data-1") - f.assertDeploymentReloaded(f.name, nil) - - time.Sleep(2 * time.Second) - - f.updateConfigMap(f.name, "updated-data-2") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestMultipleSecretUpdates tests that multiple updates to a Secret all trigger reloads correctly. -func TestMultipleSecretUpdates(t *testing.T) { - f := newFixture(t, "multi-secret-update") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createDeployment( - f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret-1") - f.assertDeploymentReloaded(f.name, nil) - - time.Sleep(2 * time.Second) - - f.updateSecret(f.name, "updated-secret-2") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestSecretOnlyAuto tests the secret-only auto annotation (secret.reloader.stakater.com/auto). -func TestSecretOnlyAuto(t *testing.T) { - f := newFixture(t, "secret-auto") - defer f.cleanup() - - secretName := f.name + "-secret" - cmName := f.name + "-cm" - - f.createSecret(secretName, "initial-secret") - f.createConfigMap(cmName, "initial-data") - - _, err := testutil.CreateDeploymentWithBoth( - k8sClient, f.name, namespace, cmName, secretName, map[string]string{ - cfg.Annotations.SecretAuto: "true", - }, - ) - if err != nil { - t.Fatalf("Failed to create deployment: %v", err) - } - f.workloads = append(f.workloads, workloadInfo{name: f.name, kind: "deployment"}) - f.waitForReady() - - f.updateSecret(secretName, "updated-secret") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestConfigMapOnlyAuto tests the configmap-only auto annotation (configmap.reloader.stakater.com/auto). 
-func TestConfigMapOnlyAuto(t *testing.T) { - f := newFixture(t, "cm-auto") - defer f.cleanup() - - secretName := f.name + "-secret" - cmName := f.name + "-cm" - - f.createSecret(secretName, "initial-secret") - f.createConfigMap(cmName, "initial-data") - - _, err := testutil.CreateDeploymentWithBoth( - k8sClient, f.name, namespace, cmName, secretName, map[string]string{ - cfg.Annotations.ConfigmapAuto: "true", - }, - ) - if err != nil { - t.Fatalf("Failed to create deployment: %v", err) - } - f.workloads = append(f.workloads, workloadInfo{name: f.name, kind: "deployment"}) - f.waitForReady() - - f.updateConfigMap(cmName, "updated-data") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestSearchMatchAnnotations tests the search + match annotation pattern. -func TestSearchMatchAnnotations(t *testing.T) { - f := newFixture(t, "search-match") - defer f.cleanup() - - cm, err := testutil.CreateConfigMapWithAnnotations( - k8sClient, namespace, f.name, "initial-data", map[string]string{ - cfg.Annotations.Match: "true", - }, - ) - if err != nil { - t.Fatalf("Failed to create ConfigMap: %v", err) - } - f.configMaps = append(f.configMaps, cm.Name) - - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.Search: "true", - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentReloaded(f.name, nil) -} - -// TestSearchWithoutMatch tests that search annotation without match doesn't trigger reload. -func TestSearchWithoutMatch(t *testing.T) { - f := newFixture(t, "search-no-match") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.Search: "true", - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentNotReloaded(f.name, nil) -} - -// TestResourceIgnore tests the ignore annotation on ConfigMap/Secret. 
-func TestResourceIgnore(t *testing.T) { - f := newFixture(t, "ignore") - defer f.cleanup() - - cm, err := testutil.CreateConfigMapWithAnnotations( - k8sClient, namespace, f.name, "initial-data", map[string]string{ - cfg.Annotations.Ignore: "true", - }, - ) - if err != nil { - t.Fatalf("Failed to create ConfigMap: %v", err) - } - f.configMaps = append(f.configMaps, cm.Name) - - f.createDeployment( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentNotReloaded(f.name, nil) -} - -// createCronJob creates a CronJob and registers it for cleanup. -func (f *testFixture) createCronJob(name string, useConfigMap bool, annotations map[string]string) { - f.t.Helper() - _, err := testutil.CreateCronJob(k8sClient, name, namespace, useConfigMap, annotations) - if err != nil { - f.t.Fatalf("Failed to create CronJob %s: %v", name, err) - } - f.workloads = append(f.workloads, workloadInfo{name: name, kind: "cronjob"}) -} - -// assertCronJobTriggeredJob asserts that a CronJob triggered a new Job. -func (f *testFixture) assertCronJobTriggeredJob(name string) { - f.t.Helper() - triggered, err := testutil.WaitForCronJobTriggeredJob(k8sClient, namespace, name, waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for CronJob %s to trigger Job: %v", name, err) - } - if !triggered { - f.t.Errorf("CronJob %s did not trigger a Job after resource change", name) - } -} - -// TestCronJobConfigMapReload tests that updating a ConfigMap triggers a CronJob to create a new Job. 
-func TestCronJobConfigMapReload(t *testing.T) { - f := newFixture(t, "cj-cm") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createCronJob( - f.name, true, map[string]string{ - cfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertCronJobTriggeredJob(f.name) -} - -// TestCronJobSecretReload tests that updating a Secret triggers a CronJob to create a new Job. -func TestCronJobSecretReload(t *testing.T) { - f := newFixture(t, "cj-secret") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createCronJob( - f.name, false, map[string]string{ - cfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret") - f.assertCronJobTriggeredJob(f.name) -} - -// TestCronJobAutoReload tests that CronJob with auto annotation triggers a Job on ConfigMap update. -func TestCronJobAutoReload(t *testing.T) { - f := newFixture(t, "cj-auto") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createCronJob( - f.name, true, map[string]string{ - cfg.Annotations.Auto: "true", - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertCronJobTriggeredJob(f.name) -} - -// startManagerWithConfig creates and starts a controller-runtime manager for e2e testing. 
-func startManagerWithConfig(cfg *config.Config, restConfig *rest.Config) (manager.Manager, context.CancelFunc) { - collectors := metrics.NewCollectors() - mgr, err := controller.NewManagerWithRestConfig( - controller.ManagerOptions{ - Config: cfg, - Log: ctrl.Log.WithName("test-manager"), - Collectors: &collectors, - }, restConfig, - ) - if err != nil { - log.Fatalf("Failed to create manager: %v", err) - } - - if err := controller.SetupReconcilers(mgr, cfg, ctrl.Log.WithName("test-reconcilers"), &collectors); err != nil { - log.Fatalf("Failed to setup reconcilers: %v", err) - } - - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - if err := controller.RunManager(ctx, mgr, ctrl.Log.WithName("test-runner")); err != nil { - log.Printf("Manager exited: %v", err) - } - }() - - time.Sleep(3 * time.Second) - return mgr, cancel -} diff --git a/test/e2e/envvars/e2e_test.go b/test/e2e/envvars/e2e_test.go deleted file mode 100644 index 2c1c7b1c7..000000000 --- a/test/e2e/envvars/e2e_test.go +++ /dev/null @@ -1,474 +0,0 @@ -// Package envvars contains end-to-end tests for Reloader's EnvVars Reload Strategy. 
-package envvars - -import ( - "context" - "flag" - "log" - "os" - "testing" - "time" - - "github.com/go-logr/zerologr" - "github.com/rs/zerolog" - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/testutil" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - ctrl "sigs.k8s.io/controller-runtime" - ctrllog "sigs.k8s.io/controller-runtime/pkg/log" -) - -const ( - testNamespacePrefix = "test-reloader-envvars-" - waitTimeout = 30 * time.Second - setupDelay = 2 * time.Second - negativeTestTimeout = 5 * time.Second - envVarPrefix = "STAKATER_" -) - -var ( - k8sClient kubernetes.Interface - envVarsCfg *config.Config - namespace string - skipE2ETests bool - cancelManager context.CancelFunc - restCfg *rest.Config -) - -// envVarsFixture provides test setup/teardown for EnvVars strategy tests. 
-type envVarsFixture struct { - t *testing.T - name string - configMaps []string - secrets []string - workloads []workloadInfo -} - -type workloadInfo struct { - name string - kind string -} - -func newEnvVarsFixture(t *testing.T, prefix string) *envVarsFixture { - t.Helper() - skipIfNoCluster(t) - return &envVarsFixture{ - t: t, - name: prefix + "-" + testutil.RandSeq(5), - } -} - -func (f *envVarsFixture) createConfigMap(name, data string) { - f.t.Helper() - _, err := testutil.CreateConfigMap(k8sClient, namespace, name, data) - if err != nil { - f.t.Fatalf("Failed to create ConfigMap %s: %v", name, err) - } - f.configMaps = append(f.configMaps, name) -} - -func (f *envVarsFixture) createSecret(name, data string) { - f.t.Helper() - _, err := testutil.CreateSecret(k8sClient, namespace, name, data) - if err != nil { - f.t.Fatalf("Failed to create Secret %s: %v", name, err) - } - f.secrets = append(f.secrets, name) -} - -func (f *envVarsFixture) createDeployment(name string, useConfigMap bool, annotations map[string]string) { - f.t.Helper() - _, err := testutil.CreateDeployment(k8sClient, name, namespace, useConfigMap, annotations) - if err != nil { - f.t.Fatalf("Failed to create Deployment %s: %v", name, err) - } - f.workloads = append(f.workloads, workloadInfo{name: name, kind: "deployment"}) -} - -func (f *envVarsFixture) createDaemonSet(name string, useConfigMap bool, annotations map[string]string) { - f.t.Helper() - _, err := testutil.CreateDaemonSet(k8sClient, name, namespace, useConfigMap, annotations) - if err != nil { - f.t.Fatalf("Failed to create DaemonSet %s: %v", name, err) - } - f.workloads = append(f.workloads, workloadInfo{name: name, kind: "daemonset"}) -} - -func (f *envVarsFixture) createStatefulSet(name string, useConfigMap bool, annotations map[string]string) { - f.t.Helper() - _, err := testutil.CreateStatefulSet(k8sClient, name, namespace, useConfigMap, annotations) - if err != nil { - f.t.Fatalf("Failed to create StatefulSet %s: %v", name, 
err) - } - f.workloads = append(f.workloads, workloadInfo{name: name, kind: "statefulset"}) -} - -func (f *envVarsFixture) waitForReady() { - time.Sleep(setupDelay) -} - -func (f *envVarsFixture) updateConfigMap(name, data string) { - f.t.Helper() - if err := testutil.UpdateConfigMapWithClient(k8sClient, namespace, name, "", data); err != nil { - f.t.Fatalf("Failed to update ConfigMap %s: %v", name, err) - } -} - -func (f *envVarsFixture) updateConfigMapLabel(name, label string) { - f.t.Helper() - cm, err := k8sClient.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - f.t.Fatalf("Failed to get ConfigMap %s: %v", name, err) - } - data := cm.Data["url"] - if err := testutil.UpdateConfigMapWithClient(k8sClient, namespace, name, label, data); err != nil { - f.t.Fatalf("Failed to update ConfigMap label %s: %v", name, err) - } -} - -func (f *envVarsFixture) updateSecret(name, data string) { - f.t.Helper() - if err := testutil.UpdateSecretWithClient(k8sClient, namespace, name, "", data); err != nil { - f.t.Fatalf("Failed to update Secret %s: %v", name, err) - } -} - -func (f *envVarsFixture) assertDeploymentHasEnvVar(name string) { - f.t.Helper() - updated, err := testutil.WaitForDeploymentEnvVar(k8sClient, namespace, name, envVarPrefix, waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for deployment %s env var: %v", name, err) - } - if !updated { - f.t.Errorf("Deployment %s does not have Reloader env var", name) - } -} - -func (f *envVarsFixture) assertDeploymentNoEnvVar(name string) { - f.t.Helper() - time.Sleep(negativeTestTimeout) - updated, _ := testutil.WaitForDeploymentEnvVar(k8sClient, namespace, name, envVarPrefix, negativeTestTimeout) - if updated { - f.t.Errorf("Deployment %s should not have Reloader env var", name) - } -} - -func (f *envVarsFixture) assertDaemonSetHasEnvVar(name string) { - f.t.Helper() - updated, err := testutil.WaitForDaemonSetEnvVar(k8sClient, namespace, name, envVarPrefix, 
waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for daemonset %s env var: %v", name, err) - } - if !updated { - f.t.Errorf("DaemonSet %s does not have Reloader env var", name) - } -} - -func (f *envVarsFixture) assertStatefulSetHasEnvVar(name string) { - f.t.Helper() - updated, err := testutil.WaitForStatefulSetEnvVar(k8sClient, namespace, name, envVarPrefix, waitTimeout) - if err != nil { - f.t.Fatalf("Error waiting for statefulset %s env var: %v", name, err) - } - if !updated { - f.t.Errorf("StatefulSet %s does not have Reloader env var", name) - } -} - -func (f *envVarsFixture) cleanup() { - for _, w := range f.workloads { - switch w.kind { - case "deployment": - _ = testutil.DeleteDeployment(k8sClient, namespace, w.name) - case "daemonset": - _ = testutil.DeleteDaemonSet(k8sClient, namespace, w.name) - case "statefulset": - _ = testutil.DeleteStatefulSet(k8sClient, namespace, w.name) - } - } - for _, name := range f.configMaps { - _ = testutil.DeleteConfigMap(k8sClient, namespace, name) - } - for _, name := range f.secrets { - _ = testutil.DeleteSecret(k8sClient, namespace, name) - } -} - -func TestMain(m *testing.M) { - flag.Parse() - - if testing.Short() { - os.Exit(0) - } - - zl := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}). - Level(zerolog.WarnLevel). - With(). - Timestamp(). 
- Logger() - ctrllog.SetLogger(zerologr.New(&zl)) - - kubeconfig := os.Getenv("KUBECONFIG") - if kubeconfig == "" { - kubeconfig = os.Getenv("HOME") + "/.kube/config" - } - - var err error - restCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - skipE2ETests = true - os.Exit(0) - } - - k8sClient, err = kubernetes.NewForConfig(restCfg) - if err != nil { - skipE2ETests = true - os.Exit(0) - } - - if _, err = k8sClient.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}); err != nil { - skipE2ETests = true - os.Exit(0) - } - - namespace = testNamespacePrefix + testutil.RandSeq(5) - if err := testutil.CreateNamespace(namespace, k8sClient); err != nil { - panic(err) - } - - envVarsCfg = config.NewDefault() - envVarsCfg.ReloadStrategy = config.ReloadStrategyEnvVars - envVarsCfg.AutoReloadAll = false - - collectors := metrics.NewCollectors() - mgr, err := controller.NewManagerWithRestConfig( - controller.ManagerOptions{ - Config: envVarsCfg, - Log: ctrl.Log.WithName("envvars-test-manager"), - Collectors: &collectors, - }, restCfg, - ) - if err != nil { - panic("Failed to create EnvVars manager: " + err.Error()) - } - - if err := controller.SetupReconcilers(mgr, envVarsCfg, ctrl.Log.WithName("envvars-test-reconcilers"), &collectors); err != nil { - panic("Failed to setup EnvVars reconcilers: " + err.Error()) - } - - ctx, cancel := context.WithCancel(context.Background()) - cancelManager = cancel - - go func() { - if err := controller.RunManager(ctx, mgr, ctrl.Log.WithName("envvars-test-runner")); err != nil { - log.Printf("Manager exited: %v", err) - } - }() - - time.Sleep(3 * time.Second) - - code := m.Run() - - if cancelManager != nil { - cancelManager() - time.Sleep(2 * time.Second) - } - - _ = testutil.DeleteNamespace(namespace, k8sClient) - os.Exit(code) -} - -func skipIfNoCluster(t *testing.T) { - if skipE2ETests { - t.Skip("Skipping e2e test: no Kubernetes cluster available") - } -} - -// TestEnvVarsConfigMapUpdate tests 
that updating a ConfigMap triggers env var update in deployment. -func TestEnvVarsConfigMapUpdate(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-cm") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - envVarsCfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentHasEnvVar(f.name) -} - -// TestEnvVarsSecretUpdate tests that updating a Secret triggers env var update in deployment. -func TestEnvVarsSecretUpdate(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-secret") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createDeployment( - f.name, false, map[string]string{ - envVarsCfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret") - f.assertDeploymentHasEnvVar(f.name) -} - -// TestEnvVarsAutoReload tests auto-reload with EnvVars strategy. -func TestEnvVarsAutoReload(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-auto") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - envVarsCfg.Annotations.Auto: "true", - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDeploymentHasEnvVar(f.name) -} - -// TestEnvVarsDaemonSetConfigMap tests that DaemonSets get env var on ConfigMap change. -func TestEnvVarsDaemonSetConfigMap(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-ds-cm") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDaemonSet( - f.name, true, map[string]string{ - envVarsCfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertDaemonSetHasEnvVar(f.name) -} - -// TestEnvVarsDaemonSetSecret tests that DaemonSets get env var on Secret change. 
-func TestEnvVarsDaemonSetSecret(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-ds-secret") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createDaemonSet( - f.name, false, map[string]string{ - envVarsCfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret") - f.assertDaemonSetHasEnvVar(f.name) -} - -// TestEnvVarsStatefulSetConfigMap tests that StatefulSets get env var on ConfigMap change. -func TestEnvVarsStatefulSetConfigMap(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-sts-cm") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createStatefulSet( - f.name, true, map[string]string{ - envVarsCfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data") - f.assertStatefulSetHasEnvVar(f.name) -} - -// TestEnvVarsStatefulSetSecret tests that StatefulSets get env var on Secret change. -func TestEnvVarsStatefulSetSecret(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-sts-secret") - defer f.cleanup() - - f.createSecret(f.name, "initial-secret") - f.createStatefulSet( - f.name, false, map[string]string{ - envVarsCfg.Annotations.SecretReload: f.name, - }, - ) - f.waitForReady() - - f.updateSecret(f.name, "updated-secret") - f.assertStatefulSetHasEnvVar(f.name) -} - -// TestEnvVarsLabelOnlyChange tests that label-only changes don't trigger env var updates. -func TestEnvVarsLabelOnlyChange(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-label") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - envVarsCfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMapLabel(f.name, "new-label") - f.assertDeploymentNoEnvVar(f.name) -} - -// TestEnvVarsMultipleUpdates tests multiple updates with EnvVars strategy. 
-func TestEnvVarsMultipleUpdates(t *testing.T) { - f := newEnvVarsFixture(t, "envvars-multi") - defer f.cleanup() - - f.createConfigMap(f.name, "initial-data") - f.createDeployment( - f.name, true, map[string]string{ - envVarsCfg.Annotations.ConfigmapReload: f.name, - }, - ) - f.waitForReady() - - f.updateConfigMap(f.name, "updated-data-1") - f.assertDeploymentHasEnvVar(f.name) - - deploy1, _ := k8sClient.AppsV1().Deployments(namespace).Get(context.Background(), f.name, metav1.GetOptions{}) - var envValue1 string - for _, container := range deploy1.Spec.Template.Spec.Containers { - for _, env := range container.Env { - if len(env.Name) > len(envVarPrefix) && env.Name[:len(envVarPrefix)] == envVarPrefix { - envValue1 = env.Value - break - } - } - } - - time.Sleep(2 * time.Second) - - f.updateConfigMap(f.name, "updated-data-2") - time.Sleep(5 * time.Second) - - deploy2, _ := k8sClient.AppsV1().Deployments(namespace).Get(context.Background(), f.name, metav1.GetOptions{}) - var envValue2 string - for _, container := range deploy2.Spec.Template.Spec.Containers { - for _, env := range container.Env { - if len(env.Name) > len(envVarPrefix) && env.Name[:len(envVarPrefix)] == envVarPrefix { - envValue2 = env.Value - break - } - } - } - - if envValue1 == envValue2 { - t.Errorf("Env var value should have changed after second update, got same value: %s", envValue1) - } -} From 5f7f3a57bbeced0f64b2cac667e1cd8d0e9c35e3 Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Thu, 8 Jan 2026 10:25:05 +0100 Subject: [PATCH 35/35] feat: golangci-lint config and fixes --- .golangci.yml | 74 +++ Makefile | 56 +- .../chart/reloader/templates/clusterrole.yaml | 6 +- .../chart/reloader/templates/deployment.yaml | 12 +- .../chart/reloader/templates/role.yaml | 6 +- go.mod | 196 ++++++- go.sum | 502 +++++++++++++++++- internal/pkg/config/validation.go | 3 +- internal/pkg/config/validation_test.go | 4 +- 
.../pkg/controller/configmap_reconciler.go | 11 +- .../pkg/controller/deployment_reconciler.go | 5 +- internal/pkg/controller/filter.go | 5 +- internal/pkg/controller/filter_test.go | 3 +- internal/pkg/controller/handler.go | 5 +- internal/pkg/controller/manager.go | 15 +- .../pkg/controller/namespace_reconciler.go | 5 +- .../controller/namespace_reconciler_test.go | 3 +- .../pkg/controller/resource_reconciler.go | 11 +- internal/pkg/controller/retry.go | 5 +- internal/pkg/controller/retry_test.go | 11 +- internal/pkg/controller/secret_reconciler.go | 11 +- internal/pkg/controller/test_helpers_test.go | 11 +- internal/pkg/metadata/metadata.go | 3 +- internal/pkg/metadata/metadata_test.go | 3 +- internal/pkg/metadata/publisher.go | 5 +- internal/pkg/reload/decision_test.go | 3 +- internal/pkg/reload/pause.go | 3 +- internal/pkg/reload/pause_test.go | 5 +- internal/pkg/reload/predicate.go | 3 +- internal/pkg/reload/predicate_test.go | 3 +- internal/pkg/reload/service.go | 3 +- internal/pkg/reload/service_test.go | 5 +- internal/pkg/reload/strategy.go | 3 +- internal/pkg/reload/strategy_test.go | 3 +- internal/pkg/webhook/webhook.go | 1 + internal/pkg/workload/base.go | 1 + 36 files changed, 869 insertions(+), 135 deletions(-) create mode 100644 .golangci.yml diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..8644bc04f --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,74 @@ +version: "2" + +run: + go: "1.25" + timeout: 5m + allow-parallel-runners: true + +linters: + default: none + enable: + # Core linters + - errcheck + - govet + - staticcheck + - ineffassign + - unused + + # Code quality + - revive + - misspell + - unconvert + - unparam + - nakedret + - copyloopvar + + # Bug prevention + - bodyclose + - durationcheck + - errorlint + + # Test framework + - ginkgolinter + + settings: + revive: + rules: + - name: comment-spacings + - name: import-shadowing + + govet: + enable-all: true + disable: + - shadow + - fieldalignment + + errcheck: + 
check-type-assertions: true + exclude-functions: + - (io.Closer).Close + - (*os.File).Close + + nakedret: + max-func-lines: 30 + + exclusions: + generated: lax + rules: + - linters: + - errcheck + path: _test\.go + paths: + - third_party$ + - vendor$ + +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/stakater/Reloader + exclusions: + generated: lax diff --git a/Makefile b/Makefile index 57dd27b34..be013e41c 100644 --- a/Makefile +++ b/Makefile @@ -38,18 +38,9 @@ $(LOCALBIN): ## Tool Binaries KUBECTL ?= kubectl -KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION) -CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION) -ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) -GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) YQ ?= $(LOCALBIN)/yq ## Tool Versions -KUSTOMIZE_VERSION ?= v5.3.0 -CONTROLLER_TOOLS_VERSION ?= v0.14.0 -ENVTEST_VERSION ?= release-0.17 -GOLANGCI_LINT_VERSION ?= v2.6.1 - YQ_VERSION ?= v4.27.5 YQ_DOWNLOAD_URL = "https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(ARCH)" @@ -64,40 +55,6 @@ $(YQ): @chmod +x $(YQ) @echo "yq downloaded successfully to $(YQ)." -.PHONY: kustomize -kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. -$(KUSTOMIZE): $(LOCALBIN) - $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) - -.PHONY: controller-gen -controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. -$(CONTROLLER_GEN): $(LOCALBIN) - $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) - -.PHONY: envtest -envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. 
-$(ENVTEST): $(LOCALBIN) - $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) - -.PHONY: golangci-lint -golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. -$(GOLANGCI_LINT): $(LOCALBIN) - $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) - -# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist -# $1 - target path with name of binary (ideally with version) -# $2 - package url which can be installed -# $3 - specific version of package -define go-install-tool -@[ -f $(1) ] || { \ -set -e; \ -package=$(2)@$(3) ;\ -echo "Downloading $${package}" ;\ -GOBIN=$(LOCALBIN) go install $${package} ;\ -mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\ -} -endef - default: build test install: @@ -109,8 +66,8 @@ run: build: "$(GOCMD)" build ${GOFLAGS} -ldflags '${LDFLAGS}' -o "${BINARY}" ./cmd/reloader -lint: golangci-lint ## Run golangci-lint on the codebase - $(GOLANGCI_LINT) run ./... +lint: ## Run golangci-lint on the codebase + go tool golangci-lint run ./... build-image: docker buildx build \ @@ -149,8 +106,9 @@ manifest: test: "$(GOCMD)" test -timeout 1800s -v -short ./cmd/... ./internal/... -e2e: - "$(GOCMD)" test -timeout 1800s -v ./test/... +.PHONY: docker-build +docker-build: ## Build Docker image + $(CONTAINER_RUNTIME) build -t $(IMG) -f Dockerfile . 
stop: @docker stop "${BINARY}" @@ -161,8 +119,8 @@ apply: deploy: binary-image push apply .PHONY: k8s-manifests -k8s-manifests: $(KUSTOMIZE) ## Generate k8s manifests using Kustomize from 'manifests' folder - $(KUSTOMIZE) build ./deployments/kubernetes/ -o ./deployments/kubernetes/reloader.yaml +k8s-manifests: ## Generate k8s manifests using Kustomize from 'manifests' folder + go tool kustomize build ./deployments/kubernetes/ -o ./deployments/kubernetes/reloader.yaml .PHONY: update-manifests-version update-manifests-version: ## Generate k8s manifests using Kustomize from 'manifests' folder diff --git a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml index c229c113a..11e8a5d43 100644 --- a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml +++ b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml @@ -56,12 +56,12 @@ rules: {{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }} - apiGroups: - "argoproj.io" - - "" resources: - rollouts verbs: - list - get + - watch - update - patch {{- end }} @@ -85,6 +85,9 @@ rules: verbs: - list - get + - watch + - update + - patch {{- end }} {{- if .Values.reloader.ignoreJobs }}{{- else }} - apiGroups: @@ -96,6 +99,7 @@ rules: - delete - list - get + - watch {{- end}} {{- if .Values.reloader.enableHA }} - apiGroups: diff --git a/deployments/kubernetes/chart/reloader/templates/deployment.yaml b/deployments/kubernetes/chart/reloader/templates/deployment.yaml index 16564b209..048526e83 100644 --- a/deployments/kubernetes/chart/reloader/templates/deployment.yaml +++ b/deployments/kubernetes/chart/reloader/templates/deployment.yaml @@ -173,10 +173,12 @@ spec: ports: - name: http containerPort: 9090 + - name: health + containerPort: 8080 livenessProbe: httpGet: - path: /live - port: http + path: /healthz + port: health timeoutSeconds: {{ 
.Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }} failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }} periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }} @@ -184,8 +186,8 @@ spec: initialDelaySeconds: {{ .Values.reloader.deployment.livenessProbe.initialDelaySeconds | default "10" }} readinessProbe: httpGet: - path: /metrics - port: http + path: /readyz + port: health timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }} failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }} periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }} @@ -235,7 +237,7 @@ spec: - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}" {{- end }} {{- if (include "reloader-namespaceSelector" .) }} - - "--namespace-selector=\"{{ include "reloader-namespaceSelector" . }}\"" + - "--namespace-selector={{ include "reloader-namespaceSelector" . 
}}" {{- end }} {{- if .Values.reloader.resourceLabelSelector }} - "--resource-label-selector={{ .Values.reloader.resourceLabelSelector }}" diff --git a/deployments/kubernetes/chart/reloader/templates/role.yaml b/deployments/kubernetes/chart/reloader/templates/role.yaml index 860cf895f..c6cfed646 100644 --- a/deployments/kubernetes/chart/reloader/templates/role.yaml +++ b/deployments/kubernetes/chart/reloader/templates/role.yaml @@ -47,12 +47,12 @@ rules: {{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }} - apiGroups: - "argoproj.io" - - "" resources: - rollouts verbs: - list - get + - watch - update - patch {{- end }} @@ -75,6 +75,9 @@ rules: verbs: - list - get + - watch + - update + - patch - apiGroups: - "batch" resources: @@ -84,6 +87,7 @@ rules: - delete - list - get + - watch {{- if .Values.reloader.enableHA }} - apiGroups: - "coordination.k8s.io" diff --git a/go.mod b/go.mod index ad21ed637..ed633b39d 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,8 @@ require ( github.com/argoproj/argo-rollouts v1.8.3 github.com/go-logr/logr v1.4.3 github.com/go-logr/zerologr v1.2.3 - github.com/openshift/api v0.0.0-20251223163548-3f584b29ee4a - github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc + github.com/openshift/api v0.0.0-20260107103503-6d35063ca179 + github.com/openshift/client-go v0.0.0-20260105124352-f93a4291f9ae github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 github.com/rs/zerolog v1.34.0 @@ -21,13 +21,72 @@ require ( ) require ( + 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect + 4d63.com/gochecknoglobals v0.2.2 // indirect + codeberg.org/chavacava/garif v0.2.0 // indirect + codeberg.org/polyfloyd/go-errorlint v1.9.0 // indirect + dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect + dev.gaijin.team/go/golib v0.6.0 // indirect + github.com/4meepo/tagalign v1.4.3 // indirect + github.com/Abirdcfly/dupword v0.1.7 // indirect + 
github.com/AdminBenni/iota-mixing v1.0.0 // indirect + github.com/AlwxSin/noinlineerr v1.0.5 // indirect + github.com/Antonboom/errname v1.1.1 // indirect + github.com/Antonboom/nilnil v1.1.1 // indirect + github.com/Antonboom/testifylint v1.6.4 // indirect + github.com/BurntSushi/toml v1.6.0 // indirect + github.com/Djarvur/go-err113 v0.1.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/MirrexOne/unqueryvet v1.4.0 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect + github.com/alecthomas/chroma/v2 v2.21.1 // indirect + github.com/alecthomas/go-check-sumtype v0.3.1 // indirect + github.com/alexkohler/nakedret/v2 v2.0.6 // indirect + github.com/alexkohler/prealloc v1.0.1 // indirect + github.com/alfatraining/structtag v1.0.0 // indirect + github.com/alingse/asasalint v0.0.11 // indirect + github.com/alingse/nilnesserr v0.2.0 // indirect + github.com/ashanbrown/forbidigo/v2 v2.3.0 // indirect + github.com/ashanbrown/makezero/v2 v2.1.0 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.3 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bombsimon/wsl/v4 v4.7.0 // indirect + github.com/bombsimon/wsl/v5 v5.3.0 // indirect + github.com/breml/bidichk v0.3.3 // indirect + github.com/breml/errchkjson v0.4.1 // indirect + github.com/butuzov/ireturn v0.4.0 // indirect + github.com/butuzov/mirror v1.3.0 // indirect + github.com/catenacyber/perfsprint v0.10.1 // indirect + github.com/ccojocar/zxcvbn-go v1.0.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/charithe/durationcheck v0.0.11 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect + github.com/charmbracelet/lipgloss v1.1.0 // indirect + github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/cellbuf 
v0.0.13-0.20250311204145-2c3ea96c31dd // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/ckaznocha/intrange v0.3.1 // indirect + github.com/curioswitch/go-reassign v0.3.0 // indirect + github.com/daixiang0/gci v0.13.7 // indirect + github.com/dave/dst v0.27.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/denis-tingaikin/go-header v0.5.0 // indirect + github.com/dlclark/regexp2 v1.11.5 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect + github.com/ettle/strcase v0.2.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/firefart/nonamedreturns v1.0.6 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/ghostiam/protogetter v0.3.18 // indirect + github.com/go-critic/go-critic v0.14.3 // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.22.4 // indirect github.com/go-openapi/jsonreference v0.21.4 // indirect github.com/go-openapi/swag v0.25.4 // indirect @@ -42,30 +101,146 @@ require ( github.com/go-openapi/swag/stringutils v0.25.4 // indirect github.com/go-openapi/swag/typeutils v0.25.4 // indirect github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.2.0 // indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/godoc-lint/godoc-lint 
v0.11.1 // indirect + github.com/gofrs/flock v0.13.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golangci/asciicheck v0.5.0 // indirect + github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect + github.com/golangci/go-printf-func-name v0.1.1 // indirect + github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect + github.com/golangci/golangci-lint/v2 v2.8.0 // indirect + github.com/golangci/golines v0.14.0 // indirect + github.com/golangci/misspell v0.7.0 // indirect + github.com/golangci/plugin-module-register v0.1.2 // indirect + github.com/golangci/revgrep v0.8.0 // indirect + github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e // indirect + github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.7.1 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/gordonklaus/ineffassign v0.2.0 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.5.0 // indirect + github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.2 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect + github.com/hashicorp/go-version v1.8.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jgautheron/goconst v1.8.2 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jjti/go-spancheck v0.6.5 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/julz/importas v0.2.0 // indirect + github.com/karamaru-alpha/copyloopvar v1.2.2 // indirect + github.com/kisielk/errcheck v1.9.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.6 // indirect + github.com/kulti/thelper 
v0.7.1 // indirect + github.com/kunwardeep/paralleltest v1.0.15 // indirect + github.com/lasiar/canonicalheader v1.1.2 // indirect + github.com/ldez/exptostd v0.4.5 // indirect + github.com/ldez/gomoddirectives v0.8.0 // indirect + github.com/ldez/grignotin v0.10.1 // indirect + github.com/ldez/structtags v0.6.1 // indirect + github.com/ldez/tagliatelle v0.7.2 // indirect + github.com/ldez/usetesting v0.5.0 // indirect + github.com/leonklingele/grouper v1.1.2 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/macabu/inamedparam v0.2.0 // indirect + github.com/manuelarte/embeddedstructfieldcheck v0.4.0 // indirect + github.com/manuelarte/funcorder v0.5.0 // indirect + github.com/maratori/testableexamples v1.0.1 // indirect + github.com/maratori/testpackage v1.1.2 // indirect + github.com/matoous/godox v1.1.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mgechev/revive v1.13.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/moricho/tparallel v0.3.2 // indirect + github.com/muesli/termenv v0.16.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nishanths/exhaustive v0.12.0 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect + github.com/nunnatsa/ginkgolinter v0.21.2 // indirect + github.com/onsi/ginkgo/v2 v2.27.3 // indirect + github.com/onsi/gomega v1.38.3 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.67.4 // indirect + 
github.com/prometheus/common v0.67.5 // indirect github.com/prometheus/procfs v0.19.2 // indirect + github.com/quasilyte/go-ruleguard v0.4.5 // indirect + github.com/quasilyte/go-ruleguard/dsl v0.3.23 // indirect + github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/raeperd/recvcheck v0.2.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/ryancurrah/gomodguard v1.4.1 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sagikazarmark/locafero v0.12.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect + github.com/securego/gosec/v2 v2.22.11 // indirect + github.com/sergi/go-diff v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sivchari/containedctx v1.0.3 // indirect + github.com/sonatard/noctx v0.4.0 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.3.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/stretchr/testify v1.11.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/tetafro/godot v1.5.4 // indirect + github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect + github.com/timonwong/loggercheck v0.11.0 // indirect + github.com/tomarrell/wrapcheck/v2 v2.12.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ultraware/funlen v0.2.0 // indirect + github.com/ultraware/whitespace v0.2.0 
// indirect + github.com/uudashr/gocognit v1.2.0 // indirect + github.com/uudashr/iface v1.4.1 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xen0n/gosmopolitan v1.3.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.3.0 // indirect + github.com/ykadowak/zerologlint v0.1.5 // indirect + gitlab.com/bosi/decorder v0.4.2 // indirect + go-simpler.org/musttag v0.14.0 // indirect + go-simpler.org/sloglint v0.11.1 // indirect + go.augendre.info/arangolint v0.3.1 // indirect + go.augendre.info/fatcontext v0.9.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/mod v0.31.0 // indirect golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sync v0.19.0 // indirect @@ -78,11 +253,19 @@ require ( google.golang.org/protobuf v1.36.11 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.6.1 // indirect k8s.io/apiextensions-apiserver v0.35.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect - k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 // indirect + k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8 // indirect + mvdan.cc/gofumpt v0.9.2 // indirect + mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/kustomize/api v0.21.0 // indirect + sigs.k8s.io/kustomize/cmd/config v0.21.0 // indirect + sigs.k8s.io/kustomize/kustomize/v5 v5.8.0 // indirect + 
sigs.k8s.io/kustomize/kyaml v0.21.0 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect sigs.k8s.io/yaml v1.6.0 // indirect @@ -110,3 +293,8 @@ replace ( k8s.io/sample-cli-plugin v0.0.0 => k8s.io/sample-cli-plugin v0.24.2 k8s.io/sample-controller v0.0.0 => k8s.io/sample-controller v0.24.2 ) + +tool ( + github.com/golangci/golangci-lint/v2/cmd/golangci-lint + sigs.k8s.io/kustomize/kustomize/v5 +) diff --git a/go.sum b/go.sum index 07c360254..945867094 100644 --- a/go.sum +++ b/go.sum @@ -1,29 +1,151 @@ +4d63.com/gocheckcompilerdirectives v1.3.0 h1:Ew5y5CtcAAQeTVKUVFrE7EwHMrTO6BggtEj8BZSjZ3A= +4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= +4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= +4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= +codeberg.org/chavacava/garif v0.2.0 h1:F0tVjhYbuOCnvNcU3YSpO6b3Waw6Bimy4K0mM8y6MfY= +codeberg.org/chavacava/garif v0.2.0/go.mod h1:P2BPbVbT4QcvLZrORc2T29szK3xEOlnl0GiPTJmEqBQ= +codeberg.org/polyfloyd/go-errorlint v1.9.0 h1:VkdEEmA1VBpH6ecQoMR4LdphVI3fA4RrCh2an7YmodI= +codeberg.org/polyfloyd/go-errorlint v1.9.0/go.mod h1:GPRRu2LzVijNn4YkrZYJfatQIdS+TrcK8rL5Xs24qw8= +dev.gaijin.team/go/exhaustruct/v4 v4.0.0 h1:873r7aNneqoBB3IaFIzhvt2RFYTuHgmMjoKfwODoI1Y= +dev.gaijin.team/go/exhaustruct/v4 v4.0.0/go.mod h1:aZ/k2o4Y05aMJtiux15x8iXaumE88YdiB0Ai4fXOzPI= +dev.gaijin.team/go/golib v0.6.0 h1:v6nnznFTs4bppib/NyU1PQxobwDHwCXXl15P7DV5Zgo= +dev.gaijin.team/go/golib v0.6.0/go.mod h1:uY1mShx8Z/aNHWDyAkZTkX+uCi5PdX7KsG1eDQa2AVE= +github.com/4meepo/tagalign v1.4.3 h1:Bnu7jGWwbfpAie2vyl63Zup5KuRv21olsPIha53BJr8= +github.com/4meepo/tagalign v1.4.3/go.mod h1:00WwRjiuSbrRJnSVeGWPLp2epS5Q/l4UEy0apLLS37c= +github.com/Abirdcfly/dupword v0.1.7 h1:2j8sInznrje4I0CMisSL6ipEBkeJUJAmK1/lfoNGWrQ= +github.com/Abirdcfly/dupword v0.1.7/go.mod h1:K0DkBeOebJ4VyOICFdppB23Q0YMOgVafM0zYW0n9lF4= 
+github.com/AdminBenni/iota-mixing v1.0.0 h1:Os6lpjG2dp/AE5fYBPAA1zfa2qMdCAWwPMCgpwKq7wo= +github.com/AdminBenni/iota-mixing v1.0.0/go.mod h1:i4+tpAaB+qMVIV9OK3m4/DAynOd5bQFaOu+2AhtBCNY= +github.com/AlwxSin/noinlineerr v1.0.5 h1:RUjt63wk1AYWTXtVXbSqemlbVTb23JOSRiNsshj7TbY= +github.com/AlwxSin/noinlineerr v1.0.5/go.mod h1:+QgkkoYrMH7RHvcdxdlI7vYYEdgeoFOVjU9sUhw/rQc= +github.com/Antonboom/errname v1.1.1 h1:bllB7mlIbTVzO9jmSWVWLjxTEbGBVQ1Ff/ClQgtPw9Q= +github.com/Antonboom/errname v1.1.1/go.mod h1:gjhe24xoxXp0ScLtHzjiXp0Exi1RFLKJb0bVBtWKCWQ= +github.com/Antonboom/nilnil v1.1.1 h1:9Mdr6BYd8WHCDngQnNVV0b554xyisFioEKi30sksufQ= +github.com/Antonboom/nilnil v1.1.1/go.mod h1:yCyAmSw3doopbOWhJlVci+HuyNRuHJKIv6V2oYQa8II= +github.com/Antonboom/testifylint v1.6.4 h1:gs9fUEy+egzxkEbq9P4cpcMB6/G0DYdMeiFS87UiqmQ= +github.com/Antonboom/testifylint v1.6.4/go.mod h1:YO33FROXX2OoUfwjz8g+gUxQXio5i9qpVy7nXGbxDD4= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/Djarvur/go-err113 v0.1.1 h1:eHfopDqXRwAi+YmCUas75ZE0+hoBHJ2GQNLYRSxao4g= +github.com/Djarvur/go-err113 v0.1.1/go.mod h1:IaWJdYFLg76t2ihfflPZnM1LIQszWOsFDh2hhhAVF6k= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/MirrexOne/unqueryvet v1.4.0 h1:6KAkqqW2KUnkl9Z0VuTphC3IXRPoFqEkJEtyxxHj5eQ= +github.com/MirrexOne/unqueryvet v1.4.0/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= 
+github.com/alecthomas/chroma/v2 v2.21.1 h1:FaSDrp6N+3pphkNKU6HPCiYLgm8dbe5UXIXcoBhZSWA= +github.com/alecthomas/chroma/v2 v2.21.1/go.mod h1:NqVhfBR0lte5Ouh3DcthuUCTUpDC9cxBOfyMbMQPs3o= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs= +github.com/alecthomas/repr v0.5.2/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alexkohler/nakedret/v2 v2.0.6 h1:ME3Qef1/KIKr3kWX3nti3hhgNxw6aqN5pZmQiFSsuzQ= +github.com/alexkohler/nakedret/v2 v2.0.6/go.mod h1:l3RKju/IzOMQHmsEvXwkqMDzHHvurNQfAgE1eVmT40Q= +github.com/alexkohler/prealloc v1.0.1 h1:A9P1haqowqUxWvU9nk6tQ7YktXIHf+LQM9wPRhuteEE= +github.com/alexkohler/prealloc v1.0.1/go.mod h1:fT39Jge3bQrfA7nPMDngUfvUbQGQeJyGQnR+913SCig= +github.com/alfatraining/structtag v1.0.0 h1:2qmcUqNcCoyVJ0up879K614L9PazjBSFruTB0GOFjCc= +github.com/alfatraining/structtag v1.0.0/go.mod h1:p3Xi5SwzTi+Ryj64DqjLWz7XurHxbGsq6y3ubePJPus= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.2.0 h1:raLem5KG7EFVb4UIDAXgrv3N2JIaffeKNtcEXkEWd/w= +github.com/alingse/nilnesserr v0.2.0/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= github.com/argoproj/argo-rollouts v1.8.3 h1:blbtQva4IK9r6gFh+dWkCrLnFdPOWiv9ubQYu36qeaA= github.com/argoproj/argo-rollouts v1.8.3/go.mod h1:kCAUvIfMGfOyVf3lvQbBt0nqQn4Pd+zB5/YwKv+UBa8= +github.com/ashanbrown/forbidigo/v2 v2.3.0 h1:OZZDOchCgsX5gvToVtEBoV2UWbFfI6RKQTir2UZzSxo= +github.com/ashanbrown/forbidigo/v2 v2.3.0/go.mod h1:5p6VmsG5/1xx3E785W9fouMxIOkvY2rRV9nMdWadd6c= +github.com/ashanbrown/makezero/v2 v2.1.0 h1:snuKYMbqosNokUKm+R6/+vOPs8yVAi46La7Ck6QYSaE= +github.com/ashanbrown/makezero/v2 v2.1.0/go.mod 
h1:aEGT/9q3S8DHeE57C88z2a6xydvgx8J5hgXIGWgo0MY= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= +github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= +github.com/bombsimon/wsl/v5 v5.3.0 h1:nZWREJFL6U3vgW/B1lfDOigl+tEF6qgs6dGGbFeR0UM= +github.com/bombsimon/wsl/v5 v5.3.0/go.mod h1:Gp8lD04z27wm3FANIUPZycXp+8huVsn0oxc+n4qfV9I= +github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE= +github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE= +github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg= +github.com/breml/errchkjson v0.4.1/go.mod h1:a23OvR6Qvcl7DG/Z4o0el6BRAjKnaReoPQFciAl9U3s= +github.com/butuzov/ireturn v0.4.0 h1:+s76bF/PfeKEdbG8b54aCocxXmi0wvYdOVsWxVO7n8E= +github.com/butuzov/ireturn v0.4.0/go.mod h1:ghI0FrCmap8pDWZwfPisFD1vEc56VKH4NpQUxDHta70= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= 
+github.com/catenacyber/perfsprint v0.10.1 h1:u7Riei30bk46XsG8nknMhKLXG9BcXz3+3tl/WpKm0PQ= +github.com/catenacyber/perfsprint v0.10.1/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc= +github.com/ccojocar/zxcvbn-go v1.0.4 h1:FWnCIRMXPj43ukfX000kvBZvV6raSxakYr1nzyNrUcc= +github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxYFOSJB21Das= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.11 h1:g1/EX1eIiKS57NTWsYtHDZ/APfeXKhye1DidBcABctk= +github.com/charithe/durationcheck v0.0.11/go.mod h1:x5iZaixRNl8ctbM+3B2RrPG5t856TxRyVQEnbIEM2X4= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/ckaznocha/intrange v0.3.1 h1:j1onQyXvHUsPWujDH6WIjhyH26gkRt/txNlV7LspvJs= +github.com/ckaznocha/intrange v0.3.1/go.mod h1:QVepyz1AkUoFQkpEqksSYpNpUo3c5W7nWh/s6SHIJJk= 
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/daixiang0/gci v0.13.7 h1:+0bG5eK9vlI08J+J/NWGbWPTNiXPG4WhNLJOkSxWITQ= +github.com/daixiang0/gci v0.13.7/go.mod h1:812WVN6JLFY9S6Tv76twqmNqevN0pa3SX3nih0brVzQ= +github.com/dave/dst v0.27.3 h1:P1HPoMza3cMEquVf9kKy8yXsFirry4zEnWOdYPOoIzY= +github.com/dave/dst v0.27.3/go.mod h1:jHh6EOibnHgcUW3WjKHisiooEkYwqpHLBSX1iOBhEyc= +github.com/dave/jennifer v1.7.1 h1:B4jJJDHelWcDhlRQxWeo0Npa/pYKBLrirAQoTN45txo= +github.com/dave/jennifer v1.7.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= +github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= +github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod 
h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.6 h1:vmiBcKV/3EqKY3ZiPxCINmpS431OcE1S47AQUwhrg8E= +github.com/firefart/nonamedreturns v1.0.6/go.mod h1:R8NisJnSIpvPWheCq0mNRXJok6D8h7fagJTF8EMEwCo= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/ghostiam/protogetter v0.3.18 h1:yEpghRGtP9PjKvVXtEzGpYfQj1Wl/ZehAfU6fr62Lfo= +github.com/ghostiam/protogetter v0.3.18/go.mod h1:FjIu5Yfs6FT391m+Fjp3fbAYJ6rkL/J6ySpZBfnODuI= +github.com/go-critic/go-critic v0.14.3 h1:5R1qH2iFeo4I/RJU8vTezdqs08Egi4u5p6vOESA0pog= +github.com/go-critic/go-critic v0.14.3/go.mod 
h1:xwntfW6SYAd7h1OqDzmN6hBX/JxsEKl5up/Y2bsxgVQ= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -64,40 +186,172 @@ github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxE github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= +github.com/go-toolsmith/astfmt 
v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= +github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godoc-lint/godoc-lint v0.11.1 h1:z9as8Qjiy6miRIa3VRymTa+Gt2RLnGICVikcvlUVOaA= +github.com/godoc-lint/godoc-lint v0.11.1/go.mod h1:BAqayheFSuZrEAqCRxgw9MyvsM+S/hZwJbU1s/ejRj8= +github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= +github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golangci/asciicheck v0.5.0 h1:jczN/BorERZwK8oiFBOGvlGPknhvq0bjnysTj4nUfo0= +github.com/golangci/asciicheck v0.5.0/go.mod h1:5RMNAInbNFw2krqN6ibBxN/zfRFa9S6tA1nPdM0l8qQ= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= +github.com/golangci/go-printf-func-name v0.1.1 h1:hIYTFJqAGp1iwoIfsNTpoq1xZAarogrvjO9AfiW3B4U= +github.com/golangci/go-printf-func-name v0.1.1/go.mod h1:Es64MpWEZbh0UBtTAICOZiB+miW53w/K9Or/4QogJss= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= +github.com/golangci/golangci-lint/v2 v2.8.0 h1:wJnr3hJWY3eVzOUcfwbDc2qbi2RDEpvLmQeNFaPSNYA= +github.com/golangci/golangci-lint/v2 v2.8.0/go.mod h1:xl+HafQ9xoP8rzw0z5AwnO5kynxtb80e8u02Ej/47RI= +github.com/golangci/golines v0.14.0 h1:xt9d3RKBjhasA3qpoXs99J2xN2t6eBlpLHt0TrgyyXc= +github.com/golangci/golines v0.14.0/go.mod h1:gf555vPG2Ia7mmy2mzmhVQbVjuK8Orw0maR1G4vVAAQ= +github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= +github.com/golangci/misspell v0.7.0/go.mod h1:WZyyI2P3hxPY2UVHs3cS8YcllAeyfquQcKfdeE9AFVg= +github.com/golangci/plugin-module-register v0.1.2 h1:e5WM6PO6NIAEcij3B053CohVp3HIYbzSuP53UAYgOpg= +github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= +github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= +github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e 
h1:ai0EfmVYE2bRA5htgAG9r7s3tHsfjIhN98WshBTJ9jM= +github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e/go.mod h1:Vrn4B5oR9qRwM+f54koyeH3yzphlecwERs0el27Fr/s= +github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e h1:gD6P7NEo7Eqtt0ssnqSJNNndxe69DOQ24A5h7+i3KpM= +github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e/go.mod h1:h+wZwLjUTJnm/P2rwlbJdRPZXOzaT36/FwnPnY2inzc= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gordonklaus/ineffassign v0.2.0 h1:Uths4KnmwxNJNzq87fwQQDDnbNb7De00VOk9Nu0TySs= +github.com/gordonklaus/ineffassign v0.2.0/go.mod h1:TIpymnagPSexySzs7F9FnO1XFTy8IT3a59vmZp5Y9Lw= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8= +github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc= +github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= +github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= +github.com/gostaticanalysis/nilerr v0.1.2 h1:S6nk8a9N8g062nsx63kUkF6AzbHGw7zzyHMcpu52xQU= +github.com/gostaticanalysis/nilerr v0.1.2/go.mod h1:A19UHhoY3y8ahoL7YKz6sdjDtduwTSI4CsymaC2htPA= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= +github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 
h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jgautheron/goconst v1.8.2 h1:y0XF7X8CikZ93fSNT6WBTb/NElBu9IjaY7CCYQrCMX4= +github.com/jgautheron/goconst v1.8.2/go.mod h1:A0oxgBCHy55NQn6sYpO7UdnA9p+h7cPtoOZUmvNIako= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jjti/go-spancheck v0.6.5 h1:lmi7pKxa37oKYIMScialXUK6hP3iY5F1gu+mLBPgYB8= +github.com/jjti/go-spancheck v0.6.5/go.mod h1:aEogkeatBrbYsyW6y5TgDfihCulDYciL1B7rG2vSsrU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= +github.com/karamaru-alpha/copyloopvar v1.2.2 h1:yfNQvP9YaGQR7VaWLYcfZUlRP2eo2vhExWKxD/fP6q0= +github.com/karamaru-alpha/copyloopvar v1.2.2/go.mod h1:oY4rGZqZ879JkJMtX3RRkcXRkmUvH0x35ykgaKgsgJY= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M= +github.com/kisielk/errcheck v1.9.0/go.mod 
h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.6 h1:7HIyRcnyzxL9Lz06NGhiKvenXq7Zw6Q0UQu/ttjfJCE= +github.com/kkHAIKE/contextcheck v1.1.6/go.mod h1:3dDbMRNBFaq8HFXWC1JyvDSPm43CmE6IuHam8Wr0rkg= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.7.1 h1:fI8QITAoFVLx+y+vSyuLBP+rcVIB8jKooNSCT2EiI98= +github.com/kulti/thelper v0.7.1/go.mod h1:NsMjfQEy6sd+9Kfw8kCP61W1I0nerGSYSFnGaxQkcbs= +github.com/kunwardeep/paralleltest v1.0.15 h1:ZMk4Qt306tHIgKISHWFJAO1IDQJLc6uDyJMLyncOb6w= +github.com/kunwardeep/paralleltest v1.0.15/go.mod h1:di4moFqtfz3ToSKxhNjhOZL+696QtJGCFe132CbBLGk= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.4.5 h1:kv2ZGUVI6VwRfp/+bcQ6Nbx0ghFWcGIKInkG/oFn1aQ= +github.com/ldez/exptostd v0.4.5/go.mod h1:QRjHRMXJrCTIm9WxVNH6VW7oN7KrGSht69bIRwvdFsM= +github.com/ldez/gomoddirectives v0.8.0 
h1:JqIuTtgvFC2RdH1s357vrE23WJF2cpDCPFgA/TWDGpk= +github.com/ldez/gomoddirectives v0.8.0/go.mod h1:jutzamvZR4XYJLr0d5Honycp4Gy6GEg2mS9+2YX3F1Q= +github.com/ldez/grignotin v0.10.1 h1:keYi9rYsgbvqAZGI1liek5c+jv9UUjbvdj3Tbn5fn4o= +github.com/ldez/grignotin v0.10.1/go.mod h1:UlDbXFCARrXbWGNGP3S5vsysNXAPhnSuBufpTEbwOas= +github.com/ldez/structtags v0.6.1 h1:bUooFLbXx41tW8SvkfwfFkkjPYvFFs59AAMgVg6DUBk= +github.com/ldez/structtags v0.6.1/go.mod h1:YDxVSgDy/MON6ariaxLF2X09bh19qL7MtGBN5MrvbdY= +github.com/ldez/tagliatelle v0.7.2 h1:KuOlL70/fu9paxuxbeqlicJnCspCRjH0x8FW+NfgYUk= +github.com/ldez/tagliatelle v0.7.2/go.mod h1:PtGgm163ZplJfZMZ2sf5nhUT170rSuPgBimoyYtdaSI= +github.com/ldez/usetesting v0.5.0 h1:3/QtzZObBKLy1F4F8jLuKJiKBjjVFi1IavpoWbmqLwc= +github.com/ldez/usetesting v0.5.0/go.mod h1:Spnb4Qppf8JTuRgblLrEWb7IE6rDmUpGvxY3iRrzvDQ= +github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= +github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddBCpE= +github.com/macabu/inamedparam v0.2.0/go.mod h1:+Pee9/YfGe5LJ62pYXqB89lJ+0k5bsR8Wgz/C0Zlq3U= +github.com/manuelarte/embeddedstructfieldcheck v0.4.0 h1:3mAIyaGRtjK6EO9E73JlXLtiy7ha80b2ZVGyacxgfww= +github.com/manuelarte/embeddedstructfieldcheck v0.4.0/go.mod h1:z8dFSyXqp+fC6NLDSljRJeNQJJDWnY7RoWFzV3PC6UM= +github.com/manuelarte/funcorder v0.5.0 h1:llMuHXXbg7tD0i/LNw8vGnkDTHFpTnWqKPI85Rknc+8= +github.com/manuelarte/funcorder v0.5.0/go.mod h1:Yt3CiUQthSBMBxjShjdXMexmzpP8YGvGLjrxJNkO2hA= +github.com/maratori/testableexamples v1.0.1 h1:HfOQXs+XgfeRBJ+Wz0XfH+FHnoY9TVqL6Fcevpzy4q8= +github.com/maratori/testableexamples v1.0.1/go.mod h1:XE2F/nQs7B9N08JgyRmdGjYVGqxWwClLPCGSQhXQSrQ= 
+github.com/maratori/testpackage v1.1.2 h1:ffDSh+AgqluCLMXhM19f/cpvQAKygKAJXFl9aUjmbqs= +github.com/maratori/testpackage v1.1.2/go.mod h1:8F24GdVDFW5Ew43Et02jamrVMNXLUNaOynhDssITGfc= +github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4= +github.com/matoous/godox v1.1.0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= @@ -105,67 +359,197 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM= +github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= +github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= -github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= -github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= -github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= -github.com/openshift/api v0.0.0-20251223163548-3f584b29ee4a h1:lz22938uOBlzTHjGpobGeVWkcxGu6fDQ7oZWheClTHE= -github.com/openshift/api v0.0.0-20251223163548-3f584b29ee4a/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= -github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc h1:nIlRaJfr/yGjPV15MNF5eVHLAGyXFjcUzO+hXeWDDk8= -github.com/openshift/client-go 
v0.0.0-20251223102348-558b0eef16bc/go.mod h1:cs9BwTu96sm2vQvy7r9rOiltgu90M6ju2qIHFG9WU+o= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.21.2 h1:khzWfm2/Br8ZemX8QM1pl72LwM+rMeW6VUbQ4rzh0Po= +github.com/nunnatsa/ginkgolinter v0.21.2/go.mod h1:GItSI5fw7mCGLPmkvGYrr1kEetZe7B593jcyOpyabsY= +github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8= +github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= +github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= +github.com/openshift/api v0.0.0-20260107103503-6d35063ca179 h1:5gMFMmuVLAcEnBAjNFql/8L2ZRPBDOxl7nmbjO5klvk= +github.com/openshift/api v0.0.0-20260107103503-6d35063ca179/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/client-go v0.0.0-20260105124352-f93a4291f9ae h1:veyDeAOBVJun1KoOsTIRlD7Q5LwRR32kfS2IPjPXJKE= +github.com/openshift/client-go v0.0.0-20260105124352-f93a4291f9ae/go.mod h1:leoeMrUnO40DwByGl7we2l+h6HQq3Y6bHUa+DnmRl+8= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= 
+github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= -github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/quasilyte/go-ruleguard v0.4.5 h1:AGY0tiOT5hJX9BTdx/xBdoCubQUAE2grkqY2lSwvZcA= +github.com/quasilyte/go-ruleguard 
v0.4.5/go.mod h1:Vl05zJ538vcEEwu16V/Hdu7IYZWyKSwIy4c88Ro1kRE= +github.com/quasilyte/go-ruleguard/dsl v0.3.23 h1:lxjt5B6ZCiBeeNO8/oQsegE6fLeCzuMRoVWSkXC4uvY= +github.com/quasilyte/go-ruleguard/dsl v0.3.23/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.4.1 h1:eWC8eUMNZ/wM/PWuZBv7JxxqT5fiIKSIyTvjb7Elr+g= +github.com/ryancurrah/gomodguard v1.4.1/go.mod 
h1:qnMJwV1hX9m+YJseXEBhd2s90+1Xn6x9dLz11ualI1I= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.29.0 h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iMf7Knkq057v4XOQ= +github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8= +github.com/securego/gosec/v2 v2.22.11 h1:tW+weM/hCM/GX3iaCV91d5I6hqaRT2TPsFM1+USPXwg= +github.com/securego/gosec/v2 v2.22.11/go.mod h1:KE4MW/eH0GLWztkbt4/7XpyH0zJBBnu7sYB4l6Wn7Mw= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 
+github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sonatard/noctx v0.4.0 h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o= +github.com/sonatard/noctx v0.4.0/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.3.1 h1:AyX7+dxI4IdLBPtDbsGAyqiTSLpCP9hWRrXQDU4Cm/g= +github.com/stbenjam/no-sprintf-host-port v0.3.1/go.mod h1:ODbZesTCHMVKthBHskvUUexdcNHAQRXk9NpSsL8p/HQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.5.4 h1:u1ww+gqpRLiIA16yF2PV1CV1n/X3zhyezbNXC3E14Sg= +github.com/tetafro/godot v1.5.4/go.mod h1:eOkMrVQurDui411nBY2FA05EYH01r14LuWY/NrVDVcU= +github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 h1:9LPGD+jzxMlnk5r6+hJnar67cgpDIz/iyD+rfl5r2Vk= +github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwTnnNKn4M= +github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= +github.com/tomarrell/wrapcheck/v2 v2.12.0 h1:H/qQ1aNWz/eeIhxKAFvkfIA+N7YDvq6TWVFL27Of9is= +github.com/tomarrell/wrapcheck/v2 v2.12.0/go.mod h1:AQhQuZd0p7b6rfW+vUwHm5OMCGgp63moQ9Qr/0BpIWo= 
+github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.4.1 h1:J16Xl1wyNX9ofhpHmQ9h9gk5rnv2A6lX/2+APLTo0zU= +github.com/uudashr/iface v1.4.1/go.mod h1:pbeBPlbuU2qkNDn0mmfrxP2X+wjPMIQAy+r1MBXSXtg= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xen0n/gosmopolitan v1.3.0 h1:zAZI1zefvo7gcpbCOrPSHJZJYA9ZgLfJqtKzZ5pHqQM= +github.com/xen0n/gosmopolitan v1.3.0/go.mod h1:rckfr5T6o4lBtM1ga7mLGKZmLxswUoH1zxHgNXOsEt4= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs= +github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= 
+github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= +gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= +go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= +go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/musttag v0.14.0 h1:XGySZATqQYSEV3/YTy+iX+aofbZZllJaqwFWs+RTtSo= +go-simpler.org/musttag v0.14.0/go.mod h1:uP8EymctQjJ4Z1kUnjX0u2l60WfUdQxCwSNKzE1JEOE= +go-simpler.org/sloglint v0.11.1 h1:xRbPepLT/MHPTCA6TS/wNfZrDzkGvCCqUv4Bdwc3H7s= +go-simpler.org/sloglint v0.11.1/go.mod h1:2PowwiCOK8mjiF+0KGifVOT8ZsCNiFzvfyJeJOIt8MQ= +go.augendre.info/arangolint v0.3.1 h1:n2E6p8f+zfXSFLa2e2WqFPp4bfvcuRdd50y6cT65pSo= +go.augendre.info/arangolint v0.3.1/go.mod h1:6ZKzEzIZuBQwoSvlKT+qpUfIbBfFCE5gbAoTg0/117g= +go.augendre.info/fatcontext v0.9.0 h1:Gt5jGD4Zcj8CDMVzjOJITlSb9cEch54hjRRlN3qDojE= +go.augendre.info/fatcontext v0.9.0/go.mod h1:L94brOAT1OOUNue6ph/2HnwxoNlds9aXDF2FcUntbNw= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= 
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -179,45 +563,117 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 h1:HDjDiATsGqvuqvkDvgJjD1IgPrVekcSXVVE21JwvzGE= +golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod 
v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net 
v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod 
h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod 
h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -227,14 +683,20 @@ gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuB google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= @@ -247,12 +709,24 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ= k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= -k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE= -k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod 
h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8 h1:oV4uULAC2QPIdMQwjMaNIwykyhWhnhBwX40yd5h9u3U= +k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +mvdan.cc/gofumpt v0.9.2 h1:zsEMWL8SVKGHNztrx6uZrXdp7AX8r421Vvp23sz7ik4= +mvdan.cc/gofumpt v0.9.2/go.mod h1:iB7Hn+ai8lPvofHd9ZFGVg2GOr8sBUw1QUWjNbmIL/s= +mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 h1:ssMzja7PDPJV8FStj7hq9IKiuiKhgz9ErWw+m68e7DI= +mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15/go.mod h1:4M5MMXl2kW6fivUT6yRGpLLPNfuGtU2Z0cPvFquGDYU= sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.21.0 h1:I7nry5p8iDJbuRdYS7ez8MUvw7XVNPcIP5GkzzuXIIQ= +sigs.k8s.io/kustomize/api v0.21.0/go.mod h1:XGVQuR5n2pXKWbzXHweZU683pALGw/AMVO4zU4iS8SE= +sigs.k8s.io/kustomize/cmd/config v0.21.0 h1:ikLtzcNK9isBqSaXXhAg7LRCTNKdp70z5v/c4Y55DOw= +sigs.k8s.io/kustomize/cmd/config v0.21.0/go.mod h1:oxa6eRzeLWUcE7M3Rmio29Sfc4KpqGspHur3GjOYqNA= +sigs.k8s.io/kustomize/kustomize/v5 v5.8.0 h1:CCIJK7z/xJOlkXOaDOcL2jprV53a/eloiL02wg7oJJs= +sigs.k8s.io/kustomize/kustomize/v5 v5.8.0/go.mod h1:qewGAExYZK9LbPPbnJMPK5HQ8nsdxRzpclIg0qslzDo= +sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6HQ= +sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E= diff --git 
a/internal/pkg/config/validation.go b/internal/pkg/config/validation.go index 3b89d2924..b3d2695b7 100644 --- a/internal/pkg/config/validation.go +++ b/internal/pkg/config/validation.go @@ -4,8 +4,9 @@ import ( "fmt" "strings" - "github.com/stakater/Reloader/internal/pkg/workload" "k8s.io/apimachinery/pkg/labels" + + "github.com/stakater/Reloader/internal/pkg/workload" ) // ValidationError represents a configuration validation error. diff --git a/internal/pkg/config/validation_test.go b/internal/pkg/config/validation_test.go index ae495276a..52dc6f000 100644 --- a/internal/pkg/config/validation_test.go +++ b/internal/pkg/config/validation_test.go @@ -1,6 +1,7 @@ package config import ( + "errors" "strings" "testing" ) @@ -205,7 +206,8 @@ func TestConfig_Validate_MultipleErrors(t *testing.T) { t.Fatal("Validate() should return error for multiple invalid values") } - errs, ok := err.(ValidationErrors) + var errs ValidationErrors + ok := errors.As(err, &errs) if !ok { t.Fatalf("Expected ValidationErrors, got %T", err) } diff --git a/internal/pkg/controller/configmap_reconciler.go b/internal/pkg/controller/configmap_reconciler.go index 8aa19d5e9..04bd3bb3f 100644 --- a/internal/pkg/controller/configmap_reconciler.go +++ b/internal/pkg/controller/configmap_reconciler.go @@ -2,6 +2,12 @@ package controller import ( "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/events" @@ -9,11 +15,6 @@ import ( "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" - corev1 "k8s.io/api/core/v1" - ctrl "sigs.k8s.io/controller-runtime" - 
"sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // ConfigMapReconciler watches ConfigMaps and triggers workload reloads. diff --git a/internal/pkg/controller/deployment_reconciler.go b/internal/pkg/controller/deployment_reconciler.go index ff81e8121..ebc1b759a 100644 --- a/internal/pkg/controller/deployment_reconciler.go +++ b/internal/pkg/controller/deployment_reconciler.go @@ -4,13 +4,14 @@ import ( "context" "github.com/go-logr/logr" - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/reload" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" ) // DeploymentReconciler reconciles Deployment objects to handle pause expiration. diff --git a/internal/pkg/controller/filter.go b/internal/pkg/controller/filter.go index c3a387b4e..a66279ff3 100644 --- a/internal/pkg/controller/filter.go +++ b/internal/pkg/controller/filter.go @@ -1,10 +1,11 @@ package controller import ( - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/reload" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" ) // BuildEventFilter combines a resource-specific predicate with common filters. 
diff --git a/internal/pkg/controller/filter_test.go b/internal/pkg/controller/filter_test.go index 0d551e643..be6eec361 100644 --- a/internal/pkg/controller/filter_test.go +++ b/internal/pkg/controller/filter_test.go @@ -3,10 +3,11 @@ package controller import ( "testing" - "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/stakater/Reloader/internal/pkg/config" ) func TestCreateEventPredicate_CreateEvent(t *testing.T) { diff --git a/internal/pkg/controller/handler.go b/internal/pkg/controller/handler.go index 46905e81e..062001846 100644 --- a/internal/pkg/controller/handler.go +++ b/internal/pkg/controller/handler.go @@ -5,14 +5,15 @@ import ( "time" "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/events" "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) // ReloadHandler handles the common reload workflow. 
diff --git a/internal/pkg/controller/manager.go b/internal/pkg/controller/manager.go index 785da3dd8..b33b86a35 100644 --- a/internal/pkg/controller/manager.go +++ b/internal/pkg/controller/manager.go @@ -7,13 +7,6 @@ import ( argorolloutsv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/go-logr/logr" openshiftv1 "github.com/openshift/api/apps/v1" - "github.com/stakater/Reloader/internal/pkg/alerting" - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/events" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/reload" - "github.com/stakater/Reloader/internal/pkg/webhook" - "github.com/stakater/Reloader/internal/pkg/workload" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -22,6 +15,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/healthz" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + "github.com/stakater/Reloader/internal/pkg/alerting" + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/events" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/webhook" + "github.com/stakater/Reloader/internal/pkg/workload" ) var runtimeScheme = runtime.NewScheme() diff --git a/internal/pkg/controller/namespace_reconciler.go b/internal/pkg/controller/namespace_reconciler.go index 8cc03d63f..4e220fd5e 100644 --- a/internal/pkg/controller/namespace_reconciler.go +++ b/internal/pkg/controller/namespace_reconciler.go @@ -5,13 +5,14 @@ import ( "sync" "github.com/go-logr/logr" - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/reload" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" ctrl 
"sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/reload" ) // NamespaceCache provides thread-safe access to the set of namespaces diff --git a/internal/pkg/controller/namespace_reconciler_test.go b/internal/pkg/controller/namespace_reconciler_test.go index 516b58161..604dca92a 100644 --- a/internal/pkg/controller/namespace_reconciler_test.go +++ b/internal/pkg/controller/namespace_reconciler_test.go @@ -3,10 +3,11 @@ package controller_test import ( "testing" + "k8s.io/apimachinery/pkg/labels" + "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/controller" "github.com/stakater/Reloader/internal/pkg/testutil" - "k8s.io/apimachinery/pkg/labels" ) func TestNamespaceCache_Basic(t *testing.T) { diff --git a/internal/pkg/controller/resource_reconciler.go b/internal/pkg/controller/resource_reconciler.go index 6dd42c1f2..7bfdd8057 100644 --- a/internal/pkg/controller/resource_reconciler.go +++ b/internal/pkg/controller/resource_reconciler.go @@ -6,6 +6,11 @@ import ( "time" "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/events" @@ -13,10 +18,6 @@ import ( "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" - "k8s.io/apimachinery/pkg/api/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/predicate" ) // ResourceReconcilerDeps holds shared dependencies for resource reconcilers. 
@@ -197,6 +198,6 @@ func (r *ResourceReconciler[T]) SetupWithManager(mgr ctrl.Manager, forObject T) r.CreatePredicates(r.Config, r.ReloadService.Hasher()), r.Config, r.Initialized(), ), - ). + ). Complete(r) } diff --git a/internal/pkg/controller/retry.go b/internal/pkg/controller/retry.go index e71dfa7f8..ffb30615e 100644 --- a/internal/pkg/controller/retry.go +++ b/internal/pkg/controller/retry.go @@ -3,11 +3,12 @@ package controller import ( "context" - "github.com/stakater/Reloader/internal/pkg/reload" - "github.com/stakater/Reloader/internal/pkg/workload" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/workload" ) // UpdateObjectWithRetry updates a Kubernetes object with retry on conflict. diff --git a/internal/pkg/controller/retry_test.go b/internal/pkg/controller/retry_test.go index 8e43e0e21..ff33c0b55 100644 --- a/internal/pkg/controller/retry_test.go +++ b/internal/pkg/controller/retry_test.go @@ -5,11 +5,6 @@ import ( "testing" "github.com/go-logr/logr/testr" - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/controller" - "github.com/stakater/Reloader/internal/pkg/reload" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/workload" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -17,6 +12,12 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/controller" + "github.com/stakater/Reloader/internal/pkg/reload" + "github.com/stakater/Reloader/internal/pkg/testutil" + "github.com/stakater/Reloader/internal/pkg/workload" ) func TestUpdateWorkloadWithRetry_WorkloadTypes(t *testing.T) { 
diff --git a/internal/pkg/controller/secret_reconciler.go b/internal/pkg/controller/secret_reconciler.go index ddc8f328b..b50c75476 100644 --- a/internal/pkg/controller/secret_reconciler.go +++ b/internal/pkg/controller/secret_reconciler.go @@ -2,6 +2,12 @@ package controller import ( "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/events" @@ -9,11 +15,6 @@ import ( "github.com/stakater/Reloader/internal/pkg/reload" "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" - corev1 "k8s.io/api/core/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // SecretReconciler watches Secrets and triggers workload reloads. 
diff --git a/internal/pkg/controller/test_helpers_test.go b/internal/pkg/controller/test_helpers_test.go index c34f34329..2b0f9e75b 100644 --- a/internal/pkg/controller/test_helpers_test.go +++ b/internal/pkg/controller/test_helpers_test.go @@ -6,6 +6,12 @@ import ( "github.com/go-logr/logr" "github.com/go-logr/logr/testr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "github.com/stakater/Reloader/internal/pkg/alerting" "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/controller" @@ -15,11 +21,6 @@ import ( "github.com/stakater/Reloader/internal/pkg/testutil" "github.com/stakater/Reloader/internal/pkg/webhook" "github.com/stakater/Reloader/internal/pkg/workload" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" ) // testDeps holds shared test dependencies. 
diff --git a/internal/pkg/metadata/metadata.go b/internal/pkg/metadata/metadata.go index 0e1327618..df306af4e 100644 --- a/internal/pkg/metadata/metadata.go +++ b/internal/pkg/metadata/metadata.go @@ -8,9 +8,10 @@ import ( "runtime" "time" - "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/config" ) const ( diff --git a/internal/pkg/metadata/metadata_test.go b/internal/pkg/metadata/metadata_test.go index fd87d1d65..52c5f1997 100644 --- a/internal/pkg/metadata/metadata_test.go +++ b/internal/pkg/metadata/metadata_test.go @@ -6,11 +6,12 @@ import ( "testing" "github.com/go-logr/logr" - "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/stakater/Reloader/internal/pkg/config" ) // testLogger returns a no-op logger for testing. diff --git a/internal/pkg/metadata/publisher.go b/internal/pkg/metadata/publisher.go index 385dd270f..6c6a42221 100644 --- a/internal/pkg/metadata/publisher.go +++ b/internal/pkg/metadata/publisher.go @@ -6,11 +6,12 @@ import ( "os" "github.com/go-logr/logr" - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/workload" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" ) // Publisher handles creating and updating the metadata ConfigMap. 
diff --git a/internal/pkg/reload/decision_test.go b/internal/pkg/reload/decision_test.go index fdc011a6b..5b7a6135e 100644 --- a/internal/pkg/reload/decision_test.go +++ b/internal/pkg/reload/decision_test.go @@ -3,9 +3,10 @@ package reload import ( "testing" - "github.com/stakater/Reloader/internal/pkg/workload" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/workload" ) func TestFilterDecisions(t *testing.T) { diff --git a/internal/pkg/reload/pause.go b/internal/pkg/reload/pause.go index 78194e781..e995dc33c 100644 --- a/internal/pkg/reload/pause.go +++ b/internal/pkg/reload/pause.go @@ -4,9 +4,10 @@ import ( "fmt" "time" + appsv1 "k8s.io/api/apps/v1" + "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/workload" - appsv1 "k8s.io/api/apps/v1" ) // PauseHandler handles pause deployment logic. diff --git a/internal/pkg/reload/pause_test.go b/internal/pkg/reload/pause_test.go index 49fea4a51..1962194d1 100644 --- a/internal/pkg/reload/pause_test.go +++ b/internal/pkg/reload/pause_test.go @@ -4,10 +4,11 @@ import ( "testing" "time" - "github.com/stakater/Reloader/internal/pkg/config" - "github.com/stakater/Reloader/internal/pkg/workload" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/config" + "github.com/stakater/Reloader/internal/pkg/workload" ) func TestPauseHandler_ShouldPause(t *testing.T) { diff --git a/internal/pkg/reload/predicate.go b/internal/pkg/reload/predicate.go index 0c913f87c..c866364aa 100644 --- a/internal/pkg/reload/predicate.go +++ b/internal/pkg/reload/predicate.go @@ -1,11 +1,12 @@ package reload import ( - "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" + + 
"github.com/stakater/Reloader/internal/pkg/config" ) // resourcePredicates returns predicates for filtering resource events. diff --git a/internal/pkg/reload/predicate_test.go b/internal/pkg/reload/predicate_test.go index 1ccb0475a..b6d48340c 100644 --- a/internal/pkg/reload/predicate_test.go +++ b/internal/pkg/reload/predicate_test.go @@ -3,11 +3,12 @@ package reload import ( "testing" - "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/stakater/Reloader/internal/pkg/config" ) func TestNamespaceFilterPredicate_Create(t *testing.T) { diff --git a/internal/pkg/reload/service.go b/internal/pkg/reload/service.go index ae2e85f26..076cf786e 100644 --- a/internal/pkg/reload/service.go +++ b/internal/pkg/reload/service.go @@ -7,9 +7,10 @@ import ( "time" "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/workload" - corev1 "k8s.io/api/core/v1" ) // Service orchestrates the reload logic for ConfigMaps and Secrets. 
diff --git a/internal/pkg/reload/service_test.go b/internal/pkg/reload/service_test.go index 5a13f0252..24356b1e6 100644 --- a/internal/pkg/reload/service_test.go +++ b/internal/pkg/reload/service_test.go @@ -5,11 +5,12 @@ import ( "testing" "github.com/go-logr/logr/testr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stakater/Reloader/internal/pkg/config" "github.com/stakater/Reloader/internal/pkg/testutil" "github.com/stakater/Reloader/internal/pkg/workload" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestService_ProcessConfigMap_AutoReload(t *testing.T) { diff --git a/internal/pkg/reload/strategy.go b/internal/pkg/reload/strategy.go index 8912e1cb7..4386bf4b6 100644 --- a/internal/pkg/reload/strategy.go +++ b/internal/pkg/reload/strategy.go @@ -7,8 +7,9 @@ import ( "strings" "time" - "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" + + "github.com/stakater/Reloader/internal/pkg/config" ) const ( diff --git a/internal/pkg/reload/strategy_test.go b/internal/pkg/reload/strategy_test.go index 41c54538d..3ea4f2458 100644 --- a/internal/pkg/reload/strategy_test.go +++ b/internal/pkg/reload/strategy_test.go @@ -4,8 +4,9 @@ import ( "encoding/json" "testing" - "github.com/stakater/Reloader/internal/pkg/config" corev1 "k8s.io/api/core/v1" + + "github.com/stakater/Reloader/internal/pkg/config" ) func TestEnvVarStrategy_Apply(t *testing.T) { diff --git a/internal/pkg/webhook/webhook.go b/internal/pkg/webhook/webhook.go index ea2507325..3653b22e2 100644 --- a/internal/pkg/webhook/webhook.go +++ b/internal/pkg/webhook/webhook.go @@ -11,6 +11,7 @@ import ( "time" "github.com/go-logr/logr" + httputil "github.com/stakater/Reloader/internal/pkg/http" ) diff --git a/internal/pkg/workload/base.go b/internal/pkg/workload/base.go index 71576b0ca..e8479bfb4 100644 --- a/internal/pkg/workload/base.go +++ b/internal/pkg/workload/base.go @@ -162,6 +162,7 @@ func (b 
*BaseWorkload[T]) Update(ctx context.Context, c client.Client) error { // ResetOriginal resets the original state to the current object state. func (b *BaseWorkload[T]) ResetOriginal() { + //nolint:errcheck // Type assertion is safe: DeepCopyObject returns same type T b.original = b.object.DeepCopyObject().(T) }