diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index c428826ea..df52b31e7 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -20,7 +20,7 @@ env: DOCKER_FILE_PATH: Dockerfile DOCKER_UBI_FILE_PATH: Dockerfile.ubi KUBERNETES_VERSION: "1.30.0" - KIND_VERSION: "0.23.0" + KIND_VERSION: "0.31.0" REGISTRY: ghcr.io jobs: @@ -102,15 +102,15 @@ jobs: kind version kind version | grep -q ${KIND_VERSION} - - name: Create Kind Cluster - run: | - kind create cluster - kubectl cluster-info - + - name: Create Kind Cluster and Setup E2E Dependencies + run: KIND_CLUSTER=kind make e2e-setup - - name: Test + - name: Run unit tests run: make test + - name: Run E2E tests + run: KIND_CLUSTER=kind make e2e + - name: Run quick A/B load tests uses: ./.github/actions/loadtest with: diff --git a/.gitignore b/.gitignore index 3f28c3f59..b3827fffc 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ test/loadtest/results test/loadtest/loadtest # Temporary NFS files .nfs* +*.test diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..8644bc04f --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,74 @@ +version: "2" + +run: + go: "1.25" + timeout: 5m + allow-parallel-runners: true + +linters: + default: none + enable: + # Core linters + - errcheck + - govet + - staticcheck + - ineffassign + - unused + + # Code quality + - revive + - misspell + - unconvert + - unparam + - nakedret + - copyloopvar + + # Bug prevention + - bodyclose + - durationcheck + - errorlint + + # Test framework + - ginkgolinter + + settings: + revive: + rules: + - name: comment-spacings + - name: import-shadowing + + govet: + enable-all: true + disable: + - shadow + - fieldalignment + + errcheck: + check-type-assertions: true + exclude-functions: + - (io.Closer).Close + - (*os.File).Close + + nakedret: + max-func-lines: 30 + + exclusions: + generated: lax + rules: + - linters: + - errcheck + path: _test\.go + paths: + - 
third_party$ + - vendor$ + +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/stakater/Reloader + exclusions: + generated: lax diff --git a/Makefile b/Makefile index 8c0aed8fa..6b29d1cc5 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,9 @@ DOCKER_IMAGE ?= ghcr.io/stakater/reloader # Default value "dev" VERSION ?= 0.0.1 +# Full image reference (used for docker-build) +IMG ?= $(DOCKER_IMAGE):v$(VERSION) + REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION} REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH} BUILD= @@ -34,14 +37,12 @@ KUBECTL ?= kubectl KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION) CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION) ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) -GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) YQ ?= $(LOCALBIN)/yq ## Tool Versions KUSTOMIZE_VERSION ?= v5.3.0 CONTROLLER_TOOLS_VERSION ?= v0.14.0 ENVTEST_VERSION ?= release-0.17 -GOLANGCI_LINT_VERSION ?= v2.6.1 YQ_VERSION ?= v4.27.5 YQ_DOWNLOAD_URL = "https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(ARCH)" @@ -72,10 +73,6 @@ envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. $(ENVTEST): $(LOCALBIN) $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) -.PHONY: golangci-lint -golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. -$(GOLANGCI_LINT): $(LOCALBIN) - $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist # $1 - target path with name of binary (ideally with version) @@ -102,8 +99,12 @@ run: build: "$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}" -lint: golangci-lint ## Run golangci-lint on the codebase - $(GOLANGCI_LINT) run ./... 
+lint: ## Run golangci-lint on the codebase + go tool golangci-lint run ./... + +fmt: ## Format all Go files + go tool goimports -w -local github.com/stakater/Reloader . + gofmt -w . build-image: docker buildx build \ @@ -140,7 +141,48 @@ manifest: docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH) test: - "$(GOCMD)" test -timeout 1800s -v ./... + "$(GOCMD)" test -timeout 1800s -v -short -count=1 ./internal/... ./test/e2e/utils/... + +##@ E2E Tests + +E2E_IMG ?= ghcr.io/stakater/reloader:test +E2E_TIMEOUT ?= 45m +KIND_CLUSTER ?= reloader-e2e +CONTAINER_RUNTIME ?= $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null) + +.PHONY: e2e-setup +e2e-setup: ## One-time setup: create Kind cluster and install dependencies (Argo, CSI, Vault) + @if kind get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER)$$"; then \ + echo "Kind cluster $(KIND_CLUSTER) already exists"; \ + else \ + echo "Creating Kind cluster $(KIND_CLUSTER)..."; \ + kind create cluster --name $(KIND_CLUSTER); \ + fi + ./scripts/e2e-cluster-setup.sh + +.PHONY: e2e +e2e: ## Run e2e tests (builds image, loads to Kind, runs tests in parallel) + $(CONTAINER_RUNTIME) build -t $(E2E_IMG) -f Dockerfile . +ifeq ($(notdir $(CONTAINER_RUNTIME)),podman) + $(CONTAINER_RUNTIME) save $(E2E_IMG) -o /tmp/reloader-e2e.tar + kind load image-archive /tmp/reloader-e2e.tar --name $(KIND_CLUSTER) + rm -f /tmp/reloader-e2e.tar +else + kind load docker-image $(E2E_IMG) --name $(KIND_CLUSTER) +endif + SKIP_BUILD=true RELOADER_IMAGE=$(E2E_IMG) "$(GOCMD)" tool ginkgo --keep-going -v --timeout=$(E2E_TIMEOUT) ./test/e2e/... 
+ +.PHONY: e2e-cleanup +e2e-cleanup: ## Cleanup: remove test resources and delete Kind cluster + ./scripts/e2e-cluster-cleanup.sh + kind delete cluster --name $(KIND_CLUSTER) + +.PHONY: e2e-ci +e2e-ci: e2e-setup e2e e2e-cleanup ## CI pipeline: setup, run tests, cleanup + +.PHONY: docker-build +docker-build: ## Build Docker image + $(CONTAINER_RUNTIME) build -t $(IMG) -f Dockerfile . stop: @docker stop "${BINARY}" diff --git a/go.mod b/go.mod index 48f13d8e4..9c2545fd6 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,10 @@ go 1.25.5 require ( github.com/argoproj/argo-rollouts v1.8.3 - github.com/openshift/api v0.0.0-20260102143802-d2ec16864f86 - github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc + github.com/onsi/ginkgo/v2 v2.27.4 + github.com/onsi/gomega v1.39.0 + github.com/openshift/api v0.0.0-20260109135506-3920bba77f16 + github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 github.com/parnurzeal/gorequest v0.3.0 github.com/prometheus/client_golang v1.23.2 github.com/sirupsen/logrus v1.9.3 @@ -15,81 +17,274 @@ require ( k8s.io/apimachinery v0.35.0 k8s.io/client-go v0.35.0 k8s.io/kubectl v0.35.0 - k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 + k8s.io/utils v0.0.0-20260108192941-914a6e750570 sigs.k8s.io/secrets-store-csi-driver v1.5.5 ) require ( + 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect + 4d63.com/gochecknoglobals v0.2.2 // indirect + codeberg.org/chavacava/garif v0.2.0 // indirect + codeberg.org/polyfloyd/go-errorlint v1.9.0 // indirect + dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect + dev.gaijin.team/go/golib v0.6.0 // indirect + github.com/4meepo/tagalign v1.4.3 // indirect + github.com/Abirdcfly/dupword v0.1.7 // indirect + github.com/AdminBenni/iota-mixing v1.0.0 // indirect + github.com/AlwxSin/noinlineerr v1.0.5 // indirect + github.com/Antonboom/errname v1.1.1 // indirect + github.com/Antonboom/nilnil v1.1.1 // indirect + github.com/Antonboom/testifylint v1.6.4 // indirect + github.com/BurntSushi/toml v1.6.0 
// indirect + github.com/Djarvur/go-err113 v0.1.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/MirrexOne/unqueryvet v1.4.0 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect + github.com/alecthomas/chroma/v2 v2.21.1 // indirect + github.com/alecthomas/go-check-sumtype v0.3.1 // indirect + github.com/alexkohler/nakedret/v2 v2.0.6 // indirect + github.com/alexkohler/prealloc v1.0.1 // indirect + github.com/alfatraining/structtag v1.0.0 // indirect + github.com/alingse/asasalint v0.0.11 // indirect + github.com/alingse/nilnesserr v0.2.0 // indirect + github.com/ashanbrown/forbidigo/v2 v2.3.0 // indirect + github.com/ashanbrown/makezero/v2 v2.1.0 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.3 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bombsimon/wsl/v4 v4.7.0 // indirect + github.com/bombsimon/wsl/v5 v5.3.0 // indirect + github.com/breml/bidichk v0.3.3 // indirect + github.com/breml/errchkjson v0.4.1 // indirect + github.com/butuzov/ireturn v0.4.0 // indirect + github.com/butuzov/mirror v1.3.0 // indirect + github.com/catenacyber/perfsprint v0.10.1 // indirect + github.com/ccojocar/zxcvbn-go v1.0.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/charithe/durationcheck v0.0.11 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect + github.com/charmbracelet/lipgloss v1.1.0 // indirect + github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/ckaznocha/intrange v0.3.1 // indirect + github.com/curioswitch/go-reassign v0.3.0 // indirect + github.com/daixiang0/gci v0.13.7 // indirect + github.com/dave/dst v0.27.3 // indirect github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/denis-tingaikin/go-header v0.5.0 // indirect + github.com/dlclark/regexp2 v1.11.5 // indirect github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect + github.com/ettle/strcase v0.2.0 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/firefart/nonamedreturns v1.0.6 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/ghostiam/protogetter v0.3.18 // indirect + github.com/go-critic/go-critic v0.14.3 // indirect github.com/go-logr/logr v1.4.3 // indirect - github.com/go-openapi/jsonpointer v0.21.1 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/jsonpointer v0.22.4 // indirect + github.com/go-openapi/jsonreference v0.21.4 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.2.0 
// indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/godoc-lint/godoc-lint v0.11.1 // indirect + github.com/gofrs/flock v0.13.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/gnostic-models v0.7.0 // indirect + github.com/golangci/asciicheck v0.5.0 // indirect + github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect + github.com/golangci/go-printf-func-name v0.1.1 // indirect + github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect + github.com/golangci/golangci-lint/v2 v2.8.0 // indirect + github.com/golangci/golines v0.14.0 // indirect + github.com/golangci/misspell v0.7.0 // indirect + github.com/golangci/plugin-module-register v0.1.2 // indirect + github.com/golangci/revgrep v0.8.0 // indirect + github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e // indirect + github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e // indirect + github.com/google/gnostic-models v0.7.1 // indirect github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20260106004452-d7df1bf2cac7 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/gordonklaus/ineffassign v0.2.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.5.0 // indirect + github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.2 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect + github.com/hashicorp/go-version v1.8.0 // indirect + 
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/josharian/intern v1.0.0 // indirect + github.com/jgautheron/goconst v1.8.2 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jjti/go-spancheck v0.6.5 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mailru/easyjson v0.9.0 // indirect + github.com/julz/importas v0.2.0 // indirect + github.com/karamaru-alpha/copyloopvar v1.2.2 // indirect + github.com/kisielk/errcheck v1.9.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.6 // indirect + github.com/kulti/thelper v0.7.1 // indirect + github.com/kunwardeep/paralleltest v1.0.15 // indirect + github.com/lasiar/canonicalheader v1.1.2 // indirect + github.com/ldez/exptostd v0.4.5 // indirect + github.com/ldez/gomoddirectives v0.8.0 // indirect + github.com/ldez/grignotin v0.10.1 // indirect + github.com/ldez/structtags v0.6.1 // indirect + github.com/ldez/tagliatelle v0.7.2 // indirect + github.com/ldez/usetesting v0.5.0 // indirect + github.com/leonklingele/grouper v1.1.2 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/macabu/inamedparam v0.2.0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/manuelarte/embeddedstructfieldcheck v0.4.0 // indirect + github.com/manuelarte/funcorder v0.5.0 // indirect + github.com/maratori/testableexamples v1.0.1 // indirect + github.com/maratori/testpackage v1.1.2 // indirect + github.com/matoous/godox v1.1.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mgechev/revive v1.13.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure 
v1.5.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/moricho/tparallel v0.3.2 // indirect github.com/moul/http2curl v1.0.0 // indirect + github.com/muesli/termenv v0.16.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nishanths/exhaustive v0.12.0 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect + github.com/nunnatsa/ginkgolinter v0.21.2 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.19.2 // indirect + github.com/quasilyte/go-ruleguard v0.4.5 // indirect + github.com/quasilyte/go-ruleguard/dsl v0.3.23 // indirect + github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/raeperd/recvcheck v0.2.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/ryancurrah/gomodguard v1.4.1 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect + github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + 
github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect + github.com/securego/gosec/v2 v2.22.11 // indirect + github.com/sivchari/containedctx v1.0.3 // indirect github.com/smartystreets/goconvey v1.7.2 // indirect - github.com/spf13/pflag v1.0.9 // indirect + github.com/sonatard/noctx v0.4.0 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.12.0 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.3.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.4.1 // indirect + github.com/tetafro/godot v1.5.4 // indirect + github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect + github.com/timonwong/loggercheck v0.11.0 // indirect + github.com/tomarrell/wrapcheck/v2 v2.12.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ultraware/funlen v0.2.0 // indirect + github.com/ultraware/whitespace v0.2.0 // indirect + github.com/uudashr/gocognit v1.2.0 // indirect + github.com/uudashr/iface v1.4.1 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xen0n/gosmopolitan v1.3.0 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.3.0 // indirect + github.com/ykadowak/zerologlint v0.1.5 // indirect + gitlab.com/bosi/decorder v0.4.2 // indirect + go-simpler.org/musttag v0.14.0 // indirect + go-simpler.org/sloglint v0.11.1 // indirect + go.augendre.info/arangolint v0.3.1 // indirect + go.augendre.info/fatcontext v0.9.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // 
indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/term v0.38.0 // indirect - golang.org/x/text v0.32.0 // indirect - golang.org/x/time v0.11.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/mod v0.32.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.40.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.6.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect + mvdan.cc/gofumpt v0.9.2 // indirect + mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) +tool ( + github.com/golangci/golangci-lint/v2/cmd/golangci-lint + github.com/onsi/ginkgo/v2/ginkgo + golang.org/x/tools/cmd/goimports +) + // Replacements for argo-rollouts replace ( - github.com/go-check/check => github.com/go-check/check v0.0.0-20201130134442-10cb98267c6c k8s.io/api v0.0.0 => k8s.io/api v0.35.0 
k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.35.0 k8s.io/client-go v0.0.0 => k8s.io/client-go v0.35.0 - k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2 - k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2 - k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0 - k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.24.2 - k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.24.2 - k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2 - k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2 - k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2 - k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.35.0 - k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2 - k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2 - k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0 - k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.24.2 - k8s.io/sample-cli-plugin v0.0.0 => k8s.io/sample-cli-plugin v0.24.2 - k8s.io/sample-controller v0.0.0 => k8s.io/sample-controller v0.24.2 + k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.35.0 + k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.35.0 ) diff --git a/go.sum b/go.sum index a1b7e7d15..1deb90ec4 100644 --- a/go.sum +++ b/go.sum @@ -1,125 +1,584 @@ +4d63.com/gocheckcompilerdirectives v1.3.0 h1:Ew5y5CtcAAQeTVKUVFrE7EwHMrTO6BggtEj8BZSjZ3A= +4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= +4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= +4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= +codeberg.org/chavacava/garif v0.2.0 h1:F0tVjhYbuOCnvNcU3YSpO6b3Waw6Bimy4K0mM8y6MfY= +codeberg.org/chavacava/garif v0.2.0/go.mod h1:P2BPbVbT4QcvLZrORc2T29szK3xEOlnl0GiPTJmEqBQ= +codeberg.org/polyfloyd/go-errorlint v1.9.0 h1:VkdEEmA1VBpH6ecQoMR4LdphVI3fA4RrCh2an7YmodI= +codeberg.org/polyfloyd/go-errorlint v1.9.0/go.mod 
h1:GPRRu2LzVijNn4YkrZYJfatQIdS+TrcK8rL5Xs24qw8= +dev.gaijin.team/go/exhaustruct/v4 v4.0.0 h1:873r7aNneqoBB3IaFIzhvt2RFYTuHgmMjoKfwODoI1Y= +dev.gaijin.team/go/exhaustruct/v4 v4.0.0/go.mod h1:aZ/k2o4Y05aMJtiux15x8iXaumE88YdiB0Ai4fXOzPI= +dev.gaijin.team/go/golib v0.6.0 h1:v6nnznFTs4bppib/NyU1PQxobwDHwCXXl15P7DV5Zgo= +dev.gaijin.team/go/golib v0.6.0/go.mod h1:uY1mShx8Z/aNHWDyAkZTkX+uCi5PdX7KsG1eDQa2AVE= +github.com/4meepo/tagalign v1.4.3 h1:Bnu7jGWwbfpAie2vyl63Zup5KuRv21olsPIha53BJr8= +github.com/4meepo/tagalign v1.4.3/go.mod h1:00WwRjiuSbrRJnSVeGWPLp2epS5Q/l4UEy0apLLS37c= +github.com/Abirdcfly/dupword v0.1.7 h1:2j8sInznrje4I0CMisSL6ipEBkeJUJAmK1/lfoNGWrQ= +github.com/Abirdcfly/dupword v0.1.7/go.mod h1:K0DkBeOebJ4VyOICFdppB23Q0YMOgVafM0zYW0n9lF4= +github.com/AdminBenni/iota-mixing v1.0.0 h1:Os6lpjG2dp/AE5fYBPAA1zfa2qMdCAWwPMCgpwKq7wo= +github.com/AdminBenni/iota-mixing v1.0.0/go.mod h1:i4+tpAaB+qMVIV9OK3m4/DAynOd5bQFaOu+2AhtBCNY= +github.com/AlwxSin/noinlineerr v1.0.5 h1:RUjt63wk1AYWTXtVXbSqemlbVTb23JOSRiNsshj7TbY= +github.com/AlwxSin/noinlineerr v1.0.5/go.mod h1:+QgkkoYrMH7RHvcdxdlI7vYYEdgeoFOVjU9sUhw/rQc= +github.com/Antonboom/errname v1.1.1 h1:bllB7mlIbTVzO9jmSWVWLjxTEbGBVQ1Ff/ClQgtPw9Q= +github.com/Antonboom/errname v1.1.1/go.mod h1:gjhe24xoxXp0ScLtHzjiXp0Exi1RFLKJb0bVBtWKCWQ= +github.com/Antonboom/nilnil v1.1.1 h1:9Mdr6BYd8WHCDngQnNVV0b554xyisFioEKi30sksufQ= +github.com/Antonboom/nilnil v1.1.1/go.mod h1:yCyAmSw3doopbOWhJlVci+HuyNRuHJKIv6V2oYQa8II= +github.com/Antonboom/testifylint v1.6.4 h1:gs9fUEy+egzxkEbq9P4cpcMB6/G0DYdMeiFS87UiqmQ= +github.com/Antonboom/testifylint v1.6.4/go.mod h1:YO33FROXX2OoUfwjz8g+gUxQXio5i9qpVy7nXGbxDD4= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/Djarvur/go-err113 v0.1.1 h1:eHfopDqXRwAi+YmCUas75ZE0+hoBHJ2GQNLYRSxao4g= +github.com/Djarvur/go-err113 v0.1.1/go.mod 
h1:IaWJdYFLg76t2ihfflPZnM1LIQszWOsFDh2hhhAVF6k= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/MirrexOne/unqueryvet v1.4.0 h1:6KAkqqW2KUnkl9Z0VuTphC3IXRPoFqEkJEtyxxHj5eQ= +github.com/MirrexOne/unqueryvet v1.4.0/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/chroma/v2 v2.21.1 h1:FaSDrp6N+3pphkNKU6HPCiYLgm8dbe5UXIXcoBhZSWA= +github.com/alecthomas/chroma/v2 v2.21.1/go.mod h1:NqVhfBR0lte5Ouh3DcthuUCTUpDC9cxBOfyMbMQPs3o= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs= +github.com/alecthomas/repr v0.5.2/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alexkohler/nakedret/v2 v2.0.6 h1:ME3Qef1/KIKr3kWX3nti3hhgNxw6aqN5pZmQiFSsuzQ= +github.com/alexkohler/nakedret/v2 v2.0.6/go.mod h1:l3RKju/IzOMQHmsEvXwkqMDzHHvurNQfAgE1eVmT40Q= +github.com/alexkohler/prealloc v1.0.1 h1:A9P1haqowqUxWvU9nk6tQ7YktXIHf+LQM9wPRhuteEE= +github.com/alexkohler/prealloc v1.0.1/go.mod h1:fT39Jge3bQrfA7nPMDngUfvUbQGQeJyGQnR+913SCig= +github.com/alfatraining/structtag v1.0.0 h1:2qmcUqNcCoyVJ0up879K614L9PazjBSFruTB0GOFjCc= +github.com/alfatraining/structtag v1.0.0/go.mod h1:p3Xi5SwzTi+Ryj64DqjLWz7XurHxbGsq6y3ubePJPus= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= 
+github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.2.0 h1:raLem5KG7EFVb4UIDAXgrv3N2JIaffeKNtcEXkEWd/w= +github.com/alingse/nilnesserr v0.2.0/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= github.com/argoproj/argo-rollouts v1.8.3 h1:blbtQva4IK9r6gFh+dWkCrLnFdPOWiv9ubQYu36qeaA= github.com/argoproj/argo-rollouts v1.8.3/go.mod h1:kCAUvIfMGfOyVf3lvQbBt0nqQn4Pd+zB5/YwKv+UBa8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/ashanbrown/forbidigo/v2 v2.3.0 h1:OZZDOchCgsX5gvToVtEBoV2UWbFfI6RKQTir2UZzSxo= +github.com/ashanbrown/forbidigo/v2 v2.3.0/go.mod h1:5p6VmsG5/1xx3E785W9fouMxIOkvY2rRV9nMdWadd6c= +github.com/ashanbrown/makezero/v2 v2.1.0 h1:snuKYMbqosNokUKm+R6/+vOPs8yVAi46La7Ck6QYSaE= +github.com/ashanbrown/makezero/v2 v2.1.0/go.mod h1:aEGT/9q3S8DHeE57C88z2a6xydvgx8J5hgXIGWgo0MY= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= +github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= +github.com/bombsimon/wsl/v5 v5.3.0 
h1:nZWREJFL6U3vgW/B1lfDOigl+tEF6qgs6dGGbFeR0UM= +github.com/bombsimon/wsl/v5 v5.3.0/go.mod h1:Gp8lD04z27wm3FANIUPZycXp+8huVsn0oxc+n4qfV9I= +github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE= +github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE= +github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg= +github.com/breml/errchkjson v0.4.1/go.mod h1:a23OvR6Qvcl7DG/Z4o0el6BRAjKnaReoPQFciAl9U3s= +github.com/butuzov/ireturn v0.4.0 h1:+s76bF/PfeKEdbG8b54aCocxXmi0wvYdOVsWxVO7n8E= +github.com/butuzov/ireturn v0.4.0/go.mod h1:ghI0FrCmap8pDWZwfPisFD1vEc56VKH4NpQUxDHta70= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= +github.com/catenacyber/perfsprint v0.10.1 h1:u7Riei30bk46XsG8nknMhKLXG9BcXz3+3tl/WpKm0PQ= +github.com/catenacyber/perfsprint v0.10.1/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc= +github.com/ccojocar/zxcvbn-go v1.0.4 h1:FWnCIRMXPj43ukfX000kvBZvV6raSxakYr1nzyNrUcc= +github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxYFOSJB21Das= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.11 h1:g1/EX1eIiKS57NTWsYtHDZ/APfeXKhye1DidBcABctk= +github.com/charithe/durationcheck v0.0.11/go.mod h1:x5iZaixRNl8ctbM+3B2RrPG5t856TxRyVQEnbIEM2X4= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod 
h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/ckaznocha/intrange v0.3.1 h1:j1onQyXvHUsPWujDH6WIjhyH26gkRt/txNlV7LspvJs= +github.com/ckaznocha/intrange v0.3.1/go.mod h1:QVepyz1AkUoFQkpEqksSYpNpUo3c5W7nWh/s6SHIJJk= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/daixiang0/gci v0.13.7 h1:+0bG5eK9vlI08J+J/NWGbWPTNiXPG4WhNLJOkSxWITQ= +github.com/daixiang0/gci v0.13.7/go.mod h1:812WVN6JLFY9S6Tv76twqmNqevN0pa3SX3nih0brVzQ= +github.com/dave/dst v0.27.3 h1:P1HPoMza3cMEquVf9kKy8yXsFirry4zEnWOdYPOoIzY= +github.com/dave/dst v0.27.3/go.mod h1:jHh6EOibnHgcUW3WjKHisiooEkYwqpHLBSX1iOBhEyc= +github.com/dave/jennifer v1.7.1 h1:B4jJJDHelWcDhlRQxWeo0Npa/pYKBLrirAQoTN45txo= +github.com/dave/jennifer v1.7.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= +github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= +github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg0Kys0ZbaNmDDzZ2R/C7DTi+bbsJ0= github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.6 h1:vmiBcKV/3EqKY3ZiPxCINmpS431OcE1S47AQUwhrg8E= +github.com/firefart/nonamedreturns v1.0.6/go.mod h1:R8NisJnSIpvPWheCq0mNRXJok6D8h7fagJTF8EMEwCo= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/ghostiam/protogetter v0.3.18 h1:yEpghRGtP9PjKvVXtEzGpYfQj1Wl/ZehAfU6fr62Lfo= +github.com/ghostiam/protogetter v0.3.18/go.mod h1:FjIu5Yfs6FT391m+Fjp3fbAYJ6rkL/J6ySpZBfnODuI= +github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= +github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= +github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= +github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= +github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= +github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= +github.com/go-critic/go-critic v0.14.3 h1:5R1qH2iFeo4I/RJU8vTezdqs08Egi4u5p6vOESA0pog= +github.com/go-critic/go-critic v0.14.3/go.mod h1:xwntfW6SYAd7h1OqDzmN6hBX/JxsEKl5up/Y2bsxgVQ= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= -github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= 
-github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= +github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= +github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= +github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod 
h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0 
h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= +github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob 
v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/godoc-lint/godoc-lint v0.11.1 h1:z9as8Qjiy6miRIa3VRymTa+Gt2RLnGICVikcvlUVOaA= +github.com/godoc-lint/godoc-lint v0.11.1/go.mod h1:BAqayheFSuZrEAqCRxgw9MyvsM+S/hZwJbU1s/ejRj8= +github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= +github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= -github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/golangci/asciicheck v0.5.0 h1:jczN/BorERZwK8oiFBOGvlGPknhvq0bjnysTj4nUfo0= +github.com/golangci/asciicheck v0.5.0/go.mod h1:5RMNAInbNFw2krqN6ibBxN/zfRFa9S6tA1nPdM0l8qQ= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= +github.com/golangci/go-printf-func-name v0.1.1 h1:hIYTFJqAGp1iwoIfsNTpoq1xZAarogrvjO9AfiW3B4U= +github.com/golangci/go-printf-func-name v0.1.1/go.mod h1:Es64MpWEZbh0UBtTAICOZiB+miW53w/K9Or/4QogJss= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= +github.com/golangci/golangci-lint/v2 v2.8.0 h1:wJnr3hJWY3eVzOUcfwbDc2qbi2RDEpvLmQeNFaPSNYA= +github.com/golangci/golangci-lint/v2 v2.8.0/go.mod h1:xl+HafQ9xoP8rzw0z5AwnO5kynxtb80e8u02Ej/47RI= +github.com/golangci/golines 
v0.14.0 h1:xt9d3RKBjhasA3qpoXs99J2xN2t6eBlpLHt0TrgyyXc= +github.com/golangci/golines v0.14.0/go.mod h1:gf555vPG2Ia7mmy2mzmhVQbVjuK8Orw0maR1G4vVAAQ= +github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= +github.com/golangci/misspell v0.7.0/go.mod h1:WZyyI2P3hxPY2UVHs3cS8YcllAeyfquQcKfdeE9AFVg= +github.com/golangci/plugin-module-register v0.1.2 h1:e5WM6PO6NIAEcij3B053CohVp3HIYbzSuP53UAYgOpg= +github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= +github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= +github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e h1:ai0EfmVYE2bRA5htgAG9r7s3tHsfjIhN98WshBTJ9jM= +github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e/go.mod h1:Vrn4B5oR9qRwM+f54koyeH3yzphlecwERs0el27Fr/s= +github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e h1:gD6P7NEo7Eqtt0ssnqSJNNndxe69DOQ24A5h7+i3KpM= +github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e/go.mod h1:h+wZwLjUTJnm/P2rwlbJdRPZXOzaT36/FwnPnY2inzc= +github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= +github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= 
-github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/pprof v0.0.0-20260106004452-d7df1bf2cac7 h1:kmPAX+IJBcUAFTddx2+xC0H7sk2U9ijIIxZLLrPLNng= +github.com/google/pprof v0.0.0-20260106004452-d7df1bf2cac7/go.mod h1:67FPmZWbr+KDT/VlpWtw6sO9XSjpJmLuHpoLmWiTGgY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.2.0 h1:Uths4KnmwxNJNzq87fwQQDDnbNb7De00VOk9Nu0TySs= +github.com/gordonklaus/ineffassign v0.2.0/go.mod h1:TIpymnagPSexySzs7F9FnO1XFTy8IT3a59vmZp5Y9Lw= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8= +github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc= +github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= +github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= +github.com/gostaticanalysis/nilerr v0.1.2 h1:S6nk8a9N8g062nsx63kUkF6AzbHGw7zzyHMcpu52xQU= +github.com/gostaticanalysis/nilerr v0.1.2/go.mod 
h1:A19UHhoY3y8ahoL7YKz6sdjDtduwTSI4CsymaC2htPA= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= +github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jgautheron/goconst v1.8.2 
h1:y0XF7X8CikZ93fSNT6WBTb/NElBu9IjaY7CCYQrCMX4= +github.com/jgautheron/goconst v1.8.2/go.mod h1:A0oxgBCHy55NQn6sYpO7UdnA9p+h7cPtoOZUmvNIako= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jjti/go-spancheck v0.6.5 h1:lmi7pKxa37oKYIMScialXUK6hP3iY5F1gu+mLBPgYB8= +github.com/jjti/go-spancheck v0.6.5/go.mod h1:aEogkeatBrbYsyW6y5TgDfihCulDYciL1B7rG2vSsrU= +github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= +github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= +github.com/karamaru-alpha/copyloopvar v1.2.2 h1:yfNQvP9YaGQR7VaWLYcfZUlRP2eo2vhExWKxD/fP6q0= +github.com/karamaru-alpha/copyloopvar v1.2.2/go.mod h1:oY4rGZqZ879JkJMtX3RRkcXRkmUvH0x35ykgaKgsgJY= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M= +github.com/kisielk/errcheck v1.9.0/go.mod h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.6 h1:7HIyRcnyzxL9Lz06NGhiKvenXq7Zw6Q0UQu/ttjfJCE= +github.com/kkHAIKE/contextcheck v1.1.6/go.mod h1:3dDbMRNBFaq8HFXWC1JyvDSPm43CmE6IuHam8Wr0rkg= github.com/klauspost/compress v1.18.0 
h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.7.1 h1:fI8QITAoFVLx+y+vSyuLBP+rcVIB8jKooNSCT2EiI98= +github.com/kulti/thelper v0.7.1/go.mod h1:NsMjfQEy6sd+9Kfw8kCP61W1I0nerGSYSFnGaxQkcbs= +github.com/kunwardeep/paralleltest v1.0.15 h1:ZMk4Qt306tHIgKISHWFJAO1IDQJLc6uDyJMLyncOb6w= +github.com/kunwardeep/paralleltest v1.0.15/go.mod h1:di4moFqtfz3ToSKxhNjhOZL+696QtJGCFe132CbBLGk= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.4.5 h1:kv2ZGUVI6VwRfp/+bcQ6Nbx0ghFWcGIKInkG/oFn1aQ= +github.com/ldez/exptostd v0.4.5/go.mod h1:QRjHRMXJrCTIm9WxVNH6VW7oN7KrGSht69bIRwvdFsM= +github.com/ldez/gomoddirectives v0.8.0 h1:JqIuTtgvFC2RdH1s357vrE23WJF2cpDCPFgA/TWDGpk= +github.com/ldez/gomoddirectives v0.8.0/go.mod h1:jutzamvZR4XYJLr0d5Honycp4Gy6GEg2mS9+2YX3F1Q= +github.com/ldez/grignotin v0.10.1 h1:keYi9rYsgbvqAZGI1liek5c+jv9UUjbvdj3Tbn5fn4o= +github.com/ldez/grignotin v0.10.1/go.mod h1:UlDbXFCARrXbWGNGP3S5vsysNXAPhnSuBufpTEbwOas= +github.com/ldez/structtags v0.6.1 h1:bUooFLbXx41tW8SvkfwfFkkjPYvFFs59AAMgVg6DUBk= +github.com/ldez/structtags 
v0.6.1/go.mod h1:YDxVSgDy/MON6ariaxLF2X09bh19qL7MtGBN5MrvbdY= +github.com/ldez/tagliatelle v0.7.2 h1:KuOlL70/fu9paxuxbeqlicJnCspCRjH0x8FW+NfgYUk= +github.com/ldez/tagliatelle v0.7.2/go.mod h1:PtGgm163ZplJfZMZ2sf5nhUT170rSuPgBimoyYtdaSI= +github.com/ldez/usetesting v0.5.0 h1:3/QtzZObBKLy1F4F8jLuKJiKBjjVFi1IavpoWbmqLwc= +github.com/ldez/usetesting v0.5.0/go.mod h1:Spnb4Qppf8JTuRgblLrEWb7IE6rDmUpGvxY3iRrzvDQ= +github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= +github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddBCpE= +github.com/macabu/inamedparam v0.2.0/go.mod h1:+Pee9/YfGe5LJ62pYXqB89lJ+0k5bsR8Wgz/C0Zlq3U= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/manuelarte/embeddedstructfieldcheck v0.4.0 h1:3mAIyaGRtjK6EO9E73JlXLtiy7ha80b2ZVGyacxgfww= +github.com/manuelarte/embeddedstructfieldcheck v0.4.0/go.mod h1:z8dFSyXqp+fC6NLDSljRJeNQJJDWnY7RoWFzV3PC6UM= +github.com/manuelarte/funcorder v0.5.0 h1:llMuHXXbg7tD0i/LNw8vGnkDTHFpTnWqKPI85Rknc+8= +github.com/manuelarte/funcorder v0.5.0/go.mod h1:Yt3CiUQthSBMBxjShjdXMexmzpP8YGvGLjrxJNkO2hA= +github.com/maratori/testableexamples v1.0.1 h1:HfOQXs+XgfeRBJ+Wz0XfH+FHnoY9TVqL6Fcevpzy4q8= +github.com/maratori/testableexamples v1.0.1/go.mod h1:XE2F/nQs7B9N08JgyRmdGjYVGqxWwClLPCGSQhXQSrQ= +github.com/maratori/testpackage v1.1.2 h1:ffDSh+AgqluCLMXhM19f/cpvQAKygKAJXFl9aUjmbqs= +github.com/maratori/testpackage v1.1.2/go.mod h1:8F24GdVDFW5Ew43Et02jamrVMNXLUNaOynhDssITGfc= +github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= 
+github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4= +github.com/matoous/godox v1.1.0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= +github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= +github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM= +github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= +github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= -github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= -github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= -github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= -github.com/openshift/api v0.0.0-20260102143802-d2ec16864f86 h1:Vsqg+WqSA91LjrwK5lzkSCjztK/B+T8MPKI3MIALx3w= -github.com/openshift/api v0.0.0-20260102143802-d2ec16864f86/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= -github.com/openshift/client-go 
v0.0.0-20251223102348-558b0eef16bc h1:nIlRaJfr/yGjPV15MNF5eVHLAGyXFjcUzO+hXeWDDk8= -github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc/go.mod h1:cs9BwTu96sm2vQvy7r9rOiltgu90M6ju2qIHFG9WU+o= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.21.2 h1:khzWfm2/Br8ZemX8QM1pl72LwM+rMeW6VUbQ4rzh0Po= +github.com/nunnatsa/ginkgolinter v0.21.2/go.mod h1:GItSI5fw7mCGLPmkvGYrr1kEetZe7B593jcyOpyabsY= +github.com/onsi/ginkgo/v2 v2.27.4 h1:fcEcQW/A++6aZAZQNUmNjvA9PSOzefMJBerHJ4t8v8Y= +github.com/onsi/ginkgo/v2 v2.27.4/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.39.0 h1:y2ROC3hKFmQZJNFeGAMeHZKkjBL65mIZcvrLQBF9k6Q= +github.com/onsi/gomega v1.39.0/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= +github.com/openshift/api v0.0.0-20260109135506-3920bba77f16 h1:EfTfmlNBtG/xauH9gcnq64J08nYTBKyilbl/EUbxGno= +github.com/openshift/api v0.0.0-20260109135506-3920bba77f16/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 h1:6rd4zSo2UaWQcAPZfHK9yzKVqH0BnMv1hqMzqXZyTds= +github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13/go.mod h1:YvOmPmV7wcJxpfhTDuFqqs2Xpb3M3ovsM6Qs/i2ptq4= +github.com/otiai10/copy 
v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI= github.com/parnurzeal/gorequest v0.3.0/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod 
h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/quasilyte/go-ruleguard v0.4.5 h1:AGY0tiOT5hJX9BTdx/xBdoCubQUAE2grkqY2lSwvZcA= +github.com/quasilyte/go-ruleguard v0.4.5/go.mod h1:Vl05zJ538vcEEwu16V/Hdu7IYZWyKSwIy4c88Ro1kRE= +github.com/quasilyte/go-ruleguard/dsl v0.3.23 h1:lxjt5B6ZCiBeeNO8/oQsegE6fLeCzuMRoVWSkXC4uvY= +github.com/quasilyte/go-ruleguard/dsl v0.3.23/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= 
+github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.4.1 h1:eWC8eUMNZ/wM/PWuZBv7JxxqT5fiIKSIyTvjb7Elr+g= +github.com/ryancurrah/gomodguard v1.4.1/go.mod h1:qnMJwV1hX9m+YJseXEBhd2s90+1Xn6x9dLz11ualI1I= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.29.0 h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iMf7Knkq057v4XOQ= +github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8= +github.com/securego/gosec/v2 v2.22.11 h1:tW+weM/hCM/GX3iaCV91d5I6hqaRT2TPsFM1+USPXwg= +github.com/securego/gosec/v2 
v2.22.11/go.mod h1:KE4MW/eH0GLWztkbt4/7XpyH0zJBBnu7sYB4l6Wn7Mw= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/sonatard/noctx v0.4.0 h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o= +github.com/sonatard/noctx v0.4.0/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod 
h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= -github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.3.1 h1:AyX7+dxI4IdLBPtDbsGAyqiTSLpCP9hWRrXQDU4Cm/g= +github.com/stbenjam/no-sprintf-host-port v0.3.1/go.mod h1:ODbZesTCHMVKthBHskvUUexdcNHAQRXk9NpSsL8p/HQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= 
+github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.5.4 h1:u1ww+gqpRLiIA16yF2PV1CV1n/X3zhyezbNXC3E14Sg= +github.com/tetafro/godot v1.5.4/go.mod h1:eOkMrVQurDui411nBY2FA05EYH01r14LuWY/NrVDVcU= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 h1:9LPGD+jzxMlnk5r6+hJnar67cgpDIz/iyD+rfl5r2Vk= +github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwTnnNKn4M= +github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= +github.com/tomarrell/wrapcheck/v2 v2.12.0 h1:H/qQ1aNWz/eeIhxKAFvkfIA+N7YDvq6TWVFL27Of9is= +github.com/tomarrell/wrapcheck/v2 
v2.12.0/go.mod h1:AQhQuZd0p7b6rfW+vUwHm5OMCGgp63moQ9Qr/0BpIWo= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.4.1 h1:J16Xl1wyNX9ofhpHmQ9h9gk5rnv2A6lX/2+APLTo0zU= +github.com/uudashr/iface v1.4.1/go.mod h1:pbeBPlbuU2qkNDn0mmfrxP2X+wjPMIQAy+r1MBXSXtg= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xen0n/gosmopolitan v1.3.0 h1:zAZI1zefvo7gcpbCOrPSHJZJYA9ZgLfJqtKzZ5pHqQM= +github.com/xen0n/gosmopolitan v1.3.0/go.mod h1:rckfr5T6o4lBtM1ga7mLGKZmLxswUoH1zxHgNXOsEt4= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs= +github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= +github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint 
v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= +gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= +go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= +go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/musttag v0.14.0 h1:XGySZATqQYSEV3/YTy+iX+aofbZZllJaqwFWs+RTtSo= +go-simpler.org/musttag v0.14.0/go.mod h1:uP8EymctQjJ4Z1kUnjX0u2l60WfUdQxCwSNKzE1JEOE= +go-simpler.org/sloglint v0.11.1 h1:xRbPepLT/MHPTCA6TS/wNfZrDzkGvCCqUv4Bdwc3H7s= +go-simpler.org/sloglint v0.11.1/go.mod h1:2PowwiCOK8mjiF+0KGifVOT8ZsCNiFzvfyJeJOIt8MQ= +go.augendre.info/arangolint v0.3.1 h1:n2E6p8f+zfXSFLa2e2WqFPp4bfvcuRdd50y6cT65pSo= +go.augendre.info/arangolint v0.3.1/go.mod h1:6ZKzEzIZuBQwoSvlKT+qpUfIbBfFCE5gbAoTg0/117g= +go.augendre.info/fatcontext v0.9.0 h1:Gt5jGD4Zcj8CDMVzjOJITlSb9cEch54hjRRlN3qDojE= +go.augendre.info/fatcontext v0.9.0/go.mod h1:L94brOAT1OOUNue6ph/2HnwxoNlds9aXDF2FcUntbNw= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod 
h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= @@ -127,51 +586,126 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 h1:HDjDiATsGqvuqvkDvgJjD1IgPrVekcSXVVE21JwvzGE= +golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms= 
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 
v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod 
h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod 
h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools 
v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -179,9 +713,16 @@ gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnf gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= @@ -190,19 +731,23 @@ k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 
-k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc= k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo= -k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE= -k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY= +k8s.io/utils v0.0.0-20260108192941-914a6e750570/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +mvdan.cc/gofumpt v0.9.2 h1:zsEMWL8SVKGHNztrx6uZrXdp7AX8r421Vvp23sz7ik4= +mvdan.cc/gofumpt v0.9.2/go.mod h1:iB7Hn+ai8lPvofHd9ZFGVg2GOr8sBUw1QUWjNbmIL/s= +mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 h1:ssMzja7PDPJV8FStj7hq9IKiuiKhgz9ErWw+m68e7DI= +mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15/go.mod h1:4M5MMXl2kW6fivUT6yRGpLLPNfuGtU2Z0cPvFquGDYU= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/secrets-store-csi-driver v1.5.5 h1:LJDpDL5TILhlP68nGvtGSlJFxSDgAD2m148NT0Ts7os= sigs.k8s.io/secrets-store-csi-driver v1.5.5/go.mod h1:i2WqLicYH00hrTG3JAzICPMF4HL4KMEORlDt9UQoZLk= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= 
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/pkg/app/app.go b/internal/pkg/app/app.go index 8d09188fc..734fd2a9c 100644 --- a/internal/pkg/app/app.go +++ b/internal/pkg/app/app.go @@ -4,6 +4,6 @@ import "github.com/stakater/Reloader/internal/pkg/cmd" // Run runs the command func Run() error { - cmd := cmd.NewReloaderCommand() - return cmd.Execute() + rootCmd := cmd.NewReloaderCommand() + return rootCmd.Execute() } diff --git a/internal/pkg/callbacks/rolling_upgrade.go b/internal/pkg/callbacks/rolling_upgrade.go index 13e5a63cd..3a0551405 100644 --- a/internal/pkg/callbacks/rolling_upgrade.go +++ b/internal/pkg/callbacks/rolling_upgrade.go @@ -7,8 +7,6 @@ import ( "time" "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/kube" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" @@ -16,6 +14,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" patchtypes "k8s.io/apimachinery/pkg/types" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/pkg/kube" + "maps" argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" @@ -265,158 +266,254 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object { // GetDeploymentAnnotations returns the annotations of given deployment func GetDeploymentAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.Deployment).Annotations == nil { - item.(*appsv1.Deployment).Annotations = make(map[string]string) + deployment, ok := 
item.(*appsv1.Deployment) + if !ok { + return nil + } + if deployment.Annotations == nil { + deployment.Annotations = make(map[string]string) } - return item.(*appsv1.Deployment).Annotations + return deployment.Annotations } // GetCronJobAnnotations returns the annotations of given cronjob func GetCronJobAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.CronJob).Annotations == nil { - item.(*batchv1.CronJob).Annotations = make(map[string]string) + cronJob, ok := item.(*batchv1.CronJob) + if !ok { + return nil } - return item.(*batchv1.CronJob).Annotations + if cronJob.Annotations == nil { + cronJob.Annotations = make(map[string]string) + } + return cronJob.Annotations } // GetJobAnnotations returns the annotations of given job func GetJobAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.Job).Annotations == nil { - item.(*batchv1.Job).Annotations = make(map[string]string) + job, ok := item.(*batchv1.Job) + if !ok { + return nil + } + if job.Annotations == nil { + job.Annotations = make(map[string]string) } - return item.(*batchv1.Job).Annotations + return job.Annotations } // GetDaemonSetAnnotations returns the annotations of given daemonSet func GetDaemonSetAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.DaemonSet).Annotations == nil { - item.(*appsv1.DaemonSet).Annotations = make(map[string]string) + daemonSet, ok := item.(*appsv1.DaemonSet) + if !ok { + return nil + } + if daemonSet.Annotations == nil { + daemonSet.Annotations = make(map[string]string) } - return item.(*appsv1.DaemonSet).Annotations + return daemonSet.Annotations } // GetStatefulSetAnnotations returns the annotations of given statefulSet func GetStatefulSetAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.StatefulSet).Annotations == nil { - item.(*appsv1.StatefulSet).Annotations = make(map[string]string) + statefulSet, ok := item.(*appsv1.StatefulSet) + if !ok { + return nil } - return 
item.(*appsv1.StatefulSet).Annotations + if statefulSet.Annotations == nil { + statefulSet.Annotations = make(map[string]string) + } + return statefulSet.Annotations } // GetRolloutAnnotations returns the annotations of given rollout func GetRolloutAnnotations(item runtime.Object) map[string]string { - if item.(*argorolloutv1alpha1.Rollout).Annotations == nil { - item.(*argorolloutv1alpha1.Rollout).Annotations = make(map[string]string) + rollout, ok := item.(*argorolloutv1alpha1.Rollout) + if !ok { + return nil + } + if rollout.Annotations == nil { + rollout.Annotations = make(map[string]string) } - return item.(*argorolloutv1alpha1.Rollout).Annotations + return rollout.Annotations } // GetDeploymentPodAnnotations returns the pod's annotations of given deployment func GetDeploymentPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.Deployment).Spec.Template.Annotations == nil { - item.(*appsv1.Deployment).Spec.Template.Annotations = make(map[string]string) + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil } - return item.(*appsv1.Deployment).Spec.Template.Annotations + if deployment.Spec.Template.Annotations == nil { + deployment.Spec.Template.Annotations = make(map[string]string) + } + return deployment.Spec.Template.Annotations } // GetCronJobPodAnnotations returns the pod's annotations of given cronjob func GetCronJobPodAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations == nil { - item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) + cronJob, ok := item.(*batchv1.CronJob) + if !ok { + return nil + } + if cronJob.Spec.JobTemplate.Spec.Template.Annotations == nil { + cronJob.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) } - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations + return cronJob.Spec.JobTemplate.Spec.Template.Annotations } // GetJobPodAnnotations 
returns the pod's annotations of given job func GetJobPodAnnotations(item runtime.Object) map[string]string { - if item.(*batchv1.Job).Spec.Template.Annotations == nil { - item.(*batchv1.Job).Spec.Template.Annotations = make(map[string]string) + job, ok := item.(*batchv1.Job) + if !ok { + return nil + } + if job.Spec.Template.Annotations == nil { + job.Spec.Template.Annotations = make(map[string]string) } - return item.(*batchv1.Job).Spec.Template.Annotations + return job.Spec.Template.Annotations } // GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.DaemonSet).Spec.Template.Annotations == nil { - item.(*appsv1.DaemonSet).Spec.Template.Annotations = make(map[string]string) + daemonSet, ok := item.(*appsv1.DaemonSet) + if !ok { + return nil } - return item.(*appsv1.DaemonSet).Spec.Template.Annotations + if daemonSet.Spec.Template.Annotations == nil { + daemonSet.Spec.Template.Annotations = make(map[string]string) + } + return daemonSet.Spec.Template.Annotations } // GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string { - if item.(*appsv1.StatefulSet).Spec.Template.Annotations == nil { - item.(*appsv1.StatefulSet).Spec.Template.Annotations = make(map[string]string) + statefulSet, ok := item.(*appsv1.StatefulSet) + if !ok { + return nil + } + if statefulSet.Spec.Template.Annotations == nil { + statefulSet.Spec.Template.Annotations = make(map[string]string) } - return item.(*appsv1.StatefulSet).Spec.Template.Annotations + return statefulSet.Spec.Template.Annotations } // GetRolloutPodAnnotations returns the pod's annotations of given rollout func GetRolloutPodAnnotations(item runtime.Object) map[string]string { - if item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations == nil { - item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations = 
make(map[string]string) + rollout, ok := item.(*argorolloutv1alpha1.Rollout) + if !ok { + return nil + } + if rollout.Spec.Template.Annotations == nil { + rollout.Spec.Template.Annotations = make(map[string]string) } - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations + return rollout.Spec.Template.Annotations } // GetDeploymentContainers returns the containers of given deployment func GetDeploymentContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.Deployment).Spec.Template.Spec.Containers + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return []v1.Container{} + } + return deployment.Spec.Template.Spec.Containers } // GetCronJobContainers returns the containers of given cronjob func GetCronJobContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Containers + cronJob, ok := item.(*batchv1.CronJob) + if !ok { + return []v1.Container{} + } + return cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers } // GetJobContainers returns the containers of given job func GetJobContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.Job).Spec.Template.Spec.Containers + job, ok := item.(*batchv1.Job) + if !ok { + return []v1.Container{} + } + return job.Spec.Template.Spec.Containers } // GetDaemonSetContainers returns the containers of given daemonSet func GetDaemonSetContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers + daemonSet, ok := item.(*appsv1.DaemonSet) + if !ok { + return []v1.Container{} + } + return daemonSet.Spec.Template.Spec.Containers } // GetStatefulSetContainers returns the containers of given statefulSet func GetStatefulSetContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers + statefulSet, ok := item.(*appsv1.StatefulSet) + if !ok { + return []v1.Container{} + } + return 
statefulSet.Spec.Template.Spec.Containers } // GetRolloutContainers returns the containers of given rollout func GetRolloutContainers(item runtime.Object) []v1.Container { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers + rollout, ok := item.(*argorolloutv1alpha1.Rollout) + if !ok { + return []v1.Container{} + } + return rollout.Spec.Template.Spec.Containers } // GetDeploymentInitContainers returns the containers of given deployment func GetDeploymentInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.Deployment).Spec.Template.Spec.InitContainers + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return []v1.Container{} + } + return deployment.Spec.Template.Spec.InitContainers } // GetCronJobInitContainers returns the containers of given cronjob func GetCronJobInitContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.InitContainers + cronJob, ok := item.(*batchv1.CronJob) + if !ok { + return []v1.Container{} + } + return cronJob.Spec.JobTemplate.Spec.Template.Spec.InitContainers } // GetJobInitContainers returns the containers of given job func GetJobInitContainers(item runtime.Object) []v1.Container { - return item.(*batchv1.Job).Spec.Template.Spec.InitContainers + job, ok := item.(*batchv1.Job) + if !ok { + return []v1.Container{} + } + return job.Spec.Template.Spec.InitContainers } // GetDaemonSetInitContainers returns the containers of given daemonSet func GetDaemonSetInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers + daemonSet, ok := item.(*appsv1.DaemonSet) + if !ok { + return []v1.Container{} + } + return daemonSet.Spec.Template.Spec.InitContainers } // GetStatefulSetInitContainers returns the containers of given statefulSet func GetStatefulSetInitContainers(item runtime.Object) []v1.Container { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers + 
statefulSet, ok := item.(*appsv1.StatefulSet) + if !ok { + return []v1.Container{} + } + return statefulSet.Spec.Template.Spec.InitContainers } // GetRolloutInitContainers returns the containers of given rollout func GetRolloutInitContainers(item runtime.Object) []v1.Container { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers + rollout, ok := item.(*argorolloutv1alpha1.Rollout) + if !ok { + return []v1.Container{} + } + return rollout.Spec.Template.Spec.InitContainers } // GetPatchTemplates returns patch templates @@ -430,21 +527,30 @@ func GetPatchTemplates() PatchTemplates { // UpdateDeployment performs rolling upgrade on deployment func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error { - deployment := resource.(*appsv1.Deployment) + deployment, ok := resource.(*appsv1.Deployment) + if !ok { + return errors.New("resource is not a Deployment") + } _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"}) return err } // PatchDeployment performs rolling upgrade on deployment func PatchDeployment(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - deployment := resource.(*appsv1.Deployment) + deployment, ok := resource.(*appsv1.Deployment) + if !ok { + return errors.New("resource is not a Deployment") + } _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Patch(context.TODO(), deployment.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) return err } // CreateJobFromCronjob performs rolling upgrade on cronjob func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runtime.Object) error { - cronJob := resource.(*batchv1.CronJob) + cronJob, ok := resource.(*batchv1.CronJob) + if !ok { + return errors.New("resource is not a CronJob") + } annotations := make(map[string]string) 
annotations["cronjob.kubernetes.io/instantiate"] = "manual" @@ -470,7 +576,10 @@ func PatchCronJob(clients kube.Clients, namespace string, resource runtime.Objec // ReCreateJobFromjob performs rolling upgrade on job func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error { - oldJob := resource.(*batchv1.Job) + oldJob, ok := resource.(*batchv1.Job) + if !ok { + return errors.New("resource is not a Job") + } job := oldJob.DeepCopy() // Delete the old job @@ -506,33 +615,48 @@ func PatchJob(clients kube.Clients, namespace string, resource runtime.Object, p // UpdateDaemonSet performs rolling upgrade on daemonSet func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error { - daemonSet := resource.(*appsv1.DaemonSet) + daemonSet, ok := resource.(*appsv1.DaemonSet) + if !ok { + return errors.New("resource is not a DaemonSet") + } _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"}) return err } func PatchDaemonSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - daemonSet := resource.(*appsv1.DaemonSet) + daemonSet, ok := resource.(*appsv1.DaemonSet) + if !ok { + return errors.New("resource is not a DaemonSet") + } _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Patch(context.TODO(), daemonSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) return err } // UpdateStatefulSet performs rolling upgrade on statefulSet func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error { - statefulSet := resource.(*appsv1.StatefulSet) + statefulSet, ok := resource.(*appsv1.StatefulSet) + if !ok { + return errors.New("resource is not a StatefulSet") + } _, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), statefulSet, 
meta_v1.UpdateOptions{FieldManager: "Reloader"}) return err } func PatchStatefulSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - statefulSet := resource.(*appsv1.StatefulSet) + statefulSet, ok := resource.(*appsv1.StatefulSet) + if !ok { + return errors.New("resource is not a StatefulSet") + } _, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Patch(context.TODO(), statefulSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"}) return err } // UpdateRollout performs rolling upgrade on rollout func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error { - rollout := resource.(*argorolloutv1alpha1.Rollout) + rollout, ok := resource.(*argorolloutv1alpha1.Rollout) + if !ok { + return errors.New("resource is not a Rollout") + } strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation] var err error switch options.ToArgoRolloutStrategy(strategy) { @@ -550,30 +674,54 @@ func PatchRollout(clients kube.Clients, namespace string, resource runtime.Objec // GetDeploymentVolumes returns the Volumes of given deployment func GetDeploymentVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return []v1.Volume{} + } + return deployment.Spec.Template.Spec.Volumes } // GetCronJobVolumes returns the Volumes of given cronjob func GetCronJobVolumes(item runtime.Object) []v1.Volume { - return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Volumes + cronJob, ok := item.(*batchv1.CronJob) + if !ok { + return []v1.Volume{} + } + return cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes } // GetJobVolumes returns the Volumes of given job func GetJobVolumes(item runtime.Object) []v1.Volume { - return item.(*batchv1.Job).Spec.Template.Spec.Volumes + job, ok := item.(*batchv1.Job) + if !ok { + return []v1.Volume{} + 
} + return job.Spec.Template.Spec.Volumes } // GetDaemonSetVolumes returns the Volumes of given daemonSet func GetDaemonSetVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes + daemonSet, ok := item.(*appsv1.DaemonSet) + if !ok { + return []v1.Volume{} + } + return daemonSet.Spec.Template.Spec.Volumes } // GetStatefulSetVolumes returns the Volumes of given statefulSet func GetStatefulSetVolumes(item runtime.Object) []v1.Volume { - return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes + statefulSet, ok := item.(*appsv1.StatefulSet) + if !ok { + return []v1.Volume{} + } + return statefulSet.Spec.Template.Spec.Volumes } // GetRolloutVolumes returns the Volumes of given rollout func GetRolloutVolumes(item runtime.Object) []v1.Volume { - return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes + rollout, ok := item.(*argorolloutv1alpha1.Rollout) + if !ok { + return []v1.Volume{} + } + return rollout.Spec.Template.Spec.Volumes } diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go index 1a51d9a59..9b7361c1a 100644 --- a/internal/pkg/controller/controller.go +++ b/internal/pkg/controller/controller.go @@ -6,12 +6,6 @@ import ( "time" "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/handler" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/kube" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -24,12 +18,18 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/kubectl/pkg/scheme" csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" + + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/handler" + 
"github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/internal/pkg/util" + "github.com/stakater/Reloader/pkg/kube" ) // Controller for checking events type Controller struct { client kubernetes.Interface - indexer cache.Indexer queue workqueue.TypedRateLimitingInterface[any] informer cache.Controller namespace string @@ -42,14 +42,13 @@ type Controller struct { } // controllerInitialized flag determines whether controlled is being initialized -var secretControllerInitialized bool = false -var configmapControllerInitialized bool = false +var secretControllerInitialized = false +var configmapControllerInitialized = false var selectedNamespacesCache []string // NewController for initializing a Controller -func NewController( - client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector string, resourceLabelSelector string, collectors metrics.Collectors) (*Controller, error) { - +func NewController(client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector string, resourceLabelSelector string, collectors metrics.Collectors) (*Controller, + error) { if options.SyncAfterRestart { secretControllerInitialized = true configmapControllerInitialized = true @@ -67,17 +66,18 @@ func NewController( eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{ Interface: client.CoreV1().Events(""), }) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, + v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)}) queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any]()) - optionsModifier := func(options *metav1.ListOptions) { + optionsModifier := func(opts *metav1.ListOptions) { if resource == "namespaces" { - 
options.LabelSelector = c.namespaceSelector + opts.LabelSelector = c.namespaceSelector } else if len(c.resourceSelector) > 0 { - options.LabelSelector = c.resourceSelector + opts.LabelSelector = c.resourceSelector } else { - options.FieldSelector = fields.Everything().String() + opts.FieldSelector = fields.Everything().String() } } @@ -299,7 +299,12 @@ func (c *Controller) processNextItem() bool { startTime := time.Now() // Invoke the method containing the business logic - err := resourceHandler.(handler.ResourceHandler).Handle() + rh, ok := resourceHandler.(handler.ResourceHandler) + if !ok { + logrus.Errorf("Invalid resource handler type: %T", resourceHandler) + return true + } + err := rh.Handle() duration := time.Since(startTime) diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go index c7eed63fb..e16b3dffb 100644 --- a/internal/pkg/controller/controller_test.go +++ b/internal/pkg/controller/controller_test.go @@ -1,2794 +1,755 @@ package controller import ( - "context" - "os" + "errors" "testing" "time" - "github.com/stakater/Reloader/internal/pkg/constants" - - "github.com/stakater/Reloader/internal/pkg/metrics" - - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/handler" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" -) - -var ( - clients = kube.GetClients() - namespace = "test-reloader-" + testutil.RandSeq(5) - configmapNamePrefix = "testconfigmap-reloader" - secretNamePrefix = "testsecret-reloader" - secretProviderClassPodStatusPrefix = "testsecretproviderclasspodstatus-reloader" 
- data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - newData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" - collectors = metrics.NewCollectors() -) - -const ( - sleepDuration = 3 * time.Second -) - -func TestMain(m *testing.M) { - - testutil.CreateNamespace(namespace, clients.KubernetesClient) - - logrus.Infof("Creating controller") - for k := range kube.ResourceMap { - // Don't create controller if CSI provider is not installed - if k == "secretproviderclasspodstatuses" && !kube.IsCSIInstalled { - continue - } - if k == "namespaces" { - continue - } - c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, "", "", collectors) - if err != nil { - logrus.Fatalf("%s", err) - } - - // Now let's start the controller - stop := make(chan struct{}) - defer close(stop) - go c.Run(1, stop) - } - time.Sleep(sleepDuration) - - logrus.Infof("Running Testcases") - retCode := m.Run() - - testutil.DeleteNamespace(namespace, clients.KubernetesClient) - - os.Exit(retCode) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - 
t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, false) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if 
updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon creating the configmap -func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment test case") - - // Creating configmap - configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Deleting 
configmap for first time - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - time.Sleep(sleepDuration) - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com") - if err != nil { - t.Errorf("Error while creating the configmap second time %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - 
t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on deployment and create pod annotation var upon updating the labels configmap -func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "test", "www.google.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon creating the secret -func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testing.T) { - 
options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment test case") - - // Creating secret - secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) - - _, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - time.Sleep(sleepDuration) - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) 
- } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the secret -func 
TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a pod 
annotation upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secret") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon updating the secretclassproviderpodstatus -func 
TestControllerUpdatingSecretProviderClassPodStatusShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - if !kube.IsCSIInstalled { - return - } - - // Creating secretproviderclass - secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) - _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretproviderclass %v", err) - } - - // Creating secretproviderclasspodstatus - spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating secretproviderclasspodstatus for first time - updateErr := testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData) - if updateErr != nil { - t.Errorf("Secretproviderclasspodstatus was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretproviderclasspodstatusName, - SHAValue: shaData, - Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - 
time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting secretproviderclass - err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass %v", err) - } - - // Deleting secretproviderclasspodstatus - err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the secretproviderclasspodstatus -func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - if !kube.IsCSIInstalled { - return - } - - // Creating secretproviderclass - secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) - _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretproviderclass %v", err) - } - - // Creating secretproviderclasspodstatus - spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = 
testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData) - if err != nil { - t.Errorf("Error while updating secretproviderclasspodstatus %v", err) - } - - // Updating Secret - err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secretproviderclasspodstatus %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretproviderclasspodstatusName, - SHAValue: shaData, - Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting secretproviderclass - err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass %v", err) - } - - // Deleting secretproviderclasspodstatus - err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) - } - time.Sleep(sleepDuration) - -} - -// Do not Perform rolling upgrade on pod and create or update a pod annotation upon updating the label in secretproviderclasspodstatus -func 
TestControllerUpdatingSecretProviderClassPodStatusWithSameDataShouldNotCreateOrUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - if !kube.IsCSIInstalled { - return - } - - // Creating secretproviderclass - secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) - _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretproviderclass %v", err) - } - - // Creating secretproviderclasspodstatus - spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", data) - if err != nil { - t.Errorf("Error while updating secretproviderclasspodstatus %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretproviderclasspodstatusName, - SHAValue: shaData, - Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing in secretproviderclasspodstatus") - } - - // Deleting 
Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting secretproviderclass - err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass %v", err) - } - - // Deleting secretproviderclasspodstatus - err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying DaemonSet update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - 
SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Verifying DaemonSet 
update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := 
testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - time.Sleep(sleepDuration) - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod 
annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a pod annotation upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := 
common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if updated { - t.Errorf("DaemonSet should not be updated by changing label in secret") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := 
common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not 
updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod 
annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := 
testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, false) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been 
created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon creating the configmap -func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case") - - // Creating configmap - configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Deleting configmap for first time - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while 
deleting the configmap %v", err) - } - - time.Sleep(sleepDuration) - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com") - if err != nil { - t.Errorf("Error while creating the configmap second time %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, 
configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on deployment and create env var upon updating the labels configmap -func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err 
:= testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "test", "www.google.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon creating the secret -func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping 
TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case") - - // Creating secret - secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) - - _, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - time.Sleep(sleepDuration) - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon 
updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - 
- // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) 
{ - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secret") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secretproviderclasspodstatus -func TestControllerUpdatingSecretProviderClassPodStatusShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - if !kube.IsCSIInstalled { - return - } - - 
// Creating secretproviderclass - secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) - _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretproviderclass %v", err) - } - - // Creating secretproviderclasspodstatus - spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData) - if err != nil { - t.Errorf("Error while updating secretproviderclasspodstatus %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretproviderclasspodstatusName, - SHAValue: shaData, - Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting 
secretproviderclass - err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass %v", err) - } - - // Deleting secretproviderclasspodstatus - err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the secretproviderclasspodstatus -func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - if !kube.IsCSIInstalled { - return - } - - // Creating secretproviderclass - secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) - _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretproviderclass %v", err) - } - - // Creating secretproviderclasspodstatus - spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating secretproviderclasspodstatus - err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData) - if err != nil { - t.Errorf("Error while updating secretproviderclasspodstatus %v", err) - } - - // Updating secretproviderclasspodstatus - err = 
testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secretproviderclasspodstatus %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretproviderclasspodstatusName, - SHAValue: shaData, - Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting secretproviderclass - err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass %v", err) - } - - // Deleting secretproviderclasspodstatus - err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secretclasssproviderpodstatus -func TestControllerUpdatingSecretProviderClassPodStatusLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - if !kube.IsCSIInstalled { - return - } - - // 
Creating secretproviderclass - secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) - _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretproviderclass %v", err) - } - - // Creating secretproviderclasspodstatus - spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) - if err != nil { - t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "test", data) - if err != nil { - t.Errorf("Error while updating secretproviderclasspodstatus %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretproviderclasspodstatusName, - SHAValue: shaData, - Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secretproviderclasspodstatus") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - 
- // Deleting secretproviderclass - err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass %v", err) - } - - // Deleting secretproviderclasspodstatus - err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying DaemonSet update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, 
constants.ConfigmapEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Verifying DaemonSet update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := 
common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: 
options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - time.Sleep(sleepDuration) - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: 
secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/workqueue" - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } + "github.com/stakater/Reloader/internal/pkg/handler" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/pkg/common" +) - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) +// mockResourceHandler implements handler.ResourceHandler and handler.TimedHandler for testing. 
+type mockResourceHandler struct { + handleErr error + handleCalls int + enqueueTime time.Time } -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if updated { - t.Errorf("DaemonSet should not be updated by changing label in secret") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) +func (m 
*mockResourceHandler) Handle() error { + m.handleCalls++ + return m.handleErr } -// Perform rolling upgrade on StatefulSet and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != 
nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) +func (m *mockResourceHandler) GetConfig() (common.Config, string) { + return common.Config{ + ResourceName: "test-resource", + Namespace: "test-ns", + Type: "configmap", + SHAValue: "sha256:test", + }, "test-resource" } -// Perform rolling upgrade on StatefulSet and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, 
constants.ConfigmapEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) +func (m *mockResourceHandler) GetEnqueueTime() time.Time { + return m.enqueueTime } -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, 
constants.SecretEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) +// resetGlobalState resets global variables between tests +func resetGlobalState() { + secretControllerInitialized = false + configmapControllerInitialized = false + selectedNamespacesCache = []string{} } -// Perform rolling upgrade on StatefulSet and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } +// newTestController creates a controller for testing without starting informers +func newTestController(ignoredNamespaces []string, namespaceSelector string) *Controller { + queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any]()) + collectors := metrics.NewCollectors() - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) + return &Controller{ + queue: queue, + ignoredNamespaces: ignoredNamespaces, + namespaceSelector: namespaceSelector, + collectors: collectors, + resource: "configmaps", } +} - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err 
!= nil { - t.Errorf("Error while updating secret %v", err) +func TestResourceInIgnoredNamespace(t *testing.T) { + tests := []struct { + name string + ignoredNamespaces []string + resource interface{} + expected bool + }{ + { + name: "ConfigMap in ignored namespace", + ignoredNamespaces: []string{"kube-system", "default"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", + }, + }, + expected: true, + }, + { + name: "ConfigMap not in ignored namespace", + ignoredNamespaces: []string{"kube-system", "default"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "my-namespace", + }, + }, + expected: false, + }, + { + name: "Secret in ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "kube-system", + }, + }, + expected: true, + }, + { + name: "Secret not in ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "my-namespace", + }, + }, + expected: false, + }, + { + name: "Empty ignored namespaces list", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "any-namespace", + }, + }, + expected: false, + }, + { + name: "Unknown resource type", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.Pod{}, // Not a ConfigMap or Secret + expected: false, + }, } - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + c := newTestController(tt.ignoredNamespaces, "") + result := c.resourceInIgnoredNamespace(tt.resource) + assert.Equal(t, tt.expected, result) + }, + ) } +} - // Verifying Upgrade - 
logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") +func TestResourceInSelectedNamespaces(t *testing.T) { + tests := []struct { + name string + namespaceSelector string + cachedNamespaces []string + resource interface{} + expected bool + }{ + { + name: "No namespace selector - all namespaces allowed", + namespaceSelector: "", + cachedNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "any-namespace", + }, + }, + expected: true, + }, + { + name: "ConfigMap in selected namespace", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns", "staging-ns"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "prod-ns", + }, + }, + expected: true, + }, + { + name: "ConfigMap not in selected namespace", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "dev-ns", + }, + }, + expected: false, + }, + { + name: "Secret in selected namespace", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "prod-ns", + }, + }, + expected: true, + }, + { + name: "Secret not in selected namespace", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + 
Namespace: "dev-ns", + }, + }, + expected: false, + }, + { + name: "Unknown resource type with selector", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + resource: &v1.Pod{}, + expected: false, + }, } - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + resetGlobalState() + selectedNamespacesCache = tt.cachedNamespaces - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) + c := newTestController([]string{}, tt.namespaceSelector) + result := c.resourceInSelectedNamespaces(tt.resource) + assert.Equal(t, tt.expected, result) + }, + ) } - time.Sleep(sleepDuration) } -// Perform rolling upgrade on StatefulSet and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy +func TestAddSelectedNamespaceToCache(t *testing.T) { + resetGlobalState() - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } + c := newTestController([]string{}, "env=prod") - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) + // Add first namespace + ns1 := v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "namespace-1"}, } + c.addSelectedNamespaceToCache(ns1) + assert.Contains(t, selectedNamespacesCache, "namespace-1") + assert.Len(t, 
selectedNamespacesCache, 1) - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) + // Add second namespace + ns2 := v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "namespace-2"}, } + c.addSelectedNamespaceToCache(ns2) + assert.Contains(t, selectedNamespacesCache, "namespace-1") + assert.Contains(t, selectedNamespacesCache, "namespace-2") + assert.Len(t, selectedNamespacesCache, 2) +} - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) +func TestRemoveSelectedNamespaceFromCache(t *testing.T) { + tests := []struct { + name string + initialCache []string + namespaceToRemove string + expectedCache []string + }{ + { + name: "Remove existing namespace", + initialCache: []string{"ns-1", "ns-2", "ns-3"}, + namespaceToRemove: "ns-2", + expectedCache: []string{"ns-1", "ns-3"}, + }, + { + name: "Remove non-existing namespace", + initialCache: []string{"ns-1", "ns-2"}, + namespaceToRemove: "ns-3", + expectedCache: []string{"ns-1", "ns-2"}, + }, + { + name: "Remove from empty cache", + initialCache: []string{}, + namespaceToRemove: "ns-1", + expectedCache: []string{}, + }, + { + name: "Remove only namespace", + initialCache: []string{"ns-1"}, + namespaceToRemove: "ns-1", + expectedCache: []string{}, + }, } - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet 
was not updated") - } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + resetGlobalState() + selectedNamespacesCache = tt.initialCache - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } + c := newTestController([]string{}, "env=prod") + ns := v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: tt.namespaceToRemove}, + } + c.removeSelectedNamespaceFromCache(ns) - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) + assert.Equal(t, tt.expectedCache, selectedNamespacesCache) + }, + ) } - time.Sleep(sleepDuration) } -func TestController_resourceInIgnoredNamespace(t *testing.T) { - type fields struct { - client kubernetes.Interface - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace string - ignoredNamespaces util.List - } - type args struct { - raw interface{} - } +func TestAddHandler(t *testing.T) { tests := []struct { - name string - fields fields - args args - want bool + name string + reloadOnCreate string + ignoredNamespaces []string + resource interface{} + controllersInit bool + expectQueueItem bool }{ { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnTrue", - fields: fields{ - ignoredNamespaces: util.List{ - "system", - }, - }, - args: args{ - raw: testutil.GetConfigmap("system", "testcm", "test"), + name: "Namespace resource - should not queue", + reloadOnCreate: "true", + ignoredNamespaces: []string{}, + resource: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}, }, - want: true, + controllersInit: true, + expectQueueItem: false, }, { - name: "TestSecretResourceInIgnoredNamespaceShouldReturnTrue", - fields: fields{ - ignoredNamespaces: util.List{ - "system", + name: "ReloadOnCreate 
disabled", + reloadOnCreate: "false", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", }, }, - args: args{ - raw: testutil.GetSecret("system", "testsecret", "test"), - }, - want: true, + controllersInit: true, + expectQueueItem: false, }, { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse", - fields: fields{ - ignoredNamespaces: util.List{ - "system", + name: "ConfigMap in ignored namespace", + reloadOnCreate: "true", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", }, }, - args: args{ - raw: testutil.GetConfigmap("some-other-namespace", "testcm", "test"), - }, - want: false, + controllersInit: true, + expectQueueItem: false, }, { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse", - fields: fields{ - ignoredNamespaces: util.List{ - "system", + name: "Controllers not initialized", + reloadOnCreate: "true", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", }, }, - args: args{ - raw: testutil.GetSecret("some-other-namespace", "testsecret", "test"), + controllersInit: false, + expectQueueItem: false, + }, + { + name: "Valid ConfigMap - should queue", + reloadOnCreate: "true", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, }, - want: false, + controllersInit: true, + expectQueueItem: true, }, } + for _, tt := range tests { t.Run( tt.name, func(t *testing.T) { - c := &Controller{ - client: tt.fields.client, - indexer: tt.fields.indexer, - queue: tt.fields.queue, - informer: tt.fields.informer, - namespace: tt.fields.namespace, - ignoredNamespaces: tt.fields.ignoredNamespaces, - } - if got := c.resourceInIgnoredNamespace(tt.args.raw); got != tt.want { - 
t.Errorf("Controller.resourceInIgnoredNamespace() = %v, want %v", got, tt.want) + resetGlobalState() + options.ReloadOnCreate = tt.reloadOnCreate + secretControllerInitialized = tt.controllersInit + configmapControllerInitialized = tt.controllersInit + + c := newTestController(tt.ignoredNamespaces, "") + c.Add(tt.resource) + + if tt.expectQueueItem { + assert.Equal(t, 1, c.queue.Len(), "Expected queue to have 1 item") + } else { + assert.Equal(t, 0, c.queue.Len(), "Expected queue to be empty") } }, ) } } -func TestController_resourceInNamespaceSelector(t *testing.T) { - type fields struct { - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace v1.Namespace - namespaceSelector string - } - type args struct { - raw interface{} - } +func TestUpdateHandler(t *testing.T) { tests := []struct { - name string - fields fields - args args - want bool + name string + ignoredNamespaces []string + namespaceSelector string + cachedNamespaces []string + oldResource interface{} + newResource interface{} + expectQueueItem bool }{ { - name: "TestConfigMapResourceInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - }, - }, - }, + name: "Namespace resource - should not queue", + ignoredNamespaces: []string{}, + oldResource: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}, }, - args: args{ - raw: testutil.GetConfigmap("selected-namespace", "testcm", "test"), + newResource: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}, }, - want: true, - }, { - name: "TestConfigMapResourceNotInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-selected-namespace", - Labels: map[string]string{}, - }, + 
expectQueueItem: false, + }, + { + name: "ConfigMap in ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", }, }, - args: args{ - raw: testutil.GetConfigmap("not-selected-namespace", "testcm", "test"), + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", + }, }, - want: false, + expectQueueItem: false, }, { - name: "TestSecretResourceInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - }, - }, + name: "ConfigMap not in selected namespace", + ignoredNamespaces: []string{}, + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "dev-ns", }, }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "testsecret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceNotInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-selected-namespace", - Labels: map[string]string{}, - }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "dev-ns", }, }, - args: args{ - raw: testutil.GetSecret("not-selected-namespace", "secret", "test"), + expectQueueItem: false, + }, + { + name: "Valid ConfigMap update - should queue", + ignoredNamespaces: []string{}, + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "old-value"}, }, - want: false, - }, { - name: "TestSecretResourceInNamespaceSelectorKeyExists", - fields: fields{ - namespaceSelector: 
"select", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - }, - }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", }, + Data: map[string]string{"key": "new-value"}, }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), + expectQueueItem: true, + }, + { + name: "Valid Secret update - should queue", + ignoredNamespaces: []string{}, + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorValueIn", - fields: fields{ - namespaceSelector: "select in (select1, select2, select3)", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "select2", - }, - }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", }, }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), + expectQueueItem: true, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + resetGlobalState() + if tt.cachedNamespaces != nil { + selectedNamespacesCache = tt.cachedNamespaces + } + + c := newTestController(tt.ignoredNamespaces, tt.namespaceSelector) + c.Update(tt.oldResource, tt.newResource) + + if tt.expectQueueItem { + assert.Equal(t, 1, c.queue.Len(), "Expected queue to have 1 item") + // Verify the queued item is the correct type + item, _ := c.queue.Get() + _, ok := item.(handler.ResourceUpdatedHandler) + assert.True(t, ok, "Expected ResourceUpdatedHandler in queue") + c.queue.Done(item) + } else { + assert.Equal(t, 0, c.queue.Len(), "Expected queue to be empty") + } + }, + ) + } +} + +func TestDeleteHandler(t *testing.T) { + tests := []struct { + name string + reloadOnDelete string + ignoredNamespaces []string + 
resource interface{} + controllersInit bool + expectQueueItem bool + }{ + { + name: "ReloadOnDelete disabled", + reloadOnDelete: "false", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorKeyDoesNotExist", - fields: fields{ - namespaceSelector: "!select2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - }, - }, + controllersInit: true, + expectQueueItem: false, + }, + { + name: "ConfigMap in ignored namespace", + reloadOnDelete: "true", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", }, }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), + controllersInit: true, + expectQueueItem: false, + }, + { + name: "Controllers not initialized", + reloadOnDelete: "true", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorMultipleConditions", - fields: fields{ - namespaceSelector: "select,select2=this2,select3!=this4", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - "select3": "this3", - }, - }, + controllersInit: false, + expectQueueItem: false, + }, + { + name: "Valid ConfigMap delete - should queue", + reloadOnDelete: "true", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", }, }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), + controllersInit: true, + expectQueueItem: true, + }, + { + name: "Namespace 
delete - updates cache", + reloadOnDelete: "false", // Disable to test cache update only + ignoredNamespaces: []string{}, + resource: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}, }, - want: true, + controllersInit: true, + expectQueueItem: false, }, } for _, tt := range tests { t.Run( tt.name, func(t *testing.T) { - fakeClient := fake.NewClientset() - namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{}) - logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name) - - c := &Controller{ - client: fakeClient, - indexer: tt.fields.indexer, - queue: tt.fields.queue, - informer: tt.fields.informer, - namespace: tt.fields.namespace.Name, - namespaceSelector: tt.fields.namespaceSelector, + resetGlobalState() + options.ReloadOnDelete = tt.reloadOnDelete + secretControllerInitialized = tt.controllersInit + configmapControllerInitialized = tt.controllersInit + + c := newTestController(tt.ignoredNamespaces, "") + c.Delete(tt.resource) + + if tt.expectQueueItem { + assert.Equal(t, 1, c.queue.Len(), "Expected queue to have 1 item") + // Verify the queued item is the correct type + item, _ := c.queue.Get() + _, ok := item.(handler.ResourceDeleteHandler) + assert.True(t, ok, "Expected ResourceDeleteHandler in queue") + c.queue.Done(item) + } else { + assert.Equal(t, 0, c.queue.Len(), "Expected queue to be empty") } + }, + ) + } +} - listOptions := metav1.ListOptions{} - listOptions.LabelSelector = tt.fields.namespaceSelector - namespaces, _ := fakeClient.CoreV1().Namespaces().List(context.Background(), listOptions) +func TestHandleErr(t *testing.T) { + t.Run( + "No error - should forget key", func(t *testing.T) { + resetGlobalState() + c := newTestController([]string{}, "") - for _, ns := range namespaces.Items { - c.addSelectedNamespaceToCache(ns) - } + key := "test-key" + // Add key to queue first + c.queue.Add(key) + item, _ := c.queue.Get() - if got := 
c.resourceInSelectedNamespaces(tt.args.raw); got != tt.want { - t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want) - } + // Handle with no error + c.handleErr(nil, item) + c.queue.Done(item) - for _, ns := range namespaces.Items { - c.removeSelectedNamespaceFromCache(ns) - } + // Key should be forgotten (NumRequeues should be 0) + assert.Equal(t, 0, c.queue.NumRequeues(key)) + }, + ) + + t.Run( + "Error at max retries - should drop key", func(t *testing.T) { + resetGlobalState() + c := newTestController([]string{}, "") + + key := "test-key-max" + + // Simulate 5 previous failures (max retries) + for range 5 { + c.queue.AddRateLimited(key) + } + + // After max retries, handleErr should forget the key + c.handleErr(assert.AnError, key) + + // Key should be forgotten + assert.Equal(t, 0, c.queue.NumRequeues(key)) + }, + ) +} + +func TestAddHandlerWithNamespaceEvent(t *testing.T) { + resetGlobalState() + + c := newTestController([]string{}, "env=prod") + + // When a namespace is added, it should be cached + ns := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "new-namespace"}, + } + + c.Add(ns) + + assert.Contains(t, selectedNamespacesCache, "new-namespace") + assert.Equal(t, 0, c.queue.Len(), "Namespace add should not queue anything") +} + +func TestDeleteHandlerWithNamespaceEvent(t *testing.T) { + resetGlobalState() + selectedNamespacesCache = []string{"ns-1", "ns-to-delete", "ns-2"} + + c := newTestController([]string{}, "env=prod") + options.ReloadOnDelete = "true" + secretControllerInitialized = true + configmapControllerInitialized = true + + ns := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "ns-to-delete"}, + } + + c.Delete(ns) + + assert.NotContains(t, selectedNamespacesCache, "ns-to-delete") + assert.Contains(t, selectedNamespacesCache, "ns-1") + assert.Contains(t, selectedNamespacesCache, "ns-2") + assert.Equal(t, 0, c.queue.Len(), "Namespace delete should not queue anything") +} + +func TestProcessNextItem(t 
*testing.T) { + tests := []struct { + name string + handler *mockResourceHandler + expectContinue bool + expectCalls int + }{ + { + name: "Successful handler execution", + handler: &mockResourceHandler{ + handleErr: nil, + enqueueTime: time.Now().Add(-10 * time.Millisecond), + }, + expectContinue: true, + expectCalls: 1, + }, + { + name: "Handler returns error", + handler: &mockResourceHandler{ + handleErr: errors.New("test error"), + enqueueTime: time.Now().Add(-10 * time.Millisecond), + }, + expectContinue: true, + expectCalls: 1, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + resetGlobalState() + c := newTestController([]string{}, "") + + c.queue.Add(tt.handler) + + result := c.processNextItem() + + assert.Equal(t, tt.expectContinue, result) + assert.Equal(t, tt.expectCalls, tt.handler.handleCalls) }, ) } } + +func TestProcessNextItemQueueShutdown(t *testing.T) { + resetGlobalState() + c := newTestController([]string{}, "") + + c.queue.ShutDown() + + result := c.processNextItem() + assert.False(t, result, "Should return false when queue is shutdown") +} diff --git a/internal/pkg/handler/create.go b/internal/pkg/handler/create.go index d67661007..2ab290031 100644 --- a/internal/pkg/handler/create.go +++ b/internal/pkg/handler/create.go @@ -4,11 +4,12 @@ import ( "time" "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/record" + "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/options" "github.com/stakater/Reloader/pkg/common" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" ) // ResourceCreatedHandler contains new objects @@ -59,10 +60,10 @@ func (r ResourceCreatedHandler) Handle() error { func (r ResourceCreatedHandler) GetConfig() (common.Config, string) { var oldSHAData string var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := 
r.Resource.(*v1.Secret); ok { - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) + if cm, ok := r.Resource.(*v1.ConfigMap); ok { + config = common.GetConfigmapConfig(cm) + } else if secret, ok := r.Resource.(*v1.Secret); ok { + config = common.GetSecretConfig(secret) } else { logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) } diff --git a/internal/pkg/handler/create_test.go b/internal/pkg/handler/create_test.go new file mode 100644 index 000000000..ef21f06b5 --- /dev/null +++ b/internal/pkg/handler/create_test.go @@ -0,0 +1,353 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/metrics" +) + +func TestResourceCreatedHandler_GetConfig(t *testing.T) { + tests := []struct { + name string + resource interface{} + expectedName string + expectedNS string + expectedType string + expectSHANotEmpty bool + expectOldSHAEmpty bool + }{ + { + name: "ConfigMap with data", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-configmap", + Namespace: "test-ns", + }, + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + expectedName: "my-configmap", + expectedNS: "test-ns", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "ConfigMap with empty data", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-configmap", + Namespace: "default", + }, + Data: map[string]string{}, + }, + expectedName: "empty-configmap", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "ConfigMap with binary data", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binary-configmap", + 
Namespace: "default", + }, + BinaryData: map[string][]byte{ + "binary-key": []byte("binary-value"), + }, + }, + expectedName: "binary-configmap", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "ConfigMap with annotations", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "annotated-configmap", + Namespace: "default", + Annotations: map[string]string{ + "reloader.stakater.com/match": "true", + }, + }, + Data: map[string]string{"key": "value"}, + }, + expectedName: "annotated-configmap", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Secret with data", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "secret-ns", + }, + Data: map[string][]byte{ + "password": []byte("secret-password"), + }, + }, + expectedName: "my-secret", + expectedNS: "secret-ns", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Secret with empty data", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-secret", + Namespace: "default", + }, + Data: map[string][]byte{}, + }, + expectedName: "empty-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Secret with StringData", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stringdata-secret", + Namespace: "default", + }, + StringData: map[string]string{ + "username": "admin", + }, + }, + expectedName: "stringdata-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Secret with labels", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "labeled-secret", + Namespace: "default", + 
Labels: map[string]string{ + "app": "test", + }, + }, + Data: map[string][]byte{"key": []byte("value")}, + }, + expectedName: "labeled-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Invalid resource type - string", + resource: "invalid-string", + expectedName: "", + expectedNS: "", + expectedType: "", + expectSHANotEmpty: false, + expectOldSHAEmpty: true, + }, + { + name: "Invalid resource type - int", + resource: 123, + expectedName: "", + expectedNS: "", + expectedType: "", + expectSHANotEmpty: false, + expectOldSHAEmpty: true, + }, + { + name: "Invalid resource type - struct", + resource: struct{ Name string }{Name: "test"}, + expectedName: "", + expectedNS: "", + expectedType: "", + expectSHANotEmpty: false, + expectOldSHAEmpty: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := ResourceCreatedHandler{ + Resource: tt.resource, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, tt.expectedName, config.ResourceName) + assert.Equal(t, tt.expectedNS, config.Namespace) + assert.Equal(t, tt.expectedType, config.Type) + + if tt.expectSHANotEmpty { + assert.NotEmpty(t, config.SHAValue, "SHA should not be empty") + } + + if tt.expectOldSHAEmpty { + assert.Empty(t, oldSHA, "oldSHA should always be empty for create handler") + } + }) + } +} + +func TestResourceCreatedHandler_GetConfig_Annotations(t *testing.T) { + cm := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "annotated-cm", + Namespace: "default", + Annotations: map[string]string{ + "reloader.stakater.com/match": "true", + "reloader.stakater.com/search": "true", + }, + }, + Data: map[string]string{"key": "value"}, + } + + handler := ResourceCreatedHandler{ + Resource: cm, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + assert.NotNil(t, config.ResourceAnnotations) + 
assert.Equal(t, "true", config.ResourceAnnotations["reloader.stakater.com/match"]) + assert.Equal(t, "true", config.ResourceAnnotations["reloader.stakater.com/search"]) +} + +func TestResourceCreatedHandler_GetConfig_Labels(t *testing.T) { + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "labeled-secret", + Namespace: "default", + Labels: map[string]string{ + "app": "myapp", + "version": "v1", + }, + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + handler := ResourceCreatedHandler{ + Resource: secret, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + assert.NotNil(t, config.Labels) + assert.Equal(t, "myapp", config.Labels["app"]) + assert.Equal(t, "v1", config.Labels["version"]) +} + +func TestResourceCreatedHandler_Handle(t *testing.T) { + tests := []struct { + name string + resource interface{} + expectError bool + }{ + { + name: "Nil resource", + resource: nil, + expectError: false, + }, + { + name: "Valid ConfigMap - no workloads to update", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + }, + expectError: false, + }, + { + name: "Valid Secret - no workloads to update", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{"key": []byte("value")}, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := ResourceCreatedHandler{ + Resource: tt.resource, + Collectors: metrics.NewCollectors(), + } + + err := handler.Handle() + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestResourceCreatedHandler_SHAConsistency(t *testing.T) { + data := map[string]string{"key": "value"} + + cm1 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "default"}, + Data: data, + } + cm2 := 
&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm2", Namespace: "default"}, + Data: data, + } + + handler1 := ResourceCreatedHandler{Resource: cm1, Collectors: metrics.NewCollectors()} + handler2 := ResourceCreatedHandler{Resource: cm2, Collectors: metrics.NewCollectors()} + + config1, _ := handler1.GetConfig() + config2, _ := handler2.GetConfig() + + assert.Equal(t, config1.SHAValue, config2.SHAValue) +} + +func TestResourceCreatedHandler_SHADifference(t *testing.T) { + cm1 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value1"}, + } + cm2 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value2"}, + } + + handler1 := ResourceCreatedHandler{Resource: cm1, Collectors: metrics.NewCollectors()} + handler2 := ResourceCreatedHandler{Resource: cm2, Collectors: metrics.NewCollectors()} + + config1, _ := handler1.GetConfig() + config2, _ := handler2.GetConfig() + + assert.NotEqual(t, config1.SHAValue, config2.SHAValue) +} diff --git a/internal/pkg/handler/delete.go b/internal/pkg/handler/delete.go index 34e032b7f..845bc876e 100644 --- a/internal/pkg/handler/delete.go +++ b/internal/pkg/handler/delete.go @@ -6,6 +6,7 @@ import ( "time" "github.com/sirupsen/logrus" + "github.com/stakater/Reloader/internal/pkg/callbacks" "github.com/stakater/Reloader/internal/pkg/constants" "github.com/stakater/Reloader/internal/pkg/metrics" @@ -67,10 +68,10 @@ func (r ResourceDeleteHandler) Handle() error { func (r ResourceDeleteHandler) GetConfig() (common.Config, string) { var oldSHAData string var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := r.Resource.(*v1.Secret); ok { - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) + if cm, ok := r.Resource.(*v1.ConfigMap); ok { + config = common.GetConfigmapConfig(cm) + } else if secret, 
ok := r.Resource.(*v1.Secret); ok { + config = common.GetSecretConfig(secret) } else { logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) } @@ -98,7 +99,7 @@ func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item run return InvokeStrategyResult{constants.NoContainerFound, nil} } - //remove if env var exists + // remove if env var exists if len(container.Env) > 0 { index := slices.IndexFunc(container.Env, func(envVariable v1.EnvVar) bool { return envVariable.Name == envVar diff --git a/internal/pkg/handler/delete_test.go b/internal/pkg/handler/delete_test.go new file mode 100644 index 000000000..812b0d18a --- /dev/null +++ b/internal/pkg/handler/delete_test.go @@ -0,0 +1,353 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/stakater/Reloader/internal/pkg/callbacks" + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/pkg/common" +) + +// mockDeploymentForDelete creates a deployment with containers for testing delete strategies +func mockDeploymentForDelete(name, namespace string, containers []v1.Container, volumes []v1.Volume) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + Spec: v1.PodSpec{ + Containers: containers, + Volumes: volumes, + }, + }, + }, + } +} + +// Mock funcs for testing +func mockContainersFunc(item runtime.Object) []v1.Container { + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil + } + return deployment.Spec.Template.Spec.Containers +} + +func 
mockInitContainersFunc(item runtime.Object) []v1.Container { + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil + } + return deployment.Spec.Template.Spec.InitContainers +} + +func mockVolumesFunc(item runtime.Object) []v1.Volume { + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil + } + return deployment.Spec.Template.Spec.Volumes +} + +func mockPodAnnotationsFunc(item runtime.Object) map[string]string { + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil + } + return deployment.Spec.Template.Annotations +} + +func mockPatchTemplatesFunc() callbacks.PatchTemplates { + return callbacks.PatchTemplates{ + AnnotationTemplate: `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, + EnvVarTemplate: `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`, + DeleteEnvVarTemplate: `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`, + } +} + +func TestRemoveContainerEnvVars(t *testing.T) { + tests := []struct { + name string + containers []v1.Container + volumes []v1.Volume + config common.Config + autoReload bool + expected constants.Result + envVarRemoved bool + }{ + { + name: "Remove existing env var - configmap envFrom", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + Env: []v1.EnvVar{ + {Name: "STAKATER_MY_CONFIGMAP_CONFIGMAP", Value: "sha-value"}, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: true, + expected: constants.Updated, + envVarRemoved: true, + }, + { + name: "No env var to remove", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ 
+ Name: "my-configmap", + }, + }, + }, + }, + Env: []v1.EnvVar{}, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: true, + expected: constants.NotUpdated, + envVarRemoved: false, + }, + { + name: "Remove existing env var - secret envFrom", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + SecretRef: &v1.SecretEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-secret", + }, + }, + }, + }, + Env: []v1.EnvVar{ + {Name: "STAKATER_MY_SECRET_SECRET", Value: "sha-value"}, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-secret", + Type: constants.SecretEnvVarPostfix, + }, + autoReload: true, + expected: constants.Updated, + envVarRemoved: true, + }, + { + name: "No container found", + containers: []v1.Container{}, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: true, + expected: constants.NoContainerFound, + envVarRemoved: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes) + + funcs := callbacks.RollingUpgradeFuncs{ + ContainersFunc: mockContainersFunc, + InitContainersFunc: mockInitContainersFunc, + VolumesFunc: mockVolumesFunc, + PodAnnotationsFunc: mockPodAnnotationsFunc, + PatchTemplatesFunc: mockPatchTemplatesFunc, + SupportsPatch: true, + } + + result := removeContainerEnvVars(funcs, deployment, tt.config, tt.autoReload) + + assert.Equal(t, tt.expected, result.Result) + + if tt.envVarRemoved { + containers := deployment.Spec.Template.Spec.Containers + for _, c := range containers { + for _, env := range c.Env { + envVarName := getEnvVarName(tt.config.ResourceName, tt.config.Type) + assert.NotEqual(t, envVarName, env.Name, "Env var should have been 
removed") + } + } + } + }) + } +} + +func TestInvokeDeleteStrategy(t *testing.T) { + originalStrategy := options.ReloadStrategy + defer func() { + options.ReloadStrategy = originalStrategy + }() + + tests := []struct { + name string + reloadStrategy string + containers []v1.Container + volumes []v1.Volume + config common.Config + }{ + { + name: "Annotations strategy", + reloadStrategy: constants.AnnotationsReloadStrategy, + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + SHAValue: "sha-value", + }, + }, + { + name: "EnvVars strategy", + reloadStrategy: constants.EnvVarsReloadStrategy, + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + Env: []v1.EnvVar{ + {Name: "STAKATER_MY_CONFIGMAP_CONFIGMAP", Value: "sha-value"}, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + options.ReloadStrategy = tt.reloadStrategy + + deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes) + + funcs := callbacks.RollingUpgradeFuncs{ + ContainersFunc: mockContainersFunc, + InitContainersFunc: mockInitContainersFunc, + VolumesFunc: mockVolumesFunc, + PodAnnotationsFunc: mockPodAnnotationsFunc, + PatchTemplatesFunc: mockPatchTemplatesFunc, + SupportsPatch: true, + } + + result := invokeDeleteStrategy(funcs, deployment, tt.config, true) + + assert.NotNil(t, result) + }) + } +} + +func TestRemovePodAnnotations(t *testing.T) 
{ + tests := []struct { + name string + containers []v1.Container + volumes []v1.Volume + config common.Config + }{ + { + name: "Remove pod annotations - configmap", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + SHAValue: "sha-value", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes) + + funcs := callbacks.RollingUpgradeFuncs{ + ContainersFunc: mockContainersFunc, + InitContainersFunc: mockInitContainersFunc, + VolumesFunc: mockVolumesFunc, + PodAnnotationsFunc: mockPodAnnotationsFunc, + PatchTemplatesFunc: mockPatchTemplatesFunc, + SupportsPatch: false, + } + + result := removePodAnnotations(funcs, deployment, tt.config, true) + + assert.Equal(t, constants.Updated, result.Result) + }) + } +} diff --git a/internal/pkg/handler/handlers_test.go b/internal/pkg/handler/handlers_test.go new file mode 100644 index 000000000..dedefcc90 --- /dev/null +++ b/internal/pkg/handler/handlers_test.go @@ -0,0 +1,281 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/metrics" +) + +// Helper function to create a test ConfigMap +func createTestConfigMap(data map[string]string) *v1.ConfigMap { + return &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: data, + } +} + +// Helper function to create a test Secret +func createTestSecret(data map[string][]byte) *v1.Secret { + return &v1.Secret{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: data, + } +} + +// Helper function to create test metrics collectors +func createTestCollectors() metrics.Collectors { + return metrics.NewCollectors() +} + +// ============================================================ +// ResourceCreatedHandler Tests +// ============================================================ + +func TestResourceCreatedHandler_GetConfig_ConfigMap(t *testing.T) { + cm := createTestConfigMap(map[string]string{"key": "value"}) + handler := ResourceCreatedHandler{ + Resource: cm, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-cm", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.Empty(t, oldSHA) +} + +func TestResourceCreatedHandler_GetConfig_Secret(t *testing.T) { + secret := createTestSecret(map[string][]byte{"key": []byte("value")}) + handler := ResourceCreatedHandler{ + Resource: secret, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-secret", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.SecretEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.Empty(t, oldSHA) +} + +func TestResourceCreatedHandler_GetConfig_InvalidResource(t *testing.T) { + handler := ResourceCreatedHandler{ + Resource: "invalid", + Collectors: createTestCollectors(), + } + + config, _ := handler.GetConfig() + + assert.Empty(t, config.ResourceName) +} + +func TestResourceCreatedHandler_Handle_NilResource(t *testing.T) { + handler := ResourceCreatedHandler{ + Resource: nil, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + assert.NoError(t, err) +} + +// ============================================================ +// 
ResourceDeleteHandler Tests +// ============================================================ + +func TestResourceDeleteHandler_GetConfig_ConfigMap(t *testing.T) { + cm := createTestConfigMap(map[string]string{"key": "value"}) + handler := ResourceDeleteHandler{ + Resource: cm, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-cm", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.Empty(t, oldSHA) +} + +func TestResourceDeleteHandler_GetConfig_Secret(t *testing.T) { + secret := createTestSecret(map[string][]byte{"key": []byte("value")}) + handler := ResourceDeleteHandler{ + Resource: secret, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-secret", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.SecretEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.Empty(t, oldSHA) +} + +func TestResourceDeleteHandler_GetConfig_InvalidResource(t *testing.T) { + handler := ResourceDeleteHandler{ + Resource: "invalid", + Collectors: createTestCollectors(), + } + + config, _ := handler.GetConfig() + + assert.Empty(t, config.ResourceName) +} + +func TestResourceDeleteHandler_Handle_NilResource(t *testing.T) { + handler := ResourceDeleteHandler{ + Resource: nil, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + assert.NoError(t, err) +} + +// ============================================================ +// ResourceUpdatedHandler Tests +// ============================================================ + +func TestResourceUpdatedHandler_GetConfig_ConfigMap(t *testing.T) { + oldCM := createTestConfigMap(map[string]string{"key": "old-value"}) + newCM := createTestConfigMap(map[string]string{"key": "new-value"}) + + handler := ResourceUpdatedHandler{ 
+ Resource: newCM, + OldResource: oldCM, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-cm", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.NotEmpty(t, oldSHA) + assert.NotEqual(t, config.SHAValue, oldSHA) +} + +func TestResourceUpdatedHandler_GetConfig_ConfigMap_SameData(t *testing.T) { + oldCM := createTestConfigMap(map[string]string{"key": "same-value"}) + newCM := createTestConfigMap(map[string]string{"key": "same-value"}) + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-cm", config.ResourceName) + assert.Equal(t, config.SHAValue, oldSHA) +} + +func TestResourceUpdatedHandler_GetConfig_Secret(t *testing.T) { + oldSecret := createTestSecret(map[string][]byte{"key": []byte("old-value")}) + newSecret := createTestSecret(map[string][]byte{"key": []byte("new-value")}) + + handler := ResourceUpdatedHandler{ + Resource: newSecret, + OldResource: oldSecret, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-secret", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.SecretEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.NotEmpty(t, oldSHA) + assert.NotEqual(t, config.SHAValue, oldSHA) +} + +func TestResourceUpdatedHandler_GetConfig_Secret_SameData(t *testing.T) { + oldSecret := createTestSecret(map[string][]byte{"key": []byte("same-value")}) + newSecret := createTestSecret(map[string][]byte{"key": []byte("same-value")}) + + handler := ResourceUpdatedHandler{ + Resource: newSecret, + OldResource: oldSecret, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, 
"test-secret", config.ResourceName) + assert.Equal(t, config.SHAValue, oldSHA) +} + +func TestResourceUpdatedHandler_GetConfig_InvalidResource(t *testing.T) { + handler := ResourceUpdatedHandler{ + Resource: "invalid", + OldResource: "invalid", + Collectors: createTestCollectors(), + } + + config, _ := handler.GetConfig() + + assert.Empty(t, config.ResourceName) +} + +func TestResourceUpdatedHandler_Handle_NilResource(t *testing.T) { + handler := ResourceUpdatedHandler{ + Resource: nil, + OldResource: nil, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + assert.NoError(t, err) +} + +func TestResourceUpdatedHandler_Handle_NilOldResource(t *testing.T) { + cm := createTestConfigMap(map[string]string{"key": "value"}) + handler := ResourceUpdatedHandler{ + Resource: cm, + OldResource: nil, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + assert.NoError(t, err) +} + +func TestResourceUpdatedHandler_Handle_NoChange(t *testing.T) { + cm := createTestConfigMap(map[string]string{"key": "same-value"}) + handler := ResourceUpdatedHandler{ + Resource: cm, + OldResource: cm, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + assert.NoError(t, err) +} diff --git a/internal/pkg/handler/pause_deployment.go b/internal/pkg/handler/pause_deployment.go index 28d1b9efd..d255b1cc3 100644 --- a/internal/pkg/handler/pause_deployment.go +++ b/internal/pkg/handler/pause_deployment.go @@ -7,11 +7,12 @@ import ( "time" "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/kube" app "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" patchtypes "k8s.io/apimachinery/pkg/types" + + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/pkg/kube" ) // Keeps track of currently active timers diff --git a/internal/pkg/handler/pause_deployment_test.go b/internal/pkg/handler/pause_deployment_test.go index 
19e7ac661..1f95b11ee 100644 --- a/internal/pkg/handler/pause_deployment_test.go +++ b/internal/pkg/handler/pause_deployment_test.go @@ -6,14 +6,15 @@ import ( "testing" "time" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/pkg/kube" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" testclient "k8s.io/client-go/kubernetes/fake" + + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/pkg/kube" ) func TestIsPaused(t *testing.T) { @@ -377,7 +378,7 @@ func FindDeploymentByName(deployments []runtime.Object, deploymentName string) ( for _, deployment := range deployments { accessor, err := meta.Accessor(deployment) if err != nil { - return nil, fmt.Errorf("error getting accessor for item: %v", err) + return nil, fmt.Errorf("error getting accessor for item: %w", err) } if accessor.GetName() == deploymentName { deploymentObj, ok := deployment.(*appsv1.Deployment) diff --git a/internal/pkg/handler/update.go b/internal/pkg/handler/update.go index 3fde98e3a..7a1ad7d99 100644 --- a/internal/pkg/handler/update.go +++ b/internal/pkg/handler/update.go @@ -4,13 +4,14 @@ import ( "time" "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/record" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" + "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/options" "github.com/stakater/Reloader/internal/pkg/util" "github.com/stakater/Reloader/pkg/common" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" - csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) // ResourceUpdatedHandler contains updated objects diff --git a/internal/pkg/handler/update_test.go b/internal/pkg/handler/update_test.go new file mode 100644 index 000000000..1ae10d413 --- /dev/null +++ b/internal/pkg/handler/update_test.go @@ -0,0 
+1,520 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/metrics" +) + +func TestResourceUpdatedHandler_GetConfig(t *testing.T) { + tests := []struct { + name string + oldResource any + newResource any + expectedName string + expectedNS string + expectedType string + expectSHANotEmpty bool + expectSHAChanged bool + }{ + { + name: "ConfigMap data changed", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key": "old-value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key": "new-value"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "ConfigMap data unchanged", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key": "same-value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key": "same-value"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, + }, + { + name: "ConfigMap key added", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key1": "value1"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key1": "value1", "key2": "value2"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + 
expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "ConfigMap key removed", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key1": "value1", "key2": "value2"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key1": "value1"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "ConfigMap only labels changed - SHA unchanged", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cm", + Namespace: "default", + Labels: map[string]string{"version": "v1"}, + }, + Data: map[string]string{"key": "value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cm", + Namespace: "default", + Labels: map[string]string{"version": "v2"}, + }, + Data: map[string]string{"key": "value"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, + }, + { + name: "ConfigMap only annotations changed - SHA unchanged", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cm", + Namespace: "default", + Annotations: map[string]string{"note": "old"}, + }, + Data: map[string]string{"key": "value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cm", + Namespace: "default", + Annotations: map[string]string{"note": "new"}, + }, + Data: map[string]string{"key": "value"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, + }, + { + name: "Secret data changed", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: 
map[string][]byte{"password": []byte("old-pass")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"password": []byte("new-pass")}, + }, + expectedName: "my-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "Secret data unchanged", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"password": []byte("same-pass")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"password": []byte("same-pass")}, + }, + expectedName: "my-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, + }, + { + name: "Secret key added", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"key1": []byte("value1")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")}, + }, + expectedName: "my-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "Secret only labels changed - SHA unchanged", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "default", + Labels: map[string]string{"env": "dev"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "default", + Labels: map[string]string{"env": "prod"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + }, + expectedName: "my-secret", + expectedNS: "default", + expectedType: 
constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, + }, + { + name: "Invalid resource type", + oldResource: "invalid", + newResource: "invalid", + expectedName: "", + expectedNS: "", + expectedType: "", + expectSHANotEmpty: false, + expectSHAChanged: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := ResourceUpdatedHandler{ + Resource: tt.newResource, + OldResource: tt.oldResource, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, tt.expectedName, config.ResourceName) + assert.Equal(t, tt.expectedNS, config.Namespace) + assert.Equal(t, tt.expectedType, config.Type) + + if tt.expectSHANotEmpty { + assert.NotEmpty(t, config.SHAValue, "new SHA should not be empty") + assert.NotEmpty(t, oldSHA, "old SHA should not be empty") + } + + if tt.expectSHAChanged { + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should have changed") + } else if tt.expectSHANotEmpty { + assert.Equal(t, config.SHAValue, oldSHA, "SHA should not have changed") + } + }) + } +} + +func TestResourceUpdatedHandler_Handle(t *testing.T) { + tests := []struct { + name string + oldResource any + newResource any + expectError bool + }{ + { + name: "Both resources nil", + oldResource: nil, + newResource: nil, + expectError: false, + }, + { + name: "Old resource nil", + oldResource: nil, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + }, + expectError: false, + }, + { + name: "New resource nil", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + }, + newResource: nil, + expectError: false, + }, + { + name: "ConfigMap unchanged - no action", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "same"}, + }, + 
newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "same"}, + }, + expectError: false, + }, + { + name: "ConfigMap changed - triggers update", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "old"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "new"}, + }, + expectError: false, + }, + { + name: "Secret unchanged - no action", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("same")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("same")}, + }, + expectError: false, + }, + { + name: "Secret changed - triggers update", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("old")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("new")}, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := ResourceUpdatedHandler{ + Resource: tt.newResource, + OldResource: tt.oldResource, + Collectors: metrics.NewCollectors(), + } + + err := handler.Handle() + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestResourceUpdatedHandler_GetConfig_Annotations(t *testing.T) { + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: "default", + Annotations: map[string]string{ + "old-annotation": "old-value", + }, + }, + Data: map[string]string{"key": "value"}, + } + + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + 
Namespace: "default", + Annotations: map[string]string{ + "new-annotation": "new-value", + }, + }, + Data: map[string]string{"key": "value"}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + assert.Equal(t, "new-value", config.ResourceAnnotations["new-annotation"]) + _, hasOld := config.ResourceAnnotations["old-annotation"] + assert.False(t, hasOld) +} + +func TestResourceUpdatedHandler_GetConfig_Labels(t *testing.T) { + oldSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + Labels: map[string]string{"version": "v1"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + newSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + Labels: map[string]string{"version": "v2"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + handler := ResourceUpdatedHandler{ + Resource: newSecret, + OldResource: oldSecret, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + assert.Equal(t, "v2", config.Labels["version"]) +} + +func TestResourceUpdatedHandler_EmptyToNonEmpty(t *testing.T) { + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{}, + } + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when data is added") +} + +func TestResourceUpdatedHandler_NonEmptyToEmpty(t *testing.T) { + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + } + newCM := 
&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when data is removed") +} + +func TestResourceUpdatedHandler_BinaryDataChange(t *testing.T) { + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + BinaryData: map[string][]byte{"binary": []byte("old-binary")}, + } + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + BinaryData: map[string][]byte{"binary": []byte("new-binary")}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when binary data changes") +} + +func TestResourceUpdatedHandler_MixedDataAndBinaryData(t *testing.T) { + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"text": "value"}, + BinaryData: map[string][]byte{"binary": []byte("binary-value")}, + } + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"text": "value"}, + BinaryData: map[string][]byte{"binary": []byte("new-binary-value")}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when binary data changes") +} + +func TestResourceUpdatedHandler_DifferentNamespaces(t *testing.T) { + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "ns1"}, + Data: map[string]string{"key": "value"}, + } + newCM 
:= &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "ns2"}, + Data: map[string]string{"key": "value"}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + assert.Equal(t, "ns2", config.Namespace) +} diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go index 982dbfad5..a48704030 100644 --- a/internal/pkg/handler/upgrade.go +++ b/internal/pkg/handler/upgrade.go @@ -14,14 +14,6 @@ import ( "github.com/parnurzeal/gorequest" "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" - alert "github.com/stakater/Reloader/internal/pkg/alerts" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" app "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -32,6 +24,15 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" + + alert "github.com/stakater/Reloader/internal/pkg/alerts" + "github.com/stakater/Reloader/internal/pkg/callbacks" + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/internal/pkg/util" + "github.com/stakater/Reloader/pkg/common" + "github.com/stakater/Reloader/pkg/kube" ) // GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment @@ -617,7 +618,7 @@ func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item run return InvokeStrategyResult{constants.NotUpdated, nil} } - //update if env var exists + // 
update if env var exists updateResult := updateEnvVar(container, envVar, config.SHAValue) // if no existing env var exists lets create one @@ -680,10 +681,10 @@ func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *co } func jsonEscape(toEscape string) (string, error) { - bytes, err := json.Marshal(toEscape) + data, err := json.Marshal(toEscape) if err != nil { return "", err } - escaped := string(bytes) + escaped := string(data) return escaped[1 : len(escaped)-1], nil } diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go index 68ba94dec..82701329e 100644 --- a/internal/pkg/handler/upgrade_test.go +++ b/internal/pkg/handler/upgrade_test.go @@ -1,5153 +1,1376 @@ package handler import ( - "context" - "fmt" - "os" + "errors" "testing" - "time" - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/prometheus/client_golang/prometheus" - promtestutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - testclient "k8s.io/client-go/kubernetes/fake" - csitestclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned/fake" -) - -var ( - clients = kube.Clients{ - 
KubernetesClient: testclient.NewClientset(), - CSIClient: csitestclient.NewSimpleClientset(), - } - - arsNamespace = "test-handler-" + testutil.RandSeq(5) - arsConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - arsSecretName = "testsecret-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - arsProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) - arsConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - arsSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - arsProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - arsConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - arsSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - arsSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - arsSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - arsConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - arsConfigMapWithNonAnnotatedDeployment = "testconfigmapNonAnnotatedDeployment-handler-" + testutil.RandSeq(5) - arsSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - 
arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - arsSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - arsConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5) + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/util/retry" - // Secret provider class - arsSecretProviderClassName = "testsecretproviderclass-handler-" + testutil.RandSeq(5) - arsSecretProviderClassWithInitContainer = "testsecretproviderclassWithInitContainer-handler-" + testutil.RandSeq(5) - arsSecretProviderClassWithSPCAutoAnnotation = "testsecretproviderclasswithspcautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsSecretProviderClassWithExcludeSPCAnnotation = "testsecretproviderclasswithspcexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsSecretProviderClassReloadedWithSameConfig = "testsecretproviderclassreloadedwithsameconfig-handler-" + testutil.RandSeq(5) - arsSecretProviderClassReloadedWithDifferentConfig = "testsecretproviderclassreloadedwithdifferentconfig-handler-" + testutil.RandSeq(5) - - ersNamespace = "test-handler-" + testutil.RandSeq(5) - ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - ersSecretName = "testsecret-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - ersProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) - ersConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - ersSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapWithInitContainer = 
"testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - ersProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - ersConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - ersSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - ersSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - ersSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - ersConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - ersSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersSecretWithSecretExcludeAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - ersSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - ersConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5) - - // SecretProviderClass - ersSecretProviderClassName = "testsecretproviderclass-handler-" + testutil.RandSeq(5) - ersSecretProviderClassWithInitContainer = "testsecretproviderclassWithInitContainer-handler-" + testutil.RandSeq(5) - - 
ersSecretProviderClassWithSPCAutoAnnotation = "testsecretproviderclasswithspcautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersSecretProviderClassWithExcludeSPCAnnotation = "testsecretproviderclasswithspcexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersSecretProviderClassReloadedWithSameConfig = "testsecretproviderclassreloadedwithsameconfig-handler-" + testutil.RandSeq(5) - ersSecretProviderClassReloadedWithDifferentConfig = "testsecretproviderclassreloadedwithdifferentconfig-handler-" + testutil.RandSeq(5) + "github.com/stakater/Reloader/internal/pkg/callbacks" + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/pkg/common" ) -func TestMain(m *testing.M) { - - // Creating namespaces - testutil.CreateNamespace(arsNamespace, clients.KubernetesClient) - testutil.CreateNamespace(ersNamespace, clients.KubernetesClient) - - logrus.Infof("Setting up the annotation reload strategy test resources") - setupArs() - logrus.Infof("Setting up the env-var reload strategy test resources") - setupErs() - - logrus.Infof("Running Testcases") - retCode := m.Run() - - logrus.Infof("tearing down the annotation reload strategy test resources") - teardownArs() - logrus.Infof("tearing down the env-var reload strategy test resources") - teardownErs() - - os.Exit(retCode) -} - -func setupArs() { - // Creating configmap - _, err := testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating secretproviderclass - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, 
arsSecretProviderClassName, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating configmap will be used in projected volume - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume in init containers - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume in init containers - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = 
testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating secretproviderclass - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithInitContainer, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating secretproviderclass used with secretproviderclass auto annotation - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithSPCAutoAnnotation, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass 
creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap for testing pausing deployments - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating secretproviderclass used with secret auto annotation - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithExcludeSPCAnnotation, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating secretproviderclass to reload with same config - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassReloadedWithSameConfig, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating secretproviderclass to reload with different config - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap with 
ignore annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - // Patch with ignore annotation - cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(arsNamespace) - patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`) - _, _ = cmClient.Patch(context.TODO(), arsConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating secret with ignore annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithIgnoreAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - secretClient := clients.KubernetesClient.CoreV1().Secrets(arsNamespace) - _, _ = secretClient.Patch(context.TODO(), arsSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating Deployment referencing configmap with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithIgnoreAnnotation, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err) - } - // Creating Deployment referencing secret with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithIgnoreAnnotation, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err) - } - - // Creating Deployment with configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitContainer, 
arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsProjectedConfigMapWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsProjectedSecretWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secretproviderclass mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretProviderClassWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass creation: %v", err) - } - - // Creating Deployment with configmap mounted as Env in init container - _, err = 
testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitEnv, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretWithInitEnv, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secretproviderclass - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretProviderClassName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass creation: %v", err) - } - - // Creating Deployment with env var source as configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err) - } - - // Creating Deployment with env var source as secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, arsConfigmapWithEnvFromName, arsNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, 
arsSecretWithEnvFromName, arsNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - arsConfigmapAnnotated, - arsNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with configmap and without annotations - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(clients.KubernetesClient, arsConfigMapWithNonAnnotatedDeployment, arsNamespace, map[string]string{}) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and without annotation creation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretWithSecretAutoAnnotation, arsNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) - } - - // Creating Deployment with secretproviderclass and with secretproviderclass auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretProviderClassWithSPCAutoAnnotation, arsNamespace, testutil.SecretProviderClassPodStatusResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsConfigmapWithConfigMapAutoAnnotation, arsNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with 
configmap auto annotation: %v", err) - } - - // Creating Deployment with secret and exclude secret annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretWithExcludeSecretAnnotation, arsNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) - } - - // Creating Deployment with secretproviderclass and exclude secretproviderclass annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretProviderClassWithExcludeSPCAnnotation, arsNamespace, testutil.SecretProviderClassPodStatusResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass exclude annotation: %v", err) - } - - // Creating Deployment with secretproviderclass to reload with same config - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretProviderClassReloadedWithSameConfig, arsNamespace, testutil.SecretProviderClassPodStatusResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass to reload with same config: %v", err) - } - - // Creating Deployment with secretproviderclass to reload with different config - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretProviderClassReloadedWithDifferentConfig, arsNamespace, testutil.SecretProviderClassPodStatusResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass to reload with different config: %v", err) - } - - // Creating Deployment with secret and exclude configmap annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsConfigmapWithExcludeConfigMapAnnotation, arsNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", 
err) - } - - // Creating DaemonSet with configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with secretproviderclass - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretProviderClassName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secretproviderclass creation: %v", err) - } - - // Creating DaemonSet with configmap in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with env var source as configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err) - } - - // Creating DaemonSet with env var source as secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in 
StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret creation: %v", err) - } - - // Creating StatefulSet with secretproviderclass - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretProviderClassName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secretproviderclass creation: %v", err) - } - - // Creating StatefulSet with configmap in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with env var source as configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with env var source as secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with pod annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, arsConfigmapWithPodAnnotations, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with pod annotations: %v", err) - } - - // Creating Deployment with both annotations - _, err = 
testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, arsConfigmapWithBothAnnotations, arsNamespace, true) - - if err != nil { - logrus.Errorf("Error in Deployment with both annotations: %v", err) +func TestGetRollingUpgradeFuncs(t *testing.T) { + tests := []struct { + name string + getFuncs func() callbacks.RollingUpgradeFuncs + resourceType string + supportsPatch bool + }{ + { + name: "Deployment", + getFuncs: GetDeploymentRollingUpgradeFuncs, + resourceType: "Deployment", + supportsPatch: true, + }, + { + name: "CronJob", + getFuncs: GetCronJobCreateJobFuncs, + resourceType: "CronJob", + supportsPatch: false, + }, + { + name: "Job", + getFuncs: GetJobCreateJobFuncs, + resourceType: "Job", + supportsPatch: false, + }, + { + name: "DaemonSet", + getFuncs: GetDaemonSetRollingUpgradeFuncs, + resourceType: "DaemonSet", + supportsPatch: true, + }, + { + name: "StatefulSet", + getFuncs: GetStatefulSetRollingUpgradeFuncs, + resourceType: "StatefulSet", + supportsPatch: true, + }, + { + name: "ArgoRollout", + getFuncs: GetArgoRolloutRollingUpgradeFuncs, + resourceType: "Rollout", + supportsPatch: false, + }, } - // Creating Deployment with pause annotation - _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, arsConfigmapWithPausedDeployment, arsNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + funcs := tt.getFuncs() + assert.Equal(t, tt.resourceType, funcs.ResourceType) + assert.Equal(t, tt.supportsPatch, funcs.SupportsPatch) + assert.NotNil(t, funcs.ItemFunc) + assert.NotNil(t, funcs.ItemsFunc) + assert.NotNil(t, funcs.AnnotationsFunc) + assert.NotNil(t, funcs.PodAnnotationsFunc) + assert.NotNil(t, funcs.ContainersFunc) + assert.NotNil(t, funcs.InitContainersFunc) + assert.NotNil(t, funcs.UpdateFunc) + assert.NotNil(t, funcs.PatchFunc) + 
assert.NotNil(t, funcs.PatchTemplatesFunc) + assert.NotNil(t, funcs.VolumesFunc) + }) } } -func teardownArs() { - // Deleting Deployment with configmap - deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume mounted in init container - deploymentError = 
testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap as env var source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError) - } - - // Deleting Deployment with configmap mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass mounted in init container %v", deploymentError) - } - - // Deleting Deployment with configmap mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, 
arsConfigmapWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with configmap as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError) - } - - // Deleting Deployment with secret as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError) - } - - // Deleting Deployment with pod annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError) - } - - // Deleting Deployment with both annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithBothAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError) - } - - // Deleting Deployment with search annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapAnnotated) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError) - } 
- - // Deleting Deployment with secret and secret auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass and secretproviderclass auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassWithSPCAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass auto annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting Deployment with secret and exclude secret annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass and exclude secretproviderclass annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassWithExcludeSPCAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass auto annotation %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass to reload with same config - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassReloadedWithSameConfig) - if deploymentError != nil { - logrus.Errorf("Error while 
deleting deployment with secretproviderclass to reload with same config %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass to reload with different config - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass to reload with different config %v", deploymentError) - } - - // Deleting Deployment with configmap and exclude configmap annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting DaemonSet with configmap - daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting DeamonSet with secret - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting DeamonSet with secretproviderclass - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretProviderClassName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secretproviderclass %v", daemonSetError) - } - - // Deleting DaemonSet with configmap in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret in projected volume - daemonSetError = 
testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting Deployment with configmap as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError) - } - - // Deleting Deployment with secret as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret as env var source %v", daemonSetError) - } - - // Deleting StatefulSet with configmap - statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting StatefulSet with secret - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with secretproviderclass - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretProviderClassName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secretproviderclass %v", statefulSetError) - } - - // Deleting StatefulSet with configmap in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret in projected volume 
- statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError) - } - - // Deleting Deployment with secret as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError) - } - - // Deleting Deployment with pause annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Configmap - err := testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Secretproviderclass - err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass %v", err) - } - - // Deleting configmap used in projected volume - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - 
} - - // Deleting Secret used in projected volume - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume in init containers - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting secret used in projected volume in init containers - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used in init container - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap used in init container %v", err) - } - - // Deleting Secret used in init container - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret used in init container %v", err) - } - - // Deleting Secretproviderclass used in init container - err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the 
secretproviderclass used in init container %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err) - } - - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err) - } - - // Deleting Secret used with secret auto annotation - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotations: %v", err) - } - - // Deleting SecretProviderClass used with secretproviderclass auto annotation - err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithSPCAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass auto annotations: %v", err) - } - - // Deleting ConfigMap used with configmap auto annotation - 
err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err) - } - - // Deleting Secret used with exclude secret annotation - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotations: %v", err) - } - - // Deleting Secretproviderclass used with exclude secretproviderclass annotation - err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithExcludeSPCAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass auto annotations: %v", err) - } - - // Deleting SecretProviderClass used with secretproviderclass to reload with same config - err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassReloadedWithSameConfig) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass to reload with same config: %v", err) - } - - // Deleting SecretProviderClass used with secretproviderclass to reload with different config - err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass to reload with different config: %v", err) - } - - // Deleting ConfigMap used with exclude configmap annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err) - } - - // Deleting configmap for testing pausing deployments - err = 
testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment) - if err != nil { - logrus.Errorf("Error while deleting the configmap: %v", err) - } - - // Deleting namespace - testutil.DeleteNamespace(arsNamespace, clients.KubernetesClient) - -} - -func setupErs() { - // Creating configmap - _, err := testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating secretproviderclass - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassName, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating configmap will be used in projected volume - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume in init containers - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume in init containers - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer, data) - if err != nil { - 
logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap for testing pausing deployments - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating secretproviderclass - _, err = 
testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithInitContainer, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secretproviderclass used with secretproviderclass auto annotation - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithSPCAutoAnnotation, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating secret used with secret exclude annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap exclude annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secretproviderclass used with secret exclude annotation - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithExcludeSPCAnnotation, "testing") - if err != nil { 
- logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating secretproviderclass to reload with same config - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassReloadedWithSameConfig, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating secretproviderclass to reload with different config - _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig, "testing") - if err != nil { - logrus.Errorf("Error in secretproviderclass creation: %v", err) - } - - // Creating configmap with ignore annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(ersNamespace) - patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`) - _, _ = cmClient.Patch(context.TODO(), ersConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating secret with ignore annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithIgnoreAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - secretClient := clients.KubernetesClient.CoreV1().Secrets(ersNamespace) - _, _ = secretClient.Patch(context.TODO(), ersSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating Deployment referencing configmap with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithIgnoreAnnotation, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err) - } - // Creating Deployment referencing secret with ignore 
annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithIgnoreAnnotation, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err) - } - - // Creating Deployment with configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersProjectedConfigMapWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersProjectedSecretWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret mounted in init container - _, err = 
testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secretproviderclass mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretProviderClassWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass creation: %v", err) - } - - // Creating Deployment with configmap mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitEnv, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretWithInitEnv, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secretproviderclass - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretProviderClassName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass creation: %v", err) - } - - // Creating Deployment with env var source as configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err) - } - - // Creating Deployment with env var source as secret - _, err = 
testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, ersConfigmapWithEnvFromName, ersNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, ersSecretWithEnvFromName, ersNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - ersConfigmapAnnotated, - ersNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretWithSecretAutoAnnotation, ersNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersConfigmapWithConfigMapAutoAnnotation, ersNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap auto annotation: %v", err) - } - - // Creating Deployment with secretproviderclass and with secretproviderclass auto annotation - _, 
err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretProviderClassWithSPCAutoAnnotation, ersNamespace, testutil.SecretProviderClassPodStatusResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersSecretWithSecretExcludeAnnotation, ersNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) - } - - // Creating Deployment with secret and with secret exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersConfigmapWithConfigMapExcludeAnnotation, ersNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err) - } - - // Creating Deployment with secretproviderclass and with secretproviderclass exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersSecretProviderClassWithExcludeSPCAnnotation, ersNamespace, testutil.SecretProviderClassPodStatusResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass exclude annotation: %v", err) - } - - // Creating Deployment with pause annotation - _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, ersConfigmapWithPausedDeployment, ersNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating DaemonSet with configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - 
logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with secretproviderclass - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretProviderClassName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secretproviderclass creation: %v", err) - } - - // Creating DaemonSet with configmap in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with env var source as configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err) - } - - // Creating DaemonSet with env var source as secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretName, 
ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret creation: %v", err) - } - - // Creating StatefulSet with secretproviderclass - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretProviderClassName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secretproviderclass creation: %v", err) - } - - // Creating StatefulSet with configmap in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with env var source as configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with env var source as secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with pod annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, ersConfigmapWithPodAnnotations, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with pod annotations: %v", err) - } - - // Creating Deployment with both annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, ersConfigmapWithBothAnnotations, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in 
Deployment with both annotations: %v", err) - } - - // Creating Deployment with secretproviderclass to reload with same config - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretProviderClassReloadedWithSameConfig, ersNamespace, testutil.SecretProviderClassPodStatusResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass to reload with same config: %v", err) - } - - // Creating Deployment with secretproviderclass to reload with different config - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretProviderClassReloadedWithDifferentConfig, ersNamespace, testutil.SecretProviderClassPodStatusResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secretproviderclass to reload with different config: %v", err) - } -} - -func teardownErs() { - // Deleting Deployment with configmap - deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretprovider class %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } 
- - // Deleting Deployment with configmap in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap as env var source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError) - } - - // Deleting Deployment with configmap mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted in init container - deploymentError = 
testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass mounted in init container %v", deploymentError) - } - - // Deleting Deployment with configmap mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with configmap as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError) - } - - // Deleting Deployment with secret as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError) - } - - // Deleting Deployment with pod annotations - deploymentError = 
testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError) - } - - // Deleting Deployment with both annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithBothAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError) - } - - // Deleting Deployment with search annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapAnnotated) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass and secretproviderclass auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassWithSPCAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass auto annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret exclude annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, 
ersSecretWithSecretExcludeAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret exclude annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap exclude annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap exclude annotation %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass and secretproviderclass exclude annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassWithExcludeSPCAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass exclude annotation %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass to reload with same config - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassReloadedWithSameConfig) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass to reload with same config %v", deploymentError) - } - - // Deleting Deployment with secretproviderclass to reload with different config - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secretproviderclass to reload with different config %v", deploymentError) - } - - // Deleting DaemonSet with configmap - daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting DaemonSet with secret - daemonSetError = 
testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting DaemonSet with secretproviderclass - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretProviderClassName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secretproviderclass %v", daemonSetError) - } - - // Deleting DaemonSet with configmap in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting Deployment with configmap as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError) - } - - // Deleting Deployment with secret as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret as env var source %v", daemonSetError) - } - - // Deleting StatefulSet with configmap - statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting StatefulSet with secret - statefulSetError = 
testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with secretproviderclass - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretProviderClassName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secretproviderclass %v", statefulSetError) - } - - // Deleting StatefulSet with configmap in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError) - } - - // Deleting Deployment with secret as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError) - } - - // Deleting Deployment for testing pausing deployments - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", 
deploymentError) - } - - // Deleting Configmap - err := testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting SecretProviderClass - err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassName) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass %v", err) - } - - // Deleting configmap used in projected volume - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret used in projected volume - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume in init containers - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting secret used in projected volume in init containers - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, 
ersSecretWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used in init container - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap used in init container %v", err) - } - - // Deleting Secret used in init container - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret used in init container %v", err) - } - - // Deleting SecretProviderClass used in init container - err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass used in init container %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err) - } - - err = 
testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err) - } - - // Deleting Secret used with secret auto annotation - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotation: %v", err) - } - - // Deleting ConfigMap used with configmap auto annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotation: %v", err) - } - - // Deleting SecretProviderClass used with secretproviderclass auto annotation - err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithSPCAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass auto annotation: %v", err) - } - - // Deleting Secret used with secret exclude annotation - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret exclude annotation: %v", err) - } - - // Deleting ConfigMap used with configmap exclude annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", err) - } - - // Deleting SecretProviderClass used with secretproviderclass exclude annotation - err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithExcludeSPCAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the 
secretproviderclass used with secretproviderclass exclude annotation: %v", err) - } - - // Deleting SecretProviderClass used with secretproviderclass to reload with same config - err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassReloadedWithSameConfig) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass to reload with same config: %v", err) - } - - // Deleting SecretProviderClass used with secretproviderclass to reload with different config - err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig) - if err != nil { - logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass to reload with different config: %v", err) - } - // Deleting ConfigMap for testing pausing deployments - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment) - if err != nil { - logrus.Errorf("Error while deleting the configmap: %v", err) - } - - // Deleting namespace - testutil.DeleteNamespace(ersNamespace, clients.KubernetesClient) - -} - -func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) common.Config { - ns := ersNamespace - if options.ReloadStrategy == constants.AnnotationsReloadStrategy { - ns = arsNamespace - } - - return common.Config{ - Namespace: ns, - ResourceName: name, - SHAValue: shaData, - Annotation: annotation, - TypedAutoAnnotation: typedAutoAnnotation, - Type: resourceType, - } -} - -func getCollectors() metrics.Collectors { - return metrics.NewCollectors() -} - -var labelSucceeded = prometheus.Labels{"success": "true"} -var labelFailed = prometheus.Labels{"success": "false"} - -func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors 
metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } - - config.SHAValue = testutil.GetSHAfromEmptyData() - removed := testutil.VerifyResourceAnnotationUpdate(clients, config, upgradeFuncs) - if !removed { - t.Errorf("%s was not updated", upgradeFuncs.ResourceType) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } -} - -func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - return nil - } - upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - 
collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDeploymentItem(client, namespace, name) - } - deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDeploymentItems(client, namespace) - } - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - assert.True(t, 
deploymentFuncs.SupportsPatch) - assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().AnnotationTemplate) - - itemCalled := 0 - itemsCalled := 0 - - deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDeploymentItem(client, namespace, name) - } - deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDeploymentItems(client, namespace) - } - - patchCalled := 0 - deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`) - return nil - } - - deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - deploymentFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationAndWithoutAutoReloadAllNoTriggersUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigMapWithNonAnnotatedDeployment, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationButWithAutoReloadAllUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - options.AutoReloadAll = true - defer func() { options.AutoReloadAll = false }() - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigMapWithNonAnnotatedDeployment, shaData, "", 
options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUsingArs(t *testing.T) { - 
options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - time.Sleep(5 * time.Second) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - arsConfigmapAnnotated+"-different", - arsNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - t.Errorf("Failed to create deployment with search annotation.") - } - defer func() { - _ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) - }() 
- // defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{}) - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - 
time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := 
testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithInitEnv, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvFromName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade 
failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } 
- - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassName, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := 
getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := 
testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretproviderclassInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassWithInitContainer, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassWithInitContainer, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - 
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := 
getConfigWithAnnotations(envVarPostfix, arsSecretWithEnvName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := 
testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, 
collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithExcludeSecretAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithExcludeSecretAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretproviderclassExcludeAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassWithExcludeSPCAnnotation, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassWithExcludeSPCAnnotation, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") - } - - logrus.Infof("Verifying deployment did not update") - updated := 
testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be exluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassReloadedWithSameConfigUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassReloadedWithSameConfig, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassReloadedWithSameConfig, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with same config") - } - - logrus.Infof("Verifying deployment did update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - logrus.Infof("Performing reload using same config") - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Second rolling upgrade failed for Deployment with same config") - } - - logrus.Infof("Verifying second reload did not reload") - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 && - promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 0 { - t.Errorf("Second reload with same config updated Deployment") - } -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassReloadedWithDifferentConfigUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := 
constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassReloadedWithDifferentConfig, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with different config") - } - - logrus.Infof("Verifying deployment did update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - logrus.Infof("Applying different config") - shaData = testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig, "testing2") - config.SHAValue = shaData - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Second rolling upgrade failed for Deployment with different config") - } - - logrus.Infof("Verifying deployment did update") - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 && - promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 0 { - t.Errorf("Second reload with different config did not update Deployment") - } -} - -func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithSecretAutoAnnotation, 
"dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithSecretAutoAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassWithSPCAutoAnnotation, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassWithSPCAutoAnnotation, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") - } - - 
logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithExcludeConfigMapAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithExcludeConfigMapAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude ConfigMap") - } - - logrus.Infof("Verifying deployment did update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, 
"www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithConfigMapAutoAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with ConfigMap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDaemonSetItem(client, namespace, name) - } - daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - 
itemsCalled++ - return callbacks.GetDaemonSetItems(client, namespace) - } - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - - itemCalled := 0 - itemsCalled := 0 - - daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDaemonSetItem(client, namespace, name) - } - daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDaemonSetItems(client, namespace) - } - - 
assert.True(t, daemonSetFuncs.SupportsPatch) - assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().AnnotationTemplate) - - patchCalled := 0 - daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`) - return nil - } - - daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - daemonSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, 
options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - 
t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretProviderClassUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassName, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with SecretProviderClass") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := 
PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetStatefulSetItem(client, namespace, name) - } - statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetStatefulSetItems(client, namespace) - } - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling 
upgrade failed for StatefulSet with configmap") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - - itemCalled := 0 - itemsCalled := 0 - - statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetStatefulSetItem(client, namespace, name) - } - statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetStatefulSetItems(client, namespace) - } - - assert.True(t, statefulSetFuncs.SupportsPatch) - assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().AnnotationTemplate) - - patchCalled := 0 - statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace 
string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`) - return nil - } - - statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - statefulSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, 
statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretProviderClassUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassName, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with SecretProviderClass: %v", err) - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased, expected 1 but got %f", promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded))) - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased, expected 1 but got %f", promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace}))) - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, 
clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithPodAnnotations, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithPodAnnotations, shaData, 
options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with pod annotations") - } - - logrus.Infof("Verifying deployment update") - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - var foundPod, foundBoth bool - for _, i := range items { - accessor, err := meta.Accessor(i) - if err != nil { - t.Errorf("Error getting accessor for item: %v", err) - } - name := accessor.GetName() - if name == arsConfigmapWithPodAnnotations { - annotations := deploymentFuncs.PodAnnotationsFunc(i) - updated := testutil.GetResourceSHAFromAnnotation(annotations) - if updated != config.SHAValue { - t.Errorf("Deployment was not updated") - } - foundPod = true - } - if name == arsConfigmapWithBothAnnotations { - annotations := deploymentFuncs.PodAnnotationsFunc(i) - updated := testutil.GetResourceSHAFromAnnotation(annotations) - if updated == config.SHAValue { - t.Errorf("Deployment was updated") - } - foundBoth = true - } - } - if !foundPod { - t.Errorf("Deployment with pod annotations was not found") - } - if !foundBoth { - t.Errorf("Deployment with both annotations was not found") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestFailedRollingUpgradeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "fail.stakater.com") - config := 
getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error { - return fmt.Errorf("error") - } - deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error { - return fmt.Errorf("error") - } - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestIgnoreAnnotationNoReloadUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ARS") - } - - // Ensure deployment is NOT updated - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment 
was updated but should not have been") - } - - // Ensure counters remain zero - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 { - t.Errorf("Reload counter should not have increased") - } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 0 { - t.Errorf("Reload counter by namespace should not have increased") - } -} -func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ERS") - } - - // Ensure deployment is NOT updated - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated but should not have been (ERS)") - } - - // Ensure counters remain zero - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 { - t.Errorf("Reload counter should not have increased (ERS)") - } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 0 { - t.Errorf("Reload counter by namespace should not have increased (ERS)") - } -} - -func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, 
clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } - - removed := testutil.VerifyResourceEnvVarRemoved(clients, config, envVarPostfix, upgradeFuncs) - if !removed { - t.Errorf("%s was not updated", upgradeFuncs.ResourceType) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } -} - -func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate) - - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - assert.Equal(t, patchtypes.JSONPatchType, patchType) - assert.NotEmpty(t, bytes) - return nil - } - upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, 
options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - assert.True(t, deploymentFuncs.SupportsPatch) - assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: 
metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`) - return nil - } - - deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - assert.Equal(t, 2, patchCalled) - - deploymentFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - time.Sleep(5 * time.Second) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - ersConfigmapAnnotated+"-different", - ersNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - t.Errorf("Failed to create deployment with search annotation.") - } - defer func() { - 
_ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) - }() - // defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{}) - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := 
GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - 
t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithInitEnv, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvFromName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := 
GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassName, "testing1") - config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, 
options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassWithInitContainer, "testing1") - config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassWithInitContainer, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, 
envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, 
config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithEnvName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithEnvFromName, shaData, options.ReloaderAutoAnnotation, 
options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not 
updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithSecretExcludeAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithSecretExcludeAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude Secret") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment that had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassExcludeAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassWithExcludeSPCAnnotation, "testing1") - config := getConfigWithAnnotations(envVarPostfix, 
ersSecretProviderClassWithExcludeSPCAnnotation, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude SecretProviderClass") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment that had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassReloadedWithSameConfigUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassReloadedWithSameConfig, "testing1") - config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassReloadedWithSameConfig, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with same config") - } - - logrus.Infof("Verifying deployment did update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - logrus.Infof("Performing reload using same config") - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Second rolling upgrade failed for 
Deployment with same config") - } - - logrus.Infof("Verifying second reload did not reload") - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 && - promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 0 { - t.Errorf("Second reload with same config updated Deployment") - } -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassReloadedWithDifferentConfigUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig, "testing1") - config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassReloadedWithDifferentConfig, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with different config") - } - - logrus.Infof("Verifying deployment did update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - logrus.Infof("Applying different config") - shaData = testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig, "testing2") - config.SHAValue = shaData - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Second rolling upgrade failed for Deployment with different config") - } - - logrus.Infof("Verifying deployment did update") - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 && - promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 0 { - t.Errorf("Second reload with different config did not update Deployment") - } -} - -func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithSecretAutoAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithSecretAutoAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretProviderClassAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, 
ersNamespace, ersSecretProviderClassWithSPCAutoAnnotation, "testing1") - config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassWithSPCAutoAnnotation, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigMapExcludeAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithConfigMapExcludeAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude 
ConfigMap") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithConfigMapAutoAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with ConfigMap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, 
"www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - - assert.True(t, daemonSetFuncs.SupportsPatch) - assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return 
&errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`) - return nil - } - - daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - assert.Equal(t, 2, patchCalled) - - daemonSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { 
- t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretUsingErs(t *testing.T) { - 
options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretProviderClassUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassName, "testing1") - config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, 
config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with SecretProviderClass") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := 
testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - - assert.True(t, statefulSetFuncs.SupportsPatch) - assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`) - return nil - } - - statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - assert.Equal(t, 2, patchCalled) - - statefulSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, 
ersProjectedConfigMapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") +func TestGetVolumeMountName(t *testing.T) { + tests := []struct { + name string + volumes []v1.Volume + mountType string + volumeName string + expected string + }{ + { + name: "ConfigMap volume match", + volumes: []v1.Volume{ + { + Name: "config-volume", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + }, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "my-configmap", + expected: "config-volume", + }, + { + name: "Secret volume match", + volumes: []v1.Volume{ + { + Name: "secret-volume", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: "my-secret", + }, + }, + }, + }, + mountType: constants.SecretEnvVarPostfix, + volumeName: "my-secret", + expected: "secret-volume", + }, + { + name: "ConfigMap in projected volume", + volumes: []v1.Volume{ + { + Name: "projected-volume", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: 
v1.LocalObjectReference{ + Name: "projected-configmap", + }, + }, + }, + }, + }, + }, + }, + }, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "projected-configmap", + expected: "projected-volume", + }, + { + name: "Secret in projected volume", + volumes: []v1.Volume{ + { + Name: "projected-volume", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "projected-secret", + }, + }, + }, + }, + }, + }, + }, + }, + mountType: constants.SecretEnvVarPostfix, + volumeName: "projected-secret", + expected: "projected-volume", + }, + { + name: "No match - wrong configmap name", + volumes: []v1.Volume{ + { + Name: "config-volume", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "other-configmap", + }, + }, + }, + }, + }, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "my-configmap", + expected: "", + }, + { + name: "No match - wrong type", + volumes: []v1.Volume{ + { + Name: "secret-volume", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: "my-secret", + }, + }, + }, + }, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "my-secret", + expected: "", + }, + { + name: "Empty volumes", + volumes: []v1.Volume{}, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "any", + expected: "", + }, } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getVolumeMountName(tt.volumes, tt.mountType, tt.volumeName) + assert.Equal(t, tt.expected, result) + }) } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) } 
-func TestRollingUpgradeForStatefulSetWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") +func TestGetContainerWithVolumeMount(t *testing.T) { + tests := []struct { + name string + containers []v1.Container + volumeMountName string + expectFound bool + expectedName string + }{ + { + name: "Container with matching volume mount", + containers: []v1.Container{ + { + Name: "app", + VolumeMounts: []v1.VolumeMount{ + {Name: "config-volume", MountPath: "/etc/config"}, + }, + }, + }, + volumeMountName: "config-volume", + expectFound: true, + expectedName: "app", + }, + { + name: "Multiple containers, second has mount", + containers: []v1.Container{ + { + Name: "init", + VolumeMounts: []v1.VolumeMount{}, + }, + { + Name: "app", + VolumeMounts: []v1.VolumeMount{ + {Name: "config-volume", MountPath: "/etc/config"}, + }, + }, + }, + volumeMountName: "config-volume", + expectFound: true, + expectedName: "app", + }, + { + name: "No matching volume mount", + containers: []v1.Container{ + { + 
Name: "app", + VolumeMounts: []v1.VolumeMount{ + {Name: "other-volume", MountPath: "/etc/other"}, + }, + }, + }, + volumeMountName: "config-volume", + expectFound: false, + }, + { + name: "Empty containers", + containers: []v1.Container{}, + volumeMountName: "config-volume", + expectFound: false, + }, } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getContainerWithVolumeMount(tt.containers, tt.volumeMountName) + if tt.expectFound { + assert.NotNil(t, result) + assert.Equal(t, tt.expectedName, result.Name) + } else { + assert.Nil(t, result) + } + }) } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) } -func TestRollingUpgradeForStatefulSetWithSecretProviderClassUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretProviderClassEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassName, "testing1") - config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with SecretProviderClass") +func TestGetContainerWithEnvReference(t *testing.T) { + tests := []struct { + name string + containers []v1.Container + resourceName string + resourceType string + expectFound bool + expectedName string + }{ + { + name: "Container with 
ConfigMapKeyRef", + containers: []v1.Container{ + { + Name: "app", + Env: []v1.EnvVar{ + { + Name: "CONFIG_VALUE", + ValueFrom: &v1.EnvVarSource{ + ConfigMapKeyRef: &v1.ConfigMapKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + Key: "key", + }, + }, + }, + }, + }, + }, + resourceName: "my-configmap", + resourceType: constants.ConfigmapEnvVarPostfix, + expectFound: true, + expectedName: "app", + }, + { + name: "Container with SecretKeyRef", + containers: []v1.Container{ + { + Name: "app", + Env: []v1.EnvVar{ + { + Name: "SECRET_VALUE", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-secret", + }, + Key: "key", + }, + }, + }, + }, + }, + }, + resourceName: "my-secret", + resourceType: constants.SecretEnvVarPostfix, + expectFound: true, + expectedName: "app", + }, + { + name: "Container with ConfigMapRef (envFrom)", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + }, + }, + resourceName: "my-configmap", + resourceType: constants.ConfigmapEnvVarPostfix, + expectFound: true, + expectedName: "app", + }, + { + name: "Container with SecretRef (envFrom)", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + SecretRef: &v1.SecretEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-secret", + }, + }, + }, + }, + }, + }, + resourceName: "my-secret", + resourceType: constants.SecretEnvVarPostfix, + expectFound: true, + expectedName: "app", + }, + { + name: "No match - wrong resource name", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "other-configmap", + }, + }, + }, + }, + }, + }, + resourceName: 
"my-configmap", + resourceType: constants.ConfigmapEnvVarPostfix, + expectFound: false, + }, + { + name: "No match - wrong type (looking for secret but has configmap)", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-resource", + }, + }, + }, + }, + }, + }, + resourceName: "my-resource", + resourceType: constants.SecretEnvVarPostfix, + expectFound: false, + }, + { + name: "Empty containers", + containers: []v1.Container{}, + resourceName: "any", + resourceType: constants.ConfigmapEnvVarPostfix, + expectFound: false, + }, } - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getContainerWithEnvReference(tt.containers, tt.resourceName, tt.resourceType) + if tt.expectFound { + assert.NotNil(t, result) + assert.Equal(t, tt.expectedName, result.Name) + } else { + assert.Nil(t, result) + } + }) } +} - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") +func TestGetEnvVarName(t *testing.T) { + tests := []struct { + name string + resourceName string + typeName string + expected string + }{ + { + name: "ConfigMap with simple name", + resourceName: "my-config", + typeName: constants.ConfigmapEnvVarPostfix, + expected: "STAKATER_MY_CONFIG_CONFIGMAP", + }, + { + name: "Secret with simple name", + resourceName: "my-secret", + typeName: constants.SecretEnvVarPostfix, + expected: "STAKATER_MY_SECRET_SECRET", + }, + { + name: "Name with hyphens", + resourceName: "my-app-config", + typeName: constants.ConfigmapEnvVarPostfix, + expected: "STAKATER_MY_APP_CONFIG_CONFIGMAP", + }, + { + name: "Name with dots", + resourceName: "my.app.config", + typeName: 
constants.ConfigmapEnvVarPostfix, + expected: "STAKATER_MY_APP_CONFIG_CONFIGMAP", + }, } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getEnvVarName(tt.resourceName, tt.typeName) + assert.Equal(t, tt.expected, result) + }) } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) } -func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") +func TestUpdateEnvVar(t *testing.T) { + tests := []struct { + name string + container *v1.Container + envVar string + shaData string + expected constants.Result + newValue string + }{ + { + name: "Update existing env var with different value", + container: &v1.Container{ + Name: "app", + Env: []v1.EnvVar{ + {Name: "STAKATER_CONFIG_CONFIGMAP", Value: "old-sha"}, + }, + }, + envVar: 
"STAKATER_CONFIG_CONFIGMAP", + shaData: "new-sha", + expected: constants.Updated, + newValue: "new-sha", + }, + { + name: "No update when value is same", + container: &v1.Container{ + Name: "app", + Env: []v1.EnvVar{ + {Name: "STAKATER_CONFIG_CONFIGMAP", Value: "same-sha"}, + }, + }, + envVar: "STAKATER_CONFIG_CONFIGMAP", + shaData: "same-sha", + expected: constants.NotUpdated, + newValue: "same-sha", + }, + { + name: "Env var not found", + container: &v1.Container{ + Name: "app", + Env: []v1.EnvVar{ + {Name: "OTHER_VAR", Value: "value"}, + }, + }, + envVar: "STAKATER_CONFIG_CONFIGMAP", + shaData: "new-sha", + expected: constants.NoEnvVarFound, + }, + { + name: "Empty env list", + container: &v1.Container{ + Name: "app", + Env: []v1.EnvVar{}, + }, + envVar: "STAKATER_CONFIG_CONFIGMAP", + shaData: "new-sha", + expected: constants.NoEnvVarFound, + }, } - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := updateEnvVar(tt.container, tt.envVar, tt.shaData) + assert.Equal(t, tt.expected, result) - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") + if tt.expected == constants.Updated || tt.expected == constants.NotUpdated { + for _, env := range tt.container.Env { + if env.Name == tt.envVar { + assert.Equal(t, tt.newValue, env.Value) + break + } + } + } + }) } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) } -func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithPodAnnotations, 
"www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() +func TestGetReloaderAnnotationKey(t *testing.T) { + result := getReloaderAnnotationKey() + expected := "reloader.stakater.com/last-reloaded-from" + assert.Equal(t, expected, result) +} - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with pod annotations") +func TestJsonEscape(t *testing.T) { + tests := []struct { + name string + input string + expected string + hasError bool + }{ + { + name: "Simple string", + input: "hello", + expected: "hello", + hasError: false, + }, + { + name: "String with quotes", + input: `say "hello"`, + expected: `say \"hello\"`, + hasError: false, + }, + { + name: "String with backslash", + input: `path\to\file`, + expected: `path\\to\\file`, + hasError: false, + }, + { + name: "String with newline", + input: "line1\nline2", + expected: `line1\nline2`, + hasError: false, + }, + { + name: "JSON-like string", + input: `{"key":"value"}`, + expected: `{\"key\":\"value\"}`, + hasError: false, + }, } - logrus.Infof("Verifying deployment update") - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - var foundPod, foundBoth bool - for _, i := range items { - accessor, err := meta.Accessor(i) - if err != nil { - t.Errorf("Error getting accessor for item: %v", err) - } - name := accessor.GetName() - if name == ersConfigmapWithPodAnnotations { - containers := deploymentFuncs.ContainersFunc(i) - updated := testutil.GetResourceSHAFromEnvVar(containers, envName) - if updated != config.SHAValue { - 
t.Errorf("Deployment was not updated") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := jsonEscape(tt.input) + if tt.hasError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expected, result) } - foundPod = true - } - if name == ersConfigmapWithBothAnnotations { - containers := deploymentFuncs.ContainersFunc(i) - updated := testutil.GetResourceSHAFromEnvVar(containers, envName) - if updated == config.SHAValue { - t.Errorf("Deployment was updated") - } - foundBoth = true - } - } - if !foundPod { - t.Errorf("Deployment with pod annotations was not found") - } - if !foundBoth { - t.Errorf("Deployment with both annotations was not found") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") + }) } } -func TestFailedRollingUpgradeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "fail.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error { - return fmt.Errorf("error") - } - deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error { - return fmt.Errorf("error") +func TestCreateReloadedAnnotations(t *testing.T) { + tests := []struct { + name string + target *common.ReloadSource + hasError bool + }{ + { + name: "Nil target", + target: nil, + 
hasError: true, + }, + { + name: "Valid target", + target: &common.ReloadSource{ + Name: "my-configmap", + Type: "CONFIGMAP", + }, + hasError: false, + }, } - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 { - t.Errorf("Counter was not increased") + funcs := callbacks.RollingUpgradeFuncs{ + SupportsPatch: false, } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + annotations, _, err := createReloadedAnnotations(tt.target, funcs) + if tt.hasError { + assert.Error(t, err) + assert.Nil(t, annotations) + } else { + assert.NoError(t, err) + assert.NotNil(t, annotations) + _, exists := annotations[getReloaderAnnotationKey()] + assert.True(t, exists) + } + }) } } -func TestPausingDeploymentUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - testPausingDeployment(t, options.ReloadStrategy, ersConfigmapWithPausedDeployment, ersNamespace) +// Helper function to create a mock deployment for testing +func createTestDeployment(containers []v1.Container, initContainers []v1.Container, volumes []v1.Volume) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: containers, + InitContainers: initContainers, + Volumes: volumes, + }, + }, + }, + } } -func TestPausingDeploymentUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - testPausingDeployment(t, options.ReloadStrategy, arsConfigmapWithPausedDeployment, arsNamespace) +// mockRollingUpgradeFuncs creates mock callbacks for 
testing getContainerUsingResource +func mockRollingUpgradeFuncs(deployment *appsv1.Deployment) callbacks.RollingUpgradeFuncs { + return callbacks.RollingUpgradeFuncs{ + VolumesFunc: func(item runtime.Object) []v1.Volume { + return deployment.Spec.Template.Spec.Volumes + }, + ContainersFunc: func(item runtime.Object) []v1.Container { + return deployment.Spec.Template.Spec.Containers + }, + InitContainersFunc: func(item runtime.Object) []v1.Container { + return deployment.Spec.Template.Spec.InitContainers + }, + } } -func testPausingDeployment(t *testing.T, reloadStrategy string, testName string, namespace string) { - options.ReloadStrategy = reloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - // Wait for deployment to have paused-at annotation - logrus.Infof("Waiting for deployment %s to have paused-at annotation", testName) - err := waitForDeploymentPausedAtAnnotation(clients, deploymentFuncs, config.Namespace, testName, 30*time.Second) - if err != nil { - t.Errorf("Failed to wait for deployment paused-at annotation: %v", err) +func TestGetContainerUsingResource(t *testing.T) { + tests := []struct { + name string + containers []v1.Container + initContainers []v1.Container + volumes []v1.Volume + config common.Config + autoReload bool + expectNil bool + expectedName string + }{ + { + name: "Volume mount in regular container", + containers: []v1.Container{ + { + Name: "app", + VolumeMounts: []v1.VolumeMount{ + {Name: "config-volume", MountPath: "/etc/config"}, + }, + }, + }, + initContainers: 
[]v1.Container{}, + volumes: []v1.Volume{ + { + Name: "config-volume", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}, + }, + }, + }, + }, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: false, + expectNil: false, + expectedName: "app", + }, + { + name: "Volume mount in init container returns first regular container", + containers: []v1.Container{ + {Name: "main-app"}, + {Name: "sidecar"}, + }, + initContainers: []v1.Container{ + { + Name: "init", + VolumeMounts: []v1.VolumeMount{ + {Name: "secret-volume", MountPath: "/etc/secrets"}, + }, + }, + }, + volumes: []v1.Volume{ + { + Name: "secret-volume", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}, + }, + }, + }, + config: common.Config{ + ResourceName: "my-secret", + Type: constants.SecretEnvVarPostfix, + }, + autoReload: false, + expectNil: false, + expectedName: "main-app", + }, + { + name: "EnvFrom ConfigMap in regular container", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{Name: "env-configmap"}, + }, + }, + }, + }, + }, + initContainers: []v1.Container{}, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "env-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: false, + expectNil: false, + expectedName: "app", + }, + { + name: "EnvFrom Secret in init container returns first regular container", + containers: []v1.Container{ + {Name: "main-app"}, + }, + initContainers: []v1.Container{ + { + Name: "init", + EnvFrom: []v1.EnvFromSource{ + { + SecretRef: &v1.SecretEnvSource{ + LocalObjectReference: v1.LocalObjectReference{Name: "init-secret"}, + }, + }, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: 
"init-secret", + Type: constants.SecretEnvVarPostfix, + }, + autoReload: false, + expectNil: false, + expectedName: "main-app", + }, + { + name: "autoReload=false with no mount returns first container (explicit annotation)", + containers: []v1.Container{ + {Name: "first-container"}, + {Name: "second-container"}, + }, + initContainers: []v1.Container{}, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "external-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: false, + expectNil: false, + expectedName: "first-container", + }, + { + name: "autoReload=true with no mount returns nil", + containers: []v1.Container{ + {Name: "app"}, + }, + initContainers: []v1.Container{}, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "unmounted-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: true, + expectNil: true, + }, + { + name: "Empty containers returns nil", + containers: []v1.Container{}, + initContainers: []v1.Container{}, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "any-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: false, + expectNil: true, + }, + { + name: "Init container with volume but no regular containers returns nil", + containers: []v1.Container{}, + initContainers: []v1.Container{ + { + Name: "init", + VolumeMounts: []v1.VolumeMount{ + {Name: "config-volume", MountPath: "/etc/config"}, + }, + }, + }, + volumes: []v1.Volume{ + { + Name: "config-volume", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{Name: "init-only-cm"}, + }, + }, + }, + }, + config: common.Config{ + ResourceName: "init-only-cm", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: false, + expectNil: true, + }, + { + name: "CSI SecretProviderClass volume", + containers: []v1.Container{ + { + Name: "app", + VolumeMounts: []v1.VolumeMount{ + {Name: "csi-volume", MountPath: "/mnt/secrets"}, 
+ }, + }, + }, + initContainers: []v1.Container{}, + volumes: []v1.Volume{ + { + Name: "csi-volume", + VolumeSource: v1.VolumeSource{ + CSI: &v1.CSIVolumeSource{ + Driver: "secrets-store.csi.k8s.io", + VolumeAttributes: map[string]string{ + "secretProviderClass": "my-spc", + }, + }, + }, + }, + }, + config: common.Config{ + ResourceName: "my-spc", + Type: constants.SecretProviderClassEnvVarPostfix, + }, + autoReload: false, + expectNil: false, + expectedName: "app", + }, + { + name: "Env ValueFrom ConfigMapKeyRef", + containers: []v1.Container{ + { + Name: "app", + Env: []v1.EnvVar{ + { + Name: "CONFIG_VALUE", + ValueFrom: &v1.EnvVarSource{ + ConfigMapKeyRef: &v1.ConfigMapKeySelector{ + LocalObjectReference: v1.LocalObjectReference{Name: "keyref-cm"}, + Key: "my-key", + }, + }, + }, + }, + }, + }, + initContainers: []v1.Container{}, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "keyref-cm", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: false, + expectNil: false, + expectedName: "app", + }, + { + name: "Env ValueFrom SecretKeyRef", + containers: []v1.Container{ + { + Name: "app", + Env: []v1.EnvVar{ + { + Name: "SECRET_VALUE", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{Name: "keyref-secret"}, + Key: "password", + }, + }, + }, + }, + }, + }, + initContainers: []v1.Container{}, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "keyref-secret", + Type: constants.SecretEnvVarPostfix, + }, + autoReload: false, + expectNil: false, + expectedName: "app", + }, } - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deployment := createTestDeployment(tt.containers, tt.initContainers, tt.volumes) + funcs := mockRollingUpgradeFuncs(deployment) - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } + result := getContainerUsingResource(funcs, deployment, tt.config, tt.autoReload) - logrus.Infof("Verifying deployment has been paused") - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err := isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if !deploymentPaused { - t.Errorf("Deployment has not been paused") + if tt.expectNil { + assert.Nil(t, result, "Expected nil container") + } else { + assert.NotNil(t, result, "Expected non-nil container") + assert.Equal(t, tt.expectedName, result.Name) + } + }) } +} - shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause-changed.stakater.com") - config = getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") +func TestRetryOnConflict(t *testing.T) { + tests := []struct { + name string + fnResults []struct { + matched bool + err error + } + expectMatched bool + expectError bool + }{ + { + name: "Success on first try", + fnResults: []struct { + matched bool + err error + }{ + {matched: true, err: nil}, + }, + expectMatched: true, + expectError: false, + }, + { + name: "Conflict then success", + fnResults: []struct { + matched bool + err error + }{ + {matched: false, + err: apierrors.NewConflict(schema.GroupResource{Group: "", Resource: "deployments"}, "test", + errors.New("conflict"))}, + {matched: true, err: nil}, + }, + expectMatched: true, + expectError: false, + }, + { + name: "Non-conflict error returns immediately", + 
fnResults: []struct { + matched bool + err error + }{ + {matched: false, err: errors.New("some other error")}, + }, + expectMatched: false, + expectError: true, + }, + { + name: "Multiple conflicts then success", + fnResults: []struct { + matched bool + err error + }{ + {matched: false, err: apierrors.NewConflict(schema.GroupResource{}, "test", errors.New("conflict 1"))}, + {matched: false, err: apierrors.NewConflict(schema.GroupResource{}, "test", errors.New("conflict 2"))}, + {matched: true, err: nil}, + }, + expectMatched: true, + expectError: false, + }, + { + name: "Not matched but no error", + fnResults: []struct { + matched bool + err error + }{ + {matched: false, err: nil}, + }, + expectMatched: false, + expectError: false, + }, } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 2 { - t.Errorf("Counter by namespace was not increased") - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + callCount := 0 + fn := func(fetchResource bool) (bool, error) { + if callCount >= len(tt.fnResults) { + return true, nil + } + result := tt.fnResults[callCount] + callCount++ + return result.matched, result.err + } - logrus.Infof("Verifying deployment is still paused") - items = deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err = isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if !deploymentPaused { - t.Errorf("Deployment should still be paused") - } + matched, err := retryOnConflict(retry.DefaultRetry, fn) - logrus.Infof("Verifying deployment has been resumed after pause interval") - time.Sleep(11 * time.Second) - items = deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err = isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if deploymentPaused { - t.Errorf("Deployment should have been resumed after pause interval") + assert.Equal(t, 
tt.expectMatched, matched) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) } } -func isDeploymentPaused(deployments []runtime.Object, deploymentName string) (bool, error) { - deployment, err := FindDeploymentByName(deployments, deploymentName) - if err != nil { - return false, err +func TestGetVolumeMountNameCSI(t *testing.T) { + tests := []struct { + name string + volumes []v1.Volume + mountType string + volumeName string + expected string + }{ + { + name: "CSI SecretProviderClass volume match", + volumes: []v1.Volume{ + { + Name: "csi-secrets", + VolumeSource: v1.VolumeSource{ + CSI: &v1.CSIVolumeSource{ + Driver: "secrets-store.csi.k8s.io", + VolumeAttributes: map[string]string{ + "secretProviderClass": "my-vault-spc", + }, + }, + }, + }, + }, + mountType: constants.SecretProviderClassEnvVarPostfix, + volumeName: "my-vault-spc", + expected: "csi-secrets", + }, + { + name: "CSI volume with different SPC name - no match", + volumes: []v1.Volume{ + { + Name: "csi-secrets", + VolumeSource: v1.VolumeSource{ + CSI: &v1.CSIVolumeSource{ + Driver: "secrets-store.csi.k8s.io", + VolumeAttributes: map[string]string{ + "secretProviderClass": "other-spc", + }, + }, + }, + }, + }, + mountType: constants.SecretProviderClassEnvVarPostfix, + volumeName: "my-vault-spc", + expected: "", + }, + { + name: "CSI volume without secretProviderClass attribute", + volumes: []v1.Volume{ + { + Name: "csi-volume", + VolumeSource: v1.VolumeSource{ + CSI: &v1.CSIVolumeSource{ + Driver: "other-csi-driver", + VolumeAttributes: map[string]string{}, + }, + }, + }, + }, + mountType: constants.SecretProviderClassEnvVarPostfix, + volumeName: "any-spc", + expected: "", + }, + { + name: "CSI volume with nil VolumeAttributes", + volumes: []v1.Volume{ + { + Name: "csi-volume", + VolumeSource: v1.VolumeSource{ + CSI: &v1.CSIVolumeSource{ + Driver: "secrets-store.csi.k8s.io", + }, + }, + }, + }, + mountType: constants.SecretProviderClassEnvVarPostfix, + volumeName: 
"any-spc", + expected: "", + }, + { + name: "Multiple volumes with CSI match", + volumes: []v1.Volume{ + { + Name: "config-volume", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{Name: "my-cm"}, + }, + }, + }, + { + Name: "csi-secrets", + VolumeSource: v1.VolumeSource{ + CSI: &v1.CSIVolumeSource{ + Driver: "secrets-store.csi.k8s.io", + VolumeAttributes: map[string]string{ + "secretProviderClass": "target-spc", + }, + }, + }, + }, + }, + mountType: constants.SecretProviderClassEnvVarPostfix, + volumeName: "target-spc", + expected: "csi-secrets", + }, } - return IsPaused(deployment), nil -} - -// waitForDeploymentPausedAtAnnotation waits for a deployment to have the pause-period annotation -func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs callbacks.RollingUpgradeFuncs, namespace, deploymentName string, timeout time.Duration) error { - start := time.Now() - for time.Since(start) < timeout { - items := deploymentFuncs.ItemsFunc(clients, namespace) - deployment, err := FindDeploymentByName(items, deploymentName) - if err == nil { - annotations := deployment.GetAnnotations() - if annotations != nil { - if _, exists := annotations[options.PauseDeploymentTimeAnnotation]; exists { - return nil - } - } - } - - time.Sleep(100 * time.Millisecond) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getVolumeMountName(tt.volumes, tt.mountType, tt.volumeName) + assert.Equal(t, tt.expected, result) + }) } - - return fmt.Errorf("timeout waiting for deployment %s to have pause-period annotation", deploymentName) } -// MockArgoRolloutWithEmptyContainers creates a mock Argo Rollout with no containers -// This simulates the scenario where Argo Rollouts with workloadRef return empty containers -func MockArgoRolloutWithEmptyContainers(namespace, name string) *runtime.Object { - rollout := &argorolloutv1alpha1.Rollout{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
name, - Namespace: namespace, +func TestSecretProviderClassAnnotationReloaded(t *testing.T) { + tests := []struct { + name string + oldAnnotations map[string]string + newConfig common.Config + expected bool + }{ + { + name: "Annotation contains matching SPC name and SHA", + oldAnnotations: map[string]string{ + "reloader.stakater.com/last-reloaded-from": `{"name":"my-spc","sha":"abc123"}`, + }, + newConfig: common.Config{ + ResourceName: "my-spc", + SHAValue: "abc123", + }, + expected: true, }, - Spec: argorolloutv1alpha1.RolloutSpec{ - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{}, // Empty containers slice - InitContainers: []v1.Container{}, // Empty init containers slice - Volumes: []v1.Volume{}, // Empty volumes slice - }, + { + name: "Annotation contains SPC name but different SHA", + oldAnnotations: map[string]string{ + "reloader.stakater.com/last-reloaded-from": `{"name":"my-spc","sha":"old-sha"}`, + }, + newConfig: common.Config{ + ResourceName: "my-spc", + SHAValue: "new-sha", + }, + expected: false, + }, + { + name: "Annotation contains different SPC name", + oldAnnotations: map[string]string{ + "reloader.stakater.com/last-reloaded-from": `{"name":"other-spc","sha":"abc123"}`, + }, + newConfig: common.Config{ + ResourceName: "my-spc", + SHAValue: "abc123", }, + expected: false, }, + { + name: "Empty annotations", + oldAnnotations: map[string]string{}, + newConfig: common.Config{ + ResourceName: "my-spc", + SHAValue: "abc123", + }, + expected: false, + }, + { + name: "Nil annotations", + oldAnnotations: nil, + newConfig: common.Config{ + ResourceName: "my-spc", + SHAValue: "abc123", + }, + expected: false, + }, + { + name: "Annotation key missing", + oldAnnotations: map[string]string{ + "other-annotation": "some-value", + }, + newConfig: common.Config{ + ResourceName: "my-spc", + SHAValue: "abc123", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := 
secretProviderClassAnnotationReloaded(tt.oldAnnotations, tt.newConfig) + assert.Equal(t, tt.expected, result) + }) } - var obj runtime.Object = rollout - return &obj } -// TestGetContainerUsingResourceWithArgoRolloutEmptyContainers tests with real Argo Rollout functions -func TestGetContainerUsingResourceWithArgoRolloutEmptyContainers(t *testing.T) { - namespace := "test-namespace" - resourceName := "test-configmap" +func TestInvokeReloadStrategy(t *testing.T) { + originalStrategy := options.ReloadStrategy + defer func() { options.ReloadStrategy = originalStrategy }() - // Use real Argo Rollout functions but mock the containers function - rolloutFuncs := GetArgoRolloutRollingUpgradeFuncs() - originalContainersFunc := rolloutFuncs.ContainersFunc - originalInitContainersFunc := rolloutFuncs.InitContainersFunc + deployment := createTestDeployment( + []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}, + }, + }, + }, + }, + }, + []v1.Container{}, + []v1.Volume{}, + ) + deployment.Spec.Template.Annotations = map[string]string{} - // Override to return empty containers (simulating workloadRef scenario) - rolloutFuncs.ContainersFunc = func(item runtime.Object) []v1.Container { - return []v1.Container{} // Empty like workloadRef rollouts - } - rolloutFuncs.InitContainersFunc = func(item runtime.Object) []v1.Container { - return []v1.Container{} // Empty like workloadRef rollouts + funcs := callbacks.RollingUpgradeFuncs{ + VolumesFunc: func(item runtime.Object) []v1.Volume { + return deployment.Spec.Template.Spec.Volumes + }, + ContainersFunc: func(item runtime.Object) []v1.Container { + return deployment.Spec.Template.Spec.Containers + }, + InitContainersFunc: func(item runtime.Object) []v1.Container { + return deployment.Spec.Template.Spec.InitContainers + }, + PodAnnotationsFunc: func(item runtime.Object) map[string]string { + return 
deployment.Spec.Template.Annotations + }, + SupportsPatch: false, } - // Restore original functions after test - defer func() { - rolloutFuncs.ContainersFunc = originalContainersFunc - rolloutFuncs.InitContainersFunc = originalInitContainersFunc - }() - - // Use proper Argo Rollout object instead of Pod - mockRollout := MockArgoRolloutWithEmptyContainers(namespace, "test-rollout") - config := common.Config{ - Namespace: namespace, - ResourceName: resourceName, + ResourceName: "my-configmap", Type: constants.ConfigmapEnvVarPostfix, - SHAValue: "test-sha", + SHAValue: "sha256:abc123", + Namespace: "default", + } + + tests := []struct { + name string + reloadStrategy string + autoReload bool + expectResult constants.Result + }{ + { + name: "Annotations strategy", + reloadStrategy: constants.AnnotationsReloadStrategy, + autoReload: false, + expectResult: constants.Updated, + }, + { + name: "Env vars strategy with container found", + reloadStrategy: constants.EnvVarsReloadStrategy, + autoReload: false, + expectResult: constants.Updated, + }, } - // Test both autoReload scenarios using subtests as suggested by Felix - for _, autoReload := range []bool{true, false} { - t.Run(fmt.Sprintf("autoReload_%t", autoReload), func(t *testing.T) { - // This tests the actual fix in the context of Argo Rollouts - result := getContainerUsingResource(rolloutFuncs, *mockRollout, config, autoReload) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + options.ReloadStrategy = tt.reloadStrategy + deployment.Spec.Template.Annotations = map[string]string{} - if result != nil { - t.Errorf("Expected nil when using real Argo Rollout functions with empty containers (workloadRef scenario), got %v", result) - } + result := invokeReloadStrategy(funcs, deployment, config, tt.autoReload) + assert.Equal(t, tt.expectResult, result.Result) }) } } diff --git a/internal/pkg/leadership/leadership.go b/internal/pkg/leadership/leadership.go index f8c85bc15..f98f29927 100644 --- 
a/internal/pkg/leadership/leadership.go +++ b/internal/pkg/leadership/leadership.go @@ -7,11 +7,12 @@ import ( "time" "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/controller" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" + "github.com/stakater/Reloader/internal/pkg/controller" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" ) @@ -75,7 +76,7 @@ func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) { for i, c := range controllers { - c := c + go c.Run(1, stopChannels[i]) } } diff --git a/internal/pkg/leadership/leadership_test.go b/internal/pkg/leadership/leadership_test.go index eed070561..b14341bc0 100644 --- a/internal/pkg/leadership/leadership_test.go +++ b/internal/pkg/leadership/leadership_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/sirupsen/logrus" + "github.com/stakater/Reloader/internal/pkg/constants" "github.com/stakater/Reloader/internal/pkg/controller" "github.com/stakater/Reloader/internal/pkg/handler" diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go index a778eb15f..ab64d84e7 100644 --- a/internal/pkg/testutil/kube.go +++ b/internal/pkg/testutil/kube.go @@ -15,14 +15,6 @@ import ( openshiftv1 "github.com/openshift/api/apps/v1" appsclient "github.com/openshift/client-go/apps/clientset/versioned" "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/callbacks" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/crypto" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" appsv1 "k8s.io/api/apps/v1" batchv1 
"k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" @@ -33,6 +25,15 @@ import ( csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" csiclient_v1 "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned/typed/apis/v1" + + "github.com/stakater/Reloader/internal/pkg/callbacks" + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/crypto" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/internal/pkg/util" + "github.com/stakater/Reloader/pkg/common" + "github.com/stakater/Reloader/pkg/kube" ) var ( diff --git a/internal/pkg/util/interface.go b/internal/pkg/util/interface.go index ff261ab00..a13787388 100644 --- a/internal/pkg/util/interface.go +++ b/internal/pkg/util/interface.go @@ -31,7 +31,7 @@ type ObjectMeta struct { func ToObjectMeta(kubernetesObject interface{}) ObjectMeta { objectValue := reflect.ValueOf(kubernetesObject) fieldName := reflect.TypeOf((*metav1.ObjectMeta)(nil)).Elem().Name() - field := objectValue.FieldByName(fieldName).Interface().(metav1.ObjectMeta) + field, _ := objectValue.FieldByName(fieldName).Interface().(metav1.ObjectMeta) return ObjectMeta{ ObjectMeta: field, @@ -41,9 +41,11 @@ func ToObjectMeta(kubernetesObject interface{}) ObjectMeta { // ParseBool returns result in bool format after parsing func ParseBool(value interface{}) bool { if reflect.Bool == reflect.TypeOf(value).Kind() { - return value.(bool) + b, _ := value.(bool) + return b } else if reflect.String == reflect.TypeOf(value).Kind() { - result, _ := strconv.ParseBool(value.(string)) + s, _ := value.(string) + result, _ := strconv.ParseBool(s) return result } return false diff --git a/internal/pkg/util/util.go b/internal/pkg/util/util.go index 476cdb91a..abfbecb39 100644 --- a/internal/pkg/util/util.go +++ b/internal/pkg/util/util.go @@ -9,11 +9,12 @@ import 
( "strings" "github.com/spf13/cobra" + v1 "k8s.io/api/core/v1" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" + "github.com/stakater/Reloader/internal/pkg/constants" "github.com/stakater/Reloader/internal/pkg/crypto" "github.com/stakater/Reloader/internal/pkg/options" - v1 "k8s.io/api/core/v1" - csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) // ConvertToEnvVarName converts the given text into a usable env var diff --git a/internal/pkg/util/util_test.go b/internal/pkg/util/util_test.go index 338f329f3..161e92d2c 100644 --- a/internal/pkg/util/util_test.go +++ b/internal/pkg/util/util_test.go @@ -3,8 +3,9 @@ package util import ( "testing" - "github.com/stakater/Reloader/internal/pkg/options" v1 "k8s.io/api/core/v1" + + "github.com/stakater/Reloader/internal/pkg/options" ) func TestConvertToEnvVarName(t *testing.T) { diff --git a/pkg/common/common.go b/pkg/common/common.go index 7c9d61e04..bebfaa95e 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -8,12 +8,13 @@ import ( "strings" "github.com/sirupsen/logrus" - "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes" + + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/internal/pkg/util" ) type Map map[string]string @@ -191,10 +192,10 @@ func GetResourceLabelSelector(slice []string) (string, error) { } // ShouldReload checks if a resource should be reloaded based on its annotations and the provided options. 
-func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult { +func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, reloaderOpts *ReloaderOptions) ReloadCheckResult { // Check if this workload type should be ignored - if len(options.WorkloadTypesToIgnore) > 0 { + if len(reloaderOpts.WorkloadTypesToIgnore) > 0 { ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList() if err != nil { logrus.Errorf("Failed to parse ignored workload types: %v", err) @@ -219,7 +220,7 @@ func ShouldReload(config Config, resourceType string, annotations Map, podAnnota } } - ignoreResourceAnnotatonValue := config.ResourceAnnotations[options.IgnoreResourceAnnotation] + ignoreResourceAnnotatonValue := config.ResourceAnnotations[reloaderOpts.IgnoreResourceAnnotation] if ignoreResourceAnnotatonValue == "true" { return ReloadCheckResult{ ShouldReload: false, @@ -227,18 +228,18 @@ func ShouldReload(config Config, resourceType string, annotations Map, podAnnota } annotationValue, found := annotations[config.Annotation] - searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation] + searchAnnotationValue, foundSearchAnn := annotations[reloaderOpts.AutoSearchAnnotation] + reloaderEnabledValue, foundAuto := annotations[reloaderOpts.ReloaderAutoAnnotation] typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation] - excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation] - excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation] - excludeSecretProviderClassProviderAnnotationValue, foundExcludeSecretProviderClass := annotations[options.SecretProviderClassExcludeReloaderAnnotation] + excludeConfigmapAnnotationValue, foundExcludeConfigmap := 
annotations[reloaderOpts.ConfigmapExcludeReloaderAnnotation] + excludeSecretAnnotationValue, foundExcludeSecret := annotations[reloaderOpts.SecretExcludeReloaderAnnotation] + excludeSecretProviderClassProviderAnnotationValue, foundExcludeSecretProviderClass := annotations[reloaderOpts.SecretProviderClassExcludeReloaderAnnotation] if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn { annotations = podAnnotations annotationValue = annotations[config.Annotation] - searchAnnotationValue = annotations[options.AutoSearchAnnotation] - reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation] + searchAnnotationValue = annotations[reloaderOpts.AutoSearchAnnotation] + reloaderEnabledValue = annotations[reloaderOpts.ReloaderAutoAnnotation] typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation] } @@ -279,7 +280,7 @@ func ShouldReload(config Config, resourceType string, annotations Map, podAnnota } if searchAnnotationValue == "true" { - matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation] + matchAnnotationValue := config.ResourceAnnotations[reloaderOpts.SearchMatchAnnotation] if matchAnnotationValue == "true" { return ReloadCheckResult{ ShouldReload: true, @@ -290,7 +291,7 @@ func ShouldReload(config Config, resourceType string, annotations Map, podAnnota reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue) typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue) - if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll { + if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && reloaderOpts.AutoReloadAll { return ReloadCheckResult{ ShouldReload: true, AutoReload: true, diff --git a/pkg/common/config.go b/pkg/common/config.go index 4421fa509..6c90d08b9 100644 --- a/pkg/common/config.go +++ b/pkg/common/config.go @@ -1,11 
+1,12 @@ package common import ( + v1 "k8s.io/api/core/v1" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" + "github.com/stakater/Reloader/internal/pkg/constants" "github.com/stakater/Reloader/internal/pkg/options" "github.com/stakater/Reloader/internal/pkg/util" - v1 "k8s.io/api/core/v1" - csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) // Config contains rolling upgrade configuration parameters diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 9582929c4..1cfe21619 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -137,13 +137,13 @@ func getConfig() (*rest.Config, error) { if kubeconfigPath == "" { kubeconfigPath = os.Getenv("HOME") + "/.kube/config" } - //If file exists so use that config settings + // If file exists so use that config settings if _, err := os.Stat(kubeconfigPath); err == nil { config, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath) if err != nil { return nil, err } - } else { //Use Incluster Configuration + } else { // Use Incluster Configuration config, err = rest.InClusterConfig() if err != nil { return nil, err diff --git a/scripts/e2e-cluster-cleanup.sh b/scripts/e2e-cluster-cleanup.sh new file mode 100755 index 000000000..b50052996 --- /dev/null +++ b/scripts/e2e-cluster-cleanup.sh @@ -0,0 +1,283 @@ +#!/bin/bash +# Cleanup script for e2e test cluster +# Run this after e2e tests complete: ./scripts/e2e-cluster-cleanup.sh +# +# This removes: +# - Reloader test resources (namespaces, cluster roles, etc.) +# - Vault and its namespace +# - CSI Secrets Store Driver +# - Argo Rollouts +# +# Resources are removed in reverse dependency order. 
+ +set -euo pipefail + +# ============================================================================= +# Configuration +# ============================================================================= + +ARGO_ROLLOUTS_VERSION="${ARGO_ROLLOUTS_VERSION:-v1.7.2}" +ARGO_ROLLOUTS_NAMESPACE="argo-rollouts" +CSI_DRIVER_VERSION="${CSI_DRIVER_VERSION:-1.5.5}" +CSI_NAMESPACE="kube-system" +VAULT_NAMESPACE="vault" + +# ============================================================================= +# Helper Functions +# ============================================================================= + +log_header() { + echo "" + echo "=== $1 ===" +} + +log_info() { + echo "$1" +} + +log_success() { + echo "✓ $1" +} + +log_warning() { + echo "⚠ $1" +} + +log_error() { + echo "✗ $1" >&2 +} + +check_command() { + if ! command -v "$1" &> /dev/null; then + log_error "$1 is not installed or not in PATH" + return 1 + fi + return 0 +} + +# Safe delete that ignores "not found" errors +safe_delete() { + kubectl delete "$@" --ignore-not-found 2>/dev/null || true +} + +# ============================================================================= +# Dependency Checks +# ============================================================================= + +check_dependencies() { + log_header "Checking Dependencies" + + if ! check_command kubectl; then + log_error "kubectl is required for cleanup" + exit 1 + fi + + log_success "Dependencies available" +} + +check_cluster_connectivity() { + log_header "Checking Cluster Connectivity" + + if ! 
kubectl cluster-info &> /dev/null; then + log_error "Cannot connect to Kubernetes cluster" + exit 1 + fi + + local context + context=$(kubectl config current-context) + log_success "Connected to cluster (context: $context)" +} + +# ============================================================================= +# Reloader Test Resources Cleanup +# ============================================================================= + +cleanup_reloader_resources() { + log_header "Cleaning Up Reloader Test Resources" + + # Delete test namespaces (created by test suites) + log_info "Deleting test namespaces..." + local namespaces + namespaces=$(kubectl get namespaces -o name 2>/dev/null | grep "reloader-" | cut -d/ -f2 || true) + if [[ -n "$namespaces" ]]; then + for ns in $namespaces; do + log_info " Deleting namespace: $ns" + kubectl delete namespace "$ns" --ignore-not-found --wait=false 2>/dev/null || true + done + else + log_info " No test namespaces found" + fi + + # Delete Reloader cluster-scoped resources + log_info "Deleting cluster roles..." + local clusterroles + clusterroles=$(kubectl get clusterrole -o name 2>/dev/null | grep "reloader-" | cut -d/ -f2 || true) + for cr in $clusterroles; do + log_info " Deleting ClusterRole: $cr" + safe_delete clusterrole "$cr" + done + + log_info "Deleting cluster role bindings..." + local clusterrolebindings + clusterrolebindings=$(kubectl get clusterrolebinding -o name 2>/dev/null | grep "reloader-" | cut -d/ -f2 || true) + for crb in $clusterrolebindings; do + log_info " Deleting ClusterRoleBinding: $crb" + safe_delete clusterrolebinding "$crb" + done + + log_success "Reloader test resources cleaned up" +} + +# ============================================================================= +# Vault Cleanup +# ============================================================================= + +cleanup_vault() { + log_header "Uninstalling Vault" + + # Check if Vault is installed + if ! 
kubectl get namespace "$VAULT_NAMESPACE" &> /dev/null; then + log_info "Vault namespace not found, skipping" + return 0 + fi + + # Uninstall via Helm if available + if command -v helm &> /dev/null; then + if helm list -n "$VAULT_NAMESPACE" 2>/dev/null | grep -q vault; then + log_info "Uninstalling Vault via Helm..." + helm uninstall vault -n "$VAULT_NAMESPACE" --wait --timeout 60s 2>/dev/null || true + fi + fi + + # Delete namespace + log_info "Deleting Vault namespace..." + safe_delete namespace "$VAULT_NAMESPACE" --timeout=60s + + log_success "Vault cleaned up" +} + +# ============================================================================= +# CSI Secrets Store Driver Cleanup +# ============================================================================= + +cleanup_csi_driver() { + log_header "Uninstalling CSI Secrets Store Driver" + + # Delete all SecretProviderClass resources first + log_info "Deleting SecretProviderClass resources..." + kubectl delete secretproviderclasses.secrets-store.csi.x-k8s.io \ + --all --all-namespaces --ignore-not-found --timeout=30s 2>/dev/null || true + + log_info "Deleting SecretProviderClassPodStatus resources..." + kubectl delete secretproviderclasspodstatuses.secrets-store.csi.x-k8s.io \ + --all --all-namespaces --ignore-not-found --timeout=30s 2>/dev/null || true + + # Uninstall via Helm if available + if command -v helm &> /dev/null; then + if helm list -n "$CSI_NAMESPACE" 2>/dev/null | grep -q csi-secrets-store; then + log_info "Uninstalling CSI Secrets Store Driver via Helm..." + helm uninstall csi-secrets-store -n "$CSI_NAMESPACE" --wait --timeout 60s 2>/dev/null || true + fi + else + # Fallback to kubectl delete + log_info "Deleting CSI Secrets Store Driver resources via kubectl..." 
+ local csi_url="https://raw.githubusercontent.com/kubernetes-sigs/secrets-store-csi-driver/v${CSI_DRIVER_VERSION}/deploy/secrets-store-csi-driver.yaml" + kubectl delete -f "$csi_url" --ignore-not-found --timeout=60s 2>/dev/null || true + fi + + # Delete CRDs + log_info "Deleting CSI Secrets Store CRDs..." + local csi_crds="secretproviderclasses.secrets-store.csi.x-k8s.io secretproviderclasspodstatuses.secrets-store.csi.x-k8s.io" + for crd in $csi_crds; do + safe_delete crd "$crd" --timeout=30s + done + + log_success "CSI Secrets Store Driver cleaned up" +} + +# ============================================================================= +# Argo Rollouts Cleanup +# ============================================================================= + +cleanup_argo_rollouts() { + log_header "Uninstalling Argo Rollouts" + + # Check if Argo Rollouts is installed + if ! kubectl get namespace "$ARGO_ROLLOUTS_NAMESPACE" &> /dev/null; then + log_info "Argo Rollouts namespace not found, skipping" + return 0 + fi + + # Stop the controller first + log_info "Stopping Argo Rollouts controller..." + safe_delete deployment argo-rollouts -n "$ARGO_ROLLOUTS_NAMESPACE" --timeout=30s + + # Delete all Argo Rollouts custom resources to avoid finalizer issues + log_info "Deleting Argo Rollouts custom resources..." + local argo_resources="rollouts analysisruns analysistemplates experiments" + for res in $argo_resources; do + kubectl delete "${res}.argoproj.io" --all --all-namespaces --ignore-not-found --timeout=30s 2>/dev/null || true + done + + # Delete using the install manifest + log_info "Deleting Argo Rollouts installation..." + local argo_url="https://github.com/argoproj/argo-rollouts/releases/download/${ARGO_ROLLOUTS_VERSION}/install.yaml" + kubectl delete -f "$argo_url" --ignore-not-found --timeout=60s 2>/dev/null || true + + # Give resources time to be cleaned up + sleep 2 + + # Delete CRDs + log_info "Deleting Argo Rollouts CRDs..." 
+ local argo_crds="rollouts.argoproj.io analysisruns.argoproj.io analysistemplates.argoproj.io clusteranalysistemplates.argoproj.io experiments.argoproj.io" + for crd in $argo_crds; do + safe_delete crd "$crd" --timeout=30s + done + + # Delete namespace + log_info "Deleting Argo Rollouts namespace..." + safe_delete namespace "$ARGO_ROLLOUTS_NAMESPACE" --timeout=30s + + # Delete cluster-scoped RBAC + log_info "Deleting Argo Rollouts cluster RBAC..." + safe_delete clusterrole argo-rollouts argo-rollouts-aggregate-to-admin argo-rollouts-aggregate-to-edit argo-rollouts-aggregate-to-view + safe_delete clusterrolebinding argo-rollouts + + log_success "Argo Rollouts cleaned up" +} + +# ============================================================================= +# Main +# ============================================================================= + +main() { + echo "=== E2E Cluster Cleanup ===" + + # Pre-flight checks + check_dependencies + check_cluster_connectivity + + # Cleanup in reverse dependency order + # 1. First cleanup test resources (they depend on everything else) + cleanup_reloader_resources + + # 2. Then Vault (depends on CSI driver) + cleanup_vault + + # 3. Then CSI driver + cleanup_csi_driver + + # 4. 
Finally Argo Rollouts (independent) + cleanup_argo_rollouts + + # Summary + log_header "E2E Cluster Cleanup Complete" + echo "" + echo "Removed components:" + echo " ✓ Reloader test namespaces and cluster resources" + echo " ✓ Vault" + echo " ✓ CSI Secrets Store Driver" + echo " ✓ Argo Rollouts" +} + +main "$@" diff --git a/scripts/e2e-cluster-setup.sh b/scripts/e2e-cluster-setup.sh new file mode 100755 index 000000000..20d1b819b --- /dev/null +++ b/scripts/e2e-cluster-setup.sh @@ -0,0 +1,351 @@ +#!/bin/bash +# Setup script for e2e test cluster +# Run this before running e2e tests: ./scripts/e2e-cluster-setup.sh +# +# This installs: +# - Argo Rollouts (for Rollout workload testing) +# - CSI Secrets Store Driver (for SecretProviderClass testing) +# - Vault with CSI Provider (as the secrets backend for CSI) +# +# All versions are pinned for reproducibility and can be overridden via environment variables. + +set -euo pipefail + +# ============================================================================= +# Configuration (all versions pinned for reproducibility) +# ============================================================================= + +# Argo Rollouts +ARGO_ROLLOUTS_VERSION="${ARGO_ROLLOUTS_VERSION:-v1.7.2}" +ARGO_ROLLOUTS_NAMESPACE="argo-rollouts" + +# CSI Secrets Store Driver +CSI_DRIVER_VERSION="${CSI_DRIVER_VERSION:-1.5.5}" +CSI_NAMESPACE="kube-system" + +# Vault (HashiCorp) +VAULT_CHART_VERSION="${VAULT_CHART_VERSION:-0.31.0}" +VAULT_VERSION="${VAULT_VERSION:-1.20.4}" +VAULT_CSI_PROVIDER_VERSION="${VAULT_CSI_PROVIDER_VERSION:-1.7.0}" +VAULT_NAMESPACE="vault" + +# ============================================================================= +# Helper Functions +# ============================================================================= + +log_header() { + echo "" + echo "=== $1 ===" +} + +log_info() { + echo "$1" +} + +log_success() { + echo "✓ $1" +} + +log_warning() { + echo "⚠ $1" +} + +log_error() { + echo "✗ $1" >&2 +} + +check_command() { + 
if ! command -v "$1" &> /dev/null; then + log_error "$1 is not installed or not in PATH" + return 1 + fi + return 0 +} + +wait_for_rollout() { + local resource_type="$1" + local resource_name="$2" + local namespace="$3" + local timeout="${4:-180s}" + + kubectl rollout status "$resource_type/$resource_name" -n "$namespace" --timeout="$timeout" +} + +wait_for_condition() { + local condition="$1" + local resource="$2" + local namespace="${3:-}" + local timeout="${4:-60s}" + + if [[ -n "$namespace" ]]; then + kubectl wait --for="condition=$condition" "$resource" -n "$namespace" --timeout="$timeout" + else + kubectl wait --for="condition=$condition" "$resource" --timeout="$timeout" + fi +} + +# ============================================================================= +# Dependency Checks +# ============================================================================= + +check_dependencies() { + log_header "Checking Dependencies" + + local missing_deps=() + + # Required: kubectl + if ! check_command kubectl; then + missing_deps+=("kubectl") + fi + + # Required: helm (for CSI driver and Vault installation) + if ! check_command helm; then + missing_deps+=("helm") + fi + + if [[ ${#missing_deps[@]} -gt 0 ]]; then + log_error "Missing required dependencies: ${missing_deps[*]}" + log_error "Please install the missing tools and try again." + exit 1 + fi + + log_success "All required dependencies are available" +} + +check_cluster_connectivity() { + log_header "Checking Cluster Connectivity" + + if ! 
kubectl cluster-info &> /dev/null; then + log_error "Cannot connect to Kubernetes cluster" + log_error "Please ensure your kubeconfig is correctly configured" + exit 1 + fi + + local context + context=$(kubectl config current-context) + log_success "Connected to cluster (context: $context)" +} + +# ============================================================================= +# Argo Rollouts Installation +# ============================================================================= + +install_argo_rollouts() { + log_header "Installing Argo Rollouts ${ARGO_ROLLOUTS_VERSION}" + + # Check if already installed + if kubectl get crd rollouts.argoproj.io &> /dev/null; then + if kubectl get deployment argo-rollouts -n "$ARGO_ROLLOUTS_NAMESPACE" &> /dev/null; then + log_success "Argo Rollouts is already installed" + return 0 + fi + log_info "Argo Rollouts CRD exists but controller not running, reinstalling..." + fi + + # Create namespace + kubectl create namespace "$ARGO_ROLLOUTS_NAMESPACE" 2>/dev/null || true + + # Install from official manifest + local argo_url="https://github.com/argoproj/argo-rollouts/releases/download/${ARGO_ROLLOUTS_VERSION}/install.yaml" + log_info "Applying manifest from: $argo_url" + kubectl apply -n "$ARGO_ROLLOUTS_NAMESPACE" -f "$argo_url" + + # Wait for deployment to be created + sleep 2 + + # Patch deployment to remove resource requirements (for Kind cluster compatibility) + log_info "Patching deployment for Kind compatibility..." + local patch_json='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]' + if ! 
kubectl patch deployment argo-rollouts -n "$ARGO_ROLLOUTS_NAMESPACE" --type=json -p "$patch_json" 2>/dev/null; then + patch_json='{"spec":{"template":{"spec":{"containers":[{"name":"argo-rollouts","resources":{"limits":null,"requests":null}}]}}}}' + kubectl patch deployment argo-rollouts -n "$ARGO_ROLLOUTS_NAMESPACE" --type=strategic -p "$patch_json" 2>/dev/null || true + fi + + # Wait for controller to be ready + log_info "Waiting for Argo Rollouts controller..." + wait_for_condition "available" "deployment/argo-rollouts" "$ARGO_ROLLOUTS_NAMESPACE" "180s" + wait_for_condition "established" "crd/rollouts.argoproj.io" "" "60s" + + log_success "Argo Rollouts ${ARGO_ROLLOUTS_VERSION} installed" +} + +# ============================================================================= +# CSI Secrets Store Driver Installation +# ============================================================================= + +install_csi_driver() { + log_header "Installing CSI Secrets Store Driver ${CSI_DRIVER_VERSION}" + + # Check if already installed + if kubectl get crd secretproviderclasses.secrets-store.csi.x-k8s.io &> /dev/null; then + if kubectl get daemonset -n "$CSI_NAMESPACE" -l app=secrets-store-csi-driver &> /dev/null 2>&1; then + log_success "CSI Secrets Store Driver is already installed" + return 0 + fi + log_info "CSI Driver CRD exists but DaemonSet not found, installing..." + fi + + # Add Helm repo + helm repo add secrets-store-csi-driver https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts 2>/dev/null || true + helm repo update secrets-store-csi-driver + + # Install via Helm with pinned version + log_info "Installing via Helm (version ${CSI_DRIVER_VERSION})..." 
+ helm upgrade --install csi-secrets-store secrets-store-csi-driver/secrets-store-csi-driver \ + --namespace "$CSI_NAMESPACE" \ + --version "$CSI_DRIVER_VERSION" \ + --set syncSecret.enabled=true \ + --set enableSecretRotation=true \ + --set rotationPollInterval=2s \ + --wait \ + --timeout 180s + + # Wait for CRDs to be established + log_info "Waiting for CRDs to be established..." + wait_for_condition "established" "crd/secretproviderclasses.secrets-store.csi.x-k8s.io" "" "60s" + wait_for_condition "established" "crd/secretproviderclasspodstatuses.secrets-store.csi.x-k8s.io" "" "60s" + + # Wait for DaemonSet to be ready (try different names as they vary by installation method) + log_info "Waiting for CSI driver pods..." + kubectl rollout status daemonset/csi-secrets-store-secrets-store-csi-driver -n "$CSI_NAMESPACE" --timeout=180s 2>/dev/null || \ + kubectl rollout status daemonset/secrets-store-csi-driver -n "$CSI_NAMESPACE" --timeout=180s 2>/dev/null || \ + log_warning "Could not verify DaemonSet status (name may vary)" + + log_success "CSI Secrets Store Driver ${CSI_DRIVER_VERSION} installed" +} + +# ============================================================================= +# Vault Installation +# ============================================================================= + +install_vault() { + log_header "Installing Vault ${VAULT_VERSION} (Chart ${VAULT_CHART_VERSION})" + + # Check if already installed + if kubectl get pods -n "$VAULT_NAMESPACE" -l app.kubernetes.io/name=vault 2>/dev/null | grep -q Running; then + log_success "Vault is already installed and running" + return 0 + fi + + # Add Helm repo + helm repo add hashicorp https://helm.releases.hashicorp.com 2>/dev/null || true + helm repo update hashicorp + + # Install Vault in dev mode with CSI provider + # Dev mode: single server, in-memory storage, pre-unsealed, root token = "root" + log_info "Installing Vault via Helm..." 
+ helm upgrade --install vault hashicorp/vault \ + --namespace "$VAULT_NAMESPACE" \ + --create-namespace \ + --version "$VAULT_CHART_VERSION" \ + --set "server.image.tag=${VAULT_VERSION}" \ + --set "server.dev.enabled=true" \ + --set "server.dev.devRootToken=root" \ + --set "server.resources.requests.memory=64Mi" \ + --set "server.resources.requests.cpu=50m" \ + --set "server.resources.limits.memory=128Mi" \ + --set "server.resources.limits.cpu=100m" \ + --set "injector.enabled=false" \ + --set "csi.enabled=true" \ + --set "csi.image.tag=${VAULT_CSI_PROVIDER_VERSION}" \ + --set "csi.resources.requests.memory=64Mi" \ + --set "csi.resources.requests.cpu=50m" \ + --set "csi.resources.limits.memory=128Mi" \ + --set "csi.resources.limits.cpu=100m" \ + --wait \ + --timeout 180s + + # Wait for pods to be ready + log_info "Waiting for Vault pod..." + kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=vault -n "$VAULT_NAMESPACE" --timeout=120s + + log_info "Waiting for Vault CSI provider..." + wait_for_rollout "daemonset" "vault-csi-provider" "$VAULT_NAMESPACE" "120s" + + log_success "Vault ${VAULT_VERSION} installed" +} + +configure_vault() { + log_header "Configuring Vault for Kubernetes Authentication" + + # Enable KV secrets engine (ignore error if already enabled - dev mode has it by default) + log_info "Enabling KV secrets engine..." + kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault secrets enable -path=secret kv-v2 2>/dev/null || true + + # Create test secrets for e2e tests + log_info "Creating test secrets..." 
+ kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault kv put secret/test username="test-user" password="test-password" + kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault kv put secret/app1 api_key="app1-api-key-v1" db_password="app1-db-pass-v1" + kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault kv put secret/app2 api_key="app2-api-key-v1" db_password="app2-db-pass-v1" + kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault kv put secret/rotation-test value="initial-value-v1" + + # Enable Kubernetes auth method + log_info "Enabling Kubernetes auth..." + kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault auth enable kubernetes 2>/dev/null || true + + # Configure Kubernetes auth to use in-cluster config + log_info "Configuring Kubernetes auth..." + kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- sh -c \ + 'vault write auth/kubernetes/config kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443"' + + # Create policy for reading test secrets + log_info "Creating Vault policy..." + kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- sh -c 'vault policy write test-policy - < -l app.kubernetes.io/name=reloader -f + +# Test resources +kubectl get deploy,ds,sts,cm,secret -n + +# CSI resources +kubectl get secretproviderclass,secretproviderclasspodstatus -A +``` diff --git a/test/e2e/advanced/advanced_suite_test.go b/test/e2e/advanced/advanced_suite_test.go new file mode 100644 index 000000000..a9ca6749e --- /dev/null +++ b/test/e2e/advanced/advanced_suite_test.go @@ -0,0 +1,63 @@ +package advanced + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var ( + kubeClient kubernetes.Interface + csiClient csiclient.Interface + restConfig *rest.Config + testNamespace string + ctx context.Context + testEnv *utils.TestEnvironment +) + +func TestAdvanced(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Advanced E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx = context.Background() + + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-advanced") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + kubeClient = testEnv.KubeClient + csiClient = testEnv.CSIClient + restConfig = testEnv.RestConfig + testNamespace = testEnv.Namespace + + deployValues := map[string]string{ + "reloader.reloadStrategy": "annotations", + "reloader.watchGlobally": "false", + } + + if utils.IsCSIDriverInstalled(ctx, csiClient) { + deployValues["reloader.enableCSIIntegration"] = "true" + GinkgoWriter.Println("Deploying Reloader with CSI integration support") + } + + err = testEnv.DeployAndWait(deployValues) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + GinkgoWriter.Println("Advanced E2E Suite cleanup complete") +}) diff --git a/test/e2e/advanced/job_reload_test.go b/test/e2e/advanced/job_reload_test.go new file mode 100644 index 000000000..a54136ab6 --- /dev/null +++ b/test/e2e/advanced/job_reload_test.go @@ -0,0 +1,248 @@ +package advanced + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Job Workload Recreation Tests", func() { + var ( + jobName string + configMapName string + secretName string + spcName string + vaultSecretPath string + jobAdapter *utils.JobAdapter + ) + + BeforeEach(func() { + jobName = utils.RandName("job") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + spcName = utils.RandName("spc") + vaultSecretPath = fmt.Sprintf("secret/%s", utils.RandName("vault")) + jobAdapter = utils.NewJobAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteJob(ctx, kubeClient, testNamespace, jobName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + _ = utils.DeleteSecretProviderClass(ctx, csiClient, testNamespace, spcName) + _ = utils.DeleteVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath) + }) + + Context("Job with ConfigMap reference", func() { + It("should recreate Job when referenced ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"JOB_CONFIG": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with ConfigMap envFrom") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobConfigMapEnvFrom(configMapName), + utils.WithJobAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName))) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to be ready") + err = jobAdapter.WaitReady(ctx, testNamespace, jobName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"JOB_CONFIG": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new 
UID)") + _, recreated, err := jobAdapter.WaitRecreated(ctx, testNamespace, jobName, originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job should be recreated with new UID when ConfigMap changes") + }) + }) + + Context("Job with Secret reference", func() { + It("should recreate Job when referenced Secret changes", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"JOB_SECRET": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with Secret envFrom") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, utils.WithJobSecretEnvFrom(secretName), + utils.WithJobAnnotations(utils.BuildSecretReloadAnnotation(secretName))) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to be ready") + err = jobAdapter.WaitReady(ctx, testNamespace, jobName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"JOB_SECRET": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := jobAdapter.WaitRecreated(ctx, testNamespace, jobName, originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job should be recreated with new UID when Secret changes") + }) + }) + + Context("Job with auto annotation", func() { + It("should recreate Job with auto=true when ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"AUTO_CONFIG": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with auto annotation") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + 
utils.WithJobConfigMapEnvFrom(configMapName), + utils.WithJobAnnotations(utils.BuildAutoTrueAnnotation())) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to be ready") + err = jobAdapter.WaitReady(ctx, testNamespace, jobName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"AUTO_CONFIG": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := jobAdapter.WaitRecreated(ctx, testNamespace, jobName, originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job with auto=true should be recreated when ConfigMap changes") + }) + }) + + Context("Job with valueFrom ConfigMap reference", func() { + It("should recreate Job when ConfigMap referenced via valueFrom changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config_key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with valueFrom.configMapKeyRef") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobConfigMapKeyRef(configMapName, "config_key", "MY_CONFIG"), + utils.WithJobAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName))) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to be ready") + err = jobAdapter.WaitReady(ctx, testNamespace, jobName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"config_key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := jobAdapter.WaitRecreated(ctx, testNamespace, 
jobName, originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), + "Job with valueFrom.configMapKeyRef should be recreated when ConfigMap changes") + }) + }) + + Context("Job with valueFrom Secret reference", func() { + It("should recreate Job when Secret referenced via valueFrom changes", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret_key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with valueFrom.secretKeyRef") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobSecretKeyRef(secretName, "secret_key", "MY_SECRET"), + utils.WithJobAnnotations(utils.BuildSecretReloadAnnotation(secretName))) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to be ready") + err = jobAdapter.WaitReady(ctx, testNamespace, jobName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"secret_key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := jobAdapter.WaitRecreated(ctx, testNamespace, jobName, originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job with valueFrom.secretKeyRef should be recreated when Secret changes") + }) + }) + + Context("Job with SecretProviderClass reference", Label("csi"), func() { + BeforeEach(func() { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed - skipping CSI test") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed - skipping CSI test") + } + }) + + It("should recreate Job when Vault secret changes", func() { + By("Creating a secret in 
Vault") + err := utils.CreateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret( + ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key", + ) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with CSI volume and SPC reload annotation") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobCommand("sleep 300"), + utils.WithJobCSIVolume(spcName), + utils.WithJobAnnotations(utils.BuildSecretProviderClassReloadAnnotation(spcName))) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to be ready") + err = jobAdapter.WaitReady(ctx, testNamespace, jobName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForSPC( + ctx, csiClient, testNamespace, spcName, utils.WorkloadReadyTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("Found SPCPS: %s\n", spcpsName) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("Initial SPCPS version: %s\n", initialVersion) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Println("CSI driver synced new secret version") + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := 
jobAdapter.WaitRecreated(ctx, testNamespace, jobName, originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job should be recreated with new UID when Vault secret changes") + }) + }) +}) diff --git a/test/e2e/advanced/multi_container_test.go b/test/e2e/advanced/multi_container_test.go new file mode 100644 index 000000000..bcba8bdca --- /dev/null +++ b/test/e2e/advanced/multi_container_test.go @@ -0,0 +1,219 @@ +package advanced + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Multi-Container Tests", func() { + var ( + deploymentName string + configMapName string + configMapName2 string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + configMapName2 = utils.RandName("cm2") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) + }) + + Context("Multiple containers same ConfigMap", func() { + It("should reload when ConfigMap used by multiple containers changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"shared-key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with multiple containers using the same ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithMultipleContainers(2), + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = 
adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"shared-key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with multiple containers should be reloaded") + }) + }) + + Context("Multiple containers different ConfigMaps", func() { + It("should reload when any container's ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "initial1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with multiple containers using different ConfigMaps") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithMultipleContainersAndEnv(configMapName, configMapName2), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the first ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key1": "updated1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + 
Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when first container's ConfigMap changes") + }) + }) + + Context("Init container with CSI volume", Label("csi"), func() { + var ( + spcName string + vaultSecretPath string + ) + + BeforeEach(func() { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + spcName = utils.RandName("spc") + vaultSecretPath = fmt.Sprintf("secret/%s", utils.RandName("test")) + }) + + AfterEach(func() { + if spcName != "" { + _ = utils.DeleteSecretProviderClass(ctx, csiClient, testNamespace, spcName) + } + if vaultSecretPath != "" { + _ = utils.DeleteVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath) + } + }) + + It("should reload when SecretProviderClassPodStatus used by init container changes", func() { + By("Creating a Vault secret") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{ + "api_key": "initial-init-value", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with init container using CSI volume") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithInitContainerCSIVolume(spcName), + utils.WithAnnotations(utils.BuildSecretProviderClassReloadAnnotation(spcName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, 
testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{ + "api_key": "updated-init-value", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync (SPCPS version change)") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with init container using CSI volume should be reloaded") + }) + + It("should reload with auto annotation when init container CSI volume changes", func() { + By("Creating a Vault secret") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{ + "api_key": "initial-init-auto-value", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with init container using CSI volume and auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithInitContainerCSIVolume(spcName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + 
Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{ + "api_key": "updated-init-auto-value", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync (SPCPS version change)") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with init container CSI volume and auto=true should be reloaded") + }) + }) +}) diff --git a/test/e2e/advanced/regex_test.go b/test/e2e/advanced/regex_test.go new file mode 100644 index 000000000..989bf0ab8 --- /dev/null +++ b/test/e2e/advanced/regex_test.go @@ -0,0 +1,134 @@ +package advanced + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Regex Pattern Tests", func() { + var ( + deploymentName string + matchingCM string + nonMatchingCM string + matchingSecret string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + matchingCM = "app-config-" + utils.RandName("cm") + nonMatchingCM = "other-" + utils.RandName("cm") + matchingSecret = "app-secret-" + utils.RandName("secret") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, matchingCM) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, matchingSecret) + }) + + Context("ConfigMap regex pattern", func() { + It("should reload when ConfigMap matching pattern changes", func() { + By("Creating a ConfigMap matching the pattern") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, matchingCM, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with ConfigMap pattern annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(matchingCM), + utils.WithAnnotations(map[string]string{ + utils.AnnotationConfigMapReload: "app-config-.*", + }), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the matching ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, matchingCM, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, 
testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when matching ConfigMap changes") + }) + + It("should NOT reload when ConfigMap NOT matching pattern changes", func() { + By("Creating ConfigMaps - one matching, one not") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, matchingCM, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM, + map[string]string{"other": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with ConfigMap pattern annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(matchingCM), + utils.WithAnnotations(map[string]string{ + utils.AnnotationConfigMapReload: "app-config-.*", + }), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the non-matching ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM, map[string]string{"other": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (pattern mismatch)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when non-matching ConfigMap changes") + }) + }) + + Context("Secret regex pattern", func() { + It("should reload when Secret matching pattern changes", func() { + By("Creating a Secret matching the pattern") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, 
matchingSecret, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with Secret pattern annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(matchingSecret), + utils.WithAnnotations(map[string]string{ + utils.AnnotationSecretReload: "app-secret-.*", + }), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the matching Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, matchingSecret, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when matching Secret changes") + }) + }) +}) diff --git a/test/e2e/annotations/annotations_suite_test.go b/test/e2e/annotations/annotations_suite_test.go new file mode 100644 index 000000000..586dfaf7c --- /dev/null +++ b/test/e2e/annotations/annotations_suite_test.go @@ -0,0 +1,85 @@ +package annotations + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var ( + kubeClient kubernetes.Interface + csiClient csiclient.Interface + restConfig *rest.Config + testNamespace string + ctx context.Context + cancel context.CancelFunc + testEnv *utils.TestEnvironment + registry *utils.AdapterRegistry +) + +func TestAnnotations(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Annotations Strategy E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx, cancel = context.WithCancel(context.Background()) + + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-annotations-test") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + kubeClient = testEnv.KubeClient + csiClient = testEnv.CSIClient + restConfig = testEnv.RestConfig + testNamespace = testEnv.Namespace + + registry = utils.NewAdapterRegistry(kubeClient) + + if utils.IsArgoRolloutsInstalled(ctx, testEnv.RolloutsClient) { + GinkgoWriter.Println("Argo Rollouts detected, registering ArgoRolloutAdapter") + registry.RegisterAdapter(utils.NewArgoRolloutAdapter(testEnv.RolloutsClient)) + } else { + GinkgoWriter.Println("Argo Rollouts not detected, skipping ArgoRolloutAdapter registration") + } + + if utils.HasDeploymentConfigSupport(testEnv.DiscoveryClient) && testEnv.OpenShiftClient != nil { + GinkgoWriter.Println("OpenShift detected, registering DeploymentConfigAdapter") + registry.RegisterAdapter(utils.NewDeploymentConfigAdapter(testEnv.OpenShiftClient)) + } else { + GinkgoWriter.Println("OpenShift not detected, skipping DeploymentConfigAdapter registration") + } + + deployValues := map[string]string{ + "reloader.reloadStrategy": "annotations", + "reloader.watchGlobally": "false", + } + + if utils.IsCSIDriverInstalled(ctx, csiClient) { + deployValues["reloader.enableCSIIntegration"] = "true" + 
GinkgoWriter.Println("Deploying Reloader with CSI integration support") + } + + err = testEnv.DeployAndWait(deployValues) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + if cancel != nil { + cancel() + } + + GinkgoWriter.Println("Annotations E2E Suite cleanup complete") +}) diff --git a/test/e2e/annotations/auto_reload_test.go b/test/e2e/annotations/auto_reload_test.go new file mode 100644 index 000000000..c407fa393 --- /dev/null +++ b/test/e2e/annotations/auto_reload_test.go @@ -0,0 +1,408 @@ +package annotations + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Auto Reload Annotation Tests", func() { + var ( + deploymentName string + configMapName string + secretName string + spcName string + vaultSecretPath string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + spcName = utils.RandName("spc") + vaultSecretPath = fmt.Sprintf("secret/%s", utils.RandName("test")) + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + if csiClient != nil { + _ = utils.DeleteSecretProviderClass(ctx, csiClient, testNamespace, spcName) + } + _ = utils.DeleteVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath) + }) + + Context("with reloader.stakater.com/auto=true annotation", func() { + It("should reload Deployment when any referenced ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := 
utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded") + }) + + It("should reload Deployment when any referenced Secret changes", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + 
Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded for Secret change") + }) + + It("should reload Deployment when either ConfigMap or Secret changes", func() { + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true annotation referencing both") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"config": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded for ConfigMap change") + }) + }) + + // Note: auto=false test is now in core/workloads_test.go as a DescribeTable for all workload types + + Context("with 
configmap.reloader.stakater.com/auto=true annotation", func() { + It("should reload Deployment only when ConfigMap changes, not Secret", func() { + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with configmap auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildConfigMapAutoAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"config": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for ConfigMap change") + }) + }) + + Context("with secret.reloader.stakater.com/auto=true annotation", func() { + It("should reload Deployment only when Secret changes, not ConfigMap", func() { + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + 
map[string]string{"secret": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with secret auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildSecretAutoAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"secret": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for Secret change") + }) + }) + + Context("with secretproviderclass.reloader.stakater.com/auto=true annotation", Label("csi"), func() { + BeforeEach(func() { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + }) + + It("should reload Deployment when SecretProviderClassPodStatus changes", func() { + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with secretproviderclass 
auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithCSIVolume(spcName), + utils.WithAnnotations(utils.BuildSecretProviderClassAutoAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("Found SPCPS: %s\n", spcpsName) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("Initial SPCPS version: %s\n", initialVersion) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Println("CSI driver synced new secret version") + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for Vault secret change") + }) + + It("should NOT reload Deployment when ConfigMap changes (only SPC auto enabled)", func() { + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + 
Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating a ConfigMap") + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with CSI volume AND ConfigMap, but only SPC auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithCSIVolume(spcName), + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildSecretProviderClassAutoAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap (should NOT trigger reload with SPC auto only)") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded for ConfigMap change") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment with SPC auto only should NOT have been reloaded for ConfigMap change") + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + 
By("Updating the Vault secret (should trigger reload)") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded for SPC change") + reloaded, err = adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for Vault secret change") + }) + + It("should reload when using combined auto=true annotation for SPC", func() { + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with CSI volume and general auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithCSIVolume(spcName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") 
+ initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded for Vault secret change") + }) + }) + + Context("with auto annotation and explicit reload annotation together", func() { + It("should reload when auto-detected resource changes", func() { + configMapName2 := utils.RandName("cm2") + defer func() { _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) }() + + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "value1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "value2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and explicit reload for first ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapReloadAnnotation(configMapName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment 
to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the second ConfigMap (auto-detected)") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, map[string]string{"key2": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for auto-detected ConfigMap change") + }) + }) +}) diff --git a/test/e2e/annotations/combination_test.go b/test/e2e/annotations/combination_test.go new file mode 100644 index 000000000..e7f02efa5 --- /dev/null +++ b/test/e2e/annotations/combination_test.go @@ -0,0 +1,346 @@ +package annotations + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Combination Annotation Tests", func() { + var ( + deploymentName string + configMapName string + configMapName2 string + secretName string + secretName2 string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + configMapName2 = utils.RandName("cm2") + secretName = utils.RandName("secret") + secretName2 = utils.RandName("secret2") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName2) + }) + + Context("auto=true with 
explicit reload annotations", func() { + It("should reload when both auto-detected and explicitly listed ConfigMaps change", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"extra": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND explicit reload annotation for extra ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapReloadAnnotation(configMapName2), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the auto-detected ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when auto-detected ConfigMap changes") + }) + + It("should reload when explicitly listed ConfigMap changes with auto=true", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"extra": "initial"}, nil) + 
Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND explicit reload annotation for extra ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapReloadAnnotation(configMapName2), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the explicitly listed ConfigMap (not mounted)") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, map[string]string{"extra": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when explicitly listed ConfigMap changes") + }) + + It("should reload when Secret changes with auto=true and explicit Secret annotation", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"api-key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND explicit reload annotation for extra Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretReloadAnnotation(secretName2), + )), + ) + 
Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the explicitly listed Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, map[string]string{"api-key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when explicitly listed Secret changes") + }) + }) + + Context("auto=true with exclude annotations", func() { + It("should NOT reload when excluded ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"excluded": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND exclude for second ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName2), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, map[string]string{"excluded": "updated"}) + 
Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded ConfigMap changes") + }) + + It("should reload when non-excluded ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"excluded": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND exclude for second ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName2), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the non-excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded ConfigMap changes") + }) + + It("should NOT reload when excluded 
Secret changes", func() { + By("Creating two Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"excluded": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND exclude for second Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithSecretEnvFrom(secretName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretExcludeAnnotation(secretName2), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, map[string]string{"excluded": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded Secret changes") + }) + }) + + Context("multiple explicit references", func() { + It("should reload when any of multiple explicitly listed ConfigMaps change", func() { + By("Creating multiple ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + 
map[string]string{"key2": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with multiple ConfigMaps in reload annotation (comma-separated)") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName, configMapName2)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the second ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, map[string]string{"key2": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when any of the listed ConfigMaps changes") + }) + + It("should reload when any of multiple explicitly listed Secrets change", func() { + By("Creating multiple Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"key1": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"key2": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with multiple Secrets in reload annotation (comma-separated)") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName, secretName2)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + 
Expect(err).NotTo(HaveOccurred()) + + By("Updating the first Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"key1": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when any of the listed Secrets changes") + }) + + It("should reload when both ConfigMap and Secret annotations are present", func() { + By("Creating a ConfigMap and a Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with both ConfigMap and Secret reload annotations") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildSecretReloadAnnotation(secretName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should 
reload when Secret changes with both annotations present") + }) + }) +}) diff --git a/test/e2e/annotations/exclude_test.go b/test/e2e/annotations/exclude_test.go new file mode 100644 index 000000000..73e0e8f0c --- /dev/null +++ b/test/e2e/annotations/exclude_test.go @@ -0,0 +1,385 @@ +package annotations + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Exclude Annotation Tests", func() { + var ( + deploymentName string + configMapName string + configMapName2 string + secretName string + secretName2 string + workloadName string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + configMapName2 = utils.RandName("cm2") + secretName = utils.RandName("secret") + secretName2 = utils.RandName("secret2") + workloadName = utils.RandName("workload") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName2) + }) + + Context("ConfigMap exclude annotation", func() { + It("should NOT reload when excluded ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and configmaps.exclude annotation") + _, err = 
utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (excluded ConfigMap)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded ConfigMap changes") + }) + + It("should reload when non-excluded ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and configmaps.exclude annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + 
err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the non-excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, map[string]string{"key2": "updated2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded ConfigMap changes") + }) + }) + + Context("Secret exclude annotation", func() { + It("should NOT reload when excluded Secret changes", func() { + By("Creating two Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"password2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and secrets.exclude annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithSecretEnvFrom(secretName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretExcludeAnnotation(secretName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (excluded Secret)") + 
time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded Secret changes") + }) + + It("should reload when non-excluded Secret changes", func() { + By("Creating two Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"password2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and secrets.exclude annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithSecretEnvFrom(secretName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretExcludeAnnotation(secretName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the non-excluded Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, map[string]string{"password2": "updated2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded Secret changes") + }) + }) + + // TODO: Reloader currently only reads exclude annotations from workload metadata, not pod template. 
+ // This test documents the expected behavior but needs Reloader code changes to pass. + Context("Exclude annotation on pod template", func() { + PDescribeTable("should NOT reload when exclude annotation is on pod template only", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with auto=true and exclude annotation on pod template ONLY") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + PodTemplateAnnotations: utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName), + ), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload was NOT reloaded (excluded ConfigMap)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s should NOT reload with exclude on pod template", 
workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + Context("SecretProviderClass exclude annotation", Label("csi"), func() { + var ( + spcName string + spcName2 string + vaultSecretPath string + vaultSecretPath2 string + ) + + BeforeEach(func() { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + spcName = utils.RandName("spc") + spcName2 = utils.RandName("spc2") + vaultSecretPath = fmt.Sprintf("secret/%s", utils.RandName("test")) + vaultSecretPath2 = fmt.Sprintf("secret/%s", utils.RandName("test2")) + }) + + AfterEach(func() { + _ = utils.DeleteSecretProviderClass(ctx, csiClient, testNamespace, spcName) + _ = utils.DeleteSecretProviderClass(ctx, csiClient, testNamespace, spcName2) + _ = utils.DeleteVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath) + _ = utils.DeleteVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath2) + }) + + It("should NOT reload when excluded SecretProviderClassPodStatus changes", func() { + By("Creating Vault secret for the excluded SPC") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{ + "api_key": "initial-excluded-value", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Creating SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and secretproviderclasses.exclude annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + 
utils.WithCSIVolume(spcName), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretProviderClassExcludeAnnotation(spcName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret for excluded SPC") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{ + "api_key": "updated-excluded-value", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync (SPCPS version change)") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (excluded SPC)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded SecretProviderClassPodStatus changes") + }) + + It("should reload when non-excluded SecretProviderClassPodStatus changes", func() { + By("Creating two Vault secrets") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{ + "api_key": "initial-excluded-value", + }) + Expect(err).NotTo(HaveOccurred()) + + err = utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath2, 
map[string]string{ + "api_key": "initial-nonexcluded-value", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Creating two SecretProviderClasses") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName2, vaultSecretPath2, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and secretproviderclasses.exclude for first SPC only") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithCSIVolume(spcName), + utils.WithCSIVolume(spcName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretProviderClassExcludeAnnotation(spcName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS for non-excluded SPC") + + spcpsName2, err := utils.FindSPCPSForSPC(ctx, csiClient, testNamespace, spcName2, 30*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version for non-excluded SPC") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName2) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret for non-excluded SPC") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath2, map[string]string{ + "api_key": "updated-nonexcluded-value", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync (SPCPS version change)") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName2, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, 
testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded SecretProviderClassPodStatus changes") + }) + }) +}) diff --git a/test/e2e/annotations/pause_period_test.go b/test/e2e/annotations/pause_period_test.go new file mode 100644 index 000000000..869aed1dc --- /dev/null +++ b/test/e2e/annotations/pause_period_test.go @@ -0,0 +1,143 @@ +package annotations + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Pause Period Tests", func() { + var ( + deploymentName string + configMapName string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + }) + + Context("with pause-period annotation", func() { + It("should pause Deployment after reload", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with pause-period annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildPausePeriodAnnotation("10s"), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the 
ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded") + + By("Verifying Deployment has paused-at annotation") + paused, err := adapter.WaitPaused(ctx, testNamespace, deploymentName, + utils.AnnotationDeploymentPausedAt, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(paused).To(BeTrue(), "Deployment should have paused-at annotation after reload") + }) + + It("should NOT pause Deployment without pause-period annotation", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment WITHOUT pause-period annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded") 
+ + By("Verifying Deployment does NOT have paused-at annotation") + time.Sleep(utils.NegativeTestWait) + paused, err := adapter.WaitPaused(ctx, testNamespace, deploymentName, + utils.AnnotationDeploymentPausedAt, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(paused).To(BeFalse(), "Deployment should NOT have paused-at annotation without pause-period") + }) + + // FUTURE: Reloader currently only reads pause-period from deployment metadata, not pod template. + // This test is pending (skipped) and documents the expected future behavior. + // Requires Reloader code changes to support reading pause-period from pod template annotations. + PIt("should pause Deployment when pause-period annotation is on pod template", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with pause-period annotation on pod template ONLY") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithPodTemplateAnnotations(utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildPausePeriodAnnotation("10s"), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded") + 
+ By("Verifying Deployment has paused-at annotation") + paused, err := adapter.WaitPaused(ctx, testNamespace, deploymentName, + utils.AnnotationDeploymentPausedAt, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(paused).To(BeTrue(), "Deployment should have paused-at annotation with pause-period on pod template") + }) + }) +}) diff --git a/test/e2e/annotations/resource_ignore_test.go b/test/e2e/annotations/resource_ignore_test.go new file mode 100644 index 000000000..132c91a65 --- /dev/null +++ b/test/e2e/annotations/resource_ignore_test.go @@ -0,0 +1,94 @@ +package annotations + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Resource Ignore Annotation Tests", func() { + var ( + deploymentName string + configMapName string + secretName string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + }) + + Context("with reloader.stakater.com/ignore annotation on resource", func() { + It("should NOT reload when ConfigMap has ignore=true annotation", func() { + By("Creating a ConfigMap with ignore=true annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildIgnoreAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with ConfigMap reference annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + 
utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ConfigMap has ignore=true") + }) + + It("should NOT reload when Secret has ignore=true annotation", func() { + By("Creating a Secret with ignore=true annotation") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, + utils.BuildIgnoreAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with Secret reference annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, 
deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when Secret has ignore=true") + }) + }) +}) diff --git a/test/e2e/annotations/search_match_test.go b/test/e2e/annotations/search_match_test.go new file mode 100644 index 000000000..02a1153ca --- /dev/null +++ b/test/e2e/annotations/search_match_test.go @@ -0,0 +1,215 @@ +package annotations + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Search and Match Annotation Tests", func() { + var ( + deploymentName string + configMapName string + workloadName string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + workloadName = utils.RandName("workload") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + }) + + Context("with search and match annotations", func() { + It("should reload when workload has search annotation and ConfigMap has match annotation", func() { + By("Creating a ConfigMap with match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildMatchAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with search annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildSearchAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + 
Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with search annotation should reload when ConfigMap has match annotation") + }) + + It("should NOT reload when workload has search but ConfigMap has no match", func() { + By("Creating a ConfigMap WITHOUT match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with search annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildSearchAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ConfigMap lacks match annotation") + }) + + It("should NOT reload when resource has match but no Deployment has search", func() { + By("Creating a ConfigMap 
WITH match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildMatchAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment WITHOUT search annotation (only standard annotation)") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName)) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment without search annotation should NOT reload even when ConfigMap has match") + }) + + It("should reload only the deployment with search annotation when multiple deployments use same ConfigMap", func() { + deploymentName2 := utils.RandName("deploy2") + defer func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName2) + }() + + By("Creating a ConfigMap with match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildMatchAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating first Deployment WITH search annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildSearchAnnotation()), + ) + 
Expect(err).NotTo(HaveOccurred()) + + By("Creating second Deployment WITHOUT search annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName2, + utils.WithConfigMapEnvFrom(configMapName), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for both Deployments to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + err = adapter.WaitReady(ctx, testNamespace, deploymentName2, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for first Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with search annotation should reload") + + By("Verifying second Deployment was NOT reloaded") + reloaded2, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName2, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded2).To(BeFalse(), "Deployment without search annotation should NOT reload") + }) + }) + + // TODO: Reloader currently only reads search annotations from workload metadata, not pod template. + // This test documents the expected behavior but needs Reloader code changes to pass. 
+ Context("with search annotation on pod template", func() { + PDescribeTable("should reload when search annotation is on pod template only", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap with match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildMatchAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with search annotation on pod template ONLY") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + PodTemplateAnnotations: utils.BuildSearchAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload with search annotation on pod template", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) +}) diff --git a/test/e2e/argo/argo_suite_test.go 
b/test/e2e/argo/argo_suite_test.go new file mode 100644 index 000000000..0dcf616e4 --- /dev/null +++ b/test/e2e/argo/argo_suite_test.go @@ -0,0 +1,58 @@ +package argo + +import ( + "context" + "testing" + + rolloutsclient "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var ( + kubeClient kubernetes.Interface + rolloutsClient rolloutsclient.Interface + testNamespace string + ctx context.Context + testEnv *utils.TestEnvironment +) + +func TestArgo(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Argo Rollouts E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx = context.Background() + + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-argo") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + kubeClient = testEnv.KubeClient + rolloutsClient = testEnv.RolloutsClient + testNamespace = testEnv.Namespace + + if !utils.IsArgoRolloutsInstalled(ctx, rolloutsClient) { + Skip("Argo Rollouts is not installed. Run ./scripts/e2e-cluster-setup.sh first") + } + GinkgoWriter.Println("Argo Rollouts is installed") + + err = testEnv.DeployAndWait(map[string]string{ + "reloader.reloadStrategy": "annotations", + "reloader.isArgoRollouts": "true", + }) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + GinkgoWriter.Println("Argo Rollouts E2E Suite cleanup complete (Argo Rollouts preserved for other suites)") +}) diff --git a/test/e2e/argo/rollout_test.go b/test/e2e/argo/rollout_test.go new file mode 100644 index 000000000..019df62b8 --- /dev/null +++ b/test/e2e/argo/rollout_test.go @@ -0,0 +1,89 @@ +package argo + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +// Note: Basic Argo Rollout reload tests (ConfigMap, Secret, auto=true, volume mounts, label-only negative) +// are covered by core/workloads_test.go with Label("argo"). +// This file contains only Argo-specific tests that cannot be parameterized. + +var _ = Describe("Argo Rollout Strategy Tests", func() { + var ( + rolloutName string + configMapName string + adapter *utils.ArgoRolloutAdapter + ) + + BeforeEach(func() { + rolloutName = utils.RandName("rollout") + configMapName = utils.RandName("cm") + adapter = utils.NewArgoRolloutAdapter(rolloutsClient) + }) + + AfterEach(func() { + _ = utils.DeleteRollout(ctx, rolloutsClient, testNamespace, rolloutName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + }) + + Context("Rollout strategy annotation", func() { + It("should use default rollout strategy (annotation-based reload)", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating an Argo Rollout with auto=true (default strategy)") + _, err = utils.CreateRollout(ctx, rolloutsClient, testNamespace, rolloutName, + utils.WithRolloutConfigMapEnvFrom(configMapName), + utils.WithRolloutAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Rollout to be ready") + err = adapter.WaitReady(ctx, testNamespace, rolloutName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Rollout to be reloaded with annotation") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, rolloutName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + 
Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Argo Rollout should be reloaded with default rollout strategy") + }) + + It("should use restart strategy when specified (sets restartAt field)", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating an Argo Rollout with restart strategy annotation") + _, err = utils.CreateRollout(ctx, rolloutsClient, testNamespace, rolloutName, + utils.WithRolloutConfigMapEnvFrom(configMapName), + utils.WithRolloutAnnotations(utils.BuildAutoTrueAnnotation()), + utils.WithRolloutObjectAnnotations(utils.BuildRolloutRestartStrategyAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Rollout to be ready") + err = adapter.WaitReady(ctx, testNamespace, rolloutName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Rollout to have restartAt field set") + restarted, err := adapter.WaitRestartAt(ctx, testNamespace, rolloutName, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(restarted).To(BeTrue(), "Argo Rollout should have restartAt field set with restart strategy") + }) + }) +}) diff --git a/test/e2e/core/core_suite_test.go b/test/e2e/core/core_suite_test.go new file mode 100644 index 000000000..d3449ba57 --- /dev/null +++ b/test/e2e/core/core_suite_test.go @@ -0,0 +1,90 @@ +package core + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var ( + kubeClient kubernetes.Interface + csiClient csiclient.Interface + restConfig *rest.Config + testNamespace string + ctx context.Context + cancel context.CancelFunc + testEnv *utils.TestEnvironment + registry *utils.AdapterRegistry +) + +func TestCore(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Core Workload E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx, cancel = context.WithCancel(context.Background()) + + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-core-test") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + kubeClient = testEnv.KubeClient + csiClient = testEnv.CSIClient + restConfig = testEnv.RestConfig + testNamespace = testEnv.Namespace + + registry = utils.NewAdapterRegistry(kubeClient) + + if utils.IsArgoRolloutsInstalled(ctx, testEnv.RolloutsClient) { + GinkgoWriter.Println("Argo Rollouts detected, registering ArgoRolloutAdapter") + registry.RegisterAdapter(utils.NewArgoRolloutAdapter(testEnv.RolloutsClient)) + } else { + GinkgoWriter.Println("Argo Rollouts not detected, skipping ArgoRolloutAdapter registration") + } + + if utils.HasDeploymentConfigSupport(testEnv.DiscoveryClient) && testEnv.OpenShiftClient != nil { + GinkgoWriter.Println("OpenShift detected, registering DeploymentConfigAdapter") + registry.RegisterAdapter(utils.NewDeploymentConfigAdapter(testEnv.OpenShiftClient)) + } else { + GinkgoWriter.Println("OpenShift not detected, skipping DeploymentConfigAdapter registration") + } + + deployValues := map[string]string{ + "reloader.reloadStrategy": "annotations", + "reloader.watchGlobally": "false", + } + + if utils.IsArgoRolloutsInstalled(ctx, testEnv.RolloutsClient) { + deployValues["reloader.isArgoRollouts"] = "true" + 
GinkgoWriter.Println("Deploying Reloader with Argo Rollouts support") + } + + if utils.IsCSIDriverInstalled(ctx, csiClient) { + deployValues["reloader.enableCSIIntegration"] = "true" + GinkgoWriter.Println("Deploying Reloader with CSI integration support") + } + + err = testEnv.DeployAndWait(deployValues) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + if cancel != nil { + cancel() + } + + GinkgoWriter.Println("Core E2E Suite cleanup complete") +}) diff --git a/test/e2e/core/reference_methods_test.go b/test/e2e/core/reference_methods_test.go new file mode 100644 index 000000000..f3c0b8fd5 --- /dev/null +++ b/test/e2e/core/reference_methods_test.go @@ -0,0 +1,540 @@ +package core + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Reference Method Tests", func() { + var ( + configMapName string + secretName string + workloadName string + ) + + BeforeEach(func() { + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + workloadName = utils.RandName("workload") + }) + + AfterEach(func() { + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + }) + + // ============================================================ + // valueFrom.configMapKeyRef TESTS + // ============================================================ + Context("valueFrom.configMapKeyRef", func() { + DescribeTable("should reload when ConfigMap referenced via valueFrom.configMapKeyRef changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") 
+ _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config_key": "initial_value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with valueFrom.configMapKeyRef") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapKeyRef: true, + ConfigMapKey: "config_key", + EnvVarName: "MY_CONFIG_VAR", + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"config_key": "updated_value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with valueFrom.configMapKeyRef should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // valueFrom.secretKeyRef TESTS + // ============================================================ + Context("valueFrom.secretKeyRef", func() { + DescribeTable("should reload when Secret referenced via valueFrom.secretKeyRef changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if 
adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret_key": "initial_secret"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with valueFrom.secretKeyRef") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretKeyRef: true, + SecretKey: "secret_key", + EnvVarName: "MY_SECRET_VAR", + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"secret_key": "updated_secret"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with valueFrom.secretKeyRef should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // PROJECTED VOLUME TESTS + // ============================================================ + Context("Projected Volumes", func() { + DescribeTable("should reload when ConfigMap in projected volume changes", + 
func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with projected ConfigMap volume") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseProjectedVolume: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"config.yaml": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with projected ConfigMap volume should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret in projected volume changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", 
workloadType)) + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with projected Secret volume") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseProjectedVolume: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"credentials": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with projected Secret volume should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when ConfigMap changes in mixed projected volume", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": 
"initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with projected volume containing both") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + SecretName: secretName, + UseProjectedVolume: true, + Annotations: utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildSecretReloadAnnotation(secretName), + ), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"config.yaml": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload when ConfigMap in mixed projected volume changes", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret changes in mixed projected volume", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + 
By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with projected volume containing both") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + SecretName: secretName, + UseProjectedVolume: true, + Annotations: utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildSecretReloadAnnotation(secretName), + ), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"credentials": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload when Secret in mixed projected volume changes", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // INIT CONTAINER TESTS + // 
============================================================ + Context("Init Container with envFrom", func() { + DescribeTable("should reload when ConfigMap referenced by init container changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"INIT_VAR": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with init container referencing ConfigMap") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseInitContainer: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"INIT_VAR": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with init container ConfigMap should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret 
referenced by init container changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"INIT_SECRET": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with init container referencing Secret") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseInitContainer: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"INIT_SECRET": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with init container Secret should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + Context("Init Container with Volume Mount", func() { + DescribeTable("should reload when ConfigMap volume mounted in init container changes", + func(workloadType utils.WorkloadType) { + adapter := 
registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with init container using ConfigMap volume mount") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseInitContainerVolume: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"config.yaml": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with init container ConfigMap volume should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret volume mounted in init container changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } 
+ + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with init container using Secret volume mount") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseInitContainerVolume: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"credentials": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with init container Secret volume should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // AUTO ANNOTATION WITH VALUEFROM TESTS + // ============================================================ + Context("Auto Annotation with valueFrom", func() { + DescribeTable("should reload with auto=true when ConfigMap referenced via valueFrom changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == 
nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"auto_config_key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with auto=true and valueFrom") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapKeyRef: true, + ConfigMapKey: "auto_config_key", + EnvVarName: "AUTO_CONFIG_VAR", + Annotations: utils.BuildAutoTrueAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"auto_config_key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with auto=true and valueFrom should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) +}) diff --git a/test/e2e/core/workloads_test.go b/test/e2e/core/workloads_test.go new file mode 100644 index 000000000..ac47abdcd --- /dev/null +++ b/test/e2e/core/workloads_test.go @@ -0,0 +1,1756 @@ +package core + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Workload Reload Tests", func() { + var ( + configMapName string + secretName string + workloadName string + spcName string + vaultSecretPath string + ) + + BeforeEach(func() { + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + workloadName = utils.RandName("workload") + spcName = utils.RandName("spc") + vaultSecretPath = fmt.Sprintf("secret/%s", utils.RandName("test")) + }) + + AfterEach(func() { + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + if csiClient != nil { + _ = utils.DeleteSecretProviderClass(ctx, csiClient, testNamespace, spcName) + } + _ = utils.DeleteVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath) + }) + + // ============================================================ + // ANNOTATIONS STRATEGY TESTS + // ============================================================ + Context("Annotations Strategy", func() { + // Standard workloads that support annotation-based reload + standardWorkloads := []utils.WorkloadType{ + utils.WorkloadDeployment, + utils.WorkloadDaemonSet, + utils.WorkloadStatefulSet, + } + + // ConfigMap reload tests for standard workloads + DescribeTable("should reload when ConfigMap changes", func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: 
utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, utils.AnnotationLastReloadedFrom, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Secret reload tests for standard workloads + DescribeTable("should reload when Secret changes", func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + 
By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, utils.AnnotationLastReloadedFrom, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // SecretProviderClassPodStatus (CSI) reload tests with real Vault + DescribeTable("should reload when SecretProviderClassPodStatus changes", func(workloadType utils.WorkloadType) { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, vaultSecretPath, + "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with CSI volume and SPC reload annotation") + err = 
adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SPCName: spcName, + UseCSIVolume: true, + Annotations: utils.BuildSecretProviderClassReloadAnnotation(spcName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, workloadName, + utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("Found SPCPS: %s\n", spcpsName) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("Initial SPCPS version: %s\n", initialVersion) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, + 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Println("CSI driver synced new secret version") + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, utils.AnnotationLastReloadedFrom, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should have been reloaded when Vault secret changed", workloadType) + }, Entry("Deployment", Label("csi"), utils.WorkloadDeployment), + Entry("DaemonSet", Label("csi"), utils.WorkloadDaemonSet), + Entry("StatefulSet", Label("csi"), utils.WorkloadStatefulSet), + Entry("ArgoRollout", 
Label("csi", "argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("csi", "openshift"), utils.WorkloadDeploymentConfig), + ) + + // Auto=true annotation tests + DescribeTable("should reload with auto=true annotation when ConfigMap changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with auto=true annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildAutoTrueAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with auto=true should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Negative tests: 
label-only changes should NOT trigger reload + DescribeTable("should NOT reload when only ConfigMap labels change (no data change)", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the ConfigMap labels (no data change)") + err = utils.UpdateConfigMapLabels(ctx, kubeClient, testNamespace, configMapName, map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s should NOT reload when only ConfigMap labels change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should 
NOT reload when only Secret labels change (no data change)", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the Secret labels (no data change)") + err = utils.UpdateSecretLabels(ctx, kubeClient, testNamespace, secretName, map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s should NOT reload when only Secret labels change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Negative test: SPCPS label-only changes should NOT trigger reload + DescribeTable("should NOT reload when only 
SecretProviderClassPodStatus labels change", + func(workloadType utils.WorkloadType) { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with CSI volume and SPC reload annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SPCName: spcName, + UseCSIVolume: true, + Annotations: utils.BuildSecretProviderClassReloadAnnotation(spcName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, workloadName, + utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the SPCPS labels (no objects change)") + err = utils.UpdateSecretProviderClassPodStatusLabels(ctx, csiClient, testNamespace, spcpsName, map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := 
adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s should NOT reload when only SPCPS labels change", workloadType) + }, Entry("Deployment", Label("csi"), utils.WorkloadDeployment), + Entry("DaemonSet", Label("csi"), utils.WorkloadDaemonSet), + Entry("StatefulSet", Label("csi"), utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("csi", "argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("csi", "openshift"), utils.WorkloadDeploymentConfig), + ) + + // CronJob special handling - triggers a Job instead of annotation + Context("CronJob (special handling)", func() { + var cronJobAdapter *utils.CronJobAdapter + + BeforeEach(func() { + adapter := registry.Get(utils.WorkloadCronJob) + Expect(adapter).NotTo(BeNil()) + var ok bool + cronJobAdapter, ok = adapter.(*utils.CronJobAdapter) + Expect(ok).To(BeTrue(), "Should be able to cast to CronJobAdapter") + }) + + It("should trigger a Job when ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with ConfigMap reference annotation") + err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) }) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for a Job to be created by CronJob reload") + triggered, err := cronJobAdapter.WaitForTriggeredJob(ctx, 
testNamespace, workloadName, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(triggered).To(BeTrue(), "CronJob should have triggered a Job creation") + }) + + It("should trigger a Job when Secret changes", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with Secret reference annotation") + err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) }) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for a Job to be created by CronJob reload") + triggered, err := cronJobAdapter.WaitForTriggeredJob(ctx, testNamespace, workloadName, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(triggered).To(BeTrue(), "CronJob should have triggered a Job creation") + }) + + It("should trigger a Job with auto=true annotation when ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with auto=true annotation") + err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildAutoTrueAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) }) + + By("Updating the ConfigMap 
data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for a Job to be created by CronJob reload") + triggered, err := cronJobAdapter.WaitForTriggeredJob(ctx, testNamespace, workloadName, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(triggered).To(BeTrue(), "CronJob with auto=true should have triggered a Job creation") + }) + }) + + // Volume mount tests + DescribeTable("should reload when volume-mounted ConfigMap changes", func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "setting: initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap volume") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapVolume: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"config.yaml": "setting: updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, utils.AnnotationLastReloadedFrom, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with volume-mounted ConfigMap 
should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when volume-mounted Secret changes", func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials.yaml": "secret: initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret volume") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretVolume: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"credentials.yaml": "secret: updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, utils.AnnotationLastReloadedFrom, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with volume-mounted Secret should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", 
utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Test for workloads without Reloader annotation + DescribeTable("should NOT reload without Reloader annotation", func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload WITHOUT Reloader annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, // No Reloader annotations + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload is NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, utils.AnnotationLastReloadedFrom, + utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s without Reloader annotation should NOT be reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + ) + + // Variable to track for use in lint + _ = standardWorkloads + + // 
============================================================ + // EDGE CASE TESTS + // These tests verify edge cases that should work across all workload types. + // ============================================================ + Context("Edge Cases", func() { + DescribeTable("should reload with multiple ConfigMaps when any one changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + configMapName2 := utils.RandName("cm2") + DeferCleanup(func() { _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) }) + + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "value1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "value2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload referencing both ConfigMaps") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName, configMapName2), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the second ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, map[string]string{"key2": "updated-value2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + 
Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload when second ConfigMap changes", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload with multiple Secrets when any one changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + secretName2 := utils.RandName("secret2") + DeferCleanup(func() { _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName2) }) + + By("Creating two Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"key1": "value1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"key2": "value2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload referencing both Secrets") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName, secretName2), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the second Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, map[string]string{"key2": "updated-value2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") 
+ reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload when second Secret changes", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload multiple times for sequential ConfigMap updates", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "v1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("First update to ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for first reload") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue()) + 
+ By("Getting first reload annotation value") + firstReloadValue, err := adapter.GetPodTemplateAnnotation(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom) + Expect(err).NotTo(HaveOccurred()) + + By("Second update to ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "v3"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for second reload with different annotation value") + Eventually(func() string { + val, _ := adapter.GetPodTemplateAnnotation(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom) + return val + }, utils.ReloadTimeout, utils.DefaultInterval).ShouldNot(Equal(firstReloadValue), + "Reload annotation should change after second update") + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when either ConfigMap or Secret changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload referencing both") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + SecretName: secretName, + UseConfigMapEnvFrom: true, + UseSecretEnvFrom: true, + Annotations: utils.MergeAnnotations( + 
utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildSecretReloadAnnotation(secretName), + ), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"secret": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload when Secret changes", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should NOT reload with auto=false annotation", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with auto=false annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildAutoFalseAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + 
+ By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload is NOT reloaded (auto=false)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s with auto=false should NOT be reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // POD TEMPLATE ANNOTATION TESTS + // These tests verify that annotations placed on the pod template + // (spec.template.metadata.annotations) work the same as annotations + // placed on the workload metadata (metadata.annotations). 
+ // ============================================================ + Context("Pod Template Annotations", func() { + DescribeTable("should reload when ConfigMap annotation is on pod template only", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap annotation on pod template ONLY") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + PodTemplateAnnotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload with pod template annotation", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret 
annotation is on pod template only", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret annotation on pod template ONLY") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + PodTemplateAnnotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload with pod template annotation", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when auto=true annotation is on pod template only", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + 
Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with auto=true annotation on pod template ONLY") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + PodTemplateAnnotations: utils.BuildAutoTrueAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with auto=true on pod template should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when SecretProviderClass annotation is on pod template only", + func(workloadType utils.WorkloadType) { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + adapter := 
registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with SPC annotation on pod template ONLY") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SPCName: spcName, + UseCSIVolume: true, + PodTemplateAnnotations: utils.BuildSecretProviderClassReloadAnnotation(spcName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, + workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, + initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := 
adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload with SPC annotation on pod template", workloadType) + }, + Entry("Deployment", Label("csi"), utils.WorkloadDeployment), + Entry("DaemonSet", Label("csi"), utils.WorkloadDaemonSet), + Entry("StatefulSet", Label("csi"), utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("csi", "argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("csi", "openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when secretproviderclass auto annotation is on pod template only", + func(workloadType utils.WorkloadType) { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with SPC auto annotation on pod template ONLY") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SPCName: spcName, + UseCSIVolume: true, + PodTemplateAnnotations: utils.BuildSecretProviderClassAutoAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, 
testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, + workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, + initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload with SPC auto on pod template", workloadType) + }, + Entry("Deployment", Label("csi"), utils.WorkloadDeployment), + Entry("DaemonSet", Label("csi"), utils.WorkloadDaemonSet), + Entry("StatefulSet", Label("csi"), utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("csi", "argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("csi", "openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when annotations are on both workload and pod template", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) 
+ Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with annotations on BOTH workload metadata and pod template") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + PodTemplateAnnotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload with annotations on both locations", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should NOT reload when pod template has ConfigMap annotation but Secret is updated", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + 
_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap annotation on pod template but using Secret") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + PodTemplateAnnotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret (not the ConfigMap)") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s should NOT reload when updating different resource than annotated", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + }) + + // ============================================================ + // ENVVARS STRATEGY TESTS + // ============================================================ + Context("EnvVars Strategy", Label("envvars"), Ordered, ContinueOnFailure, func() { + // Redeploy Reloader with envvars strategy for this context + BeforeAll(func() { + 
By("Redeploying Reloader with envvars strategy") + deployValues := map[string]string{ + "reloader.reloadStrategy": "env-vars", + } + // Preserve Argo support if available + if utils.IsArgoRolloutsInstalled(ctx, testEnv.RolloutsClient) { + deployValues["reloader.isArgoRollouts"] = "true" + } + // Enable CSI integration if CSI driver is installed + if utils.IsCSIDriverInstalled(ctx, csiClient) { + deployValues["reloader.enableCSIIntegration"] = "true" + } + err := testEnv.DeployAndWait(deployValues) + Expect(err).NotTo(HaveOccurred(), "Failed to redeploy Reloader with envvars strategy") + }) + + AfterAll(func() { + By("Restoring Reloader to annotations strategy") + deployValues := map[string]string{ + "reloader.reloadStrategy": "annotations", + } + // Preserve Argo support if available + if utils.IsArgoRolloutsInstalled(ctx, testEnv.RolloutsClient) { + deployValues["reloader.isArgoRollouts"] = "true" + } + // Preserve CSI integration if CSI driver is installed + if utils.IsCSIDriverInstalled(ctx, csiClient) { + deployValues["reloader.enableCSIIntegration"] = "true" + } + err := testEnv.DeployAndWait(deployValues) + Expect(err).NotTo(HaveOccurred(), "Failed to restore Reloader to annotations strategy") + }) + + DescribeTable("should add STAKATER_ env var when ConfigMap changes", func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + 
Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to have STAKATER_ env var") + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, utils.StakaterEnvVarPrefix, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "%s should have STAKATER_ env var after ConfigMap change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should add STAKATER_ env var when Secret changes", func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + 
Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to have STAKATER_ env var") + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, utils.StakaterEnvVarPrefix, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "%s should have STAKATER_ env var after Secret change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // CSI SecretProviderClassPodStatus env var tests with real Vault + DescribeTable("should add STAKATER_ env var when SecretProviderClassPodStatus changes", + func(workloadType utils.WorkloadType) { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass 
pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with CSI volume and SPC reload annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SPCName: spcName, + UseCSIVolume: true, + Annotations: utils.BuildSecretProviderClassReloadAnnotation(spcName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, workloadName, + utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, + 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to have STAKATER_ env var") + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, utils.StakaterEnvVarPrefix, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "%s should have STAKATER_ env var after Vault secret change", workloadType) + }, Entry("Deployment", Label("csi"), utils.WorkloadDeployment), + Entry("DaemonSet", Label("csi"), utils.WorkloadDaemonSet), + 
Entry("StatefulSet", Label("csi"), utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("csi", "argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("csi", "openshift"), utils.WorkloadDeploymentConfig), + ) + + // Negative tests for env var strategy + DescribeTable("should NOT add STAKATER_ env var when only ConfigMap labels change", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the ConfigMap labels") + err = utils.UpdateConfigMapLabels(ctx, kubeClient, testNamespace, configMapName, map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload does NOT have STAKATER_ env var") + time.Sleep(utils.NegativeTestWait) + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, utils.StakaterEnvVarPrefix, + utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeFalse(), "%s should NOT have STAKATER_ env var for label-only change", workloadType) + }, + Entry("Deployment", 
utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + ) + + DescribeTable("should NOT add STAKATER_ env var when only Secret labels change", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the Secret labels") + err = utils.UpdateSecretLabels(ctx, kubeClient, testNamespace, secretName, map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload does NOT have STAKATER_ env var") + time.Sleep(utils.NegativeTestWait) + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, utils.StakaterEnvVarPrefix, + utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeFalse(), "%s should NOT have STAKATER_ env var for label-only change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + ) + + // CSI SPCPS label-only change 
negative test with real Vault + DescribeTable("should NOT add STAKATER_ env var when only SecretProviderClassPodStatus labels change", + func(workloadType utils.WorkloadType) { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + adapter := registry.Get(workloadType) + if adapter == nil { + Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) + } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with CSI volume and SPC reload annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SPCName: spcName, + UseCSIVolume: true, + Annotations: utils.BuildSecretProviderClassReloadAnnotation(spcName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, workloadName, + utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the SPCPS labels (should NOT trigger reload)") + err = utils.UpdateSecretProviderClassPodStatusLabels(ctx, csiClient, testNamespace, 
spcpsName, map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload does NOT have STAKATER_ env var") + time.Sleep(utils.NegativeTestWait) + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, utils.StakaterEnvVarPrefix, + utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeFalse(), "%s should NOT have STAKATER_ env var for SPCPS label-only change", + workloadType) + }, Entry("Deployment", Label("csi"), utils.WorkloadDeployment), + Entry("DaemonSet", Label("csi"), utils.WorkloadDaemonSet), + Entry("StatefulSet", Label("csi"), utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("csi", "argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("csi", "openshift"), utils.WorkloadDeploymentConfig), + ) + + // CSI auto annotation with EnvVar strategy and real Vault + It("should add STAKATER_ env var with secretproviderclass auto annotation", Label("csi"), func() { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + adapter := registry.Get(utils.WorkloadDeployment) + Expect(adapter).NotTo(BeNil()) + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, vaultSecretPath, + "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating Deployment with CSI volume and SPC auto annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SPCName: spcName, + UseCSIVolume: true, + Annotations: utils.BuildSecretProviderClassAutoAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + 
DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, workloadName, + utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, + 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to have STAKATER_ env var") + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, utils.StakaterEnvVarPrefix, + utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "Deployment with SPC auto annotation should have STAKATER_ env var") + }) + + // CSI exclude annotation with EnvVar strategy and real Vault + It("should NOT add STAKATER_ env var when excluded SecretProviderClassPodStatus changes", Label("csi"), func() { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + adapter := registry.Get(utils.WorkloadDeployment) + Expect(adapter).NotTo(BeNil()) + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": 
"initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, vaultSecretPath, + "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating Deployment with auto=true and SPC exclude annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SPCName: spcName, + UseCSIVolume: true, + Annotations: utils.MergeAnnotations(utils.BuildAutoTrueAnnotation(), + utils.BuildSecretProviderClassExcludeAnnotation(spcName)), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, workloadName, + utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret (excluded SPC - should NOT trigger reload)") + err = utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, + 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment does NOT have STAKATER_ env var") + time.Sleep(utils.NegativeTestWait) + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, utils.StakaterEnvVarPrefix, + utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + 
Expect(found).To(BeFalse(), "Deployment should NOT have STAKATER_ env var for excluded SPCPS change") + }) + + // CSI init container with EnvVar strategy and real Vault + It("should add STAKATER_ env var when SecretProviderClassPodStatus used by init container changes", Label("csi"), func() { + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not installed") + } + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed") + } + + By("Creating a secret in Vault") + err := utils.CreateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret(ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key") + Expect(err).NotTo(HaveOccurred()) + + By("Creating Deployment with init container using CSI volume") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, workloadName, + utils.WithInitContainerCSIVolume(spcName), + utils.WithAnnotations(utils.BuildSecretProviderClassReloadAnnotation(spcName))) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, workloadName) }) + + adapter := utils.NewDeploymentAdapter(kubeClient) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment(ctx, csiClient, kubeClient, testNamespace, workloadName, + utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Vault secret") + err = 
utils.UpdateVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, + 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to have STAKATER_ env var") + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, + utils.StakaterEnvVarPrefix, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "Deployment with init container CSI should have STAKATER_ env var") + }) + }) +}) diff --git a/test/e2e/csi/csi_suite_test.go b/test/e2e/csi/csi_suite_test.go new file mode 100644 index 000000000..a8746bbbe --- /dev/null +++ b/test/e2e/csi/csi_suite_test.go @@ -0,0 +1,70 @@ +package csi + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var ( + kubeClient kubernetes.Interface + csiClient csiclient.Interface + restConfig *rest.Config + testNamespace string + ctx context.Context + cancel context.CancelFunc + testEnv *utils.TestEnvironment +) + +func TestCSI(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "CSI SecretProviderClass E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx, cancel = context.WithCancel(context.Background()) + + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-csi-test") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + kubeClient = testEnv.KubeClient + csiClient = testEnv.CSIClient + restConfig = testEnv.RestConfig + testNamespace = testEnv.Namespace + + if !utils.IsCSIDriverInstalled(ctx, csiClient) { + Skip("CSI secrets store driver not 
installed - skipping CSI suite") + } + + if !utils.IsVaultProviderInstalled(ctx, kubeClient) { + Skip("Vault CSI provider not installed - skipping CSI suite") + } + + err = testEnv.DeployAndWait(map[string]string{ + "reloader.reloadStrategy": "annotations", + "reloader.watchGlobally": "false", + "reloader.enableCSIIntegration": "true", + }) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + if cancel != nil { + cancel() + } + + GinkgoWriter.Println("CSI E2E Suite cleanup complete") +}) diff --git a/test/e2e/csi/csi_test.go b/test/e2e/csi/csi_test.go new file mode 100644 index 000000000..498280380 --- /dev/null +++ b/test/e2e/csi/csi_test.go @@ -0,0 +1,330 @@ +package csi + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("CSI SecretProviderClass Tests", Label("csi"), func() { + var ( + deploymentName string + configMapName string + spcName string + vaultSecretPath string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + spcName = utils.RandName("spc") + vaultSecretPath = fmt.Sprintf("secret/%s", utils.RandName("test")) + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecretProviderClass(ctx, csiClient, testNamespace, spcName) + _ = utils.DeleteVaultSecret(ctx, kubeClient, restConfig, vaultSecretPath) + }) + + Context("Real Vault Integration Tests", func() { + It("should reload when Vault secret changes", func() { + By("Creating a secret in Vault") + err := utils.CreateVaultSecret( + 
ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "initial-value-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret( + ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "api_key", + ) + Expect(err).NotTo(HaveOccurred()) + + By("Creating Deployment with CSI volume and SPC reload annotation") + _, err = utils.CreateDeployment( + ctx, kubeClient, testNamespace, deploymentName, + utils.WithCSIVolume(spcName), + utils.WithAnnotations(utils.BuildSecretProviderClassReloadAnnotation(spcName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS created by CSI driver") + spcpsName, err := utils.FindSPCPSForDeployment( + ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("Found SPCPS: %s\n", spcpsName) + + By("Getting initial SPCPS version") + initialVersion, err := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("Initial SPCPS version: %s\n", initialVersion) + + By("Updating the Vault secret") + err = utils.UpdateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"api_key": "updated-value-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for CSI driver to sync the new secret version") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Println("CSI driver synced new secret version") + + By("Waiting for Deployment to be reloaded by Reloader") + reloaded, err := adapter.WaitReloaded( + ctx, testNamespace, deploymentName, + 
utils.AnnotationLastReloadedFrom, utils.ReloadTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded after Vault secret change") + }) + + It("should handle multiple Vault secret updates", func() { + By("Creating a secret in Vault") + err := utils.CreateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"password": "pass-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret( + ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "password", + ) + Expect(err).NotTo(HaveOccurred()) + + By("Creating Deployment with CSI volume") + _, err = utils.CreateDeployment( + ctx, kubeClient, testNamespace, deploymentName, + utils.WithCSIVolume(spcName), + utils.WithAnnotations(utils.BuildSecretProviderClassReloadAnnotation(spcName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the SPCPS") + spcpsName, err := utils.FindSPCPSForDeployment( + ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + + By("First update to Vault secret") + initialVersion, _ := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + err = utils.UpdateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"password": "pass-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for first CSI sync") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for first reload") + reloaded, err := adapter.WaitReloaded( + ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout, + ) 
+ Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue()) + + By("Getting annotation value after first reload") + deploy, err := utils.GetDeployment(ctx, kubeClient, testNamespace, deploymentName) + Expect(err).NotTo(HaveOccurred()) + firstReloadValue := deploy.Spec.Template.Annotations[utils.AnnotationLastReloadedFrom] + Expect(firstReloadValue).NotTo(BeEmpty()) + + By("Waiting for Deployment to stabilize") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Finding the NEW SPCPS after first reload (new pod = new SPCPS)") + newSpcpsName, err := utils.FindSPCPSForDeployment( + ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("New SPCPS after first reload: %s\n", newSpcpsName) + + By("Second update to Vault secret") + err = utils.UpdateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"password": "pass-v3"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for second reload with different annotation value") + Eventually(func() string { + deploy, err := utils.GetDeployment(ctx, kubeClient, testNamespace, deploymentName) + if err != nil { + return "" + } + return deploy.Spec.Template.Annotations[utils.AnnotationLastReloadedFrom] + }, utils.ReloadTimeout).ShouldNot(Equal(firstReloadValue), "Annotation should change after second Vault secret update") + }) + }) + + Context("Typed Auto Annotation Tests", func() { + It("should reload only SPC changes with secretproviderclass auto annotation, not ConfigMap", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap( + ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil, + ) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a secret in Vault") + err = utils.CreateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"token": 
"token-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret( + ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "token", + ) + Expect(err).NotTo(HaveOccurred()) + + By("Creating Deployment with ConfigMap envFrom AND CSI volume, but only SPC auto annotation") + _, err = utils.CreateDeployment( + ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithCSIVolume(spcName), + utils.WithAnnotations(utils.BuildSecretProviderClassAutoAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap (should NOT trigger reload)") + err = utils.UpdateConfigMap( + ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded for ConfigMap change") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded( + ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "SPC auto annotation should not trigger reload for ConfigMap changes") + + By("Finding the SPCPS") + spcpsName, err := utils.FindSPCPSForDeployment( + ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + + By("Getting SPCPS version before Vault update") + initialVersion, _ := utils.GetSPCPSVersion(ctx, csiClient, testNamespace, spcpsName) + + By("Updating the Vault secret (should trigger reload)") + err = utils.UpdateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"token": "token-v2"}) + Expect(err).NotTo(HaveOccurred()) + + 
By("Waiting for CSI driver to sync") + err = utils.WaitForSPCPSVersionChange(ctx, csiClient, testNamespace, spcpsName, initialVersion, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment WAS reloaded for Vault secret change") + reloaded, err = adapter.WaitReloaded( + ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "SPC auto annotation should trigger reload for Vault secret changes") + }) + + It("should reload for both ConfigMap and SPC when using combined auto=true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap( + ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil, + ) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a secret in Vault") + err = utils.CreateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"secret": "secret-v1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a SecretProviderClass pointing to Vault secret") + _, err = utils.CreateSecretProviderClassWithSecret( + ctx, csiClient, testNamespace, spcName, + vaultSecretPath, "secret", + ) + Expect(err).NotTo(HaveOccurred()) + + By("Creating Deployment with ConfigMap envFrom AND CSI volume with combined auto=true") + _, err = utils.CreateDeployment( + ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithCSIVolume(spcName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap (should trigger reload with auto=true)") + err = utils.UpdateConfigMap( + ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) 
+ + By("Verifying Deployment WAS reloaded for ConfigMap change") + reloaded, err := adapter.WaitReloaded( + ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Combined auto=true should trigger reload for ConfigMap changes") + + By("Waiting for Deployment to stabilize") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Getting current annotation value") + deploy, err := utils.GetDeployment(ctx, kubeClient, testNamespace, deploymentName) + Expect(err).NotTo(HaveOccurred()) + firstReloadValue := deploy.Spec.Template.Annotations[utils.AnnotationLastReloadedFrom] + + By("Finding the NEW SPCPS after ConfigMap reload (new pod = new SPCPS)") + newSpcpsName, err := utils.FindSPCPSForDeployment( + ctx, csiClient, kubeClient, testNamespace, deploymentName, utils.WorkloadReadyTimeout, + ) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("New SPCPS after ConfigMap reload: %s\n", newSpcpsName) + + By("Updating the Vault secret (should also trigger reload with auto=true)") + err = utils.UpdateVaultSecret( + ctx, kubeClient, restConfig, vaultSecretPath, map[string]string{"secret": "secret-v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment WAS reloaded for Vault secret change") + Eventually(func() string { + deploy, err := utils.GetDeployment(ctx, kubeClient, testNamespace, deploymentName) + if err != nil { + return "" + } + return deploy.Spec.Template.Annotations[utils.AnnotationLastReloadedFrom] + }, utils.ReloadTimeout).ShouldNot(Equal(firstReloadValue), + "Combined auto=true should trigger reload for Vault secret changes", + ) + }) + }) +}) diff --git a/test/e2e/flags/auto_reload_all_test.go b/test/e2e/flags/auto_reload_all_test.go new file mode 100644 index 000000000..39ccb49fb --- /dev/null +++ b/test/e2e/flags/auto_reload_all_test.go @@ -0,0 +1,107 @@ 
+package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Auto Reload All Flag Tests", func() { + var ( + deploymentName string + configMapName string + autoNamespace string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + autoNamespace = "auto-" + utils.RandName("ns") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, autoNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, autoNamespace, configMapName) + }) + + Context("with autoReloadAll=true flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, autoNamespace) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.autoReloadAll": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, autoNamespace) + }) + + It("should reload workloads without any annotations when autoReloadAll is true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, autoNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment WITHOUT any Reloader annotations") + _, err = utils.CreateDeployment(ctx, kubeClient, autoNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, autoNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, autoNamespace, 
configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (autoReloadAll=true)") + reloaded, err := adapter.WaitReloaded(ctx, autoNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment without annotations should reload when autoReloadAll=true") + }) + + It("should respect auto=false annotation even when autoReloadAll is true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, autoNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=false annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, autoNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoFalseAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, autoNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, autoNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (auto=false overrides autoReloadAll)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, autoNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment with auto=false should NOT reload even with autoReloadAll=true") + }) + }) +}) diff --git a/test/e2e/flags/flags_suite_test.go b/test/e2e/flags/flags_suite_test.go new file mode 100644 index 000000000..dc922cb12 --- /dev/null +++ b/test/e2e/flags/flags_suite_test.go @@ -0,0 +1,66 @@ +package flags + +import ( + 
"context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var ( + kubeClient kubernetes.Interface + testNamespace string + ctx context.Context + testEnv *utils.TestEnvironment +) + +func TestFlags(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Flag-Based E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx = context.Background() + + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-flags") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + kubeClient = testEnv.KubeClient + testNamespace = testEnv.Namespace +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + GinkgoWriter.Println("Flags E2E Suite cleanup complete") +}) + +// deployReloaderWithFlags deploys Reloader with the specified Helm value overrides. +// This is a convenience function for tests that need to deploy with specific flags. +func deployReloaderWithFlags(values map[string]string) error { + if values == nil { + values = make(map[string]string) + } + if _, ok := values["reloader.reloadStrategy"]; !ok { + values["reloader.reloadStrategy"] = "annotations" + } + return testEnv.DeployAndWait(values) +} + +// undeployReloader removes the Reloader installation. +func undeployReloader() error { + return utils.UndeployReloader(testNamespace, testEnv.ReleaseName) +} + +// waitForReloaderReady waits for the Reloader deployment to be ready. +func waitForReloaderReady() error { + return testEnv.WaitForReloader() +} diff --git a/test/e2e/flags/ignore_resources_test.go b/test/e2e/flags/ignore_resources_test.go new file mode 100644 index 000000000..369cd24d7 --- /dev/null +++ b/test/e2e/flags/ignore_resources_test.go @@ -0,0 +1,188 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Ignore Resources Flag Tests", func() { + var ( + deploymentName string + configMapName string + secretName string + ignoreNS string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + ignoreNS = "ignore-" + utils.RandName("ns") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, ignoreNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, ignoreNS, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, ignoreNS, secretName) + }) + + Context("with ignoreSecrets=true flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, ignoreNS) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.ignoreSecrets": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS) + }) + + It("should NOT reload when Secret changes with ignoreSecrets=true", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, ignoreNS, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err 
= utils.UpdateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (ignoreSecrets=true)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, ignoreNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ignoreSecrets=true") + }) + + It("should still reload when ConfigMap changes with ignoreSecrets=true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, ignoreNS, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (ConfigMap should still work)") + reloaded, err := adapter.WaitReloaded(ctx, ignoreNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "ConfigMap changes should still trigger reload with ignoreSecrets=true") + }) + }) + + Context("with ignoreConfigMaps=true flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, ignoreNS) + Expect(err).NotTo(HaveOccurred()) + + err = 
deployReloaderWithFlags(map[string]string{ + "reloader.ignoreConfigMaps": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS) + }) + + It("should NOT reload when ConfigMap changes with ignoreConfigMaps=true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, ignoreNS, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (ignoreConfigMaps=true)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, ignoreNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ignoreConfigMaps=true") + }) + + It("should still reload when Secret changes with ignoreConfigMaps=true", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, 
ignoreNS, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, ignoreNS, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName, map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (Secret should still work)") + reloaded, err := adapter.WaitReloaded(ctx, ignoreNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Secret changes should still trigger reload with ignoreConfigMaps=true") + }) + }) +}) diff --git a/test/e2e/flags/ignored_workloads_test.go b/test/e2e/flags/ignored_workloads_test.go new file mode 100644 index 000000000..33a8fba0e --- /dev/null +++ b/test/e2e/flags/ignored_workloads_test.go @@ -0,0 +1,157 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega"
+
+	"github.com/stakater/Reloader/test/e2e/utils"
+)
+
+var _ = Describe("Ignored Workloads Flag Tests", func() {
+	var (
+		cronJobName       string
+		configMapName     string
+		ignoreNS          string
+		cronJobAdapter    *utils.CronJobAdapter
+		deploymentAdapter *utils.DeploymentAdapter
+	)
+
+	BeforeEach(func() {
+		cronJobName = utils.RandName("cj")
+		configMapName = utils.RandName("cm")
+		ignoreNS = "ignore-wl-" + utils.RandName("ns")
+		cronJobAdapter = utils.NewCronJobAdapter(kubeClient)
+		deploymentAdapter = utils.NewDeploymentAdapter(kubeClient)
+	})
+
+	AfterEach(func() {
+		_ = utils.DeleteCronJob(ctx, kubeClient, ignoreNS, cronJobName)
+		_ = utils.DeleteConfigMap(ctx, kubeClient, ignoreNS, configMapName)
+	})
+
+	Context("with ignoreCronJobs=true flag", func() {
+		BeforeEach(func() {
+			err := utils.CreateNamespace(ctx, kubeClient, ignoreNS)
+			Expect(err).NotTo(HaveOccurred())
+
+			err = deployReloaderWithFlags(map[string]string{
+				"reloader.ignoreCronJobs": "true",
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = waitForReloaderReady()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			_ = undeployReloader()
+			_ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS)
+		})
+
+		It("should NOT reload CronJobs when ignoreCronJobs=true", func() {
+			By("Creating a ConfigMap")
+			_, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
+				map[string]string{"key": "initial"}, nil)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Creating a CronJob with auto annotation referencing the ConfigMap")
+			_, err = utils.CreateCronJob(ctx, kubeClient, ignoreNS, cronJobName,
+				utils.WithCronJobConfigMapEnvFrom(configMapName),
+				utils.WithCronJobAnnotations(utils.BuildAutoTrueAnnotation()),
+			)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Updating the ConfigMap")
+			err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, map[string]string{"key": "updated"})
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Verifying CronJob was NOT reloaded (ignoreCronJobs=true)")
+			time.Sleep(utils.NegativeTestWait)
+			reloaded, err := cronJobAdapter.WaitReloaded(ctx, ignoreNS, cronJobName,
+				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(reloaded).To(BeFalse(), "CronJob should NOT reload when ignoreCronJobs=true")
+		})
+
+		It("should still reload Deployments when ignoreCronJobs=true", func() {
+			deploymentName := utils.RandName("deploy")
+
+			By("Creating a ConfigMap")
+			_, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
+				map[string]string{"key": "initial"}, nil)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Creating a Deployment with auto annotation referencing the ConfigMap")
+			_, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName,
+				utils.WithConfigMapEnvFrom(configMapName),
+				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
+			)
+			Expect(err).NotTo(HaveOccurred())
+			defer func() {
+				_ = utils.DeleteDeployment(ctx, kubeClient, ignoreNS, deploymentName)
+			}()
+
+			By("Waiting for Deployment to be ready")
+			err = deploymentAdapter.WaitReady(ctx, ignoreNS, deploymentName, utils.WorkloadReadyTimeout)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Updating the ConfigMap")
+			err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, map[string]string{"key": "updated-deploy"})
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Waiting for Deployment to be reloaded (Deployment should still work)")
+			reloaded, err := deploymentAdapter.WaitReloaded(ctx, ignoreNS, deploymentName,
+				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(reloaded).To(BeTrue(), "Deployment should still reload with ignoreCronJobs=true")
+		})
+	})
+
+	Context("with both ignoreCronJobs=true and ignoreJobs=true flags", func() {
+		BeforeEach(func() {
+			err := utils.CreateNamespace(ctx, kubeClient, ignoreNS)
+			Expect(err).NotTo(HaveOccurred())
+
+			err = deployReloaderWithFlags(map[string]string{
+				
"reloader.ignoreCronJobs": "true", + "reloader.ignoreJobs": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS) + }) + + It("should NOT reload CronJobs when both job flags are true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with auto annotation") + _, err = utils.CreateCronJob(ctx, kubeClient, ignoreNS, cronJobName, + utils.WithCronJobConfigMapEnvFrom(configMapName), + utils.WithCronJobAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying CronJob was NOT reloaded") + time.Sleep(utils.NegativeTestWait) + reloaded, err := cronJobAdapter.WaitReloaded(ctx, ignoreNS, cronJobName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "CronJob should NOT reload when ignoreCronJobs=true and ignoreJobs=true") + }) + }) +}) diff --git a/test/e2e/flags/namespace_ignore_test.go b/test/e2e/flags/namespace_ignore_test.go new file mode 100644 index 000000000..5fd2caad3 --- /dev/null +++ b/test/e2e/flags/namespace_ignore_test.go @@ -0,0 +1,115 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Namespace Ignore Flag Tests", func() { + var ( + deploymentName string + configMapName string + ignoredNamespace string + watchedNamespace string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + ignoredNamespace = "ignored-" + utils.RandName("ns") + watchedNamespace = "watched-" + utils.RandName("ns") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, ignoredNamespace, deploymentName) + _ = utils.DeleteDeployment(ctx, kubeClient, watchedNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, ignoredNamespace, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, watchedNamespace, configMapName) + }) + + Context("with ignoreNamespaces flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, ignoredNamespace) + Expect(err).NotTo(HaveOccurred()) + err = utils.CreateNamespace(ctx, kubeClient, watchedNamespace) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.ignoreNamespaces": ignoredNamespace, + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoredNamespace) + _ = utils.DeleteNamespace(ctx, kubeClient, watchedNamespace) + }) + + It("should NOT reload in ignored namespace", func() { + By("Creating a ConfigMap in the ignored namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoredNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in the ignored namespace") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoredNamespace, deploymentName, + 
utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, ignoredNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoredNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (ignored namespace)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, ignoredNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment in ignored namespace should NOT be reloaded") + }) + + It("should reload in watched (non-ignored) namespace", func() { + By("Creating a ConfigMap in the watched namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, watchedNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in the watched namespace") + _, err = utils.CreateDeployment(ctx, kubeClient, watchedNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, watchedNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, watchedNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, watchedNamespace, deploymentName, + 
utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment in non-ignored namespace should be reloaded") + }) + }) +}) diff --git a/test/e2e/flags/namespace_selector_test.go b/test/e2e/flags/namespace_selector_test.go new file mode 100644 index 000000000..da3492774 --- /dev/null +++ b/test/e2e/flags/namespace_selector_test.go @@ -0,0 +1,116 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Namespace Selector Flag Tests", func() { + var ( + deploymentName string + configMapName string + matchingNS string + nonMatchingNS string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + matchingNS = "match-" + utils.RandName("ns") + nonMatchingNS = "nomatch-" + utils.RandName("ns") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, matchingNS, deploymentName) + _ = utils.DeleteDeployment(ctx, kubeClient, nonMatchingNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, matchingNS, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, nonMatchingNS, configMapName) + }) + + Context("with namespaceSelector flag", func() { + BeforeEach(func() { + err := utils.CreateNamespaceWithLabels(ctx, kubeClient, matchingNS, map[string]string{"env": "test"}) + Expect(err).NotTo(HaveOccurred()) + + err = utils.CreateNamespace(ctx, kubeClient, nonMatchingNS) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.namespaceSelector": "env=test", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, matchingNS) + _ = 
utils.DeleteNamespace(ctx, kubeClient, nonMatchingNS) + }) + + It("should reload workloads in matching namespaces", func() { + By("Creating a ConfigMap in matching namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, matchingNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in matching namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, matchingNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, matchingNS, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, matchingNS, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, matchingNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment in matching namespace should be reloaded") + }) + + It("should NOT reload workloads in non-matching namespaces", func() { + By("Creating a ConfigMap in non-matching namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, nonMatchingNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in non-matching namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, nonMatchingNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, nonMatchingNS, deploymentName, 
utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, nonMatchingNS, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (non-matching namespace)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, nonMatchingNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment in non-matching namespace should NOT be reloaded") + }) + }) +}) diff --git a/test/e2e/flags/reload_on_create_test.go b/test/e2e/flags/reload_on_create_test.go new file mode 100644 index 000000000..52a1b08cb --- /dev/null +++ b/test/e2e/flags/reload_on_create_test.go @@ -0,0 +1,142 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Reload On Create Flag Tests", func() { + var ( + deploymentName string + configMapName string + createNamespace string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + createNamespace = "create-" + utils.RandName("ns") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, createNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, createNamespace, configMapName) + }) + + Context("with reloadOnCreate=true flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, createNamespace) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.reloadOnCreate": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = 
undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, createNamespace) + }) + + It("should reload when a new ConfigMap is created", func() { + By("Creating a Deployment with annotation for a ConfigMap that doesn't exist yet") + _, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, createNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the ConfigMap that the Deployment references") + _, err = utils.CreateConfigMap(ctx, kubeClient, createNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (reloadOnCreate=true)") + reloaded, err := adapter.WaitReloaded(ctx, createNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced ConfigMap is created") + }) + + It("should reload when a new Secret is created", func() { + secretName := utils.RandName("secret") + defer func() { _ = utils.DeleteSecret(ctx, kubeClient, createNamespace, secretName) }() + + By("Creating a Deployment with annotation for a Secret that doesn't exist yet") + _, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName, + utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, createNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the Secret that the Deployment references") + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, createNamespace, secretName, + 
map[string]string{"password": "secret"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (reloadOnCreate=true)") + reloaded, err := adapter.WaitReloaded(ctx, createNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced Secret is created") + }) + }) + + Context("with reloadOnCreate=false (default)", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, createNamespace) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{}) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, createNamespace) + }) + + It("should NOT reload when a new ConfigMap is created (default behavior)", func() { + By("Creating a Deployment with annotation for a ConfigMap that doesn't exist yet") + _, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, createNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the ConfigMap that the Deployment references") + _, err = utils.CreateConfigMap(ctx, kubeClient, createNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (reloadOnCreate=false)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, createNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT 
reload on create when reloadOnCreate=false") + }) + }) +}) diff --git a/test/e2e/flags/reload_on_delete_test.go b/test/e2e/flags/reload_on_delete_test.go new file mode 100644 index 000000000..f0f3b1e8d --- /dev/null +++ b/test/e2e/flags/reload_on_delete_test.go @@ -0,0 +1,153 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Reload On Delete Flag Tests", func() { + var ( + deploymentName string + configMapName string + deleteNamespace string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + deleteNamespace = "delete-" + utils.RandName("ns") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, deleteNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName) + }) + + Context("with reloadOnDelete=true flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, deleteNamespace) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.reloadOnDelete": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, deleteNamespace) + }) + + It("should reload when a referenced ConfigMap is deleted", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, deleteNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with annotation for the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + 
) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, deleteNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Deleting the ConfigMap") + err = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (reloadOnDelete=true)") + reloaded, err := adapter.WaitReloaded(ctx, deleteNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced ConfigMap is deleted") + }) + + It("should reload when a referenced Secret is deleted", func() { + secretName := utils.RandName("secret") + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, deleteNamespace, secretName, + map[string]string{"password": "secret"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with annotation for the Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName, + utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, deleteNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Deleting the Secret") + err = utils.DeleteSecret(ctx, kubeClient, deleteNamespace, secretName) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (reloadOnDelete=true)") + reloaded, err := adapter.WaitReloaded(ctx, deleteNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced Secret is deleted") + }) + }) + + Context("with reloadOnDelete=false (default)", func() { + 
BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, deleteNamespace) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{}) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, deleteNamespace) + }) + + It("should NOT reload when a referenced ConfigMap is deleted (default behavior)", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, deleteNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with annotation for the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, deleteNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Deleting the ConfigMap") + err = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (reloadOnDelete=false)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, deleteNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload on delete when reloadOnDelete=false") + }) + }) +}) diff --git a/test/e2e/flags/resource_selector_test.go b/test/e2e/flags/resource_selector_test.go new file mode 100644 index 000000000..84063109e --- /dev/null +++ b/test/e2e/flags/resource_selector_test.go @@ -0,0 +1,112 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Resource Label Selector Flag Tests", func() { + var ( + deploymentName string + matchingCM string + nonMatchingCM string + resourceNS string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + matchingCM = utils.RandName("match-cm") + nonMatchingCM = utils.RandName("nomatch-cm") + resourceNS = "resource-" + utils.RandName("ns") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, resourceNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, resourceNS, matchingCM) + _ = utils.DeleteConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM) + }) + + Context("with resourceLabelSelector flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, resourceNS) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.resourceLabelSelector": "reload=true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, resourceNS) + }) + + It("should reload when labeled ConfigMap changes", func() { + By("Creating a ConfigMap with matching label") + _, err := utils.CreateConfigMapWithLabels(ctx, kubeClient, resourceNS, matchingCM, + map[string]string{"key": "initial"}, + map[string]string{"reload": "true"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, resourceNS, deploymentName, + utils.WithConfigMapEnvFrom(matchingCM), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, resourceNS, deploymentName, 
utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the labeled ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, resourceNS, matchingCM, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, resourceNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when labeled ConfigMap changes") + }) + + It("should NOT reload when unlabeled ConfigMap changes", func() { + By("Creating a ConfigMap WITHOUT matching label") + _, err := utils.CreateConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, resourceNS, deploymentName, + utils.WithConfigMapEnvFrom(nonMatchingCM), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, resourceNS, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the unlabeled ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (unlabeled ConfigMap)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, resourceNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when unlabeled ConfigMap changes") + }) + }) +}) diff --git a/test/e2e/flags/watch_globally_test.go b/test/e2e/flags/watch_globally_test.go new file mode 100644 index 000000000..177daf206 
--- /dev/null +++ b/test/e2e/flags/watch_globally_test.go @@ -0,0 +1,164 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Watch Globally Flag Tests", func() { + var ( + deploymentName string + configMapName string + otherNS string + adapter *utils.DeploymentAdapter + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + otherNS = "other-" + utils.RandName("ns") + adapter = utils.NewDeploymentAdapter(kubeClient) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteDeployment(ctx, kubeClient, otherNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, otherNS, configMapName) + }) + + Context("with watchGlobally=false flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, otherNS) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.watchGlobally": "false", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, otherNS) + }) + + It("should reload workloads in Reloader's namespace when watchGlobally=false", func() { + By("Creating a ConfigMap in Reloader's namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in Reloader's namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + 
Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (same namespace should work)") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment in Reloader's namespace should reload with watchGlobally=false") + }) + + It("should NOT reload workloads in other namespaces when watchGlobally=false", func() { + By("Creating a ConfigMap in another namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, otherNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in another namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, otherNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, otherNS, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap in the other namespace") + err = utils.UpdateConfigMap(ctx, kubeClient, otherNS, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (different namespace with watchGlobally=false)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, otherNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + 
Expect(reloaded).To(BeFalse(), "Deployment in other namespace should NOT reload with watchGlobally=false") + }) + }) + + Context("with watchGlobally=true flag (default)", func() { + var globalNS string + + BeforeEach(func() { + globalNS = "global-" + utils.RandName("ns") + + err := utils.CreateNamespace(ctx, kubeClient, globalNS) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.watchGlobally": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, globalNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, globalNS, configMapName) + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, globalNS) + }) + + It("should reload workloads in any namespace when watchGlobally=true", func() { + By("Creating a ConfigMap in a different namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, globalNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in a different namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, globalNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, globalNS, deploymentName, utils.WorkloadReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, globalNS, configMapName, map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (watchGlobally=true)") + reloaded, err := adapter.WaitReloaded(ctx, globalNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + 
Expect(reloaded).To(BeTrue(), "Deployment in any namespace should reload with watchGlobally=true") + }) + }) +}) diff --git a/test/e2e/utils/accessors.go b/test/e2e/utils/accessors.go new file mode 100644 index 000000000..514de999f --- /dev/null +++ b/test/e2e/utils/accessors.go @@ -0,0 +1,171 @@ +package utils + +import ( + "strings" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" + + rolloutsv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + openshiftappsv1 "github.com/openshift/api/apps/v1" +) + +// Deployment accessors +var ( + DeploymentPodTemplate PodTemplateAccessor[*appsv1.Deployment] = func(d *appsv1.Deployment) *corev1.PodTemplateSpec { + return &d.Spec.Template + } + DeploymentAnnotations AnnotationAccessor[*appsv1.Deployment] = func(d *appsv1.Deployment) map[string]string { + return d.Annotations + } + DeploymentContainers ContainerAccessor[*appsv1.Deployment] = func(d *appsv1.Deployment) []corev1.Container { + return d.Spec.Template.Spec.Containers + } + DeploymentIsReady StatusAccessor[*appsv1.Deployment] = func(d *appsv1.Deployment) bool { + if d.Spec.Replicas == nil { + return false + } + return d.Status.ReadyReplicas == *d.Spec.Replicas && + d.Status.UpdatedReplicas == *d.Spec.Replicas && + d.Status.AvailableReplicas == *d.Spec.Replicas + } +) + +// DaemonSet accessors +var ( + DaemonSetPodTemplate PodTemplateAccessor[*appsv1.DaemonSet] = func(d *appsv1.DaemonSet) *corev1.PodTemplateSpec { + return &d.Spec.Template + } + DaemonSetAnnotations AnnotationAccessor[*appsv1.DaemonSet] = func(d *appsv1.DaemonSet) map[string]string { + return d.Annotations + } + DaemonSetContainers ContainerAccessor[*appsv1.DaemonSet] = func(d *appsv1.DaemonSet) []corev1.Container { + return d.Spec.Template.Spec.Containers + } + DaemonSetIsReady StatusAccessor[*appsv1.DaemonSet] = func(d *appsv1.DaemonSet) bool { + 
return d.Status.DesiredNumberScheduled > 0 && + d.Status.NumberReady == d.Status.DesiredNumberScheduled + } +) + +// StatefulSet accessors +var ( + StatefulSetPodTemplate PodTemplateAccessor[*appsv1.StatefulSet] = func(s *appsv1.StatefulSet) *corev1.PodTemplateSpec { + return &s.Spec.Template + } + StatefulSetAnnotations AnnotationAccessor[*appsv1.StatefulSet] = func(s *appsv1.StatefulSet) map[string]string { + return s.Annotations + } + StatefulSetContainers ContainerAccessor[*appsv1.StatefulSet] = func(s *appsv1.StatefulSet) []corev1.Container { + return s.Spec.Template.Spec.Containers + } + StatefulSetIsReady StatusAccessor[*appsv1.StatefulSet] = func(s *appsv1.StatefulSet) bool { + if s.Spec.Replicas == nil { + return false + } + return s.Status.ReadyReplicas == *s.Spec.Replicas + } +) + +// Job accessors +var ( + JobPodTemplate PodTemplateAccessor[*batchv1.Job] = func(j *batchv1.Job) *corev1.PodTemplateSpec { + return &j.Spec.Template + } + JobAnnotations AnnotationAccessor[*batchv1.Job] = func(j *batchv1.Job) map[string]string { + return j.Annotations + } + JobContainers ContainerAccessor[*batchv1.Job] = func(j *batchv1.Job) []corev1.Container { + return j.Spec.Template.Spec.Containers + } + JobIsReady StatusAccessor[*batchv1.Job] = func(j *batchv1.Job) bool { + return j.Status.Active > 0 || j.Status.Succeeded > 0 + } + JobUID UIDAccessor[*batchv1.Job] = func(j *batchv1.Job) types.UID { + return j.UID + } +) + +// CronJob accessors +var ( + CronJobPodTemplate PodTemplateAccessor[*batchv1.CronJob] = func(c *batchv1.CronJob) *corev1.PodTemplateSpec { + return &c.Spec.JobTemplate.Spec.Template + } + CronJobAnnotations AnnotationAccessor[*batchv1.CronJob] = func(c *batchv1.CronJob) map[string]string { + return c.Annotations + } + CronJobContainers ContainerAccessor[*batchv1.CronJob] = func(c *batchv1.CronJob) []corev1.Container { + return c.Spec.JobTemplate.Spec.Template.Spec.Containers + } + CronJobExists StatusAccessor[*batchv1.CronJob] = func(c 
*batchv1.CronJob) bool { + return true + } +) + +// Argo Rollout accessors +var ( + RolloutPodTemplate PodTemplateAccessor[*rolloutsv1alpha1.Rollout] = func(r *rolloutsv1alpha1.Rollout) *corev1.PodTemplateSpec { + return &r.Spec.Template + } + RolloutAnnotations AnnotationAccessor[*rolloutsv1alpha1.Rollout] = func(r *rolloutsv1alpha1.Rollout) map[string]string { + return r.Annotations + } + RolloutContainers ContainerAccessor[*rolloutsv1alpha1.Rollout] = func(r *rolloutsv1alpha1.Rollout) []corev1.Container { + return r.Spec.Template.Spec.Containers + } + RolloutIsReady StatusAccessor[*rolloutsv1alpha1.Rollout] = func(r *rolloutsv1alpha1.Rollout) bool { + if r.Spec.Replicas == nil { + return false + } + return r.Status.ReadyReplicas == *r.Spec.Replicas + } + RolloutHasRestartAt StatusAccessor[*rolloutsv1alpha1.Rollout] = func(r *rolloutsv1alpha1.Rollout) bool { + return r.Spec.RestartAt != nil + } +) + +// OpenShift DeploymentConfig accessors +var ( + DeploymentConfigPodTemplate PodTemplateAccessor[*openshiftappsv1.DeploymentConfig] = func(d *openshiftappsv1.DeploymentConfig) *corev1.PodTemplateSpec { + return d.Spec.Template + } + DeploymentConfigAnnotations AnnotationAccessor[*openshiftappsv1.DeploymentConfig] = func(d *openshiftappsv1.DeploymentConfig) map[string]string { + return d.Annotations + } + DeploymentConfigContainers ContainerAccessor[*openshiftappsv1.DeploymentConfig] = func(d *openshiftappsv1.DeploymentConfig) []corev1.Container { + if d.Spec.Template == nil { + return nil + } + return d.Spec.Template.Spec.Containers + } + DeploymentConfigIsReady StatusAccessor[*openshiftappsv1.DeploymentConfig] = func(d *openshiftappsv1.DeploymentConfig) bool { + return d.Status.ReadyReplicas == d.Spec.Replicas + } +) + +// SecretProviderClassPodStatus accessors +var ( + SPCPSIsMounted StatusAccessor[*csiv1.SecretProviderClassPodStatus] = func(s *csiv1.SecretProviderClassPodStatus) bool { + return s.Status.Mounted + } + SPCPSClassName 
ValueAccessor[*csiv1.SecretProviderClassPodStatus, string] = func(s *csiv1.SecretProviderClassPodStatus) string { + return s.Status.SecretProviderClassName + } + SPCPSPodName ValueAccessor[*csiv1.SecretProviderClassPodStatus, string] = func(s *csiv1.SecretProviderClassPodStatus) string { + return s.Status.PodName + } + // SPCPSVersions returns concatenated versions of all objects for change detection. + SPCPSVersions ValueAccessor[*csiv1.SecretProviderClassPodStatus, string] = func(s *csiv1.SecretProviderClassPodStatus) string { + if len(s.Status.Objects) == 0 { + return "" + } + var versions []string + for _, obj := range s.Status.Objects { + versions = append(versions, obj.Version) + } + return strings.Join(versions, ",") + } +) diff --git a/test/e2e/utils/annotations.go b/test/e2e/utils/annotations.go new file mode 100644 index 000000000..60c0132b3 --- /dev/null +++ b/test/e2e/utils/annotations.go @@ -0,0 +1,241 @@ +package utils + +// Annotation key constants used by Reloader. +// These follow the pattern: {scope}.reloader.stakater.com/{action} +// where scope can be empty (all resources), "configmap", "secret", "deployment", etc. +const ( + // ============================================================ + // Core reload annotations + // ============================================================ + + // AnnotationLastReloadedFrom is set by Reloader on workloads to track the last resource + // that triggered a reload. Format: "{namespace}/{resource-type}/{resource-name}" + AnnotationLastReloadedFrom = "reloader.stakater.com/last-reloaded-from" + + // AnnotationConfigMapReload triggers reload when specified ConfigMap(s) change. + // Value: comma-separated list of ConfigMap names, e.g., "config1,config2" + AnnotationConfigMapReload = "configmap.reloader.stakater.com/reload" + + // AnnotationSecretReload triggers reload when specified Secret(s) change. 
+ // Value: comma-separated list of Secret names, e.g., "secret1,secret2" + AnnotationSecretReload = "secret.reloader.stakater.com/reload" + + // AnnotationSecretProviderClassReload triggers reload when specified SecretProviderClass(es) change. + // Value: comma-separated list of SecretProviderClass names, e.g., "spc1,spc2" + // Note: Reloader actually watches SecretProviderClassPodStatus resources, not SecretProviderClass. + AnnotationSecretProviderClassReload = "secretproviderclass.reloader.stakater.com/reload" + + // ============================================================ + // Auto-reload annotations + // ============================================================ + + // AnnotationAuto enables auto-reload for all referenced ConfigMaps and Secrets. + // Value: "true" or "false" + AnnotationAuto = "reloader.stakater.com/auto" + + // AnnotationConfigMapAuto enables auto-reload for all referenced ConfigMaps only. + // Value: "true" or "false" + AnnotationConfigMapAuto = "configmap.reloader.stakater.com/auto" + + // AnnotationSecretAuto enables auto-reload for all referenced Secrets only. + // Value: "true" or "false" + AnnotationSecretAuto = "secret.reloader.stakater.com/auto" + + // AnnotationSecretProviderClassAuto enables auto-reload for all referenced SecretProviderClasses only. + // Value: "true" or "false" + AnnotationSecretProviderClassAuto = "secretproviderclass.reloader.stakater.com/auto" + + // ============================================================ + // Exclude annotations (used with auto=true to exclude specific resources) + // ============================================================ + + // AnnotationConfigMapExclude excludes specified ConfigMaps from auto-reload. + // Value: comma-separated list of ConfigMap names + AnnotationConfigMapExclude = "configmaps.exclude.reloader.stakater.com/reload" + + // AnnotationSecretExclude excludes specified Secrets from auto-reload. 
+ // Value: comma-separated list of Secret names + AnnotationSecretExclude = "secrets.exclude.reloader.stakater.com/reload" + + // AnnotationSecretProviderClassExclude excludes specified SecretProviderClasses from auto-reload. + // Value: comma-separated list of SecretProviderClass names + AnnotationSecretProviderClassExclude = "secretproviderclasses.exclude.reloader.stakater.com/reload" + + // ============================================================ + // Search annotations (for regex matching) + // ============================================================ + + // AnnotationSearch enables regex search mode for ConfigMap/Secret names. + // Value: "true" + // Used with reload annotation where value is a regex pattern. + AnnotationSearch = "reloader.stakater.com/search" + + // AnnotationMatch is an alias for AnnotationSearch. + // Value: "true" + AnnotationMatch = "reloader.stakater.com/match" + + // ============================================================ + // Resource-level annotations (placed on ConfigMap/Secret) + // ============================================================ + + // AnnotationIgnore prevents Reloader from triggering reloads for this resource. + // Place this on a ConfigMap or Secret to exclude it from reload triggers. + // Value: "true" + AnnotationIgnore = "reloader.stakater.com/ignore" + + // ============================================================ + // Pause/period annotations + // ============================================================ + + // AnnotationDeploymentPausePeriod sets a pause period before triggering reload. + // Value: duration string, e.g., "10s", "1m" + AnnotationDeploymentPausePeriod = "deployment.reloader.stakater.com/pause-period" + + // AnnotationDeploymentPausedAt is set by Reloader when a workload is paused. 
+ // Value: RFC3339 timestamp + AnnotationDeploymentPausedAt = "deployment.reloader.stakater.com/paused-at" + + // ============================================================ + // Argo Rollouts specific annotations + // ============================================================ + + // AnnotationRolloutStrategy specifies the strategy for Argo Rollouts. + // Value: "restart" (sets spec.restartAt) + AnnotationRolloutStrategy = "reloader.stakater.com/rollout-strategy" +) + +// Annotation values. +const ( + // AnnotationValueTrue is the string "true" for annotation values. + AnnotationValueTrue = "true" + + // AnnotationValueFalse is the string "false" for annotation values. + AnnotationValueFalse = "false" + + // AnnotationValueRestart is the "restart" strategy value for Argo Rollouts. + AnnotationValueRestart = "restart" +) + +// BuildConfigMapReloadAnnotation creates an annotation map for ConfigMap reload. +func BuildConfigMapReloadAnnotation(configMapNames ...string) map[string]string { + return map[string]string{ + AnnotationConfigMapReload: joinNames(configMapNames), + } +} + +// BuildSecretReloadAnnotation creates an annotation map for Secret reload. +func BuildSecretReloadAnnotation(secretNames ...string) map[string]string { + return map[string]string{ + AnnotationSecretReload: joinNames(secretNames), + } +} + +// BuildSecretProviderClassReloadAnnotation creates an annotation map for SecretProviderClass reload. +func BuildSecretProviderClassReloadAnnotation(spcNames ...string) map[string]string { + return map[string]string{ + AnnotationSecretProviderClassReload: joinNames(spcNames), + } +} + +// BuildAutoTrueAnnotation creates an annotation map with auto=true. +func BuildAutoTrueAnnotation() map[string]string { + return map[string]string{ + AnnotationAuto: AnnotationValueTrue, + } +} + +// BuildAutoFalseAnnotation creates an annotation map with auto=false. 
+func BuildAutoFalseAnnotation() map[string]string { + return map[string]string{ + AnnotationAuto: AnnotationValueFalse, + } +} + +// BuildConfigMapAutoAnnotation creates an annotation map with configmap auto=true. +func BuildConfigMapAutoAnnotation() map[string]string { + return map[string]string{ + AnnotationConfigMapAuto: AnnotationValueTrue, + } +} + +// BuildSecretAutoAnnotation creates an annotation map with secret auto=true. +func BuildSecretAutoAnnotation() map[string]string { + return map[string]string{ + AnnotationSecretAuto: AnnotationValueTrue, + } +} + +// BuildSecretProviderClassAutoAnnotation creates an annotation map with secretproviderclass auto=true. +func BuildSecretProviderClassAutoAnnotation() map[string]string { + return map[string]string{ + AnnotationSecretProviderClassAuto: AnnotationValueTrue, + } +} + +// BuildSearchAnnotation creates an annotation map to enable search mode. +func BuildSearchAnnotation() map[string]string { + return map[string]string{ + AnnotationSearch: AnnotationValueTrue, + } +} + +// BuildMatchAnnotation creates an annotation map to enable match mode. +func BuildMatchAnnotation() map[string]string { + return map[string]string{ + AnnotationMatch: AnnotationValueTrue, + } +} + +// BuildIgnoreAnnotation creates an annotation map to ignore a resource. +func BuildIgnoreAnnotation() map[string]string { + return map[string]string{ + AnnotationIgnore: AnnotationValueTrue, + } +} + +// BuildRolloutRestartStrategyAnnotation creates an annotation for Argo Rollout restart strategy. +func BuildRolloutRestartStrategyAnnotation() map[string]string { + return map[string]string{ + AnnotationRolloutStrategy: AnnotationValueRestart, + } +} + +// BuildConfigMapExcludeAnnotation creates an annotation to exclude ConfigMaps from auto-reload. 
+func BuildConfigMapExcludeAnnotation(configMapNames ...string) map[string]string { + return map[string]string{ + AnnotationConfigMapExclude: joinNames(configMapNames), + } +} + +// BuildSecretExcludeAnnotation creates an annotation to exclude Secrets from auto-reload. +func BuildSecretExcludeAnnotation(secretNames ...string) map[string]string { + return map[string]string{ + AnnotationSecretExclude: joinNames(secretNames), + } +} + +// BuildSecretProviderClassExcludeAnnotation creates an annotation to exclude SecretProviderClasses from auto-reload. +func BuildSecretProviderClassExcludeAnnotation(spcNames ...string) map[string]string { + return map[string]string{ + AnnotationSecretProviderClassExclude: joinNames(spcNames), + } +} + +// BuildPausePeriodAnnotation creates an annotation for deployment pause period. +func BuildPausePeriodAnnotation(duration string) map[string]string { + return map[string]string{ + AnnotationDeploymentPausePeriod: duration, + } +} + +// joinNames joins names with comma separator. 
+func joinNames(names []string) string { + if len(names) == 0 { + return "" + } + result := names[0] + for i := 1; i < len(names); i++ { + result += "," + names[i] + } + return result +} diff --git a/test/e2e/utils/annotations_test.go b/test/e2e/utils/annotations_test.go new file mode 100644 index 000000000..fa0d699c8 --- /dev/null +++ b/test/e2e/utils/annotations_test.go @@ -0,0 +1,303 @@ +package utils + +import ( + "testing" +) + +func TestBuildConfigMapReloadAnnotation(t *testing.T) { + tests := []struct { + name string + configMaps []string + expected map[string]string + }{ + { + name: "single ConfigMap", + configMaps: []string{"my-config"}, + expected: map[string]string{ + AnnotationConfigMapReload: "my-config", + }, + }, + { + name: "multiple ConfigMaps", + configMaps: []string{"config1", "config2", "config3"}, + expected: map[string]string{ + AnnotationConfigMapReload: "config1,config2,config3", + }, + }, + { + name: "empty list", + configMaps: []string{}, + expected: map[string]string{ + AnnotationConfigMapReload: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildConfigMapReloadAnnotation(tt.configMaps...) 
+ if len(result) != len(tt.expected) { + t.Errorf("BuildConfigMapReloadAnnotation() returned %d entries, want %d", len(result), len(tt.expected)) + } + for k, v := range tt.expected { + if result[k] != v { + t.Errorf("BuildConfigMapReloadAnnotation()[%q] = %q, want %q", k, result[k], v) + } + } + }) + } +} + +func TestBuildSecretReloadAnnotation(t *testing.T) { + tests := []struct { + name string + secrets []string + expected map[string]string + }{ + { + name: "single Secret", + secrets: []string{"my-secret"}, + expected: map[string]string{ + AnnotationSecretReload: "my-secret", + }, + }, + { + name: "multiple Secrets", + secrets: []string{"secret1", "secret2"}, + expected: map[string]string{ + AnnotationSecretReload: "secret1,secret2", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildSecretReloadAnnotation(tt.secrets...) + for k, v := range tt.expected { + if result[k] != v { + t.Errorf("BuildSecretReloadAnnotation()[%q] = %q, want %q", k, result[k], v) + } + } + }) + } +} + +func TestBuildAutoAnnotations(t *testing.T) { + t.Run("BuildAutoTrueAnnotation", func(t *testing.T) { + result := BuildAutoTrueAnnotation() + if result[AnnotationAuto] != AnnotationValueTrue { + t.Errorf("BuildAutoTrueAnnotation()[%q] = %q, want %q", + AnnotationAuto, result[AnnotationAuto], AnnotationValueTrue) + } + }) + + t.Run("BuildAutoFalseAnnotation", func(t *testing.T) { + result := BuildAutoFalseAnnotation() + if result[AnnotationAuto] != AnnotationValueFalse { + t.Errorf("BuildAutoFalseAnnotation()[%q] = %q, want %q", + AnnotationAuto, result[AnnotationAuto], AnnotationValueFalse) + } + }) + + t.Run("BuildConfigMapAutoAnnotation", func(t *testing.T) { + result := BuildConfigMapAutoAnnotation() + if result[AnnotationConfigMapAuto] != AnnotationValueTrue { + t.Errorf("BuildConfigMapAutoAnnotation()[%q] = %q, want %q", + AnnotationConfigMapAuto, result[AnnotationConfigMapAuto], AnnotationValueTrue) + } + }) + + 
t.Run("BuildSecretAutoAnnotation", func(t *testing.T) { + result := BuildSecretAutoAnnotation() + if result[AnnotationSecretAuto] != AnnotationValueTrue { + t.Errorf("BuildSecretAutoAnnotation()[%q] = %q, want %q", + AnnotationSecretAuto, result[AnnotationSecretAuto], AnnotationValueTrue) + } + }) +} + +func TestBuildSearchMatchAnnotations(t *testing.T) { + t.Run("BuildSearchAnnotation", func(t *testing.T) { + result := BuildSearchAnnotation() + if result[AnnotationSearch] != AnnotationValueTrue { + t.Errorf("BuildSearchAnnotation()[%q] = %q, want %q", + AnnotationSearch, result[AnnotationSearch], AnnotationValueTrue) + } + }) + + t.Run("BuildMatchAnnotation", func(t *testing.T) { + result := BuildMatchAnnotation() + if result[AnnotationMatch] != AnnotationValueTrue { + t.Errorf("BuildMatchAnnotation()[%q] = %q, want %q", + AnnotationMatch, result[AnnotationMatch], AnnotationValueTrue) + } + }) +} + +func TestBuildIgnoreAnnotation(t *testing.T) { + result := BuildIgnoreAnnotation() + if result[AnnotationIgnore] != AnnotationValueTrue { + t.Errorf("BuildIgnoreAnnotation()[%q] = %q, want %q", + AnnotationIgnore, result[AnnotationIgnore], AnnotationValueTrue) + } +} + +func TestBuildRolloutRestartStrategyAnnotation(t *testing.T) { + result := BuildRolloutRestartStrategyAnnotation() + if result[AnnotationRolloutStrategy] != AnnotationValueRestart { + t.Errorf("BuildRolloutRestartStrategyAnnotation()[%q] = %q, want %q", + AnnotationRolloutStrategy, result[AnnotationRolloutStrategy], AnnotationValueRestart) + } +} + +func TestBuildExcludeAnnotations(t *testing.T) { + t.Run("BuildConfigMapExcludeAnnotation single", func(t *testing.T) { + result := BuildConfigMapExcludeAnnotation("excluded-cm") + if result[AnnotationConfigMapExclude] != "excluded-cm" { + t.Errorf("BuildConfigMapExcludeAnnotation()[%q] = %q, want %q", + AnnotationConfigMapExclude, result[AnnotationConfigMapExclude], "excluded-cm") + } + }) + + t.Run("BuildConfigMapExcludeAnnotation multiple", func(t 
*testing.T) { + result := BuildConfigMapExcludeAnnotation("cm1", "cm2", "cm3") + expected := "cm1,cm2,cm3" + if result[AnnotationConfigMapExclude] != expected { + t.Errorf("BuildConfigMapExcludeAnnotation()[%q] = %q, want %q", + AnnotationConfigMapExclude, result[AnnotationConfigMapExclude], expected) + } + }) + + t.Run("BuildSecretExcludeAnnotation single", func(t *testing.T) { + result := BuildSecretExcludeAnnotation("excluded-secret") + if result[AnnotationSecretExclude] != "excluded-secret" { + t.Errorf("BuildSecretExcludeAnnotation()[%q] = %q, want %q", + AnnotationSecretExclude, result[AnnotationSecretExclude], "excluded-secret") + } + }) + + t.Run("BuildSecretExcludeAnnotation multiple", func(t *testing.T) { + result := BuildSecretExcludeAnnotation("s1", "s2") + expected := "s1,s2" + if result[AnnotationSecretExclude] != expected { + t.Errorf("BuildSecretExcludeAnnotation()[%q] = %q, want %q", + AnnotationSecretExclude, result[AnnotationSecretExclude], expected) + } + }) +} + +func TestBuildPausePeriodAnnotation(t *testing.T) { + tests := []struct { + name string + duration string + expected string + }{ + { + name: "10 seconds", + duration: "10s", + expected: "10s", + }, + { + name: "1 minute", + duration: "1m", + expected: "1m", + }, + { + name: "30 minutes", + duration: "30m", + expected: "30m", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildPausePeriodAnnotation(tt.duration) + if result[AnnotationDeploymentPausePeriod] != tt.expected { + t.Errorf("BuildPausePeriodAnnotation(%q)[%q] = %q, want %q", + tt.duration, AnnotationDeploymentPausePeriod, + result[AnnotationDeploymentPausePeriod], tt.expected) + } + }) + } +} + +func TestJoinNames(t *testing.T) { + tests := []struct { + name string + names []string + expected string + }{ + { + name: "empty slice", + names: []string{}, + expected: "", + }, + { + name: "single name", + names: []string{"one"}, + expected: "one", + }, + { + name: "two names", + names: 
[]string{"one", "two"}, + expected: "one,two", + }, + { + name: "three names", + names: []string{"alpha", "beta", "gamma"}, + expected: "alpha,beta,gamma", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := joinNames(tt.names) + if result != tt.expected { + t.Errorf("joinNames(%v) = %q, want %q", tt.names, result, tt.expected) + } + }) + } +} + +func TestAnnotationConstants(t *testing.T) { + tests := []struct { + name string + constant string + expected string + }{ + {"AnnotationLastReloadedFrom", AnnotationLastReloadedFrom, "reloader.stakater.com/last-reloaded-from"}, + {"AnnotationConfigMapReload", AnnotationConfigMapReload, "configmap.reloader.stakater.com/reload"}, + {"AnnotationSecretReload", AnnotationSecretReload, "secret.reloader.stakater.com/reload"}, + {"AnnotationAuto", AnnotationAuto, "reloader.stakater.com/auto"}, + {"AnnotationConfigMapAuto", AnnotationConfigMapAuto, "configmap.reloader.stakater.com/auto"}, + {"AnnotationSecretAuto", AnnotationSecretAuto, "secret.reloader.stakater.com/auto"}, + {"AnnotationConfigMapExclude", AnnotationConfigMapExclude, "configmaps.exclude.reloader.stakater.com/reload"}, + {"AnnotationSecretExclude", AnnotationSecretExclude, "secrets.exclude.reloader.stakater.com/reload"}, + {"AnnotationSearch", AnnotationSearch, "reloader.stakater.com/search"}, + {"AnnotationMatch", AnnotationMatch, "reloader.stakater.com/match"}, + {"AnnotationIgnore", AnnotationIgnore, "reloader.stakater.com/ignore"}, + {"AnnotationDeploymentPausePeriod", AnnotationDeploymentPausePeriod, "deployment.reloader.stakater.com/pause-period"}, + {"AnnotationDeploymentPausedAt", AnnotationDeploymentPausedAt, "deployment.reloader.stakater.com/paused-at"}, + {"AnnotationRolloutStrategy", AnnotationRolloutStrategy, "reloader.stakater.com/rollout-strategy"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.constant != tt.expected { + t.Errorf("%s = %q, want %q", tt.name, tt.constant, 
tt.expected) + } + }) + } +} + +func TestAnnotationValues(t *testing.T) { + if AnnotationValueTrue != "true" { + t.Errorf("AnnotationValueTrue = %q, want \"true\"", AnnotationValueTrue) + } + if AnnotationValueFalse != "false" { + t.Errorf("AnnotationValueFalse = %q, want \"false\"", AnnotationValueFalse) + } + if AnnotationValueRestart != "restart" { + t.Errorf("AnnotationValueRestart = %q, want \"restart\"", AnnotationValueRestart) + } +} diff --git a/test/e2e/utils/argo.go b/test/e2e/utils/argo.go new file mode 100644 index 000000000..b06da6c4c --- /dev/null +++ b/test/e2e/utils/argo.go @@ -0,0 +1,120 @@ +package utils + +import ( + "context" + + rolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + rolloutsclient "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +// RolloutOption is a function that modifies a Rollout. +type RolloutOption func(*rolloutv1alpha1.Rollout) + +// IsArgoRolloutsInstalled checks if Argo Rollouts CRD is installed in the cluster. +func IsArgoRolloutsInstalled(ctx context.Context, client rolloutsclient.Interface) bool { + if client == nil { + return false + } + _, err := client.ArgoprojV1alpha1().Rollouts("default").List(ctx, metav1.ListOptions{Limit: 1}) + return err == nil +} + +// CreateRollout creates an Argo Rollout with the given options. 
+func CreateRollout(ctx context.Context, client rolloutsclient.Interface, namespace, name string, opts ...RolloutOption) (*rolloutv1alpha1.Rollout, error) { + rollout := &rolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: rolloutv1alpha1.RolloutSpec{ + Replicas: ptr.To[int32](1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }}, + }, + }, + Strategy: rolloutv1alpha1.RolloutStrategy{ + Canary: &rolloutv1alpha1.CanaryStrategy{ + Steps: []rolloutv1alpha1.CanaryStep{ + {SetWeight: ptr.To[int32](100)}, + }, + }, + }, + }, + } + + for _, opt := range opts { + opt(rollout) + } + + return client.ArgoprojV1alpha1().Rollouts(namespace).Create(ctx, rollout, metav1.CreateOptions{}) +} + +// DeleteRollout deletes an Argo Rollout using typed client. +func DeleteRollout(ctx context.Context, client rolloutsclient.Interface, namespace, name string) error { + return client.ArgoprojV1alpha1().Rollouts(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// WithRolloutConfigMapEnvFrom adds a ConfigMap envFrom to the Rollout. +func WithRolloutConfigMapEnvFrom(configMapName string) RolloutOption { + return func(r *rolloutv1alpha1.Rollout) { + AddEnvFromSource(&r.Spec.Template.Spec, 0, configMapName, false) + } +} + +// WithRolloutSecretEnvFrom adds a Secret envFrom to the Rollout. +func WithRolloutSecretEnvFrom(secretName string) RolloutOption { + return func(r *rolloutv1alpha1.Rollout) { + AddEnvFromSource(&r.Spec.Template.Spec, 0, secretName, true) + } +} + +// WithRolloutConfigMapVolume adds a ConfigMap volume to the Rollout. 
+func WithRolloutConfigMapVolume(configMapName string) RolloutOption { + return func(r *rolloutv1alpha1.Rollout) { + AddConfigMapVolume(&r.Spec.Template.Spec, 0, configMapName) + } +} + +// WithRolloutSecretVolume adds a Secret volume to the Rollout. +func WithRolloutSecretVolume(secretName string) RolloutOption { + return func(r *rolloutv1alpha1.Rollout) { + AddSecretVolume(&r.Spec.Template.Spec, 0, secretName) + } +} + +// WithRolloutAnnotations adds annotations to the Rollout level (where Reloader checks them). +func WithRolloutAnnotations(annotations map[string]string) RolloutOption { + return func(r *rolloutv1alpha1.Rollout) { + if len(annotations) > 0 { + if r.Annotations == nil { + r.Annotations = make(map[string]string) + } + for k, v := range annotations { + r.Annotations[k] = v + } + } + } +} + +// WithRolloutObjectAnnotations adds annotations to the Rollout's top-level metadata. +func WithRolloutObjectAnnotations(annotations map[string]string) RolloutOption { + return func(r *rolloutv1alpha1.Rollout) { + if r.Annotations == nil { + r.Annotations = make(map[string]string) + } + for k, v := range annotations { + r.Annotations[k] = v + } + } +} diff --git a/test/e2e/utils/conditions.go b/test/e2e/utils/conditions.go new file mode 100644 index 000000000..cd374ce39 --- /dev/null +++ b/test/e2e/utils/conditions.go @@ -0,0 +1,188 @@ +package utils + +import ( + "strings" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" +) + +// PodTemplateAccessor extracts PodTemplateSpec from a workload. +type PodTemplateAccessor[T any] func(T) *corev1.PodTemplateSpec + +// AnnotationAccessor extracts annotations from a resource. +type AnnotationAccessor[T any] func(T) map[string]string + +// ContainerAccessor extracts containers from a resource. +type ContainerAccessor[T any] func(T) []corev1.Container + +// StatusAccessor extracts ready status from a resource. 
+type StatusAccessor[T any] func(T) bool + +// UIDAccessor extracts UID from a resource. +type UIDAccessor[T any] func(T) types.UID + +// ValueAccessor extracts a comparable value from a resource. +type ValueAccessor[T any, V comparable] func(T) V + +// HasPodTemplateAnnotation returns a condition that checks for an annotation on the pod template. +func HasPodTemplateAnnotation[T any](accessor PodTemplateAccessor[T], key string) Condition[T] { + return func(obj T) bool { + template := accessor(obj) + if template == nil || template.Annotations == nil { + return false + } + _, ok := template.Annotations[key] + return ok + } +} + +// HasAnnotation returns a condition that checks for an annotation on the resource. +func HasAnnotation[T any](accessor AnnotationAccessor[T], key string) Condition[T] { + return func(obj T) bool { + annotations := accessor(obj) + if annotations == nil { + return false + } + _, ok := annotations[key] + return ok + } +} + +// NoAnnotation returns a condition that checks an annotation is absent. +func NoAnnotation[T any](accessor AnnotationAccessor[T], key string) Condition[T] { + return func(obj T) bool { + annotations := accessor(obj) + if annotations == nil { + return true + } + _, ok := annotations[key] + return !ok + } +} + +// HasEnvVarPrefix returns a condition that checks for an env var with the given prefix. +func HasEnvVarPrefix[T any](accessor ContainerAccessor[T], prefix string) Condition[T] { + return func(obj T) bool { + containers := accessor(obj) + for _, container := range containers { + for _, env := range container.Env { + if strings.HasPrefix(env.Name, prefix) { + return true + } + } + } + return false + } +} + +// IsReady returns a condition that checks if the resource is ready. +func IsReady[T any](accessor StatusAccessor[T]) Condition[T] { + return func(obj T) bool { + return accessor(obj) + } +} + +// HasDifferentUID returns a condition that checks if the UID differs from original. 
+func HasDifferentUID[T any](accessor UIDAccessor[T], originalUID types.UID) Condition[T] { + return func(obj T) bool { + return accessor(obj) != originalUID + } +} + +// HasDifferentValue returns a condition that checks if a value differs from original. +func HasDifferentValue[T any, V comparable](accessor ValueAccessor[T, V], original V) Condition[T] { + return func(obj T) bool { + return accessor(obj) != original + } +} + +// And combines multiple conditions with AND logic. +func And[T any](conditions ...Condition[T]) Condition[T] { + return func(obj T) bool { + for _, cond := range conditions { + if !cond(obj) { + return false + } + } + return true + } +} + +// Or combines multiple conditions with OR logic. +func Or[T any](conditions ...Condition[T]) Condition[T] { + return func(obj T) bool { + for _, cond := range conditions { + if cond(obj) { + return true + } + } + return false + } +} + +// Always returns a condition that always returns true (for existence checks). +func Always[T any]() Condition[T] { + return func(obj T) bool { + return true + } +} + +// IsTriggeredJobForCronJob returns a condition that checks if a Job was triggered +// by Reloader for the specified CronJob (has owner reference and instantiate annotation). +func IsTriggeredJobForCronJob(cronJobName string) Condition[*batchv1.Job] { + return func(job *batchv1.Job) bool { + for _, ownerRef := range job.OwnerReferences { + if ownerRef.Kind == "CronJob" && ownerRef.Name == cronJobName { + if job.Annotations != nil { + if _, ok := job.Annotations["cronjob.kubernetes.io/instantiate"]; ok { + return true + } + } + } + } + return false + } +} + +// SPCPSVersionChanged returns a condition that checks if the SPCPS version has changed +// from the initial version and the SPCPS is mounted. 
+func SPCPSVersionChanged(initialVersion string) Condition[*csiv1.SecretProviderClassPodStatus] { + return func(spcps *csiv1.SecretProviderClassPodStatus) bool { + if !spcps.Status.Mounted || len(spcps.Status.Objects) == 0 { + return false + } + for _, obj := range spcps.Status.Objects { + if obj.Version != initialVersion { + return true + } + } + return false + } +} + +// SPCPSForSPC returns a condition that checks if the SPCPS references a specific +// SecretProviderClass and is mounted. +func SPCPSForSPC(spcName string) Condition[*csiv1.SecretProviderClassPodStatus] { + return func(spcps *csiv1.SecretProviderClassPodStatus) bool { + return spcps.Status.SecretProviderClassName == spcName && spcps.Status.Mounted + } +} + +// SPCPSForPod returns a condition that checks if the SPCPS references a specific +// pod and is mounted. +func SPCPSForPod(podName string) Condition[*csiv1.SecretProviderClassPodStatus] { + return func(spcps *csiv1.SecretProviderClassPodStatus) bool { + return spcps.Status.PodName == podName && spcps.Status.Mounted + } +} + +// SPCPSForPods returns a condition that checks if the SPCPS references any of the +// specified pods and is mounted. 
+func SPCPSForPods(podNames map[string]bool) Condition[*csiv1.SecretProviderClassPodStatus] { + return func(spcps *csiv1.SecretProviderClassPodStatus) bool { + return podNames[spcps.Status.PodName] && spcps.Status.Mounted + } +} diff --git a/test/e2e/utils/csi.go b/test/e2e/utils/csi.go new file mode 100644 index 000000000..3a34ff2a4 --- /dev/null +++ b/test/e2e/utils/csi.go @@ -0,0 +1,338 @@ +package utils + +import ( + "bytes" + "context" + "errors" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/remotecommand" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" + csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" +) + +// CSI Driver constants +const ( + // CSIDriverName is the name of the secrets-store CSI driver + CSIDriverName = "secrets-store.csi.k8s.io" + + // DefaultCSIProvider is the default provider name for testing (Vault) + DefaultCSIProvider = "vault" + + // VaultAddress is the default Vault address in the cluster + VaultAddress = "http://vault.vault:8200" + + // VaultRole is the Kubernetes auth role configured in Vault for testing + VaultRole = "test-role" + + // VaultNamespace is the namespace where Vault is deployed + VaultNamespace = "vault" + + // VaultPodName is the name of the Vault pod (dev mode) + VaultPodName = "vault-0" + + // CSIVolumeName is the default volume name for CSI volumes in tests + CSIVolumeName = "csi-secrets-store" + + // CSIMountPath is the default mount path for CSI volumes in tests + CSIMountPath = "/mnt/secrets-store" + + // CSIRotationPollInterval is how often CSI driver checks for secret changes + CSIRotationPollInterval = 2 * time.Second +) + +// NewCSIClient creates a new CSI client using the default kubeconfig. 
+func NewCSIClient() (csiclient.Interface, error) { + kubeconfig := GetKubeconfig() + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, fmt.Errorf("building config from kubeconfig: %w", err) + } + return NewCSIClientFromConfig(config) +} + +// NewCSIClientFromConfig creates a new CSI client from a rest.Config. +func NewCSIClientFromConfig(config *rest.Config) (csiclient.Interface, error) { + client, err := csiclient.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("creating CSI client: %w", err) + } + return client, nil +} + +// IsCSIDriverInstalled checks if the CSI secrets store driver CRDs are available in the cluster. +// This checks for the SecretProviderClass CRD which is required for CSI tests. +func IsCSIDriverInstalled(ctx context.Context, client csiclient.Interface) bool { + if client == nil { + return false + } + + // Try to list SecretProviderClasses - if CRD doesn't exist, this will fail + _, err := client.SecretsstoreV1().SecretProviderClasses("default").List(ctx, metav1.ListOptions{Limit: 1}) + return err == nil +} + +// IsVaultProviderInstalled checks if Vault CSI provider is installed by checking for the vault-csi-provider DaemonSet. +// This is used to determine if CSI tests with actual volume mounting can run. +func IsVaultProviderInstalled(ctx context.Context, kubeClient kubernetes.Interface) bool { + if kubeClient == nil { + return false + } + + // Check if vault-csi-provider DaemonSet exists in vault namespace + _, err := kubeClient.AppsV1().DaemonSets("vault").Get(ctx, "vault-csi-provider", metav1.GetOptions{}) + return err == nil +} + +// CreateSecretProviderClass creates a SecretProviderClass in the given namespace. +// If params is nil, it creates a Vault-compatible SecretProviderClass with default test settings. 
func CreateSecretProviderClass(ctx context.Context, client csiclient.Interface, namespace, name string, params map[string]string) (
	*csiv1.SecretProviderClass, error,
) {
	if params == nil {
		// Default parameters target the in-cluster dev-mode Vault instance.
		// NOTE(review): the "objects" value is provider-specific YAML; the
		// continuation-line indentation inside the raw string is significant
		// to the Vault provider's YAML parser — confirm it is preserved.
		params = map[string]string{
			"vaultAddress": VaultAddress,
			"roleName": VaultRole,
			"objects": `- objectName: "test-secret"
  secretPath: "secret/data/test"
  secretKey: "username"`,
		}
	}

	spc := &csiv1.SecretProviderClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
		},
		Spec: csiv1.SecretProviderClassSpec{
			Provider: DefaultCSIProvider,
			Parameters: params,
		},
	}

	created, err := client.SecretsstoreV1().SecretProviderClasses(namespace).Create(ctx, spc, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("creating SecretProviderClass %s/%s: %w", namespace, name, err)
	}
	return created, nil
}

// CreateSecretProviderClassWithSecret creates a SecretProviderClass that fetches a specific secret from Vault.
// secretPath should be like "secret/mysecret" (the function converts it to KV v2 format "secret/data/mysecret").
// secretKey is the key within that secret to fetch.
func CreateSecretProviderClassWithSecret(ctx context.Context, client csiclient.Interface, namespace, name, secretPath, secretKey string) (
	*csiv1.SecretProviderClass, error,
) {
	// Rewrite "secret/<x>" to the KV v2 read path "secret/data/<x>", but leave
	// paths that already contain "secret/data/" (or other mounts) untouched.
	kvV2Path := secretPath
	if strings.HasPrefix(secretPath, "secret/") && !strings.HasPrefix(secretPath, "secret/data/") {
		kvV2Path = strings.Replace(secretPath, "secret/", "secret/data/", 1)
	}

	// Build the provider "objects" YAML for a single object; the objectName is
	// set to the secret key so the mounted file is named after the key.
	params := map[string]string{
		"vaultAddress": VaultAddress,
		"roleName": VaultRole,
		"objects": fmt.Sprintf(
			`- objectName: "%s"
  secretPath: "%s"
  secretKey: "%s"`, secretKey, kvV2Path, secretKey,
		),
	}
	return CreateSecretProviderClass(ctx, client, namespace, name, params)
}

// DeleteSecretProviderClass deletes a SecretProviderClass by name.
+func DeleteSecretProviderClass(ctx context.Context, client csiclient.Interface, namespace, name string) error { + err := client.SecretsstoreV1().SecretProviderClasses(namespace).Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("deleting SecretProviderClass %s/%s: %w", namespace, name, err) + } + return nil +} + +// UpdateSecretProviderClassPodStatusLabels updates only the labels on a SecretProviderClassPodStatus. +// This should NOT trigger a reload (used for negative testing to verify Reloader ignores label-only changes). +func UpdateSecretProviderClassPodStatusLabels(ctx context.Context, client csiclient.Interface, namespace, name string, labels map[string]string) error { + spcps, err := client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("getting SecretProviderClassPodStatus %s/%s: %w", namespace, name, err) + } + + if spcps.Labels == nil { + spcps.Labels = make(map[string]string) + } + for k, v := range labels { + spcps.Labels[k] = v + } + + _, err = client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace).Update(ctx, spcps, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("updating SecretProviderClassPodStatus labels %s/%s: %w", namespace, name, err) + } + return nil +} + +// ============================================================================= +// Vault Integration Helpers +// ============================================================================= + +// CreateVaultSecret creates a new secret in Vault. +// secretPath should be like "secret/test" (without "data" prefix - it's added automatically). +// data is a map of key-value pairs to store in the secret. 
+func CreateVaultSecret(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, secretPath string, data map[string]string) error { + return UpdateVaultSecret(ctx, kubeClient, restConfig, secretPath, data) +} + +// UpdateVaultSecret updates a secret in Vault. This triggers the CSI driver to +// sync the new secret version, which creates/updates the SecretProviderClassPodStatus. +// secretPath should be like "secret/test" (without "data" prefix - it's added automatically). +// data is a map of key-value pairs to store in the secret. +func UpdateVaultSecret(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, secretPath string, data map[string]string) error { + args := []string{"kv", "put", secretPath} + for k, v := range data { + args = append(args, fmt.Sprintf("%s=%s", k, v)) + } + + if err := execInVaultPod(ctx, kubeClient, restConfig, args); err != nil { + return fmt.Errorf("updating Vault secret %s: %w", secretPath, err) + } + return nil +} + +// DeleteVaultSecret deletes a secret from Vault. +// secretPath should be like "secret/test". +func DeleteVaultSecret(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, secretPath string) error { + args := []string{"kv", "metadata", "delete", secretPath} + if err := execInVaultPod(ctx, kubeClient, restConfig, args); err != nil { + if strings.Contains(err.Error(), "No value found") { + return nil + } + return fmt.Errorf("deleting Vault secret %s: %w", secretPath, err) + } + return nil +} + +// execInVaultPod executes a vault command in the Vault pod. +func execInVaultPod(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, args []string) error { + req := kubeClient.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(VaultPodName). + Namespace(VaultNamespace). + SubResource("exec"). 
+ VersionedParams( + &corev1.PodExecOptions{ + Container: "vault", + Command: append([]string{"vault"}, args...), + Stdout: true, + Stderr: true, + }, scheme.ParameterCodec, + ) + + exec, err := remotecommand.NewSPDYExecutor(restConfig, "POST", req.URL()) + if err != nil { + return fmt.Errorf("creating executor: %w", err) + } + + var stdout, stderr bytes.Buffer + err = exec.StreamWithContext( + ctx, remotecommand.StreamOptions{ + Stdout: &stdout, + Stderr: &stderr, + }, + ) + if err != nil { + return fmt.Errorf("executing command: %w (stderr: %s)", err, stderr.String()) + } + + return nil +} + +// WaitForSPCPSVersionChange waits for the SecretProviderClassPodStatus version to change +// from the initial version using watches. This is used after updating a Vault secret to +// wait for CSI driver to sync the new version. +func WaitForSPCPSVersionChange(ctx context.Context, client csiclient.Interface, namespace, spcpsName, initialVersion string, timeout time.Duration) error { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace).Watch(ctx, opts) + } + + _, err := WatchUntil(ctx, watchFunc, spcpsName, SPCPSVersionChanged(initialVersion), timeout) + if errors.Is(err, ErrWatchTimeout) { + return fmt.Errorf("timeout waiting for SecretProviderClassPodStatus %s/%s version to change from %s", namespace, spcpsName, initialVersion) + } + return err +} + +// FindSPCPSForDeployment finds the SecretProviderClassPodStatus created by CSI driver +// for pods of a given deployment using watches. Returns the first matching SPCPS name. 
+func FindSPCPSForDeployment(ctx context.Context, csiClient csiclient.Interface, kubeClient kubernetes.Interface, namespace, deploymentName string, timeout time.Duration) ( + string, error, +) { + pods, err := kubeClient.CoreV1().Pods(namespace).List( + ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", deploymentName), + }, + ) + if err != nil { + return "", fmt.Errorf("listing pods for deployment %s: %w", deploymentName, err) + } + + podNames := make(map[string]bool) + for _, pod := range pods.Items { + podNames[pod.Name] = true + } + + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return csiClient.SecretsstoreV1().SecretProviderClassPodStatuses(namespace).Watch(ctx, opts) + } + + spcps, err := WatchUntil(ctx, watchFunc, "", SPCPSForPods(podNames), timeout) + if errors.Is(err, ErrWatchTimeout) { + return "", fmt.Errorf("timeout finding SecretProviderClassPodStatus for deployment %s/%s", namespace, deploymentName) + } + if err != nil { + return "", err + } + return spcps.Name, nil +} + +// FindSPCPSForSPC finds the SecretProviderClassPodStatus created by CSI driver +// that references a specific SecretProviderClass using watches. Returns the first matching SPCPS name. +func FindSPCPSForSPC(ctx context.Context, csiClient csiclient.Interface, namespace, spcName string, timeout time.Duration) (string, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return csiClient.SecretsstoreV1().SecretProviderClassPodStatuses(namespace).Watch(ctx, opts) + } + + spcps, err := WatchUntil(ctx, watchFunc, "", SPCPSForSPC(spcName), timeout) + if errors.Is(err, ErrWatchTimeout) { + return "", fmt.Errorf("timeout finding SecretProviderClassPodStatus for SPC %s/%s", namespace, spcName) + } + if err != nil { + return "", err + } + return spcps.Name, nil +} + +// GetSPCPSVersion gets the current version string from a SecretProviderClassPodStatus. 
+// Returns a comma-joined string of the versions of all objects in the status,
+func GetSPCPSVersion(ctx context.Context, client csiclient.Interface, namespace, name string) (string, error) {
+	spcps, err := client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace).Get(ctx, name, metav1.GetOptions{})
+	if err != nil {
+		return "", fmt.Errorf("getting SecretProviderClassPodStatus %s/%s: %w", namespace, name, err)
+	}
+	if len(spcps.Status.Objects) == 0 {
+		return "", nil
+	}
+	var versions []string
+	for _, obj := range spcps.Status.Objects {
+		versions = append(versions, obj.Version)
+	}
+	return strings.Join(versions, ","), nil
+}
diff --git a/test/e2e/utils/helm.go b/test/e2e/utils/helm.go
new file mode 100644
index 000000000..a2ba2c9a9
--- /dev/null
+++ b/test/e2e/utils/helm.go
@@ -0,0 +1,212 @@
+package utils
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+// Helm-related constants.
+const (
+	// DefaultTestImage is the default image to test if RELOADER_IMAGE is not set.
+	DefaultTestImage = "ghcr.io/stakater/reloader:test"
+
+	// DefaultHelmReleaseName is the Helm release name for Reloader.
+	DefaultHelmReleaseName = "reloader"
+
+	// DefaultHelmChartPath is the path to the Helm chart relative to project root.
+	DefaultHelmChartPath = "deployments/kubernetes/chart/reloader"
+
+	// StakaterEnvVarPrefix is the prefix for Stakater environment variables.
+	StakaterEnvVarPrefix = "STAKATER_"
+)
+
+// DeployOptions configures how Reloader is deployed.
+type DeployOptions struct {
+	// Namespace to deploy Reloader into.
+	Namespace string
+
+	// Image is the full image reference (e.g., "ghcr.io/stakater/reloader:test").
+	Image string
+
+	// Values are additional Helm values to set (key=value pairs).
+	Values map[string]string
+
+	// ReleaseName is the Helm release name. Defaults to DefaultHelmReleaseName.
+	ReleaseName string
+
+	// Timeout for Helm operations. Defaults to "120s".
+ Timeout string +} + +// DeployReloader deploys Reloader using Helm with the specified options. +func DeployReloader(opts DeployOptions) error { + projectDir, err := GetProjectDir() + if err != nil { + return fmt.Errorf("getting project dir: %w", err) + } + + if opts.ReleaseName == "" { + opts.ReleaseName = DefaultHelmReleaseName + } + if opts.Timeout == "" { + opts.Timeout = "120s" + } + if opts.Image == "" { + opts.Image = GetTestImage() + } + + cleanupClusterResources(opts.ReleaseName) + + chartPath := filepath.Join(projectDir, DefaultHelmChartPath) + + args := []string{ + "upgrade", "--install", opts.ReleaseName, + chartPath, + "--namespace", opts.Namespace, + "--create-namespace", + "--reset-values", + "--set", fmt.Sprintf("image.repository=%s", GetImageRepository(opts.Image)), + "--set", fmt.Sprintf("image.tag=%s", GetImageTag(opts.Image)), + "--set", "image.pullPolicy=IfNotPresent", + "--wait", + "--timeout", opts.Timeout, + } + + for key, value := range opts.Values { + args = append(args, "--set", fmt.Sprintf("%s=%s", key, value)) + } + + cmd := exec.Command("helm", args...) + output, err := Run(cmd) + if err != nil { + return fmt.Errorf("helm install failed: %s: %w", output, err) + } + + return nil +} + +// UndeployReloader removes the Reloader Helm release and cleans up cluster-scoped resources. +// This function waits for all resources to be fully deleted to prevent race conditions +// between test suites. 
+func UndeployReloader(namespace, releaseName string) error { + if releaseName == "" { + releaseName = DefaultHelmReleaseName + } + + cmd := exec.Command("helm", "uninstall", releaseName, "--namespace", namespace, "--ignore-not-found", "--wait") + output, err := Run(cmd) + if err != nil { + return fmt.Errorf("helm uninstall failed: %s: %w", output, err) + } + + clusterResources := []struct { + kind string + name string + }{ + {"clusterrole", releaseName + "-reloader-role"}, + {"clusterrolebinding", releaseName + "-reloader-role-binding"}, + } + + for _, res := range clusterResources { + cmd := exec.Command("kubectl", "delete", res.kind, res.name, "--ignore-not-found", "--wait=true") + _, _ = Run(cmd) + } + + waitForReloaderGone(namespace, releaseName) + + return nil +} + +// waitForReloaderGone waits for the Reloader deployment to be fully removed. +func waitForReloaderGone(namespace, releaseName string) { + deploymentName := ReloaderDeploymentName(releaseName) + + for i := 0; i < 30; i++ { + cmd := exec.Command("kubectl", "get", "deployment", deploymentName, "-n", namespace, "--ignore-not-found", "-o", "name") + output, _ := Run(cmd) + if strings.TrimSpace(output) == "" { + return + } + time.Sleep(1 * time.Second) + } +} + +// cleanupClusterResources removes cluster-scoped resources that might be left over +// from a previous test run. This is called before deploying to ensure clean state. 
+func cleanupClusterResources(releaseName string) { + if releaseName == "" { + releaseName = DefaultHelmReleaseName + } + + clusterResources := []struct { + kind string + name string + }{ + {"clusterrole", releaseName + "-reloader-role"}, + {"clusterrolebinding", releaseName + "-reloader-role-binding"}, + } + + for _, res := range clusterResources { + cmd := exec.Command("kubectl", "delete", res.kind, res.name, "--ignore-not-found", "--wait=true") + _, _ = Run(cmd) + } + + time.Sleep(500 * time.Millisecond) +} + +// GetTestImage returns the test image from environment or the default. +func GetTestImage() string { + if img := os.Getenv("RELOADER_IMAGE"); img != "" { + return img + } + return DefaultTestImage +} + +// GetImageRepository extracts the repository (without tag) from a full image reference. +// Example: "ghcr.io/stakater/reloader:v1.0.0" -> "ghcr.io/stakater/reloader" +func GetImageRepository(image string) string { + for i := len(image) - 1; i >= 0; i-- { + if image[i] == ':' { + return image[:i] + } + if image[i] == '/' { + break + } + } + return image +} + +// GetImageTag extracts the tag from a full image reference. +// Example: "ghcr.io/stakater/reloader:v1.0.0" -> "v1.0.0" +// Returns "latest" if no tag is found. +func GetImageTag(image string) string { + for i := len(image) - 1; i >= 0; i-- { + if image[i] == ':' { + return image[i+1:] + } + if image[i] == '/' { + break + } + } + return "latest" +} + +// ReloaderDeploymentName returns the full deployment name for Reloader. +func ReloaderDeploymentName(releaseName string) string { + if releaseName == "" { + releaseName = DefaultHelmReleaseName + } + return releaseName + "-reloader" +} + +// ReloaderPodSelector returns the label selector for Reloader pods. 
+func ReloaderPodSelector(releaseName string) string { + if releaseName == "" { + releaseName = DefaultHelmReleaseName + } + return "app=" + releaseName + "-reloader" +} diff --git a/test/e2e/utils/helm_test.go b/test/e2e/utils/helm_test.go new file mode 100644 index 000000000..63a3e3fad --- /dev/null +++ b/test/e2e/utils/helm_test.go @@ -0,0 +1,157 @@ +package utils + +import ( + "testing" +) + +func TestGetImageRepository(t *testing.T) { + tests := []struct { + name string + image string + expected string + }{ + { + name: "full image with tag", + image: "ghcr.io/stakater/reloader:v1.0.0", + expected: "ghcr.io/stakater/reloader", + }, + { + name: "image with latest tag", + image: "nginx:latest", + expected: "nginx", + }, + { + name: "image without tag", + image: "ghcr.io/stakater/reloader", + expected: "ghcr.io/stakater/reloader", + }, + { + name: "image with digest (not fully supported)", + image: "nginx@sha256:abc123", + expected: "nginx@sha256", + }, + { + name: "simple image name", + image: "nginx", + expected: "nginx", + }, + { + name: "image with port in registry", + image: "localhost:5000/myimage:v1", + expected: "localhost:5000/myimage", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetImageRepository(tt.image) + if result != tt.expected { + t.Errorf("GetImageRepository(%q) = %q, want %q", tt.image, result, tt.expected) + } + }) + } +} + +func TestGetImageTag(t *testing.T) { + tests := []struct { + name string + image string + expected string + }{ + { + name: "full image with tag", + image: "ghcr.io/stakater/reloader:v1.0.0", + expected: "v1.0.0", + }, + { + name: "image with latest tag", + image: "nginx:latest", + expected: "latest", + }, + { + name: "image without tag", + image: "ghcr.io/stakater/reloader", + expected: "latest", + }, + { + name: "simple image name", + image: "nginx", + expected: "latest", + }, + { + name: "image with port in registry", + image: "localhost:5000/myimage:v1", + expected: "v1", + 
}, + { + name: "tag with sha", + image: "myimage:sha-abc123", + expected: "sha-abc123", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetImageTag(tt.image) + if result != tt.expected { + t.Errorf("GetImageTag(%q) = %q, want %q", tt.image, result, tt.expected) + } + }) + } +} + +func TestReloaderDeploymentName(t *testing.T) { + tests := []struct { + name string + releaseName string + expected string + }{ + { + name: "default release name", + releaseName: "", + expected: "reloader-reloader", + }, + { + name: "custom release name", + releaseName: "my-reloader", + expected: "my-reloader-reloader", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ReloaderDeploymentName(tt.releaseName) + if result != tt.expected { + t.Errorf("ReloaderDeploymentName(%q) = %q, want %q", tt.releaseName, result, tt.expected) + } + }) + } +} + +func TestReloaderPodSelector(t *testing.T) { + tests := []struct { + name string + releaseName string + expected string + }{ + { + name: "default release name", + releaseName: "", + expected: "app=reloader-reloader", + }, + { + name: "custom release name", + releaseName: "my-reloader", + expected: "app=my-reloader-reloader", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ReloaderPodSelector(tt.releaseName) + if result != tt.expected { + t.Errorf("ReloaderPodSelector(%q) = %q, want %q", tt.releaseName, result, tt.expected) + } + }) + } +} diff --git a/test/e2e/utils/openshift.go b/test/e2e/utils/openshift.go new file mode 100644 index 000000000..b2ec1d91e --- /dev/null +++ b/test/e2e/utils/openshift.go @@ -0,0 +1,23 @@ +package utils + +import ( + "k8s.io/client-go/discovery" +) + +// HasDeploymentConfigSupport checks if the cluster has OpenShift DeploymentConfig API available. 
+func HasDeploymentConfigSupport(discoveryClient discovery.DiscoveryInterface) bool { + _, apiLists, err := discoveryClient.ServerGroupsAndResources() + if err != nil { + return false + } + + for _, apiList := range apiLists { + for _, resource := range apiList.APIResources { + if resource.Kind == "DeploymentConfig" { + return true + } + } + } + + return false +} diff --git a/test/e2e/utils/podspec.go b/test/e2e/utils/podspec.go new file mode 100644 index 000000000..d8a6dd513 --- /dev/null +++ b/test/e2e/utils/podspec.go @@ -0,0 +1,267 @@ +package utils + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" +) + +// AddEnvFromSource adds ConfigMap or Secret envFrom to a container. +func AddEnvFromSource(spec *corev1.PodSpec, containerIdx int, name string, isSecret bool) { + if containerIdx >= len(spec.Containers) { + return + } + source := corev1.EnvFromSource{} + if isSecret { + source.SecretRef = &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + } + } else { + source.ConfigMapRef = &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + } + } + spec.Containers[containerIdx].EnvFrom = append(spec.Containers[containerIdx].EnvFrom, source) +} + +// AddVolume adds a volume and mount to a container. +func AddVolume(spec *corev1.PodSpec, containerIdx int, volume corev1.Volume, mountPath string) { + spec.Volumes = append(spec.Volumes, volume) + if containerIdx < len(spec.Containers) { + spec.Containers[containerIdx].VolumeMounts = append( + spec.Containers[containerIdx].VolumeMounts, + corev1.VolumeMount{Name: volume.Name, MountPath: mountPath}, + ) + } +} + +// AddConfigMapVolume adds ConfigMap volume and mount. 
+func AddConfigMapVolume(spec *corev1.PodSpec, containerIdx int, name string) { + AddVolume(spec, containerIdx, corev1.Volume{ + Name: "cm-" + name, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + }, "/etc/config/"+name) +} + +// AddSecretVolume adds Secret volume and mount. +func AddSecretVolume(spec *corev1.PodSpec, containerIdx int, name string) { + AddVolume(spec, containerIdx, corev1.Volume{ + Name: "secret-" + name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{SecretName: name}, + }, + }, "/etc/secrets/"+name) +} + +// AddProjectedVolume adds projected volume with ConfigMap and/or Secret. +func AddProjectedVolume(spec *corev1.PodSpec, containerIdx int, cmName, secretName string) { + sources := []corev1.VolumeProjection{} + if cmName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + AddVolume(spec, containerIdx, corev1.Volume{ + Name: "projected-config", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{Sources: sources}, + }, + }, "/etc/projected") +} + +// AddKeyRef adds env var from ConfigMap or Secret key. 
+func AddKeyRef(spec *corev1.PodSpec, containerIdx int, resourceName, key, envVarName string, isSecret bool) { + if containerIdx >= len(spec.Containers) { + return + } + envVar := corev1.EnvVar{Name: envVarName} + if isSecret { + envVar.ValueFrom = &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: resourceName}, + Key: key, + }, + } + } else { + envVar.ValueFrom = &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: resourceName}, + Key: key, + }, + } + } + spec.Containers[containerIdx].Env = append(spec.Containers[containerIdx].Env, envVar) +} + +// AddCSIVolume adds CSI volume referencing SecretProviderClass. +func AddCSIVolume(spec *corev1.PodSpec, containerIdx int, spcName string) { + volumeName := "csi-" + spcName + mountPath := "/mnt/secrets-store/" + spcName + spec.Volumes = append(spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: CSIDriverName, + ReadOnly: ptr.To(true), + VolumeAttributes: map[string]string{ + "secretProviderClass": spcName, + }, + }, + }, + }) + if containerIdx < len(spec.Containers) { + spec.Containers[containerIdx].VolumeMounts = append( + spec.Containers[containerIdx].VolumeMounts, + corev1.VolumeMount{Name: volumeName, MountPath: mountPath, ReadOnly: true}, + ) + } +} + +// AddInitContainer adds init container with optional envFrom references. 
+func AddInitContainer(spec *corev1.PodSpec, cmName, secretName string) { + init := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + if cmName != "" { + init.EnvFrom = append(init.EnvFrom, corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + init.EnvFrom = append(init.EnvFrom, corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + spec.InitContainers = append(spec.InitContainers, init) +} + +// AddInitContainerWithVolumes adds init container with volume mounts. +func AddInitContainerWithVolumes(spec *corev1.PodSpec, cmName, secretName string) { + init := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + if cmName != "" { + volumeName := "init-cm-" + cmName + spec.Volumes = append(spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }, + }) + init.VolumeMounts = append(init.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/init-config/" + cmName, + }) + } + if secretName != "" { + volumeName := "init-secret-" + secretName + spec.Volumes = append(spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{SecretName: secretName}, + }, + }) + init.VolumeMounts = append(init.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/init-secrets/" + secretName, + }) + } + spec.InitContainers = append(spec.InitContainers, init) +} + +// ApplyWorkloadConfig applies all WorkloadConfig settings to a PodTemplateSpec. +// This includes both pod template annotations and pod spec configuration. 
+func ApplyWorkloadConfig(template *corev1.PodTemplateSpec, cfg WorkloadConfig) { + if len(cfg.PodTemplateAnnotations) > 0 { + if template.Annotations == nil { + template.Annotations = make(map[string]string) + } + for k, v := range cfg.PodTemplateAnnotations { + template.Annotations[k] = v + } + } + + spec := &template.Spec + if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" { + AddEnvFromSource(spec, 0, cfg.ConfigMapName, false) + } + if cfg.UseSecretEnvFrom && cfg.SecretName != "" { + AddEnvFromSource(spec, 0, cfg.SecretName, true) + } + if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" { + AddConfigMapVolume(spec, 0, cfg.ConfigMapName) + } + if cfg.UseSecretVolume && cfg.SecretName != "" { + AddSecretVolume(spec, 0, cfg.SecretName) + } + if cfg.UseProjectedVolume { + AddProjectedVolume(spec, 0, cfg.ConfigMapName, cfg.SecretName) + } + if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" { + key := cfg.ConfigMapKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "CONFIG_VAR" + } + AddKeyRef(spec, 0, cfg.ConfigMapName, key, envVar, false) + } + if cfg.UseSecretKeyRef && cfg.SecretName != "" { + key := cfg.SecretKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "SECRET_VAR" + } + AddKeyRef(spec, 0, cfg.SecretName, key, envVar, true) + } + if cfg.UseCSIVolume && cfg.SPCName != "" { + AddCSIVolume(spec, 0, cfg.SPCName) + } + if cfg.UseInitContainer { + AddInitContainer(spec, cfg.ConfigMapName, cfg.SecretName) + } + if cfg.UseInitContainerVolume { + AddInitContainerWithVolumes(spec, cfg.ConfigMapName, cfg.SecretName) + } + if cfg.UseInitContainerCSI && cfg.SPCName != "" { + AddCSIVolume(spec, 0, cfg.SPCName) + } + if cfg.MultipleContainers > 1 { + for i := 1; i < cfg.MultipleContainers; i++ { + spec.Containers = append(spec.Containers, corev1.Container{ + Name: fmt.Sprintf("container-%d", i), + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }) + 
} + } +} diff --git a/test/e2e/utils/rand.go b/test/e2e/utils/rand.go new file mode 100644 index 000000000..601b14ab3 --- /dev/null +++ b/test/e2e/utils/rand.go @@ -0,0 +1,26 @@ +package utils + +import ( + "math/rand" + "time" +) + +const letters = "abcdefghijklmnopqrstuvwxyz" + +var randSource = rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec + +// RandSeq generates a random lowercase string of length n. +// This is useful for creating unique resource names in tests. +func RandSeq(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letters[randSource.Intn(len(letters))] + } + return string(b) +} + +// RandName generates a unique name with the given prefix. +// Format: prefix-xxxxx where x is a random lowercase letter. +func RandName(prefix string) string { + return prefix + "-" + RandSeq(5) +} diff --git a/test/e2e/utils/rand_test.go b/test/e2e/utils/rand_test.go new file mode 100644 index 000000000..6dea55399 --- /dev/null +++ b/test/e2e/utils/rand_test.go @@ -0,0 +1,122 @@ +package utils + +import ( + "regexp" + "testing" +) + +func TestRandSeq(t *testing.T) { + tests := []struct { + name string + length int + }{ + {"length 0", 0}, + {"length 1", 1}, + {"length 5", 5}, + {"length 10", 10}, + {"length 100", 100}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := RandSeq(tt.length) + + if len(result) != tt.length { + t.Errorf("RandSeq(%d) returned string of length %d, want %d", + tt.length, len(result), tt.length) + } + + if tt.length > 0 { + matched, _ := regexp.MatchString("^[a-z]+$", result) + if !matched { + t.Errorf("RandSeq(%d) = %q, contains non-lowercase letters", tt.length, result) + } + } + }) + } +} + +func TestRandSeqRandomness(t *testing.T) { + const iterations = 10 + const length = 20 + + seen := make(map[string]bool) + for i := 0; i < iterations; i++ { + s := RandSeq(length) + if seen[s] { + t.Errorf("RandSeq generated duplicate: %q", s) + } + seen[s] = true + } + + if len(seen) != 
iterations { + t.Errorf("Expected %d unique strings, got %d", iterations, len(seen)) + } +} + +func TestRandName(t *testing.T) { + tests := []struct { + name string + prefix string + }{ + {"deploy prefix", "deploy"}, + {"cm prefix", "cm"}, + {"secret prefix", "secret"}, + {"test-app prefix", "test-app"}, + {"empty prefix", ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := RandName(tt.prefix) + + expectedPrefix := tt.prefix + "-" + if len(result) <= len(expectedPrefix) { + t.Errorf("RandName(%q) = %q, too short", tt.prefix, result) + return + } + + if result[:len(expectedPrefix)] != expectedPrefix { + t.Errorf("RandName(%q) = %q, doesn't start with %q", + tt.prefix, result, expectedPrefix) + } + + suffix := result[len(expectedPrefix):] + if len(suffix) != 5 { + t.Errorf("RandName(%q) suffix length = %d, want 5", tt.prefix, len(suffix)) + } + + matched, _ := regexp.MatchString("^[a-z]{5}$", suffix) + if !matched { + t.Errorf("RandName(%q) suffix = %q, should be 5 lowercase letters", + tt.prefix, suffix) + } + }) + } +} + +func TestRandNameUniqueness(t *testing.T) { + const prefix = "test" + const iterations = 100 + + seen := make(map[string]bool) + for i := 0; i < iterations; i++ { + name := RandName(prefix) + if seen[name] { + t.Errorf("RandName generated duplicate: %q", name) + } + seen[name] = true + } +} + +func TestRandNameKubernetesCompatibility(t *testing.T) { + prefixes := []string{"deploy", "cm", "secret", "test-app", "my-resource"} + k8sNamePattern := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`) + + for _, prefix := range prefixes { + name := RandName(prefix) + if !k8sNamePattern.MatchString(name) { + t.Errorf("RandName(%q) = %q is not a valid Kubernetes name", prefix, name) + } + } +} diff --git a/test/e2e/utils/resources.go b/test/e2e/utils/resources.go new file mode 100644 index 000000000..47ca2b03f --- /dev/null +++ b/test/e2e/utils/resources.go @@ -0,0 +1,1014 @@ +package utils + +import ( + "context" + 
"fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/utils/ptr" +) + +const ( + // DefaultImage is the default container image used for test workloads. + DefaultImage = "busybox:1.36" + // DefaultCommand is the default command for test containers. + DefaultCommand = "sleep 3600" +) + +// CreateNamespace creates a namespace with the given name. +func CreateNamespace(ctx context.Context, client kubernetes.Interface, name string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + _, err := client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + return err +} + +// CreateNamespaceWithLabels creates a namespace with the given name and labels. +func CreateNamespaceWithLabels(ctx context.Context, client kubernetes.Interface, name string, labels map[string]string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + } + _, err := client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + return err +} + +// DeleteNamespace deletes the namespace with the given name. +func DeleteNamespace(ctx context.Context, client kubernetes.Interface, name string) error { + return client.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{}) +} + +// CreateConfigMap creates a ConfigMap with the given name, data, and optional annotations. 
+func CreateConfigMap(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string, annotations map[string]string) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Data: data, + } + return client.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) +} + +// CreateConfigMapWithLabels creates a ConfigMap with the given name, data, labels, and optional annotations. +func CreateConfigMapWithLabels(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string, labels, annotations map[string]string) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Data: data, + } + return client.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) +} + +// CreateSecret creates a Secret with the given name, data, and optional annotations. +func CreateSecret(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string][]byte, annotations map[string]string) (*corev1.Secret, error) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Data: data, + } + return client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) +} + +// UpdateConfigMap updates a ConfigMap's data. +func UpdateConfigMap(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string) error { + cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + cm.Data = data + _, err = client.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) + return err +} + +// UpdateConfigMapLabels updates a ConfigMap's labels. 
+func UpdateConfigMapLabels(ctx context.Context, client kubernetes.Interface, namespace, name string, labels map[string]string) error { + cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + if cm.Labels == nil { + cm.Labels = make(map[string]string) + } + for k, v := range labels { + cm.Labels[k] = v + } + _, err = client.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) + return err +} + +// UpdateSecret updates a Secret's data. +func UpdateSecret(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string][]byte) error { + secret, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + secret.Data = data + _, err = client.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) + return err +} + +// UpdateSecretLabels updates a Secret's labels. +func UpdateSecretLabels(ctx context.Context, client kubernetes.Interface, namespace, name string, labels map[string]string) error { + secret, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + if secret.Labels == nil { + secret.Labels = make(map[string]string) + } + for k, v := range labels { + secret.Labels[k] = v + } + _, err = client.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) + return err +} + +// stringToByteMap converts a string map to a byte map for Secret data. +func stringToByteMap(data map[string]string) map[string][]byte { + result := make(map[string][]byte) + for k, v := range data { + result[k] = []byte(v) + } + return result +} + +// CreateSecretFromStrings creates a Secret with string data (convenience wrapper). 
+func CreateSecretFromStrings(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string, annotations map[string]string) (*corev1.Secret, error) { + return CreateSecret(ctx, client, namespace, name, stringToByteMap(data), annotations) +} + +// UpdateSecretFromStrings updates a Secret's data using string values. +func UpdateSecretFromStrings(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string) error { + return UpdateSecret(ctx, client, namespace, name, stringToByteMap(data)) +} + +// DeleteConfigMap deletes a ConfigMap. +func DeleteConfigMap(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.CoreV1().ConfigMaps(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// DeleteSecret deletes a Secret. +func DeleteSecret(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.CoreV1().Secrets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// DeploymentOption is a functional option for configuring a Deployment. +type DeploymentOption func(*appsv1.Deployment) + +// CreateDeployment creates a Deployment with the given options. +func CreateDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...DeploymentOption) (*appsv1.Deployment, error) { + deploy := baseDeploymentResource(namespace, name) + for _, opt := range opts { + opt(deploy) + } + return client.AppsV1().Deployments(namespace).Create(ctx, deploy, metav1.CreateOptions{}) +} + +// WithAnnotations adds annotations to the Deployment metadata. +func WithAnnotations(annotations map[string]string) DeploymentOption { + return func(d *appsv1.Deployment) { + if d.Annotations == nil { + d.Annotations = make(map[string]string) + } + for k, v := range annotations { + d.Annotations[k] = v + } + } +} + +// WithConfigMapEnvFrom adds an envFrom reference to a ConfigMap. 
+func WithConfigMapEnvFrom(name string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithSecretEnvFrom adds an envFrom reference to a Secret. +func WithSecretEnvFrom(name string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithConfigMapVolume adds a volume mount for a ConfigMap. +func WithConfigMapVolume(name string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := fmt.Sprintf("cm-%s", name) + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + }) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append( + d.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/config/%s", name), + }, + ) + } +} + +// WithSecretVolume adds a volume mount for a Secret. 
+func WithSecretVolume(name string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := fmt.Sprintf("secret-%s", name) + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: name, + }, + }, + }) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append( + d.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/secrets/%s", name), + }, + ) + } +} + +// WithProjectedVolume adds a projected volume with ConfigMap and/or Secret sources. +func WithProjectedVolume(cmName, secretName string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := "projected-config" + sources := []corev1.VolumeProjection{} + + if cmName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append( + d.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/projected", + }, + ) + } +} + +// WithInitContainer adds an init container that references ConfigMap and/or Secret. 
+func WithInitContainer(cmName, secretName string) DeploymentOption { + return func(d *appsv1.Deployment) { + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + + if cmName != "" { + initContainer.EnvFrom = append(initContainer.EnvFrom, corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + initContainer.EnvFrom = append(initContainer.EnvFrom, corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, initContainer) + } +} + +// WithMultipleContainers adds additional containers to the pod. +func WithMultipleContainers(count int) DeploymentOption { + return func(d *appsv1.Deployment) { + for i := 1; i < count; i++ { + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, corev1.Container{ + Name: fmt.Sprintf("container-%d", i), + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }) + } + } +} + +// WithMultipleContainersAndEnv creates two containers, each with a different ConfigMap envFrom. 
+func WithMultipleContainersAndEnv(cm1Name, cm2Name string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].EnvFrom = append(d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cm1Name}, + }, + }) + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, corev1.Container{ + Name: "container-1", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cm2Name}, + }, + }, + }, + }) + } +} + +// WithReplicas sets the number of replicas. +func WithReplicas(replicas int32) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Replicas = ptr.To(replicas) + } +} + +// WithConfigMapKeyRef adds a valueFrom.configMapKeyRef env var to the container. +func WithConfigMapKeyRef(cmName, key, envVarName string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].Env = append( + d.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithSecretKeyRef adds a valueFrom.secretKeyRef env var to the container. 
+func WithSecretKeyRef(secretName, key, envVarName string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].Env = append( + d.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithPodTemplateAnnotations adds annotations to the pod template metadata (not deployment metadata). +func WithPodTemplateAnnotations(annotations map[string]string) DeploymentOption { + return func(d *appsv1.Deployment) { + if d.Spec.Template.Annotations == nil { + d.Spec.Template.Annotations = make(map[string]string) + } + for k, v := range annotations { + d.Spec.Template.Annotations[k] = v + } + } +} + +// WithInitContainerVolume adds an init container with ConfigMap/Secret volume mounts. +func WithInitContainerVolume(cmName, secretName string) DeploymentOption { + return func(d *appsv1.Deployment) { + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + + if cmName != "" { + volumeName := fmt.Sprintf("init-cm-%s", cmName) + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }, + }) + initContainer.VolumeMounts = append(initContainer.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/init-config/%s", cmName), + }) + } + if secretName != "" { + volumeName := fmt.Sprintf("init-secret-%s", secretName) + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }) + 
initContainer.VolumeMounts = append(initContainer.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/init-secrets/%s", secretName), + }) + } + + d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, initContainer) + } +} + +// WithInitContainerProjectedVolume adds an init container with projected volume. +func WithInitContainerProjectedVolume(cmName, secretName string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := "init-projected-config" + sources := []corev1.VolumeProjection{} + + if cmName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }) + + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/etc/init-projected", + }, + }, + } + + d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, initContainer) + } +} + +// WithCSIVolume adds a CSI volume referencing a SecretProviderClass to a Deployment. 
+func WithCSIVolume(spcName string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := csiVolumeName(spcName) + mountPath := csiMountPath(spcName) + + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: CSIDriverName, + ReadOnly: ptr.To(true), + VolumeAttributes: map[string]string{ + "secretProviderClass": spcName, + }, + }, + }, + }) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append( + d.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: mountPath, + ReadOnly: true, + }, + ) + } +} + +// WithInitContainerCSIVolume adds an init container with a CSI volume mount. +func WithInitContainerCSIVolume(spcName string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := csiVolumeName(spcName) + mountPath := csiMountPath(spcName) + + hasCSIVolume := false + for _, v := range d.Spec.Template.Spec.Volumes { + if v.Name == volumeName { + hasCSIVolume = true + break + } + } + if !hasCSIVolume { + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: CSIDriverName, + ReadOnly: ptr.To(true), + VolumeAttributes: map[string]string{ + "secretProviderClass": spcName, + }, + }, + }, + }) + } + + initContainer := corev1.Container{ + Name: fmt.Sprintf("init-csi-%s", spcName), + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: mountPath, + ReadOnly: true, + }, + }, + } + d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, initContainer) + } +} + +func baseDeploymentResource(namespace, name string) *appsv1.Deployment { + labels := map[string]string{"app": name} + return &appsv1.Deployment{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "app", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }, + }, + }, + }, + }, + } +} + +// DeleteDeployment deletes a Deployment. +func DeleteDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().Deployments(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// DaemonSetOption is a functional option for configuring a DaemonSet. +type DaemonSetOption func(*appsv1.DaemonSet) + +// CreateDaemonSet creates a DaemonSet with the given options. +func CreateDaemonSet(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...DaemonSetOption) (*appsv1.DaemonSet, error) { + ds := baseDaemonSetResource(namespace, name) + for _, opt := range opts { + opt(ds) + } + return client.AppsV1().DaemonSets(namespace).Create(ctx, ds, metav1.CreateOptions{}) +} + +// baseDaemonSetResource creates a base DaemonSet template. +func baseDaemonSetResource(namespace, name string) *appsv1.DaemonSet { + labels := map[string]string{"app": name} + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "app", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }, + }, + }, + }, + }, + } +} + +// DeleteDaemonSet deletes a DaemonSet. 
+func DeleteDaemonSet(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().DaemonSets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// StatefulSetOption is a functional option for configuring a StatefulSet. +type StatefulSetOption func(*appsv1.StatefulSet) + +// CreateStatefulSet creates a StatefulSet with the given options. +func CreateStatefulSet(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...StatefulSetOption) (*appsv1.StatefulSet, error) { + ss := baseStatefulSetResource(namespace, name) + for _, opt := range opts { + opt(ss) + } + return client.AppsV1().StatefulSets(namespace).Create(ctx, ss, metav1.CreateOptions{}) +} + +// baseStatefulSetResource creates a base StatefulSet template. +func baseStatefulSetResource(namespace, name string) *appsv1.StatefulSet { + labels := map[string]string{"app": name} + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: name, + Replicas: ptr.To(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "app", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }, + }, + }, + }, + }, + } +} + +// DeleteStatefulSet deletes a StatefulSet. +func DeleteStatefulSet(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().StatefulSets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// CronJobOption is a functional option for configuring a CronJob. +type CronJobOption func(*batchv1.CronJob) + +// CreateCronJob creates a CronJob with the given options. 
+func CreateCronJob(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...CronJobOption) (*batchv1.CronJob, error) { + cj := baseCronJobResource(namespace, name) + for _, opt := range opts { + opt(cj) + } + return client.BatchV1().CronJobs(namespace).Create(ctx, cj, metav1.CreateOptions{}) +} + +// WithCronJobAnnotations adds annotations to the CronJob metadata. +func WithCronJobAnnotations(annotations map[string]string) CronJobOption { + return func(cj *batchv1.CronJob) { + if cj.Annotations == nil { + cj.Annotations = make(map[string]string) + } + for k, v := range annotations { + cj.Annotations[k] = v + } + } +} + +// WithCronJobConfigMapEnvFrom adds an envFrom reference to a ConfigMap. +func WithCronJobConfigMapEnvFrom(name string) CronJobOption { + return func(cj *batchv1.CronJob) { + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithCronJobSecretEnvFrom adds an envFrom reference to a Secret. +func WithCronJobSecretEnvFrom(name string) CronJobOption { + return func(cj *batchv1.CronJob) { + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// baseCronJobResource creates a base CronJob template. 
+func baseCronJobResource(namespace, name string) *batchv1.CronJob { + labels := map[string]string{"app": name} + return &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "* * * * *", + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + Containers: []corev1.Container{ + { + Name: "job", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo done"}, + }, + }, + }, + }, + }, + }, + }, + } +} + +// DeleteCronJob deletes a CronJob. +func DeleteCronJob(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.BatchV1().CronJobs(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// JobOption is a functional option for configuring a Job. +type JobOption func(*batchv1.Job) + +// CreateJob creates a Job with the given options. +func CreateJob(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...JobOption) (*batchv1.Job, error) { + job := baseJobResource(namespace, name) + for _, opt := range opts { + opt(job) + } + return client.BatchV1().Jobs(namespace).Create(ctx, job, metav1.CreateOptions{}) +} + +// WithJobAnnotations adds annotations to the Job metadata. +func WithJobAnnotations(annotations map[string]string) JobOption { + return func(j *batchv1.Job) { + if j.Annotations == nil { + j.Annotations = make(map[string]string) + } + for k, v := range annotations { + j.Annotations[k] = v + } + } +} + +// WithJobConfigMapEnvFrom adds an envFrom reference to a ConfigMap. 
+func WithJobConfigMapEnvFrom(name string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].EnvFrom = append( + j.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithJobSecretEnvFrom adds an envFrom reference to a Secret. +func WithJobSecretEnvFrom(name string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].EnvFrom = append( + j.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithJobConfigMapKeyRef adds a valueFrom.configMapKeyRef env var to a Job. +func WithJobConfigMapKeyRef(cmName, key, envVarName string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].Env = append( + j.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithJobSecretKeyRef adds a valueFrom.secretKeyRef env var to a Job. +func WithJobSecretKeyRef(secretName, key, envVarName string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].Env = append( + j.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithJobCommand sets the command for the Job's container. 
+func WithJobCommand(command string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", command} + } +} + +// WithJobCSIVolume adds a CSI volume referencing a SecretProviderClass to a Job. +func WithJobCSIVolume(spcName string) JobOption { + return func(j *batchv1.Job) { + volumeName := csiVolumeName(spcName) + mountPath := csiMountPath(spcName) + + j.Spec.Template.Spec.Volumes = append(j.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: CSIDriverName, + ReadOnly: ptr.To(true), + VolumeAttributes: map[string]string{ + "secretProviderClass": spcName, + }, + }, + }, + }) + j.Spec.Template.Spec.Containers[0].VolumeMounts = append( + j.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: mountPath, + ReadOnly: true, + }, + ) + } +} + +// baseJobResource creates a base Job template. +func baseJobResource(namespace, name string) *batchv1.Job { + labels := map[string]string{"app": name} + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "job", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo done"}, + }, + }, + }, + }, + }, + } +} + +// DeleteJob deletes a Job. 
+func DeleteJob(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + propagation := metav1.DeletePropagationBackground + return client.BatchV1().Jobs(namespace).Delete(ctx, name, metav1.DeleteOptions{ + PropagationPolicy: &propagation, + }) +} + +func csiVolumeName(spcName string) string { + return fmt.Sprintf("csi-%s", spcName) +} + +func csiMountPath(spcName string) string { + return fmt.Sprintf("/mnt/secrets-store/%s", spcName) +} + +// GetDeployment retrieves a deployment by name. +func GetDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string) (*appsv1.Deployment, error) { + return client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) +} + +// GetPodLogs retrieves logs from pods matching the given label selector. +func GetPodLogs(ctx context.Context, client kubernetes.Interface, namespace, labelSelector string) (string, error) { + pods, err := client.CoreV1().Pods(namespace).List( + ctx, metav1.ListOptions{ + LabelSelector: labelSelector, + }, + ) + if err != nil { + return "", fmt.Errorf("failed to list pods: %w", err) + } + + var allLogs strings.Builder + for _, pod := range pods.Items { + for _, container := range pod.Spec.Containers { + logs, err := client.CoreV1().Pods(namespace).GetLogs( + pod.Name, &corev1.PodLogOptions{ + Container: container.Name, + }, + ).Do(ctx).Raw() + if err != nil { + allLogs.WriteString(fmt.Sprintf("Error getting logs for %s/%s: %v\n", pod.Name, container.Name, err)) + continue + } + allLogs.WriteString(fmt.Sprintf("=== %s/%s ===\n%s\n", pod.Name, container.Name, string(logs))) + } + } + + return allLogs.String(), nil +} diff --git a/test/e2e/utils/test_helpers.go b/test/e2e/utils/test_helpers.go new file mode 100644 index 000000000..f075b70e6 --- /dev/null +++ b/test/e2e/utils/test_helpers.go @@ -0,0 +1,12 @@ +package utils + +// MergeAnnotations merges multiple annotation maps into one. 
+func MergeAnnotations(maps ...map[string]string) map[string]string { + result := make(map[string]string) + for _, m := range maps { + for k, v := range m { + result[k] = v + } + } + return result +} diff --git a/test/e2e/utils/test_helpers_test.go b/test/e2e/utils/test_helpers_test.go new file mode 100644 index 000000000..0af5bcfb5 --- /dev/null +++ b/test/e2e/utils/test_helpers_test.go @@ -0,0 +1,143 @@ +package utils + +import ( + "testing" +) + +func TestMergeAnnotations(t *testing.T) { + tests := []struct { + name string + maps []map[string]string + expected map[string]string + }{ + { + name: "no maps", + maps: []map[string]string{}, + expected: map[string]string{}, + }, + { + name: "single map", + maps: []map[string]string{ + {"key1": "value1"}, + }, + expected: map[string]string{ + "key1": "value1", + }, + }, + { + name: "two maps no overlap", + maps: []map[string]string{ + {"key1": "value1"}, + {"key2": "value2"}, + }, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "three maps with overlap - last wins", + maps: []map[string]string{ + {"key1": "value1", "shared": "first"}, + {"key2": "value2", "shared": "second"}, + {"key3": "value3", "shared": "third"}, + }, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + "shared": "third", + }, + }, + { + name: "empty map in the middle", + maps: []map[string]string{ + {"key1": "value1"}, + {}, + {"key2": "value2"}, + }, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "nil map in the middle", + maps: []map[string]string{ + {"key1": "value1"}, + nil, + {"key2": "value2"}, + }, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "realistic use case - auto annotation with reload annotation", + maps: []map[string]string{ + BuildAutoTrueAnnotation(), + BuildConfigMapReloadAnnotation("my-config"), + }, + expected: map[string]string{ + AnnotationAuto: 
AnnotationValueTrue, + AnnotationConfigMapReload: "my-config", + }, + }, + { + name: "realistic use case - pause period with reload annotation", + maps: []map[string]string{ + BuildConfigMapReloadAnnotation("config1"), + BuildPausePeriodAnnotation("10s"), + }, + expected: map[string]string{ + AnnotationConfigMapReload: "config1", + AnnotationDeploymentPausePeriod: "10s", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := MergeAnnotations(tt.maps...) + + if len(result) != len(tt.expected) { + t.Errorf("MergeAnnotations() returned %d entries, want %d", len(result), len(tt.expected)) + t.Errorf("Got: %v", result) + t.Errorf("Want: %v", tt.expected) + return + } + + for k, v := range tt.expected { + if result[k] != v { + t.Errorf("MergeAnnotations()[%q] = %q, want %q", k, result[k], v) + } + } + }) + } +} + +func TestMergeAnnotationsDoesNotModifyInput(t *testing.T) { + map1 := map[string]string{"key1": "value1"} + map2 := map[string]string{"key2": "value2"} + + _ = MergeAnnotations(map1, map2) + + if len(map1) != 1 || map1["key1"] != "value1" { + t.Errorf("map1 was modified: %v", map1) + } + if len(map2) != 1 || map2["key2"] != "value2" { + t.Errorf("map2 was modified: %v", map2) + } +} + +func TestMergeAnnotationsReturnsNewMap(t *testing.T) { + input := map[string]string{"key1": "value1"} + result := MergeAnnotations(input) + + result["key2"] = "value2" + + if _, exists := input["key2"]; exists { + t.Error("modifying result affected input map - should return a new map") + } +} diff --git a/test/e2e/utils/testenv.go b/test/e2e/utils/testenv.go new file mode 100644 index 000000000..b9d5dd18c --- /dev/null +++ b/test/e2e/utils/testenv.go @@ -0,0 +1,175 @@ +package utils + +import ( + "context" + "fmt" + + rolloutsclient "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" + "github.com/onsi/ginkgo/v2" + openshiftclient "github.com/openshift/client-go/apps/clientset/versioned" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" +) + +// TestEnvironment holds the common test environment state. +type TestEnvironment struct { + Ctx context.Context + Cancel context.CancelFunc + KubeClient kubernetes.Interface + DiscoveryClient discovery.DiscoveryInterface + CSIClient csiclient.Interface + RolloutsClient rolloutsclient.Interface + OpenShiftClient openshiftclient.Interface + RestConfig *rest.Config + Namespace string + ReleaseName string + TestImage string + ProjectDir string +} + +// SetupTestEnvironment creates a new test environment with kubernetes clients. +// It creates a unique namespace with the given prefix. +func SetupTestEnvironment(ctx context.Context, namespacePrefix string) (*TestEnvironment, error) { + env := &TestEnvironment{ + Ctx: ctx, + TestImage: GetTestImage(), + } + + var err error + + env.ProjectDir, err = GetProjectDir() + if err != nil { + return nil, fmt.Errorf("getting project directory: %w", err) + } + + kubeconfig := GetKubeconfig() + ginkgo.GinkgoWriter.Printf("Using kubeconfig: %s\n", kubeconfig) + + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, fmt.Errorf("building config from kubeconfig: %w", err) + } + + env.RestConfig = config + + env.KubeClient, err = kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("creating kubernetes client: %w", err) + } + + env.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, fmt.Errorf("creating discovery client: %w", err) + } + + env.CSIClient, err = csiclient.NewForConfig(config) + if err != nil { + ginkgo.GinkgoWriter.Printf("Warning: Could not create CSI client: %v (CSI tests will be skipped)\n", err) + env.CSIClient = nil + } + + // Try to create Argo Rollouts client 
(optional - may not be installed) + env.RolloutsClient, err = rolloutsclient.NewForConfig(config) + if err != nil { + ginkgo.GinkgoWriter.Printf("Warning: Could not create Rollouts client: %v (Argo tests will be skipped)\n", err) + env.RolloutsClient = nil + } + + // Try to create OpenShift client (optional - may not be installed) + env.OpenShiftClient, err = openshiftclient.NewForConfig(config) + if err != nil { + ginkgo.GinkgoWriter.Printf("Warning: Could not create OpenShift client: %v (OpenShift tests will be skipped)\n", + err) + env.OpenShiftClient = nil + } + + ginkgo.GinkgoWriter.Println("Verifying cluster connectivity...") + _, err = env.KubeClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{Limit: 1}) + if err != nil { + return nil, fmt.Errorf("connecting to kubernetes cluster: %w", err) + } + ginkgo.GinkgoWriter.Println("Cluster connectivity verified") + + env.Namespace = RandName(namespacePrefix) + env.ReleaseName = RandName("reloader") + ginkgo.GinkgoWriter.Printf("Creating test namespace: %s\n", env.Namespace) + ginkgo.GinkgoWriter.Printf("Using Helm release name: %s\n", env.ReleaseName) + if err := CreateNamespace(ctx, env.KubeClient, env.Namespace); err != nil { + return nil, fmt.Errorf("creating test namespace: %w", err) + } + + ginkgo.GinkgoWriter.Printf("Using test image: %s\n", env.TestImage) + ginkgo.GinkgoWriter.Printf("Project directory: %s\n", env.ProjectDir) + + return env, nil +} + +// Cleanup cleans up the test environment resources. 
+func (e *TestEnvironment) Cleanup() error { + if e.Namespace == "" { + return nil + } + + ginkgo.GinkgoWriter.Printf("Cleaning up test namespace: %s\n", e.Namespace) + ginkgo.GinkgoWriter.Printf("Cleaning up Helm release: %s\n", e.ReleaseName) + + logs, err := GetPodLogs(e.Ctx, e.KubeClient, e.Namespace, ReloaderPodSelector(e.ReleaseName)) + if err == nil && logs != "" { + ginkgo.GinkgoWriter.Println("Reloader logs:") + ginkgo.GinkgoWriter.Println(logs) + } + + _ = UndeployReloader(e.Namespace, e.ReleaseName) + + if err := DeleteNamespace(e.Ctx, e.KubeClient, e.Namespace); err != nil { + return fmt.Errorf("deleting namespace: %w", err) + } + + return nil +} + +// DeployReloaderWithStrategy deploys Reloader with the specified reload strategy. +func (e *TestEnvironment) DeployReloaderWithStrategy(strategy string) error { + return e.DeployReloaderWithValues( + map[string]string{ + "reloader.reloadStrategy": strategy, + }, + ) +} + +// DeployReloaderWithValues deploys Reloader with the specified Helm values. +// Each test suite uses a unique release name to prevent cluster-scoped resource conflicts. +func (e *TestEnvironment) DeployReloaderWithValues(values map[string]string) error { + ginkgo.GinkgoWriter.Printf("Deploying Reloader with values: %v\n", values) + return DeployReloader( + DeployOptions{ + Namespace: e.Namespace, + ReleaseName: e.ReleaseName, + Image: e.TestImage, + Values: values, + }, + ) +} + +// WaitForReloader waits for the Reloader deployment to be ready. +func (e *TestEnvironment) WaitForReloader() error { + ginkgo.GinkgoWriter.Println("Waiting for Reloader to be ready...") + adapter := NewDeploymentAdapter(e.KubeClient) + return adapter.WaitReady(e.Ctx, e.Namespace, ReloaderDeploymentName(e.ReleaseName), WorkloadReadyTimeout) +} + +// DeployAndWait deploys Reloader with the given values and waits for it to be ready. 
+func (e *TestEnvironment) DeployAndWait(values map[string]string) error { + if err := e.DeployReloaderWithValues(values); err != nil { + return fmt.Errorf("deploying Reloader: %w", err) + } + if err := e.WaitForReloader(); err != nil { + return fmt.Errorf("waiting for Reloader: %w", err) + } + ginkgo.GinkgoWriter.Println("Reloader is ready") + return nil +} diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go new file mode 100644 index 000000000..11b35d7be --- /dev/null +++ b/test/e2e/utils/utils.go @@ -0,0 +1,91 @@ +// Package utils provides helper functions for e2e tests. +package utils + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + . "github.com/onsi/ginkgo/v2" //nolint:revive,staticcheck +) + +// Run executes the provided command and returns its combined stdout/stderr output. +// The command is executed from the project directory. +func Run(cmd *exec.Cmd) (string, error) { + dir, err := GetProjectDir() + if err != nil { + return "", fmt.Errorf("failed to get project dir: %w", err) + } + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command) + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err = cmd.Run() + output := stdout.String() + stderr.String() + if err != nil { + return output, fmt.Errorf("%q failed with error %q: %w", command, output, err) + } + + return output, nil +} + +// GetProjectDir returns the root directory of the project. +// It works by finding the directory containing go.mod. 
+func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("failed to get current working directory: %w", err) + } + + // Walk up the directory tree looking for go.mod + dir := wd + for { + if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { + return dir, nil + } + + parent := filepath.Dir(dir) + if parent == dir { + // Reached root without finding go.mod + break + } + dir = parent + } + + // Fallback: try to strip common test paths + wd = strings.ReplaceAll(wd, "/test/e2e", "") + wd = strings.ReplaceAll(wd, "/test/e2e/annotations", "") + wd = strings.ReplaceAll(wd, "/test/e2e/envvars", "") + wd = strings.ReplaceAll(wd, "/test/e2e/flags", "") + wd = strings.ReplaceAll(wd, "/test/e2e/advanced", "") + wd = strings.ReplaceAll(wd, "/test/e2e/argo", "") + wd = strings.ReplaceAll(wd, "/test/e2e/openshift", "") + + return wd, nil +} + +// GetKubeconfig returns the path to the kubeconfig file. +// It checks KUBECONFIG environment variable first, then falls back to ~/.kube/config. +func GetKubeconfig() string { + if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" { + return kubeconfig + } + home, err := os.UserHomeDir() + if err != nil { + return "" + } + return filepath.Join(home, ".kube", "config") +} diff --git a/test/e2e/utils/watch.go b/test/e2e/utils/watch.go new file mode 100644 index 000000000..f206d88d5 --- /dev/null +++ b/test/e2e/utils/watch.go @@ -0,0 +1,204 @@ +package utils + +import ( + "context" + "errors" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" +) + +// Timeout constants for watch operations. 
+const ( + DefaultInterval = 1 * time.Second // Polling interval + ShortTimeout = 5 * time.Second // Quick checks + NegativeTestWait = 3 * time.Second // Wait before checking negative conditions + WorkloadReadyTimeout = 60 * time.Second // Workload readiness timeout (buffer for CI) + ReloadTimeout = 15 * time.Second // Time for reload to trigger +) + +// ErrWatchTimeout is returned when a watch times out waiting for condition. +var ErrWatchTimeout = errors.New("watch timeout waiting for condition") + +// ErrWatchError is returned when the watch receives an error event from the API server. +var ErrWatchError = errors.New("watch received error event from API server") + +// ErrUnsupportedOperation is returned when an operation is not supported for a workload type. +var ErrUnsupportedOperation = errors.New("operation not supported for this workload type") + +// HandleWatchResult converts watch errors to the standard (bool, error) return pattern. +// Returns (false, nil) for timeout, (true, nil) for success, (false, err) for other errors. +func HandleWatchResult(err error) (bool, error) { + if errors.Is(err, ErrWatchTimeout) { + return false, nil + } + return err == nil, err +} + +// WatchFunc is a function that starts a watch for a specific resource. +type WatchFunc func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + +// Condition is a function that checks if the desired state is reached. +type Condition[T any] func(T) bool + +// WatchUntil watches a resource until the condition is met or timeout occurs. +// It handles watch reconnection automatically on errors. +// If name is empty, it watches all resources and returns the first matching one. 
+func WatchUntil[T runtime.Object](ctx context.Context, watchFunc WatchFunc, name string, condition Condition[T], timeout time.Duration) (T, error) { + var zero T + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + opts := metav1.ListOptions{Watch: true} + if name != "" { + opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", name).String() + } + + for { + select { + case <-ctx.Done(): + return zero, ErrWatchTimeout + default: + } + + result, done, err := watchOnce(ctx, watchFunc, opts, condition) + if done { + return result, err + } + select { + case <-ctx.Done(): + return zero, ErrWatchTimeout + case <-time.After(100 * time.Millisecond): + } + } +} + +// watchOnce starts a single watch and processes events until condition met or watch ends. +func watchOnce[T runtime.Object]( + ctx context.Context, + watchFunc WatchFunc, + opts metav1.ListOptions, + condition Condition[T], +) (T, bool, error) { + var zero T + + watcher, err := watchFunc(ctx, opts) + if err != nil { + return zero, false, nil + } + defer watcher.Stop() + + for { + select { + case <-ctx.Done(): + return zero, true, ErrWatchTimeout + case event, ok := <-watcher.ResultChan(): + if !ok { + return zero, false, nil + } + + switch event.Type { + case watch.Added, watch.Modified: + obj, ok := event.Object.(T) + if !ok { + continue + } + if condition(obj) { + return obj, true, nil + } + case watch.Deleted: + continue + case watch.Error: + return zero, false, ErrWatchError + } + } + } +} + +// WatchUntilDeleted watches until the resource is deleted or timeout occurs. 
+func WatchUntilDeleted( + ctx context.Context, + watchFunc WatchFunc, + name string, + timeout time.Duration, +) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + opts := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), + Watch: true, + } + + for { + select { + case <-ctx.Done(): + return ErrWatchTimeout + default: + } + + deleted, err := watchDeleteOnce(ctx, watchFunc, opts) + if deleted { + return err + } + select { + case <-ctx.Done(): + return ErrWatchTimeout + case <-time.After(100 * time.Millisecond): + } + } +} + +func watchDeleteOnce( + ctx context.Context, + watchFunc WatchFunc, + opts metav1.ListOptions, +) (bool, error) { + watcher, err := watchFunc(ctx, opts) + if err != nil { + return false, nil + } + defer watcher.Stop() + + for { + select { + case <-ctx.Done(): + return true, ErrWatchTimeout + case event, ok := <-watcher.ResultChan(): + if !ok { + return false, nil + } + if event.Type == watch.Deleted { + return true, nil + } + if event.Type == watch.Error { + return false, ErrWatchError + } + } + } +} + +// WatchUntilDifferentUID watches until the resource has a different UID (recreated). 
+func WatchUntilDifferentUID[T runtime.Object]( + ctx context.Context, + watchFunc WatchFunc, + name string, + originalUID string, + timeout time.Duration, + getUID func(T) string, +) (T, bool, error) { + var zero T + result, err := WatchUntil(ctx, watchFunc, name, func(obj T) bool { + return getUID(obj) != originalUID + }, timeout) + if errors.Is(err, ErrWatchTimeout) { + return zero, false, nil + } + if err != nil { + return zero, false, err + } + return result, true, nil +} diff --git a/test/e2e/utils/workload_adapter.go b/test/e2e/utils/workload_adapter.go new file mode 100644 index 000000000..d40700ab9 --- /dev/null +++ b/test/e2e/utils/workload_adapter.go @@ -0,0 +1,177 @@ +package utils + +import ( + "context" + "time" + + "k8s.io/client-go/kubernetes" +) + +// WorkloadType represents the type of Kubernetes workload. +type WorkloadType string + +const ( + WorkloadDeployment WorkloadType = "Deployment" + WorkloadDaemonSet WorkloadType = "DaemonSet" + WorkloadStatefulSet WorkloadType = "StatefulSet" + WorkloadCronJob WorkloadType = "CronJob" + WorkloadJob WorkloadType = "Job" + WorkloadArgoRollout WorkloadType = "ArgoRollout" + WorkloadDeploymentConfig WorkloadType = "DeploymentConfig" +) + +// ReloadStrategy represents the reload strategy used by Reloader. +type ReloadStrategy string + +const ( + StrategyAnnotations ReloadStrategy = "annotations" + StrategyEnvVars ReloadStrategy = "envvars" +) + +// WorkloadConfig holds configuration for workload creation. 
+type WorkloadConfig struct { + ConfigMapName string + SecretName string + SPCName string + Annotations map[string]string // Annotations for workload metadata (e.g., Deployment.metadata.annotations) + PodTemplateAnnotations map[string]string // Annotations for pod template metadata (e.g., Deployment.spec.template.metadata.annotations) + UseConfigMapEnvFrom bool + UseSecretEnvFrom bool + UseConfigMapVolume bool + UseSecretVolume bool + UseProjectedVolume bool + UseConfigMapKeyRef bool + UseSecretKeyRef bool + UseInitContainer bool + UseInitContainerVolume bool + UseCSIVolume bool + UseInitContainerCSI bool + ConfigMapKey string + SecretKey string + EnvVarName string + MultipleContainers int +} + +// WorkloadAdapter provides a unified interface for all workload types. +// This allows tests to be parameterized across different workload types. +type WorkloadAdapter interface { + // Type returns the workload type. + Type() WorkloadType + + // Create creates the workload with the given config. + Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error + + // Delete removes the workload. + Delete(ctx context.Context, namespace, name string) error + + // WaitReady waits for the workload to be ready. + WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error + + // WaitReloaded waits for the workload to have the reload annotation. + // Returns true if the annotation was found, false if timeout occurred. + WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) + + // WaitEnvVar waits for the workload to have a STAKATER_ env var (for envvars strategy). + // Returns true if the env var was found, false if timeout occurred. + WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) + + // SupportsEnvVarStrategy returns true if the workload supports env var reload strategy. 
+ // CronJob does not support this as it uses job creation instead. + SupportsEnvVarStrategy() bool + + // RequiresSpecialHandling returns true for workloads that need special handling. + // For example, CronJob triggers a new job instead of rolling restart. + RequiresSpecialHandling() bool + + // GetPodTemplateAnnotation returns the value of a pod template annotation. + // This is useful for tests that need to compare annotation values before/after updates. + GetPodTemplateAnnotation(ctx context.Context, namespace, name, annotationKey string) (string, error) +} + +// Pausable is implemented by workloads that support pause/unpause. +// Currently only Deployment supports this capability. +type Pausable interface { + WaitPaused(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) + WaitUnpaused(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) +} + +// Recreatable is implemented by workloads that are recreated instead of updated. +// Currently only Job supports this capability (Jobs are immutable, so Reloader recreates them). +type Recreatable interface { + GetOriginalUID(ctx context.Context, namespace, name string) (string, error) + WaitRecreated(ctx context.Context, namespace, name, originalUID string, timeout time.Duration) (string, bool, error) +} + +// JobTriggerer is implemented by workloads that trigger jobs on reload. +// Currently only CronJob supports this capability. +type JobTriggerer interface { + WaitForTriggeredJob(ctx context.Context, namespace, name string, timeout time.Duration) (bool, error) +} + +// RestartAtSupporter is implemented by workloads that support the restartAt field. +// Currently only ArgoRollout supports this capability. +type RestartAtSupporter interface { + WaitRestartAt(ctx context.Context, namespace, name string, timeout time.Duration) (bool, error) +} + +// AdapterRegistry holds adapters for all workload types. 
+type AdapterRegistry struct { + kubeClient kubernetes.Interface + adapters map[WorkloadType]WorkloadAdapter +} + +// NewAdapterRegistry creates a new adapter registry with all standard adapters. +func NewAdapterRegistry(kubeClient kubernetes.Interface) *AdapterRegistry { + r := &AdapterRegistry{ + kubeClient: kubeClient, + adapters: make(map[WorkloadType]WorkloadAdapter), + } + + r.adapters[WorkloadDeployment] = NewDeploymentAdapter(kubeClient) + r.adapters[WorkloadDaemonSet] = NewDaemonSetAdapter(kubeClient) + r.adapters[WorkloadStatefulSet] = NewStatefulSetAdapter(kubeClient) + r.adapters[WorkloadCronJob] = NewCronJobAdapter(kubeClient) + r.adapters[WorkloadJob] = NewJobAdapter(kubeClient) + + return r +} + +// RegisterAdapter registers a custom adapter for a workload type. +func (r *AdapterRegistry) RegisterAdapter(adapter WorkloadAdapter) { + r.adapters[adapter.Type()] = adapter +} + +// Get returns the adapter for the given workload type. +// Returns nil if the adapter is not registered. +func (r *AdapterRegistry) Get(wt WorkloadType) WorkloadAdapter { + return r.adapters[wt] +} + +// GetStandardWorkloads returns the standard workload types that are always available. +func (r *AdapterRegistry) GetStandardWorkloads() []WorkloadType { + return []WorkloadType{ + WorkloadDeployment, + WorkloadDaemonSet, + WorkloadStatefulSet, + } +} + +// GetAllWorkloads returns all registered workload types. +func (r *AdapterRegistry) GetAllWorkloads() []WorkloadType { + result := make([]WorkloadType, 0, len(r.adapters)) + for wt := range r.adapters { + result = append(result, wt) + } + return result +} + +// GetEnvVarWorkloads returns workload types that support env var reload strategy. 
+func (r *AdapterRegistry) GetEnvVarWorkloads() []WorkloadType { + result := make([]WorkloadType, 0) + for wt, adapter := range r.adapters { + if adapter.SupportsEnvVarStrategy() { + result = append(result, wt) + } + } + return result +} diff --git a/test/e2e/utils/workload_argo.go b/test/e2e/utils/workload_argo.go new file mode 100644 index 000000000..24cbcf4b2 --- /dev/null +++ b/test/e2e/utils/workload_argo.go @@ -0,0 +1,151 @@ +package utils + +import ( + "context" + "time" + + rolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + rolloutsclient "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/utils/ptr" +) + +// ArgoRolloutAdapter implements WorkloadAdapter for Argo Rollouts. +type ArgoRolloutAdapter struct { + rolloutsClient rolloutsclient.Interface +} + +// NewArgoRolloutAdapter creates a new ArgoRolloutAdapter. +func NewArgoRolloutAdapter(rolloutsClient rolloutsclient.Interface) *ArgoRolloutAdapter { + return &ArgoRolloutAdapter{ + rolloutsClient: rolloutsClient, + } +} + +// Type returns the workload type. +func (a *ArgoRolloutAdapter) Type() WorkloadType { + return WorkloadArgoRollout +} + +// Create creates an Argo Rollout with the given config. +func (a *ArgoRolloutAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + rollout := baseRollout(name) + opts := buildRolloutOptions(cfg) + for _, opt := range opts { + opt(rollout) + } + _, err := a.rolloutsClient.ArgoprojV1alpha1().Rollouts(namespace).Create(ctx, rollout, metav1.CreateOptions{}) + return err +} + +// Delete removes the Argo Rollout. 
+func (a *ArgoRolloutAdapter) Delete(ctx context.Context, namespace, name string) error { + return a.rolloutsClient.ArgoprojV1alpha1().Rollouts(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// WaitReady waits for the Argo Rollout to be ready using watches. +func (a *ArgoRolloutAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.rolloutsClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, IsReady(RolloutIsReady), timeout) + return err +} + +// WaitReloaded waits for the Argo Rollout to have the reload annotation using watches. +func (a *ArgoRolloutAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.rolloutsClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasPodTemplateAnnotation(RolloutPodTemplate, annotationKey), timeout) + return HandleWatchResult(err) +} + +// WaitEnvVar waits for the Argo Rollout to have a STAKATER_ env var using watches. +func (a *ArgoRolloutAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.rolloutsClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasEnvVarPrefix(RolloutContainers, prefix), timeout) + return HandleWatchResult(err) +} + +// WaitRestartAt waits for the Argo Rollout to have the restartAt field set using watches. +// This is used when Reloader is configured with rollout strategy=restart. 
+func (a *ArgoRolloutAdapter) WaitRestartAt(ctx context.Context, namespace, name string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.rolloutsClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, IsReady(RolloutHasRestartAt), timeout) + return HandleWatchResult(err) +} + +// SupportsEnvVarStrategy returns true as Argo Rollouts support env var reload strategy. +func (a *ArgoRolloutAdapter) SupportsEnvVarStrategy() bool { + return true +} + +// RequiresSpecialHandling returns false as Argo Rollouts use standard rolling restart. +func (a *ArgoRolloutAdapter) RequiresSpecialHandling() bool { + return false +} + +// GetPodTemplateAnnotation returns the value of a pod template annotation. +func (a *ArgoRolloutAdapter) GetPodTemplateAnnotation(ctx context.Context, namespace, name, annotationKey string) (string, error) { + rollout, err := a.rolloutsClient.ArgoprojV1alpha1().Rollouts(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return rollout.Spec.Template.Annotations[annotationKey], nil +} + +// baseRollout returns a minimal Rollout template. 
+func baseRollout(name string) *rolloutv1alpha1.Rollout { + return &rolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: rolloutv1alpha1.RolloutSpec{ + Replicas: ptr.To[int32](1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }}, + }, + }, + Strategy: rolloutv1alpha1.RolloutStrategy{ + Canary: &rolloutv1alpha1.CanaryStrategy{ + Steps: []rolloutv1alpha1.CanaryStep{ + {SetWeight: ptr.To[int32](100)}, + }, + }, + }, + }, + } +} + +// buildRolloutOptions converts WorkloadConfig to RolloutOption slice. +func buildRolloutOptions(cfg WorkloadConfig) []RolloutOption { + return []RolloutOption{ + func(r *rolloutv1alpha1.Rollout) { + if len(cfg.Annotations) > 0 { + if r.Annotations == nil { + r.Annotations = make(map[string]string) + } + for k, v := range cfg.Annotations { + r.Annotations[k] = v + } + } + ApplyWorkloadConfig(&r.Spec.Template, cfg) + }, + } +} diff --git a/test/e2e/utils/workload_cronjob.go b/test/e2e/utils/workload_cronjob.go new file mode 100644 index 000000000..b77cddc4e --- /dev/null +++ b/test/e2e/utils/workload_cronjob.go @@ -0,0 +1,106 @@ +package utils + +import ( + "context" + "time" + + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +// CronJobAdapter implements WorkloadAdapter for Kubernetes CronJobs. +type CronJobAdapter struct { + client kubernetes.Interface +} + +// NewCronJobAdapter creates a new CronJobAdapter. +func NewCronJobAdapter(client kubernetes.Interface) *CronJobAdapter { + return &CronJobAdapter{client: client} +} + +// Type returns the workload type. 
+func (a *CronJobAdapter) Type() WorkloadType { + return WorkloadCronJob +} + +// Create creates a CronJob with the given config. +func (a *CronJobAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildCronJobOptions(cfg) + _, err := CreateCronJob(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the CronJob. +func (a *CronJobAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteCronJob(ctx, a.client, namespace, name) +} + +// WaitReady waits for the CronJob to exist using watches. +func (a *CronJobAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.BatchV1().CronJobs(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, Always[*batchv1.CronJob](), timeout) + return err +} + +// WaitReloaded waits for the CronJob pod template to have the reload annotation using watches. +func (a *CronJobAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.BatchV1().CronJobs(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasPodTemplateAnnotation(CronJobPodTemplate, annotationKey), timeout) + return HandleWatchResult(err) +} + +// WaitEnvVar returns an error because CronJobs don't support env var reload strategy. +func (a *CronJobAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + return false, ErrUnsupportedOperation +} + +// SupportsEnvVarStrategy returns false as CronJobs don't support env var reload strategy. 
+func (a *CronJobAdapter) SupportsEnvVarStrategy() bool { + return false +} + +// RequiresSpecialHandling returns true as CronJobs use job triggering instead of rolling restart. +func (a *CronJobAdapter) RequiresSpecialHandling() bool { + return true +} + +// WaitForTriggeredJob waits for Reloader to trigger a new Job from this CronJob using watches. +func (a *CronJobAdapter) WaitForTriggeredJob(ctx context.Context, namespace, cronJobName string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.BatchV1().Jobs(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, "", IsTriggeredJobForCronJob(cronJobName), timeout) + return HandleWatchResult(err) +} + +// GetPodTemplateAnnotation returns the value of a pod template annotation. +func (a *CronJobAdapter) GetPodTemplateAnnotation(ctx context.Context, namespace, name, annotationKey string) (string, error) { + cj, err := a.client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return cj.Spec.JobTemplate.Spec.Template.Annotations[annotationKey], nil +} + +// buildCronJobOptions converts WorkloadConfig to CronJobOption slice. 
+func buildCronJobOptions(cfg WorkloadConfig) []CronJobOption { + return []CronJobOption{ + func(cj *batchv1.CronJob) { + if len(cfg.Annotations) > 0 { + if cj.Annotations == nil { + cj.Annotations = make(map[string]string) + } + for k, v := range cfg.Annotations { + cj.Annotations[k] = v + } + } + ApplyWorkloadConfig(&cj.Spec.JobTemplate.Spec.Template, cfg) + }, + } +} diff --git a/test/e2e/utils/workload_daemonset.go b/test/e2e/utils/workload_daemonset.go new file mode 100644 index 000000000..d80ce7903 --- /dev/null +++ b/test/e2e/utils/workload_daemonset.go @@ -0,0 +1,101 @@ +package utils + +import ( + "context" + "time" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +// DaemonSetAdapter implements WorkloadAdapter for Kubernetes DaemonSets. +type DaemonSetAdapter struct { + client kubernetes.Interface +} + +// NewDaemonSetAdapter creates a new DaemonSetAdapter. +func NewDaemonSetAdapter(client kubernetes.Interface) *DaemonSetAdapter { + return &DaemonSetAdapter{client: client} +} + +// Type returns the workload type. +func (a *DaemonSetAdapter) Type() WorkloadType { + return WorkloadDaemonSet +} + +// Create creates a DaemonSet with the given config. +func (a *DaemonSetAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildDaemonSetOptions(cfg) + _, err := CreateDaemonSet(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the DaemonSet. +func (a *DaemonSetAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteDaemonSet(ctx, a.client, namespace, name) +} + +// WaitReady waits for the DaemonSet to be ready using watches. 
+func (a *DaemonSetAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().DaemonSets(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, IsReady(DaemonSetIsReady), timeout) + return err +} + +// WaitReloaded waits for the DaemonSet to have the reload annotation using watches. +func (a *DaemonSetAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().DaemonSets(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasPodTemplateAnnotation(DaemonSetPodTemplate, annotationKey), timeout) + return HandleWatchResult(err) +} + +// WaitEnvVar waits for the DaemonSet to have a STAKATER_ env var using watches. +func (a *DaemonSetAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().DaemonSets(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasEnvVarPrefix(DaemonSetContainers, prefix), timeout) + return HandleWatchResult(err) +} + +// SupportsEnvVarStrategy returns true as DaemonSets support env var reload strategy. +func (a *DaemonSetAdapter) SupportsEnvVarStrategy() bool { + return true +} + +// RequiresSpecialHandling returns false as DaemonSets use standard rolling restart. +func (a *DaemonSetAdapter) RequiresSpecialHandling() bool { + return false +} + +// GetPodTemplateAnnotation returns the value of a pod template annotation. 
+func (a *DaemonSetAdapter) GetPodTemplateAnnotation(ctx context.Context, namespace, name, annotationKey string) (string, error) { + ds, err := a.client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return ds.Spec.Template.Annotations[annotationKey], nil +} + +// buildDaemonSetOptions converts WorkloadConfig to DaemonSetOption slice. +func buildDaemonSetOptions(cfg WorkloadConfig) []DaemonSetOption { + return []DaemonSetOption{ + func(ds *appsv1.DaemonSet) { + if len(cfg.Annotations) > 0 { + if ds.Annotations == nil { + ds.Annotations = make(map[string]string) + } + for k, v := range cfg.Annotations { + ds.Annotations[k] = v + } + } + ApplyWorkloadConfig(&ds.Spec.Template, cfg) + }, + } +} diff --git a/test/e2e/utils/workload_deployment.go b/test/e2e/utils/workload_deployment.go new file mode 100644 index 000000000..1b967b845 --- /dev/null +++ b/test/e2e/utils/workload_deployment.go @@ -0,0 +1,119 @@ +package utils + +import ( + "context" + "time" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +// DeploymentAdapter implements WorkloadAdapter for Kubernetes Deployments. +type DeploymentAdapter struct { + client kubernetes.Interface +} + +// NewDeploymentAdapter creates a new DeploymentAdapter. +func NewDeploymentAdapter(client kubernetes.Interface) *DeploymentAdapter { + return &DeploymentAdapter{client: client} +} + +// Type returns the workload type. +func (a *DeploymentAdapter) Type() WorkloadType { + return WorkloadDeployment +} + +// Create creates a Deployment with the given config. +func (a *DeploymentAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildDeploymentOptions(cfg) + _, err := CreateDeployment(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the Deployment. 
+func (a *DeploymentAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteDeployment(ctx, a.client, namespace, name) +} + +// WaitReady waits for the Deployment to be ready using watches. +func (a *DeploymentAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().Deployments(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, IsReady(DeploymentIsReady), timeout) + return err +} + +// WaitReloaded waits for the Deployment to have the reload annotation using watches. +func (a *DeploymentAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().Deployments(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasPodTemplateAnnotation(DeploymentPodTemplate, annotationKey), timeout) + return HandleWatchResult(err) +} + +// WaitEnvVar waits for the Deployment to have a STAKATER_ env var using watches. +func (a *DeploymentAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().Deployments(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasEnvVarPrefix(DeploymentContainers, prefix), timeout) + return HandleWatchResult(err) +} + +// WaitPaused waits for the Deployment to have the paused annotation using watches. 
+func (a *DeploymentAdapter) WaitPaused(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().Deployments(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasAnnotation(DeploymentAnnotations, annotationKey), timeout) + return HandleWatchResult(err) +} + +// WaitUnpaused waits for the Deployment to NOT have the paused annotation using watches. +func (a *DeploymentAdapter) WaitUnpaused(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().Deployments(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, NoAnnotation(DeploymentAnnotations, annotationKey), timeout) + return HandleWatchResult(err) +} + +// SupportsEnvVarStrategy returns true as Deployments support env var reload strategy. +func (a *DeploymentAdapter) SupportsEnvVarStrategy() bool { + return true +} + +// RequiresSpecialHandling returns false as Deployments use standard rolling restart. +func (a *DeploymentAdapter) RequiresSpecialHandling() bool { + return false +} + +// GetPodTemplateAnnotation returns the value of a pod template annotation. +func (a *DeploymentAdapter) GetPodTemplateAnnotation(ctx context.Context, namespace, name, annotationKey string) (string, error) { + deploy, err := a.client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return deploy.Spec.Template.Annotations[annotationKey], nil +} + +// buildDeploymentOptions converts WorkloadConfig to DeploymentOption slice. 
+func buildDeploymentOptions(cfg WorkloadConfig) []DeploymentOption { + return []DeploymentOption{ + func(d *appsv1.Deployment) { + if len(cfg.Annotations) > 0 { + if d.Annotations == nil { + d.Annotations = make(map[string]string) + } + for k, v := range cfg.Annotations { + d.Annotations[k] = v + } + } + ApplyWorkloadConfig(&d.Spec.Template, cfg) + }, + } +} diff --git a/test/e2e/utils/workload_job.go b/test/e2e/utils/workload_job.go new file mode 100644 index 000000000..e71c86c25 --- /dev/null +++ b/test/e2e/utils/workload_job.go @@ -0,0 +1,120 @@ +package utils + +import ( + "context" + "errors" + "time" + + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +// JobAdapter implements WorkloadAdapter for Kubernetes Jobs. +type JobAdapter struct { + client kubernetes.Interface +} + +// NewJobAdapter creates a new JobAdapter. +func NewJobAdapter(client kubernetes.Interface) *JobAdapter { + return &JobAdapter{client: client} +} + +// Type returns the workload type. +func (a *JobAdapter) Type() WorkloadType { + return WorkloadJob +} + +// Create creates a Job with the given config. +func (a *JobAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildJobOptions(cfg) + _, err := CreateJob(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the Job. +func (a *JobAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteJob(ctx, a.client, namespace, name) +} + +// WaitReady waits for the Job to be ready (has active or succeeded pods) using watches. 
+func (a *JobAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.BatchV1().Jobs(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, IsReady(JobIsReady), timeout) + return err +} + +// WaitReloaded returns an error because Jobs are recreated, not updated. +// Use the Recreatable interface (GetOriginalUID + WaitRecreated) instead. +func (a *JobAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + return false, ErrUnsupportedOperation +} + +// WaitEnvVar returns an error because Jobs don't support env var reload strategy. +func (a *JobAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + return false, ErrUnsupportedOperation +} + +// WaitRecreated waits for the Job to be recreated with a different UID using watches. +func (a *JobAdapter) WaitRecreated(ctx context.Context, namespace, name, originalUID string, timeout time.Duration) (string, bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.BatchV1().Jobs(namespace).Watch(ctx, opts) + } + job, err := WatchUntil(ctx, watchFunc, name, HasDifferentUID(JobUID, types.UID(originalUID)), timeout) + if errors.Is(err, ErrWatchTimeout) { + return "", false, nil + } + if err != nil { + return "", false, err + } + return string(job.UID), true, nil +} + +// SupportsEnvVarStrategy returns false as Jobs don't support env var reload strategy. +func (a *JobAdapter) SupportsEnvVarStrategy() bool { + return false +} + +// RequiresSpecialHandling returns true as Jobs are recreated by Reloader. +func (a *JobAdapter) RequiresSpecialHandling() bool { + return true +} + +// GetOriginalUID retrieves the current UID of the Job for recreation verification. 
+func (a *JobAdapter) GetOriginalUID(ctx context.Context, namespace, name string) (string, error) { + job, err := a.client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return string(job.UID), nil +} + +// GetPodTemplateAnnotation returns the value of a pod template annotation. +func (a *JobAdapter) GetPodTemplateAnnotation(ctx context.Context, namespace, name, annotationKey string) (string, error) { + job, err := a.client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return job.Spec.Template.Annotations[annotationKey], nil +} + +// buildJobOptions converts WorkloadConfig to JobOption slice. +func buildJobOptions(cfg WorkloadConfig) []JobOption { + return []JobOption{ + func(job *batchv1.Job) { + if len(cfg.Annotations) > 0 { + if job.Annotations == nil { + job.Annotations = make(map[string]string) + } + for k, v := range cfg.Annotations { + job.Annotations[k] = v + } + } + ApplyWorkloadConfig(&job.Spec.Template, cfg) + }, + } +} diff --git a/test/e2e/utils/workload_openshift.go b/test/e2e/utils/workload_openshift.go new file mode 100644 index 000000000..091f03af2 --- /dev/null +++ b/test/e2e/utils/workload_openshift.go @@ -0,0 +1,142 @@ +package utils + +import ( + "context" + "time" + + openshiftappsv1 "github.com/openshift/api/apps/v1" + openshiftclient "github.com/openshift/client-go/apps/clientset/versioned" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" +) + +// DeploymentConfigOption is a function that modifies a DeploymentConfig. +type DeploymentConfigOption func(*openshiftappsv1.DeploymentConfig) + +// DeploymentConfigAdapter implements WorkloadAdapter for OpenShift DeploymentConfigs. +type DeploymentConfigAdapter struct { + openshiftClient openshiftclient.Interface +} + +// NewDeploymentConfigAdapter creates a new DeploymentConfigAdapter. 
+func NewDeploymentConfigAdapter(openshiftClient openshiftclient.Interface) *DeploymentConfigAdapter { + return &DeploymentConfigAdapter{ + openshiftClient: openshiftClient, + } +} + +// Type returns the workload type. +func (a *DeploymentConfigAdapter) Type() WorkloadType { + return WorkloadDeploymentConfig +} + +// Create creates a DeploymentConfig with the given config. +func (a *DeploymentConfigAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + dc := baseDeploymentConfig(name) + opts := buildDeploymentConfigOptions(cfg) + for _, opt := range opts { + opt(dc) + } + _, err := a.openshiftClient.AppsV1().DeploymentConfigs(namespace).Create(ctx, dc, metav1.CreateOptions{}) + return err +} + +// Delete removes the DeploymentConfig. +func (a *DeploymentConfigAdapter) Delete(ctx context.Context, namespace, name string) error { + return a.openshiftClient.AppsV1().DeploymentConfigs(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// WaitReady waits for the DeploymentConfig to be ready using watches. +func (a *DeploymentConfigAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.openshiftClient.AppsV1().DeploymentConfigs(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, IsReady(DeploymentConfigIsReady), timeout) + return err +} + +// WaitReloaded waits for the DeploymentConfig to have the reload annotation using watches. 
+func (a *DeploymentConfigAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.openshiftClient.AppsV1().DeploymentConfigs(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasPodTemplateAnnotation(DeploymentConfigPodTemplate, annotationKey), timeout) + return HandleWatchResult(err) +} + +// WaitEnvVar waits for the DeploymentConfig to have a STAKATER_ env var using watches. +func (a *DeploymentConfigAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.openshiftClient.AppsV1().DeploymentConfigs(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasEnvVarPrefix(DeploymentConfigContainers, prefix), timeout) + return HandleWatchResult(err) +} + +// SupportsEnvVarStrategy returns true as DeploymentConfigs support env var reload strategy. +func (a *DeploymentConfigAdapter) SupportsEnvVarStrategy() bool { + return true +} + +// RequiresSpecialHandling returns false as DeploymentConfigs use standard rolling restart. +func (a *DeploymentConfigAdapter) RequiresSpecialHandling() bool { + return false +} + +// GetPodTemplateAnnotation returns the value of a pod template annotation. +func (a *DeploymentConfigAdapter) GetPodTemplateAnnotation(ctx context.Context, namespace, name, annotationKey string) (string, error) { + dc, err := a.openshiftClient.AppsV1().DeploymentConfigs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + if dc.Spec.Template == nil { + return "", nil + } + return dc.Spec.Template.Annotations[annotationKey], nil +} + +// baseDeploymentConfig returns a minimal DeploymentConfig template. 
+func baseDeploymentConfig(name string) *openshiftappsv1.DeploymentConfig { + return &openshiftappsv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: openshiftappsv1.DeploymentConfigSpec{ + Replicas: 1, + Selector: map[string]string{"app": name}, + Template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }}, + }, + }, + Triggers: openshiftappsv1.DeploymentTriggerPolicies{ + {Type: openshiftappsv1.DeploymentTriggerOnConfigChange}, + }, + }, + } +} + +// buildDeploymentConfigOptions converts WorkloadConfig to DeploymentConfigOption slice. +func buildDeploymentConfigOptions(cfg WorkloadConfig) []DeploymentConfigOption { + return []DeploymentConfigOption{ + func(dc *openshiftappsv1.DeploymentConfig) { + if len(cfg.Annotations) > 0 { + if dc.Annotations == nil { + dc.Annotations = make(map[string]string) + } + for k, v := range cfg.Annotations { + dc.Annotations[k] = v + } + } + if dc.Spec.Template != nil { + ApplyWorkloadConfig(dc.Spec.Template, cfg) + } + }, + } +} diff --git a/test/e2e/utils/workload_statefulset.go b/test/e2e/utils/workload_statefulset.go new file mode 100644 index 000000000..53f6fd7c0 --- /dev/null +++ b/test/e2e/utils/workload_statefulset.go @@ -0,0 +1,101 @@ +package utils + +import ( + "context" + "time" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +// StatefulSetAdapter implements WorkloadAdapter for Kubernetes StatefulSets. +type StatefulSetAdapter struct { + client kubernetes.Interface +} + +// NewStatefulSetAdapter creates a new StatefulSetAdapter. +func NewStatefulSetAdapter(client kubernetes.Interface) *StatefulSetAdapter { + return &StatefulSetAdapter{client: client} +} + +// Type returns the workload type. 
+func (a *StatefulSetAdapter) Type() WorkloadType { + return WorkloadStatefulSet +} + +// Create creates a StatefulSet with the given config. +func (a *StatefulSetAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildStatefulSetOptions(cfg) + _, err := CreateStatefulSet(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the StatefulSet. +func (a *StatefulSetAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteStatefulSet(ctx, a.client, namespace, name) +} + +// WaitReady waits for the StatefulSet to be ready using watches. +func (a *StatefulSetAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().StatefulSets(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, IsReady(StatefulSetIsReady), timeout) + return err +} + +// WaitReloaded waits for the StatefulSet to have the reload annotation using watches. +func (a *StatefulSetAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().StatefulSets(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasPodTemplateAnnotation(StatefulSetPodTemplate, annotationKey), timeout) + return HandleWatchResult(err) +} + +// WaitEnvVar waits for the StatefulSet to have a STAKATER_ env var using watches. 
+func (a *StatefulSetAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + watchFunc := func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return a.client.AppsV1().StatefulSets(namespace).Watch(ctx, opts) + } + _, err := WatchUntil(ctx, watchFunc, name, HasEnvVarPrefix(StatefulSetContainers, prefix), timeout) + return HandleWatchResult(err) +} + +// SupportsEnvVarStrategy returns true as StatefulSets support env var reload strategy. +func (a *StatefulSetAdapter) SupportsEnvVarStrategy() bool { + return true +} + +// RequiresSpecialHandling returns false as StatefulSets use standard rolling restart. +func (a *StatefulSetAdapter) RequiresSpecialHandling() bool { + return false +} + +// GetPodTemplateAnnotation returns the value of a pod template annotation. +func (a *StatefulSetAdapter) GetPodTemplateAnnotation(ctx context.Context, namespace, name, annotationKey string) (string, error) { + sts, err := a.client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return sts.Spec.Template.Annotations[annotationKey], nil +} + +// buildStatefulSetOptions converts WorkloadConfig to StatefulSetOption slice. +func buildStatefulSetOptions(cfg WorkloadConfig) []StatefulSetOption { + return []StatefulSetOption{ + func(sts *appsv1.StatefulSet) { + if len(cfg.Annotations) > 0 { + if sts.Annotations == nil { + sts.Annotations = make(map[string]string) + } + for k, v := range cfg.Annotations { + sts.Annotations[k] = v + } + } + ApplyWorkloadConfig(&sts.Spec.Template, cfg) + }, + } +} diff --git a/test/loadtest/internal/cmd/report.go b/test/loadtest/internal/cmd/report.go index 7bf4cc670..87e4e26e2 100644 --- a/test/loadtest/internal/cmd/report.go +++ b/test/loadtest/internal/cmd/report.go @@ -122,15 +122,15 @@ type ReportExpectedMetrics struct { // ScenarioReport represents the full report for a scenario. 
type ScenarioReport struct { - Scenario string `json:"scenario"` - Timestamp time.Time `json:"timestamp"` - Comparisons []MetricComparison `json:"comparisons"` - OverallStatus string `json:"overall_status"` - Summary string `json:"summary"` - PassCriteria []string `json:"pass_criteria"` - FailedCriteria []string `json:"failed_criteria"` + Scenario string `json:"scenario"` + Timestamp time.Time `json:"timestamp"` + Comparisons []MetricComparison `json:"comparisons"` + OverallStatus string `json:"overall_status"` + Summary string `json:"summary"` + PassCriteria []string `json:"pass_criteria"` + FailedCriteria []string `json:"failed_criteria"` Expected ReportExpectedMetrics `json:"expected"` - TestDescription string `json:"test_description"` + TestDescription string `json:"test_description"` } // MetricType defines how to evaluate a metric. diff --git a/test/loadtest/internal/cmd/run.go b/test/loadtest/internal/cmd/run.go index c78e57916..eb45a07ec 100644 --- a/test/loadtest/internal/cmd/run.go +++ b/test/loadtest/internal/cmd/run.go @@ -14,12 +14,13 @@ import ( "time" "github.com/spf13/cobra" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "github.com/stakater/Reloader/test/loadtest/internal/cluster" "github.com/stakater/Reloader/test/loadtest/internal/prometheus" "github.com/stakater/Reloader/test/loadtest/internal/reloader" "github.com/stakater/Reloader/test/loadtest/internal/scenarios" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" ) // RunConfig holds CLI configuration for the run command. @@ -645,4 +646,3 @@ func cleanupTestNamespaces(ctx context.Context, kubeContext string) { exec.CommandContext(ctx, "kubectl", args...).Run() } } -