diff --git a/bindata/observability/07-observability-operator.yaml b/bindata/observability/07-observability-operator.yaml
new file mode 100644
index 0000000000..0ba6f9df7b
--- /dev/null
+++ b/bindata/observability/07-observability-operator.yaml
@@ -0,0 +1,246 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: openshift-netobserv-operator
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: netobserv-operator-installer
+ namespace: openshift-netobserv-operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: netobserv-operator-installer
+rules:
+ # Permissions needed by OLM v1 to install the Network Observability operator
+ # Based on OLM v1 pre-authorization requirements from the operator bundle
+
+ # Core resources
+ - apiGroups: [""]
+ resources: ["endpoints", "nodes", "pods"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+ - apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["create", "get", "list", "watch", "update", "patch", "delete"]
+ - apiGroups: [""]
+ resources: ["serviceaccounts", "services", "configmaps"]
+ verbs: ["create", "get", "list", "watch", "update", "patch", "delete"]
+ - apiGroups: [""]
+ resources: ["secrets", "persistentvolumeclaims"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+
+ # Webhooks - validating webhook configurations
+ - apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["validatingwebhookconfigurations"]
+ verbs: ["create", "list", "watch"]
+ - apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["validatingwebhookconfigurations"]
+ resourceNames: ["flowcollectorconversionwebhook.netobserv.io", "flowmetricvalidationwebhook.netobserv.io"]
+ verbs: ["delete", "get", "patch", "update"]
+
+ # CRDs and API services
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "get", "list", "watch", "update", "patch"]
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions/status"]
+ verbs: ["patch", "update"]
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ resourceNames: ["flowcollectors.flows.netobserv.io", "flowcollectorslices.flows.netobserv.io", "flowmetrics.flows.netobserv.io"]
+ verbs: ["delete"]
+ - apiGroups: ["apiregistration.k8s.io"]
+ resources: ["apiservices"]
+ verbs: ["get", "list", "watch"]
+
+ # Workloads
+ - apiGroups: ["apps"]
+ resources: ["deployments", "daemonsets"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["replicasets"]
+ verbs: ["get", "list", "watch"]
+
+ # Authentication and authorization
+ - apiGroups: ["authentication.k8s.io"]
+ resources: ["tokenreviews"]
+ verbs: ["create"]
+ - apiGroups: ["authorization.k8s.io"]
+ resources: ["subjectaccessreviews"]
+ verbs: ["create"]
+
+ # Autoscaling
+ - apiGroups: ["autoscaling"]
+ resources: ["horizontalpodautoscalers"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+
+ # eBPF (bpfman.io)
+ - apiGroups: ["bpfman.io"]
+ resources: ["clusterbpfapplications"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+ - apiGroups: ["bpfman.io"]
+ resources: ["clusterbpfapplications/status"]
+ verbs: ["get", "patch", "update"]
+
+ # OpenShift config
+ - apiGroups: ["config.openshift.io"]
+ resources: ["clusterversions", "networks"]
+ verbs: ["get", "list", "watch"]
+
+ # Console plugin
+ - apiGroups: ["console.openshift.io"]
+ resources: ["consoleplugins"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+
+ # Coordination (leader election)
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+
+ # Discovery
+ - apiGroups: ["discovery.k8s.io"]
+ resources: ["endpointslices"]
+ verbs: ["get", "list", "watch"]
+
+ # FlowCollector CRs
+ - apiGroups: ["flows.netobserv.io"]
+ resources: ["flowcollectors", "flowcollectorslices", "flowmetrics"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+ - apiGroups: ["flows.netobserv.io"]
+ resources: ["flowcollectors/finalizers"]
+ verbs: ["update"]
+ - apiGroups: ["flows.netobserv.io"]
+ resources: ["flowcollectors/status", "flowcollectorslices/status", "flowmetrics/status"]
+ verbs: ["get", "patch", "update"]
+
+ # OVN-Kubernetes
+ - apiGroups: ["k8s.ovn.org"]
+ resources: ["clusteruserdefinednetworks", "userdefinednetworks"]
+ verbs: ["get", "list", "watch"]
+
+ # Loki integration
+ - apiGroups: ["loki.grafana.com"]
+ resources: ["lokistacks"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["loki.grafana.com"]
+ resources: ["network"]
+ resourceNames: ["logs"]
+ verbs: ["create", "get"]
+
+ # Metrics
+ - apiGroups: ["metrics.k8s.io"]
+ resources: ["pods"]
+ verbs: ["create"]
+
+ # Monitoring
+ - apiGroups: ["monitoring.coreos.com"]
+ resources: ["prometheusrules", "servicemonitors"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+
+ # Network policies
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["networkpolicies"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+
+ # OLM ClusterExtension finalizers
+ - apiGroups: ["olm.operatorframework.io"]
+ resources: ["clusterextensions/finalizers"]
+ resourceNames: ["netobserv-operator"]
+ verbs: ["update"]
+
+ # OpenShift console and network config
+ - apiGroups: ["operator.openshift.io"]
+ resources: ["consoles"]
+ verbs: ["get", "list", "update", "watch"]
+ - apiGroups: ["operator.openshift.io"]
+ resources: ["networks"]
+ verbs: ["get", "list", "watch"]
+
+ # RBAC
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["clusterroles", "clusterrolebindings"]
+ verbs: ["create", "delete", "get", "list", "update", "watch", "patch"]
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["roles"]
+ verbs: ["list", "watch"]
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["rolebindings"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
+
+ # Security context constraints
+ - apiGroups: ["security.openshift.io"]
+ resources: ["securitycontextconstraints"]
+ verbs: ["create", "list", "update", "watch"]
+ - apiGroups: ["security.openshift.io"]
+ resources: ["securitycontextconstraints"]
+ resourceNames: ["hostnetwork"]
+ verbs: ["use"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: netobserv-operator-installer
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: netobserv-operator-installer
+subjects:
+ - kind: ServiceAccount
+ name: netobserv-operator-installer
+ namespace: openshift-netobserv-operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: netobserv-operator-installer
+ namespace: openshift-netobserv-operator
+rules:
+ # Namespace-scoped permissions needed for operator installation
+ # The operator needs to manage Roles and RoleBindings in its own namespace
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["roles"]
+ verbs: ["create"]
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["roles"]
+ resourceNames: ["netobserv-openshift-netobserv-operator-prometheus"]
+ verbs: ["delete", "get", "patch", "update"]
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["rolebindings"]
+ resourceNames: ["netobserv-openshift-netobserv-operator-prometheus"]
+ verbs: ["delete", "get", "patch", "update"]
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["rolebindings"]
+ verbs: ["create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: netobserv-operator-installer
+ namespace: openshift-netobserv-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: netobserv-operator-installer
+subjects:
+ - kind: ServiceAccount
+ name: netobserv-operator-installer
+ namespace: openshift-netobserv-operator
+---
+apiVersion: olm.operatorframework.io/v1
+kind: ClusterExtension
+metadata:
+ name: netobserv-operator
+spec:
+ namespace: openshift-netobserv-operator
+ serviceAccount:
+ name: netobserv-operator-installer
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName: netobserv-operator
+ channels: [stable]
diff --git a/bindata/observability/08-flowcollector.yaml b/bindata/observability/08-flowcollector.yaml
new file mode 100644
index 0000000000..d4f975b080
--- /dev/null
+++ b/bindata/observability/08-flowcollector.yaml
@@ -0,0 +1,15 @@
+apiVersion: flows.netobserv.io/v1beta2
+kind: FlowCollector
+metadata:
+ name: cluster
+spec:
+ agent:
+ ebpf:
+ features:
+ - DNSTracking
+ sampling: 400
+ type: eBPF
+ deploymentModel: Service
+ loki:
+ enable: false
+ namespace: openshift-network-observability
diff --git a/go.mod b/go.mod
index 12ea627c25..917847997e 100644
--- a/go.mod
+++ b/go.mod
@@ -161,3 +161,5 @@ require (
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect
)
+
+// TODO(review): temporary replace pointing openshift/api at a personal fork
+// (OlivierCazade/api) carrying the NetworkObservability API; drop this once
+// the API change merges upstream so releases do not depend on a fork.
+replace github.com/openshift/api v0.0.0-20260320151444-324a1bcb9f55 => github.com/OlivierCazade/api v0.0.0-20260324144412-012c4cdbbb5b
diff --git a/go.sum b/go.sum
index e0875a468f..a28f940c7a 100644
--- a/go.sum
+++ b/go.sum
@@ -12,6 +12,8 @@ github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj
github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/OlivierCazade/api v0.0.0-20260324144412-012c4cdbbb5b h1:gP0wnvRizYDHlzLptWxAL3BPC1p6iTkqvC9R43Lexn4=
+github.com/OlivierCazade/api v0.0.0-20260324144412-012c4cdbbb5b/go.mod h1:pyVjK0nZ4sRs4fuQVQ4rubsJdahI1PB94LnQ8sGdvxo=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -209,8 +211,6 @@ github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg=
-github.com/openshift/api v0.0.0-20260320151444-324a1bcb9f55 h1:2h6bqs9ua3wrsQnxEbzys3/n5IohLC7Dyb/KgaVYC/A=
-github.com/openshift/api v0.0.0-20260320151444-324a1bcb9f55/go.mod h1:pyVjK0nZ4sRs4fuQVQ4rubsJdahI1PB94LnQ8sGdvxo=
github.com/openshift/build-machinery-go v0.0.0-20251023084048-5d77c1a5e5af h1:UiYYMi/CCV+kwWrXuXfuUSOY2yNXOpWpNVgHc6aLQlE=
github.com/openshift/build-machinery-go v0.0.0-20251023084048-5d77c1a5e5af/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE=
github.com/openshift/client-go v0.0.0-20260320040014-4b5fc2cdad98 h1:Ssuo/zELWqb7pFCwzB3QGEA4QeLW948hL2AhWq2SWjs=
diff --git a/manifests/0000_70_cluster-network-operator_02_rbac_observability.yaml b/manifests/0000_70_cluster-network-operator_02_rbac_observability.yaml
new file mode 100644
index 0000000000..f00b40f99e
--- /dev/null
+++ b/manifests/0000_70_cluster-network-operator_02_rbac_observability.yaml
@@ -0,0 +1,42 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cno-observability
+rules:
+ # Manage the netobserv namespace itself
+ - apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "create", "list"]
+
+ # Manage ServiceAccounts for ClusterExtension installer
+ - apiGroups: [""]
+ resources: ["serviceaccounts"]
+ verbs: ["get", "create", "list"]
+
+ # Manage OLM v1 resources for operator installation
+ - apiGroups: ["olm.operatorframework.io"]
+ resources: ["clusterextensions"]
+ verbs: ["get", "list", "create", "update", "patch"]
+
+ # Check for FlowCollector CRD to determine if operator is installed
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get"]
+
+ # Manage FlowCollector CRs
+ - apiGroups: ["flows.netobserv.io"]
+ resources: ["flowcollectors"]
+ verbs: ["get", "create", "update", "patch", "delete"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cno-observability
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cno-observability
+subjects:
+ - kind: ServiceAccount
+ name: cluster-network-operator
+ namespace: openshift-network-operator
diff --git a/pkg/controller/add_networkconfig.go b/pkg/controller/add_networkconfig.go
index 64c8b2399b..d183508295 100644
--- a/pkg/controller/add_networkconfig.go
+++ b/pkg/controller/add_networkconfig.go
@@ -8,6 +8,7 @@ import (
"github.com/openshift/cluster-network-operator/pkg/controller/egress_router"
"github.com/openshift/cluster-network-operator/pkg/controller/infrastructureconfig"
"github.com/openshift/cluster-network-operator/pkg/controller/ingressconfig"
+ "github.com/openshift/cluster-network-operator/pkg/controller/observability"
"github.com/openshift/cluster-network-operator/pkg/controller/operconfig"
"github.com/openshift/cluster-network-operator/pkg/controller/pki"
"github.com/openshift/cluster-network-operator/pkg/controller/proxyconfig"
@@ -28,5 +29,6 @@ func init() {
infrastructureconfig.Add,
allowlist.Add,
dashboards.Add,
+ observability.Add,
)
}
diff --git a/pkg/controller/observability/observability_controller.go b/pkg/controller/observability/observability_controller.go
new file mode 100644
index 0000000000..b12dc0d4e6
--- /dev/null
+++ b/pkg/controller/observability/observability_controller.go
@@ -0,0 +1,464 @@
+package observability
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
+ cnoclient "github.com/openshift/cluster-network-operator/pkg/client"
+ "github.com/openshift/cluster-network-operator/pkg/controller/statusmanager"
+ "github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ applyconfigmetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ crclient "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+const (
+	// Bundled manifest paths applied by this controller.
+	OperatorYAML      = "bindata/observability/07-observability-operator.yaml"
+	FlowCollectorYAML = "bindata/observability/08-flowcollector.yaml"
+	// NetObservNamespace is where the FlowCollector components run.
+	NetObservNamespace = "openshift-network-observability"
+	// OperatorNamespace is where the NetObserv operator itself is installed.
+	OperatorNamespace = "openshift-netobserv-operator"
+	// FlowCollectorVersion / FlowCollectorName identify the cluster-scoped
+	// FlowCollector singleton ("cluster" is the only allowed name).
+	FlowCollectorVersion = "v1beta2"
+	FlowCollectorName    = "cluster"
+
+	// NetworkObservabilityDeployed is the Network status condition type used
+	// to remember that a deployment already happened (install-once policy).
+	NetworkObservabilityDeployed = "NetworkObservabilityDeployed"
+
+	// Polling parameters for waiting on the operator's ClusterExtension.
+	checkInterval = 10 * time.Second
+	checkTimeout  = 10 * time.Minute
+	requeueAfterOLM      = 5 * time.Minute  // Requeue interval for OLM operations (install/wait)
+	requeueAfterStandard = 30 * time.Second // Requeue interval for standard operations
+)
+
+// Add creates the observability controller and registers it with the
+// manager. Referenced in add_networkconfig.go.
+func Add(mgr manager.Manager, status *statusmanager.StatusManager, cnoClient cnoclient.Client, featureGate featuregates.FeatureGate) error {
+	klog.Info("Add Network Observability Operator to manager")
+	cfgClient, err := configv1client.NewForConfig(cnoClient.Default().Config())
+	if err != nil {
+		return fmt.Errorf("failed to create config client: %w", err)
+	}
+	reconciler := newReconciler(mgr.GetClient(), status, cfgClient, featureGate)
+	return add(mgr, reconciler)
+}
+
+// newReconciler builds a ReconcileObservability with its collaborators.
+func newReconciler(client crclient.Client, status *statusmanager.StatusManager, configClient configv1client.ConfigV1Interface, featureGate featuregates.FeatureGate) *ReconcileObservability {
+	r := &ReconcileObservability{}
+	r.client = client
+	r.status = status
+	r.configClient = configClient
+	r.featureGate = featureGate
+	return r
+}
+
+// add registers the reconciler with the manager and wires a watch on the
+// config.openshift.io Network CR.
+func add(mgr manager.Manager, r *ReconcileObservability) error {
+	c, err := controller.New("observability-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+	networkEvents := source.Kind(mgr.GetCache(), &configv1.Network{}, &handler.TypedEnqueueRequestForObject[*configv1.Network]{})
+	return c.Watch(networkEvents)
+}
+
+// Compile-time assertion that ReconcileObservability implements reconcile.Reconciler.
+var _ reconcile.Reconciler = &ReconcileObservability{}
+
+// StatusReporter is an interface for reporting status. It covers the subset
+// of degraded-status methods this controller calls, which also allows a
+// stub implementation to be supplied in place of *statusmanager.StatusManager.
+type StatusReporter interface {
+	SetDegraded(level statusmanager.StatusLevel, reason, message string)
+	SetNotDegraded(level statusmanager.StatusLevel)
+}
+
+// ReconcileObservability installs and manages Network Observability.
+type ReconcileObservability struct {
+	client       crclient.Client                  // controller-runtime client for cluster resources
+	status       StatusReporter                   // degraded-status reporting
+	configClient configv1client.ConfigV1Interface // config.openshift.io typed client; may be nil in tests
+	featureGate  featuregates.FeatureGate         // feature-gate accessor; may be nil in tests
+}
+
+// Reconcile reacts to changes in Network CR.
+// Flow: feature-gate check -> installation-policy check -> operator install
+// and readiness wait -> FlowCollector creation -> deployed-condition update.
+// Transient failures are logged and retried via RequeueAfter with a nil
+// error, so they do not surface as controller errors.
+func (r *ReconcileObservability) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	klog.Info("Reconcile Network Observability")
+
+	// Both the Network CR and the FlowCollector are singletons named
+	// "cluster", hence the reuse of FlowCollectorName for the Network name.
+	if req.Name != FlowCollectorName {
+		return ctrl.Result{}, nil // only reconcile the singleton Network object
+	}
+
+	// Check if NetworkObservabilityInstall feature gate is enabled
+	if !r.isFeatureGateEnabled() {
+		klog.V(4).Info("NetworkObservabilityInstall feature gate is disabled, skipping Network Observability management")
+		// Clear any degraded status
+		r.status.SetNotDegraded(statusmanager.ObservabilityConfig)
+		return ctrl.Result{}, nil
+	}
+
+	// Get Network CR information
+	var network configv1.Network
+	if err := r.client.Get(ctx, types.NamespacedName{Name: FlowCollectorName}, &network); err != nil {
+		// A missing Network CR is not an error; anything else is returned.
+		return ctrl.Result{}, crclient.IgnoreNotFound(err)
+	}
+
+	// Check if Network Observability should be enabled
+	shouldInstall, err := r.shouldInstallNetworkObservability(ctx, &network)
+	if err != nil {
+		klog.Warningf("Failed to determine if Network Observability should be installed: %v. Will retry in %v.", err, requeueAfterStandard)
+		return ctrl.Result{RequeueAfter: requeueAfterStandard}, nil
+	}
+	if !shouldInstall {
+		r.status.SetNotDegraded(statusmanager.ObservabilityConfig)
+		return ctrl.Result{}, nil
+	}
+
+	// Proceed with installation/reinstallation
+	installed, err := r.isNetObservOperatorInstalled(ctx)
+	if err != nil {
+		klog.Warningf("Failed to check if Network Observability Operator is installed: %v. Will retry in %v.", err, requeueAfterStandard)
+		return ctrl.Result{RequeueAfter: requeueAfterStandard}, nil
+	}
+	if !installed {
+		// Install Network Observability Operator
+		if err := r.installNetObservOperator(ctx); err != nil {
+			klog.Warningf("Failed to install Network Observability Operator: %v. Will retry in %v.", err, requeueAfterOLM)
+			return ctrl.Result{RequeueAfter: requeueAfterOLM}, nil
+		}
+
+		// Wait for Network Observability Operator to be ready
+		klog.Info("Wait for Network Observability to be ready")
+		if err := r.waitForNetObservOperator(ctx); err != nil {
+			// NOTE(review): stdlib errors.Is(err, context.DeadlineExceeded)
+			// would be more robust here in case the wait package wraps the
+			// deadline error; "errors" in this file is the apimachinery
+			// package, so an aliased import would be needed — confirm before
+			// changing. The mismatch only affects which log line is emitted.
+			if err == context.DeadlineExceeded {
+				klog.Warningf("Timed out waiting for Network Observability Operator to be ready after %v. Will retry in %v.", checkTimeout, requeueAfterOLM)
+			} else {
+				klog.Warningf("Failed waiting for Network Observability Operator: %v. Will retry in %v.", err, requeueAfterOLM)
+			}
+			return ctrl.Result{RequeueAfter: requeueAfterOLM}, nil
+		}
+	}
+
+	// Check if FlowCollector already exists
+	flowCollectorExists, err := r.isFlowCollectorExists(ctx)
+	if err != nil {
+		klog.Warningf("Failed to check if FlowCollector exists: %v. Will retry in %v.", err, requeueAfterStandard)
+		return ctrl.Result{RequeueAfter: requeueAfterStandard}, nil
+	}
+
+	if !flowCollectorExists {
+		// Create FlowCollector
+		if err := r.createFlowCollector(ctx); err != nil {
+			klog.Warningf("Failed to create FlowCollector: %v. Will retry in %v.", err, requeueAfterStandard)
+			return ctrl.Result{RequeueAfter: requeueAfterStandard}, nil
+		}
+		klog.Info("FlowCollector created successfully")
+	}
+
+	// Mark as deployed to track deployment status
+	// Skip this if configClient is nil (e.g., in tests)
+	if r.configClient != nil {
+		if err := r.markNetworkObservabilityDeployed(ctx); err != nil {
+			klog.Warningf("Failed to mark Network Observability as deployed: %v. Will retry in %v.", err, requeueAfterStandard)
+			return ctrl.Result{RequeueAfter: requeueAfterStandard}, nil
+		}
+	}
+
+	klog.V(4).Info("Network Observability is deployed")
+	r.status.SetNotDegraded(statusmanager.ObservabilityConfig)
+	return ctrl.Result{}, nil
+}
+
+// isFeatureGateEnabled reports whether the NetworkObservabilityInstall
+// feature gate is enabled. A nil gate accessor (e.g. in tests) and a gate
+// that is not yet registered in the cluster (older API versions) both
+// default to disabled.
+func (r *ReconcileObservability) isFeatureGateEnabled() bool {
+	if r.featureGate == nil {
+		return false // Default to disabled in tests
+	}
+
+	gate := configv1.FeatureGateName("NetworkObservabilityInstall")
+
+	// Only query the gate when the cluster actually knows about it, to
+	// avoid a panic when the feature gate doesn't exist yet.
+	for _, known := range r.featureGate.KnownFeatures() {
+		if known != gate {
+			continue
+		}
+		return r.featureGate.Enabled(gate)
+	}
+
+	klog.V(4).Info("NetworkObservabilityInstall feature gate is not registered yet, defaulting to disabled")
+	return false
+}
+
+// wasNetworkObservabilityDeployed reports whether the cluster Network status
+// carries the NetworkObservabilityDeployed condition with status True.
+func (r *ReconcileObservability) wasNetworkObservabilityDeployed(ctx context.Context) (bool, error) {
+	network, err := r.configClient.Networks().Get(ctx, FlowCollectorName, metav1.GetOptions{})
+	if err != nil {
+		return false, err
+	}
+
+	for i := range network.Status.Conditions {
+		cond := &network.Status.Conditions[i]
+		if cond.Type != NetworkObservabilityDeployed {
+			continue
+		}
+		return cond.Status == metav1.ConditionTrue, nil
+	}
+
+	// Condition never recorded: not deployed.
+	return false, nil
+}
+
+// markNetworkObservabilityDeployed records the NetworkObservabilityDeployed
+// condition as True on the cluster Network status via server-side apply.
+func (r *ReconcileObservability) markNetworkObservabilityDeployed(ctx context.Context) error {
+	deployedCondition := applyconfigmetav1.Condition().
+		WithType(NetworkObservabilityDeployed).
+		WithStatus(metav1.ConditionTrue).
+		WithReason("DeploymentComplete").
+		WithMessage("Network Observability has been deployed").
+		WithLastTransitionTime(metav1.Now())
+
+	// Server-side apply of the status condition, owned by this controller's
+	// field manager.
+	applyConfig := applyconfigv1.Network(FlowCollectorName).
+		WithStatus(applyconfigv1.NetworkStatus().WithConditions(deployedCondition))
+	opts := metav1.ApplyOptions{
+		FieldManager: "network-observability-controller",
+		Force:        true,
+	}
+	if _, err := r.configClient.Networks().ApplyStatus(ctx, applyConfig, opts); err != nil {
+		return fmt.Errorf("failed to mark Network Observability as deployed: %w", err)
+	}
+
+	klog.Info("Marked Network Observability as deployed")
+	return nil
+}
+
+// shouldInstallNetworkObservability decides whether Network Observability
+// should be installed for the given Network CR.
+// Policy values:
+//   - "DoNotInstall": never install (explicit opt-out).
+//   - "InstallAndEnable": always install, even on SNO clusters, and
+//     reinstall whenever it is missing.
+//   - nil or "": opt-out model — install once on non-SNO clusters, and do
+//     not reinstall after the deployed condition has been recorded.
+func (r *ReconcileObservability) shouldInstallNetworkObservability(ctx context.Context, network *configv1.Network) (bool, error) {
+	if policy := network.Spec.NetworkObservability.InstallationPolicy; policy != nil {
+		switch *policy {
+		case configv1.NetworkObservabilityDoNotInstall:
+			// Explicit opt-out.
+			return false, nil
+		case configv1.NetworkObservabilityInstallAndEnable:
+			// Explicit opt-in: install regardless of topology or prior state.
+			return true, nil
+		}
+		// Any other value (including "") falls through to the default policy.
+	}
+
+	// Default policy: install at most once. The deployed-condition check is
+	// skipped when configClient is nil (e.g., in tests).
+	if r.configClient != nil {
+		deployed, err := r.wasNetworkObservabilityDeployed(ctx)
+		if err != nil {
+			return false, err
+		}
+		if deployed {
+			klog.V(4).Info("Network Observability already deployed (default policy), skipping reinstallation")
+			return false, nil
+		}
+	}
+
+	// SNO clusters are excluded from the default opt-out installation;
+	// everything else installs by default.
+	sno, err := r.isSingleNodeCluster(ctx)
+	if err != nil {
+		return false, err
+	}
+	return !sno, nil
+}
+
+// isSingleNodeCluster returns true if the cluster is a Single Node OpenShift (SNO) cluster.
+// A cluster is SNO if ControlPlaneTopology is SingleReplica.
+func (r *ReconcileObservability) isSingleNodeCluster(ctx context.Context) (bool, error) {
+ infra := &configv1.Infrastructure{}
+ if err := r.client.Get(ctx, types.NamespacedName{Name: "cluster"}, infra); err != nil {
+ return false, err
+ }
+
+ return infra.Status.ControlPlaneTopology == configv1.SingleReplicaTopologyMode, nil
+}
+
+// isNetObservOperatorInstalled reports whether the FlowCollector CRD exists,
+// which is used as the signal that the operator has been installed.
+func (r *ReconcileObservability) isNetObservOperatorInstalled(ctx context.Context) (bool, error) {
+	crd := &unstructured.Unstructured{}
+	crd.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "apiextensions.k8s.io",
+		Version: "v1",
+		Kind:    "CustomResourceDefinition",
+	})
+
+	err := r.client.Get(ctx, types.NamespacedName{Name: "flowcollectors.flows.netobserv.io"}, crd)
+	switch {
+	case err == nil:
+		return true, nil
+	case errors.IsNotFound(err):
+		// CRD absent: operator not installed (not an error).
+		return false, nil
+	default:
+		return false, err
+	}
+}
+
+// applyManifest reads a (possibly multi-document) YAML file from yamlPath and
+// applies every object in it with server-side apply, forcing ownership under
+// the "cno-observability-controller" field manager. description is only used
+// for error and log messages.
+func (r *ReconcileObservability) applyManifest(ctx context.Context, yamlPath, description string) error {
+	yamlBytes, err := os.ReadFile(yamlPath)
+	if err != nil {
+		return fmt.Errorf("failed to read %s manifest %s: %w", description, yamlPath, err)
+	}
+
+	// Decode documents one at a time; io.EOF terminates the stream.
+	dec := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(yamlBytes), 4096)
+	for {
+		obj := &unstructured.Unstructured{}
+		if err := dec.Decode(obj); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		// Skip empty documents (e.g. trailing "---" separators).
+		if obj.GetKind() == "" {
+			continue
+		}
+		// managedFields must not be present in an apply request body.
+		obj.SetManagedFields(nil)
+
+		// Marshal object to JSON for RawPatch
+		data, err := obj.MarshalJSON()
+		if err != nil {
+			return fmt.Errorf("failed to marshal %s %s: %w", obj.GetKind(), obj.GetName(), err)
+		}
+
+		// Use RawPatch with ApplyPatchType to avoid deprecated crclient.Apply
+		patch := crclient.RawPatch(types.ApplyPatchType, data)
+		if err := r.client.Patch(ctx, obj, patch, &crclient.PatchOptions{
+			Force:        ptr.To(true),
+			FieldManager: "cno-observability-controller",
+		}); err != nil {
+			return fmt.Errorf("failed to apply %s %s: %w", obj.GetKind(), obj.GetName(), err)
+		}
+		klog.Infof("Applied %s %s", description, obj.GetName())
+	}
+	klog.Infof("Successfully applied %s", description)
+	return nil
+}
+
+// installNetObservOperator applies the bundled operator manifest (namespace,
+// installer RBAC, and the OLM v1 ClusterExtension) from OperatorYAML.
+func (r *ReconcileObservability) installNetObservOperator(ctx context.Context) error {
+	return r.applyManifest(ctx, OperatorYAML, "Network Observability Operator")
+}
+
+// waitForNetObservOperator polls (every checkInterval, up to checkTimeout)
+// until the "netobserv-operator" ClusterExtension reports an "Installed"
+// condition with status "True". A missing ClusterExtension is treated as
+// "not ready yet", not as an error.
+func (r *ReconcileObservability) waitForNetObservOperator(ctx context.Context) error {
+	condition := func(ctx context.Context) (bool, error) {
+		// Get the ClusterExtension resource
+		clusterExtension := &unstructured.Unstructured{}
+		clusterExtension.SetGroupVersionKind(schema.GroupVersionKind{
+			Group:   "olm.operatorframework.io",
+			Version: "v1",
+			Kind:    "ClusterExtension",
+		})
+
+		if err := r.client.Get(ctx, types.NamespacedName{Name: "netobserv-operator"}, clusterExtension); err != nil {
+			if errors.IsNotFound(err) {
+				return false, nil
+			}
+			return false, err
+		}
+
+		// Check the status conditions for "Installed" condition with status True
+		conditions, found, err := unstructured.NestedSlice(clusterExtension.Object, "status", "conditions")
+		if err != nil {
+			return false, err
+		}
+		if !found {
+			// Status not populated yet; keep polling.
+			return false, nil
+		}
+
+		for _, cond := range conditions {
+			condMap, ok := cond.(map[string]interface{})
+			if !ok {
+				continue
+			}
+			condType, _, _ := unstructured.NestedString(condMap, "type")
+			condStatus, _, _ := unstructured.NestedString(condMap, "status")
+
+			// Check for "Installed" condition with status "True"
+			if condType == "Installed" && condStatus == "True" {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+	// immediate=true: the condition is evaluated once before the first sleep.
+	return wait.PollUntilContextTimeout(ctx, checkInterval, checkTimeout, true, condition)
+}
+
+// isFlowCollectorExists reports whether a FlowCollector instance exists.
+// Note: FlowCollector is a cluster-scoped singleton resource and can only be
+// named "cluster".
+func (r *ReconcileObservability) isFlowCollectorExists(ctx context.Context) (bool, error) {
+	fc := &unstructured.Unstructured{}
+	fc.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "flows.netobserv.io",
+		Version: FlowCollectorVersion,
+		Kind:    "FlowCollector",
+	})
+
+	switch err := r.client.Get(ctx, types.NamespacedName{Name: FlowCollectorName}, fc); {
+	case err == nil:
+		return true, nil
+	case errors.IsNotFound(err):
+		return false, nil
+	default:
+		return false, err
+	}
+}
+
+// createFlowCollector ensures the netobserv namespace exists and then applies
+// the FlowCollector manifest from FlowCollectorYAML.
+func (r *ReconcileObservability) createFlowCollector(ctx context.Context) error {
+	// Ensure the netobserv namespace exists before applying manifests.
+	ns := &corev1.Namespace{}
+	if err := r.client.Get(ctx, types.NamespacedName{Name: NetObservNamespace}, ns); err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+		createErr := r.client.Create(ctx, &corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{Name: NetObservNamespace},
+		})
+		switch {
+		case createErr == nil:
+			klog.Infof("Created namespace %s", NetObservNamespace)
+		case errors.IsAlreadyExists(createErr):
+			// Another actor created the namespace between our Get and Create;
+			// that is fine — the namespace exists either way, so don't fail
+			// the reconcile over it.
+			klog.V(4).Infof("Namespace %s already exists", NetObservNamespace)
+		default:
+			return fmt.Errorf("failed to create namespace %s: %w", NetObservNamespace, createErr)
+		}
+	}
+
+	return r.applyManifest(ctx, FlowCollectorYAML, "FlowCollector")
+}
diff --git a/pkg/controller/observability/observability_controller_test.go b/pkg/controller/observability/observability_controller_test.go
new file mode 100644
index 0000000000..8a33900737
--- /dev/null
+++ b/pkg/controller/observability/observability_controller_test.go
@@ -0,0 +1,1441 @@
+package observability
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+
+ . "github.com/onsi/gomega"
+
+ configv1 "github.com/openshift/api/config/v1"
+ "github.com/openshift/cluster-network-operator/pkg/controller/statusmanager"
+ "github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+// Helper functions for creating test resources
+
+// createTestNetwork builds a config.openshift.io Network object for tests.
+// When value is non-empty it is set as the NetworkObservability InstallationPolicy;
+// an empty value leaves the spec unset (the "nil policy" default case).
+func createTestNetwork(name string, value string) *configv1.Network {
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ }
+
+ if value != "" {
+ policy := configv1.NetworkObservabilityInstallationPolicy(value)
+ network.Spec.NetworkObservability = configv1.NetworkObservabilitySpec{
+ InstallationPolicy: &policy,
+ }
+ }
+
+ return network
+}
+
+// createTestFlowCollector builds an unstructured FlowCollector
+// (flows.netobserv.io) with the given name for use with the fake client.
+func createTestFlowCollector(name string) *unstructured.Unstructured {
+ fc := &unstructured.Unstructured{}
+ fc.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: "flows.netobserv.io",
+ Version: FlowCollectorVersion,
+ Kind: "FlowCollector",
+ })
+ fc.SetName(name)
+ return fc
+}
+
+// createTestNamespace builds a bare corev1 Namespace with the given name.
+func createTestNamespace(name string) *corev1.Namespace {
+ return &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ }
+}
+
+// createTestInfrastructure builds the singleton "cluster" Infrastructure object
+// with the given control-plane topology (used to simulate SNO vs HA clusters).
+func createTestInfrastructure(topology configv1.TopologyMode) *configv1.Infrastructure {
+ return &configv1.Infrastructure{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster",
+ },
+ Status: configv1.InfrastructureStatus{
+ ControlPlaneTopology: topology,
+ },
+ }
+}
+
+// createTestCRD builds an unstructured apiextensions.k8s.io/v1
+// CustomResourceDefinition with the given name.
+func createTestCRD(name string) *unstructured.Unstructured {
+ crd := &unstructured.Unstructured{}
+ crd.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: "apiextensions.k8s.io",
+ Version: "v1",
+ Kind: "CustomResourceDefinition",
+ })
+ crd.SetName(name)
+ return crd
+}
+
+// createTestClusterExtension builds an unstructured OLM v1 ClusterExtension
+// whose status carries an "Installed" condition set to "True" or "False"
+// according to the installed flag.
+func createTestClusterExtension(name string, installed bool) *unstructured.Unstructured {
+ ce := &unstructured.Unstructured{}
+ ce.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: "olm.operatorframework.io",
+ Version: "v1",
+ Kind: "ClusterExtension",
+ })
+ ce.SetName(name)
+
+ // Set status conditions
+ conditions := []interface{}{
+ map[string]interface{}{
+ "type": "Installed",
+ "status": func() string {
+ if installed {
+ return "True"
+ }
+ return "False"
+ }(),
+ "reason": "InstallSucceeded",
+ "message": "ClusterExtension installed successfully",
+ },
+ }
+ // Error intentionally ignored: the condition map contains only JSON-compatible types.
+ _ = unstructured.SetNestedSlice(ce.Object, conditions, "status", "conditions")
+ return ce
+}
+
+// createTempManifest writes content to a manifest.yaml file in a per-test temp
+// directory (cleaned up automatically by t.TempDir) and returns its path.
+func createTempManifest(t *testing.T, content string) string {
+ t.Helper()
+ tmpDir := t.TempDir()
+ filePath := filepath.Join(tmpDir, "manifest.yaml")
+ err := os.WriteFile(filePath, []byte(content), 0644)
+ if err != nil {
+ t.Fatalf("Failed to create temp manifest: %v", err)
+ }
+ return filePath
+}
+
+// Mock status manager that implements the methods the controller needs.
+// It records every SetDegraded/SetNotDegraded call so tests can assert on them.
+type mockStatusManager struct {
+ mu sync.Mutex // guards both call-recording slices
+ degradedCalls []degradedCall
+ notDegradedCalls []statusmanager.StatusLevel
+}
+
+// degradedCall captures the arguments of one SetDegraded invocation.
+type degradedCall struct {
+ level statusmanager.StatusLevel
+ reason string
+ message string
+}
+
+// SetDegraded records the degraded-status call; safe on a nil receiver so the
+// controller can be exercised without a status manager wired in.
+func (m *mockStatusManager) SetDegraded(level statusmanager.StatusLevel, reason, message string) {
+ if m != nil {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.degradedCalls = append(m.degradedCalls, degradedCall{level, reason, message})
+ }
+}
+
+// SetNotDegraded records the not-degraded call; safe on a nil receiver.
+func (m *mockStatusManager) SetNotDegraded(level statusmanager.StatusLevel) {
+ if m != nil {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.notDegradedCalls = append(m.notDegradedCalls, level)
+ }
+}
+
+// newMockStatusManager returns a mockStatusManager with non-nil (empty) call
+// slices so length assertions work without nil checks.
+func newMockStatusManager() *mockStatusManager {
+ return &mockStatusManager{
+ degradedCalls: []degradedCall{},
+ notDegradedCalls: []statusmanager.StatusLevel{},
+ }
+}
+
+// createEnabledFeatureGate returns a feature gate with NetworkObservabilityInstall
+// enabled (first argument lists enabled gates, second lists disabled ones).
+func createEnabledFeatureGate() featuregates.FeatureGate {
+ return featuregates.NewFeatureGate(
+ []configv1.FeatureGateName{"NetworkObservabilityInstall"},
+ []configv1.FeatureGateName{},
+ )
+}
+
+// Test shouldInstallNetworkObservability()
+
+// TestShouldInstallNetworkObservability_NilNonSNO: with no policy set, the
+// default on a multi-node (non-SNO) cluster is to install.
+func TestShouldInstallNetworkObservability_NilNonSNO(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+ Spec: configv1.NetworkSpec{
+ // NetworkObservability not set: Default behavior should install on non-SNO
+ },
+ }
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infra).Build()
+ r := &ReconcileObservability{client: client}
+
+ result, err := r.shouldInstallNetworkObservability(context.TODO(), network)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(BeTrue())
+}
+
+// TestShouldInstallNetworkObservability_NilSNO: with no policy set, the
+// default on a single-node (SNO) cluster is to NOT install.
+func TestShouldInstallNetworkObservability_NilSNO(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+ Spec: configv1.NetworkSpec{
+ // NetworkObservability not set: Default behavior should NOT install on SNO
+ },
+ }
+ infra := createTestInfrastructure(configv1.SingleReplicaTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infra).Build()
+ r := &ReconcileObservability{client: client}
+
+ result, err := r.shouldInstallNetworkObservability(context.TODO(), network)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(BeFalse())
+}
+
+// TestShouldInstallNetworkObservability_ExplicitInstallAndEnableNonSNO: an
+// explicit InstallAndEnable policy installs on a multi-node cluster.
+func TestShouldInstallNetworkObservability_ExplicitInstallAndEnableNonSNO(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+ Spec: configv1.NetworkSpec{
+ NetworkObservability: configv1.NetworkObservabilitySpec{
+ InstallationPolicy: ptr.To(configv1.NetworkObservabilityInstallAndEnable),
+ },
+ },
+ }
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infra).Build()
+ r := &ReconcileObservability{client: client}
+
+ result, err := r.shouldInstallNetworkObservability(context.TODO(), network)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(BeTrue())
+}
+
+// TestShouldInstallNetworkObservability_ExplicitInstallAndEnableSNO: an explicit
+// InstallAndEnable policy overrides the SNO default and installs anyway.
+func TestShouldInstallNetworkObservability_ExplicitInstallAndEnableSNO(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+ Spec: configv1.NetworkSpec{
+ NetworkObservability: configv1.NetworkObservabilitySpec{
+ InstallationPolicy: ptr.To(configv1.NetworkObservabilityInstallAndEnable), // Explicit InstallAndEnable: install even on SNO
+ },
+ },
+ }
+ infra := createTestInfrastructure(configv1.SingleReplicaTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infra).Build()
+ r := &ReconcileObservability{client: client}
+
+ result, err := r.shouldInstallNetworkObservability(context.TODO(), network)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(BeTrue())
+}
+
+// TestShouldInstallNetworkObservability_ExplicitDoNotInstall: an explicit
+// DoNotInstall policy suppresses installation regardless of topology.
+func TestShouldInstallNetworkObservability_ExplicitDoNotInstall(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+ Spec: configv1.NetworkSpec{
+ NetworkObservability: configv1.NetworkObservabilitySpec{
+ InstallationPolicy: ptr.To(configv1.NetworkObservabilityDoNotInstall),
+ },
+ },
+ }
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infra).Build()
+ r := &ReconcileObservability{client: client}
+
+ result, err := r.shouldInstallNetworkObservability(context.TODO(), network)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(BeFalse())
+}
+
+// TestShouldInstallNetworkObservability_EmptyStringNonSNO: the NoOpinion
+// (empty string) policy falls back to the default, which installs on non-SNO.
+func TestShouldInstallNetworkObservability_EmptyStringNonSNO(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+ Spec: configv1.NetworkSpec{
+ NetworkObservability: configv1.NetworkObservabilitySpec{
+ InstallationPolicy: ptr.To(configv1.NetworkObservabilityNoOpinion), // Empty string: default behavior (install on non-SNO)
+ },
+ },
+ }
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infra).Build()
+ r := &ReconcileObservability{client: client}
+
+ result, err := r.shouldInstallNetworkObservability(context.TODO(), network)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(BeTrue())
+}
+
+// Test isSingleNodeCluster()
+
+// TestIsSingleNodeCluster_SNO: SingleReplica control-plane topology is
+// reported as a single-node cluster.
+func TestIsSingleNodeCluster_SNO(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ infra := createTestInfrastructure(configv1.SingleReplicaTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infra).Build()
+ r := &ReconcileObservability{client: client}
+
+ isSNO, err := r.isSingleNodeCluster(context.TODO())
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(isSNO).To(BeTrue())
+}
+
+// TestIsSingleNodeCluster_HighlyAvailable: HighlyAvailable topology is not a
+// single-node cluster.
+func TestIsSingleNodeCluster_HighlyAvailable(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infra).Build()
+ r := &ReconcileObservability{client: client}
+
+ isSNO, err := r.isSingleNodeCluster(context.TODO())
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(isSNO).To(BeFalse())
+}
+
+// Test Reconcile() - Main Controller Logic
+
+// TestReconcile_IgnoresNonClusterNetwork: requests for Network objects other
+// than the "cluster" singleton are ignored without error or requeue.
+func TestReconcile_IgnoresNonClusterNetwork(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ network := createTestNetwork("not-cluster", "InstallAndEnable")
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(network).Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "not-cluster"}}
+ result, err := r.Reconcile(context.TODO(), req)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(Equal(ctrl.Result{}))
+}
+
+// TestReconcile_SkipsWhenDisabled: a DoNotInstall policy makes reconciliation
+// a no-op (no error, no requeue).
+func TestReconcile_SkipsWhenDisabled(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ // Explicitly set to false (opt-out)
+ network := createTestNetwork("cluster", "DoNotInstall")
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(network).Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+ result, err := r.Reconcile(context.TODO(), req)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(Equal(ctrl.Result{}))
+}
+
+// TestReconcile_InstallsWhenNil: with no policy on a non-SNO cluster the
+// controller attempts installation (opt-out behavior); since the operator
+// manifest is absent in tests, it requeues with requeueAfterOLM.
+func TestReconcile_InstallsWhenNil(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ // Create network with no NetworkObservability field (defaults to enabled on non-SNO)
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster",
+ },
+ Spec: configv1.NetworkSpec{
+ // NetworkObservability not set: defaults to enabled on non-SNO
+ },
+ }
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+ operatorNs := createTestNamespace(OperatorNamespace)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(network, infra, operatorNs).Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ featureGate: createEnabledFeatureGate(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+ result, err := r.Reconcile(context.TODO(), req)
+
+ // When nil, controller should try to install (opt-out behavior)
+ // This will fail because the manifest doesn't exist, but it requeues instead of erroring
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(result.RequeueAfter).To(Equal(requeueAfterOLM))
+}
+
+// TestReconcile_SkipsInstallWhenNilOnSNO: with no policy on an SNO cluster
+// the controller skips installation entirely; the operator namespace must
+// not be created as a side effect.
+func TestReconcile_SkipsInstallWhenNilOnSNO(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ // Create network with no NetworkObservability field on SNO cluster
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster",
+ },
+ Spec: configv1.NetworkSpec{
+ // NetworkObservability not set: defaults to disabled on SNO
+ },
+ }
+ infra := createTestInfrastructure(configv1.SingleReplicaTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(network, infra).Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+ result, err := r.Reconcile(context.TODO(), req)
+
+ // On SNO with nil, controller should skip installation
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(Equal(ctrl.Result{}))
+
+ // Verify that the operator namespace was NOT created
+ ns := &corev1.Namespace{}
+ nsErr := client.Get(context.TODO(), types.NamespacedName{Name: OperatorNamespace}, ns)
+ g.Expect(nsErr).To(HaveOccurred())
+ g.Expect(nsErr.Error()).To(ContainSubstring("not found"))
+}
+
+// TestReconcile_IgnoresNotFound: a request for a Network object that no
+// longer exists resolves cleanly (no error, no requeue).
+func TestReconcile_IgnoresNotFound(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+ result, err := r.Reconcile(context.TODO(), req)
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(Equal(ctrl.Result{}))
+}
+
+// Test isNetObservOperatorInstalled()
+
+// TestIsNetObservOperatorInstalled_True: presence of the FlowCollector CRD
+// is treated as the operator being installed.
+func TestIsNetObservOperatorInstalled_True(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(crd).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ installed, err := r.isNetObservOperatorInstalled(context.TODO())
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(installed).To(BeTrue())
+}
+
+// TestIsNetObservOperatorInstalled_False: with no FlowCollector CRD present
+// the operator is reported as not installed.
+func TestIsNetObservOperatorInstalled_False(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ installed, err := r.isNetObservOperatorInstalled(context.TODO())
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(installed).To(BeFalse())
+}
+
+// TestIsNetObservOperatorInstalled_Multiple: unrelated CRDs do not affect the
+// check; only the FlowCollector CRD matters.
+func TestIsNetObservOperatorInstalled_Multiple(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ // Create multiple CRDs, but only the FlowCollector one should matter
+ crd1 := createTestCRD("other-crds.example.com")
+ crd2 := createTestCRD("flowcollectors.flows.netobserv.io")
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(crd1, crd2).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ installed, err := r.isNetObservOperatorInstalled(context.TODO())
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(installed).To(BeTrue())
+}
+
+// Test waitForNetObservOperator()
+
+// TestWaitForOperator_Success: a ClusterExtension whose Installed condition
+// is True makes the wait return promptly without error.
+func TestWaitForOperator_Success(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ clusterExtension := createTestClusterExtension("netobserv-operator", true)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(clusterExtension).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+
+ err := r.waitForNetObservOperator(ctx)
+
+ g.Expect(err).NotTo(HaveOccurred())
+}
+
+// TestWaitForOperator_Timeout: a ClusterExtension whose Installed condition
+// stays False never satisfies the poll, so the context deadline fires.
+func TestWaitForOperator_Timeout(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ // ClusterExtension exists but its Installed condition is "False"
+ clusterExtension := createTestClusterExtension("netobserv-operator", false)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(clusterExtension).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ err := r.waitForNetObservOperator(ctx)
+
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(err).To(Equal(context.DeadlineExceeded))
+}
+
+// TestWaitForOperator_MissingStatus: a ClusterExtension with no status section
+// at all must be treated as "not installed", so the wait times out.
+// (Previously this test built a ClusterExtension WITH an Installed=False
+// condition via the helper, duplicating TestWaitForOperator_Timeout and never
+// exercising the missing-status path its name promises.)
+func TestWaitForOperator_MissingStatus(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ // Build a ClusterExtension without any status/conditions, bypassing
+ // createTestClusterExtension which always sets an Installed condition.
+ clusterExtension := &unstructured.Unstructured{}
+ clusterExtension.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: "olm.operatorframework.io",
+ Version: "v1",
+ Kind: "ClusterExtension",
+ })
+ clusterExtension.SetName("netobserv-operator")
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(clusterExtension).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ err := r.waitForNetObservOperator(ctx)
+
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(err).To(Equal(context.DeadlineExceeded))
+}
+
+// Test isFlowCollectorExists()
+
+// TestIsFlowCollectorExists_True: the "cluster" FlowCollector singleton is
+// found when present.
+func TestIsFlowCollectorExists_True(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ flowCollector := createTestFlowCollector(FlowCollectorName)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(flowCollector).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ exists, err := r.isFlowCollectorExists(context.TODO())
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(exists).To(BeTrue())
+}
+
+// TestIsFlowCollectorExists_False: an empty cluster yields exists=false
+// without error (NotFound is swallowed).
+func TestIsFlowCollectorExists_False(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ exists, err := r.isFlowCollectorExists(context.TODO())
+
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(exists).To(BeFalse())
+}
+
+// TestIsFlowCollectorExists_OnlyChecksCluster: a FlowCollector under any other
+// name is ignored — only the "cluster" singleton is consulted.
+func TestIsFlowCollectorExists_OnlyChecksCluster(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ // Create a FlowCollector with a different name
+ fcOther := createTestFlowCollector("other")
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(fcOther).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ exists, err := r.isFlowCollectorExists(context.TODO())
+
+ // Should return false because we only check for "cluster"
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(exists).To(BeFalse())
+}
+
+// Test createFlowCollector() - Note: Full testing requires real manifest files
+
+// TestCreateFlowCollector_ManifestNotFound: applying a non-existent manifest
+// path surfaces a read error (exercised via applyManifest directly).
+func TestCreateFlowCollector_ManifestNotFound(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = corev1.AddToScheme(scheme)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ // Test with non-existent manifest by calling applyManifest directly
+ err := r.applyManifest(context.TODO(), "/non/existent/path.yaml", "test")
+
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(err.Error()).To(ContainSubstring("failed to read"))
+}
+
+// Test installNetObservOperator()
+
+// TestInstallNetObservOperator_ManifestNotFound: a missing operator manifest
+// path produces a read error from applyManifest.
+func TestInstallNetObservOperator_ManifestNotFound(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ // Test applyManifest with non-existent path directly
+ err := r.applyManifest(context.TODO(), "/non/existent/operator.yaml", "Network Observability Operator")
+
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(err.Error()).To(ContainSubstring("failed to read"))
+}
+
+// Test applyManifest()
+
+// TestApplyManifest_SingleResource: a manifest containing one YAML document
+// applies cleanly.
+func TestApplyManifest_SingleResource(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = corev1.AddToScheme(scheme)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ manifestContent := `
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: test-namespace
+`
+ manifestPath := createTempManifest(t, manifestContent)
+
+ err := r.applyManifest(context.TODO(), manifestPath, "test resource")
+
+ g.Expect(err).NotTo(HaveOccurred())
+}
+
+// TestApplyManifest_MultipleResources: a multi-document manifest (separated by
+// "---") applies every document without error.
+func TestApplyManifest_MultipleResources(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ manifestContent := `
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: test-namespace-1
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: test-namespace-2
+`
+ manifestPath := createTempManifest(t, manifestContent)
+
+ err := r.applyManifest(context.TODO(), manifestPath, "test resources")
+
+ g.Expect(err).NotTo(HaveOccurred())
+}
+
+// TestApplyManifest_EmptyDocuments: a manifest consisting only of empty YAML
+// documents is a harmless no-op.
+func TestApplyManifest_EmptyDocuments(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ manifestContent := `---
+---
+`
+ manifestPath := createTempManifest(t, manifestContent)
+
+ err := r.applyManifest(context.TODO(), manifestPath, "empty resources")
+
+ // Should not error on empty documents
+ g.Expect(err).NotTo(HaveOccurred())
+}
+
+// TestApplyManifest_InvalidYAML: malformed YAML content makes applyManifest
+// return an error instead of silently skipping.
+func TestApplyManifest_InvalidYAML(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ r := &ReconcileObservability{client: client}
+
+ manifestContent := `
+invalid: yaml: content:
+ - broken
+ indentation
+`
+ manifestPath := createTempManifest(t, manifestContent)
+
+ err := r.applyManifest(context.TODO(), manifestPath, "invalid resource")
+
+ g.Expect(err).To(HaveOccurred())
+}
+
+// Integration Tests
+
+// TestReconcile_SkipsFlowCollectorWhenExists tests that reconciliation
+// doesn't try to create FlowCollector if it already exists.
+// Setup: operator CRD + installed ClusterExtension + existing FlowCollector.
+func TestReconcile_SkipsFlowCollectorWhenExists(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+ clusterExtension := createTestClusterExtension("netobserv-operator", true)
+ flowCollector := createTestFlowCollector(FlowCollectorName)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, crd, clusterExtension, flowCollector).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+ result, err := r.Reconcile(context.TODO(), req)
+
+ // Should complete without error
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(Equal(ctrl.Result{}))
+}
+
+// TestReconcile_SkipsInstallWhenExists tests that reconciliation
+// doesn't try to install operator if it already exists.
+// With the operator present but no FlowCollector, the controller proceeds to
+// FlowCollector creation, which fails here (no manifest) and requeues.
+func TestReconcile_SkipsInstallWhenExists(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+ clusterExtension := createTestClusterExtension("netobserv-operator", true)
+ operatorNs := createTestNamespace(OperatorNamespace)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, crd, clusterExtension, operatorNs).
+ Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ featureGate: createEnabledFeatureGate(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Since operator is already installed, it should proceed to FlowCollector creation
+ // which will fail (manifest doesn't exist) but will requeue instead of erroring
+ result, err := r.Reconcile(context.TODO(), req)
+
+ // We expect no error, just a requeue
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(result.RequeueAfter).To(Equal(requeueAfterStandard))
+}
+
+// Edge Case Tests
+
+// TestReconcile_MultipleInvocations tests that multiple reconciliations
+// handle idempotency correctly: with everything already deployed, repeated
+// reconciles return identical empty results.
+func TestReconcile_MultipleInvocations(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+ clusterExtension := createTestClusterExtension("netobserv-operator", true)
+ flowCollector := createTestFlowCollector(FlowCollectorName)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, crd, clusterExtension, flowCollector).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // First reconciliation
+ result1, err1 := r.Reconcile(context.TODO(), req)
+ g.Expect(err1).NotTo(HaveOccurred())
+ g.Expect(result1).To(Equal(ctrl.Result{}))
+
+ // Second reconciliation should be idempotent
+ result2, err2 := r.Reconcile(context.TODO(), req)
+ g.Expect(err2).NotTo(HaveOccurred())
+ g.Expect(result2).To(Equal(ctrl.Result{}))
+
+ // Results should be the same
+ g.Expect(result1).To(Equal(result2))
+}
+
+// TestReconcile_OperatorNotReady tests reconciliation when operator exists
+// but is not ready yet.
+func TestReconcile_OperatorNotReady(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+ // ClusterExtension exists but its Installed condition is "False"
+ clusterExtension := createTestClusterExtension("netobserv-operator", false)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, crd, clusterExtension).
+ Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ featureGate: createEnabledFeatureGate(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Bound the test so the controller's wait cannot hang the suite.
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+
+ result, err := r.Reconcile(ctx, req)
+
+ // Controller returns no error, but should requeue after failing FlowCollector creation
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result.RequeueAfter).To(Equal(requeueAfterStandard))
+}
+
+// TestReconcile_FlowCollectorDeleted: with the deployed condition already set
+// on the Network status, a deleted FlowCollector is NOT recreated — the
+// reconcile short-circuits and reports not-degraded.
+func TestReconcile_FlowCollectorDeleted(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ // Create network with the deployed condition set (simulating previous successful deployment)
+ // FlowCollector is NOT present (deleted)
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+ clusterExtension := createTestClusterExtension("netobserv-operator", true)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, crd, clusterExtension).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ mockStatus := newMockStatusManager()
+ r := &ReconcileObservability{
+ client: client,
+ status: mockStatus,
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Reconciliation should skip everything since deployment condition is set
+ result, err := r.Reconcile(context.TODO(), req)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(Equal(ctrl.Result{}))
+
+ // Verify status was set to not degraded
+ g.Expect(len(mockStatus.notDegradedCalls)).To(Equal(1))
+ g.Expect(mockStatus.notDegradedCalls[0]).To(Equal(statusmanager.ObservabilityConfig))
+}
+
+// TestReconcile_OperatorDeleted tests that operator is not reinstalled after deletion if previously deployed
+func TestReconcile_OperatorDeleted(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ // Create network with the deployed condition set (simulating previous successful deployment)
+ // Operator CRD and ClusterExtension are NOT present (simulating deletion)
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ flowCollector := createTestFlowCollector(FlowCollectorName)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, flowCollector).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ mockStatus := newMockStatusManager()
+ r := &ReconcileObservability{
+ client: client,
+ status: mockStatus,
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Reconciliation should skip everything since deployment condition is set
+ result, err := r.Reconcile(context.TODO(), req)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(Equal(ctrl.Result{}))
+
+ // Verify status was set to not degraded
+ g.Expect(len(mockStatus.notDegradedCalls)).To(Equal(1))
+ g.Expect(mockStatus.notDegradedCalls[0]).To(Equal(statusmanager.ObservabilityConfig))
+}
+
+// TestReconcile_BothDeleted tests that nothing is reinstalled when both operator and FlowCollector are deleted
+// after a previously recorded successful deployment.
+func TestReconcile_BothDeleted(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ // Create network with the deployed condition set (simulating previous successful deployment)
+ // Neither operator nor FlowCollector are present (both deleted)
+ network := createTestNetwork("cluster", "InstallAndEnable")
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ mockStatus := newMockStatusManager()
+ r := &ReconcileObservability{
+ client: client,
+ status: mockStatus,
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Reconciliation should skip everything since deployment condition is set
+ result, err := r.Reconcile(context.TODO(), req)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result).To(Equal(ctrl.Result{}))
+
+ // Verify status was set to not degraded
+ g.Expect(len(mockStatus.notDegradedCalls)).To(Equal(1))
+ g.Expect(mockStatus.notDegradedCalls[0]).To(Equal(statusmanager.ObservabilityConfig))
+}
+
+// TestReconcile_NetworkCRUpdated tests that reconciliation handles
+// Network CR updates correctly: a cluster that starts with DoNotInstall and
+// is later switched to InstallAndEnable triggers an install attempt.
+func TestReconcile_NetworkCRUpdated(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ // Start with disabled
+ network := createTestNetwork("cluster", "DoNotInstall")
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ featureGate: createEnabledFeatureGate(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // First reconciliation - should skip
+ result1, err1 := r.Reconcile(context.TODO(), req)
+ g.Expect(err1).NotTo(HaveOccurred())
+ g.Expect(result1).To(Equal(ctrl.Result{}))
+
+ // Update Network CR to enable observability
+ network.Spec.NetworkObservability = configv1.NetworkObservabilitySpec{
+ InstallationPolicy: ptr.To(configv1.NetworkObservabilityInstallAndEnable),
+ }
+ err := client.Update(context.TODO(), network)
+ g.Expect(err).NotTo(HaveOccurred())
+
+ // Second reconciliation - should now try to install
+ // This will fail because manifest doesn't exist, but will requeue instead of erroring
+ result2, err2 := r.Reconcile(context.TODO(), req)
+
+ // Should requeue, not error
+ g.Expect(err2).ToNot(HaveOccurred())
+ g.Expect(result2.RequeueAfter).To(Equal(requeueAfterOLM))
+}
+
+// Error Scenario Tests
+
+// TestReconcile_PartialFailure_OperatorInstallFails tests recovery
+// when operator installation fails
+func TestReconcile_PartialFailure_OperatorInstallFails(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ featureGate: createEnabledFeatureGate(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Reconciliation should requeue when install fails (manifest doesn't exist)
+ result, err := r.Reconcile(context.TODO(), req)
+
+ // Should requeue, not error
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(result.RequeueAfter).To(Equal(requeueAfterOLM))
+}
+
+// TestReconcile_RecoveryAfterOperatorBecomesReady tests that reconciliation
+// continues after operator becomes ready
+func TestReconcile_RecoveryAfterOperatorBecomesReady(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+ // Start with a ClusterExtension that is not yet installed (Installed condition false)
+ clusterExtension := createTestClusterExtension("netobserv-operator", false)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, crd, clusterExtension).
+ Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ featureGate: createEnabledFeatureGate(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // First reconciliation will fail creating FlowCollector (returns no error but requeues after requeueAfterStandard)
+ ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel1()
+
+ result1, err1 := r.Reconcile(ctx1, req)
+ g.Expect(err1).NotTo(HaveOccurred())
+ g.Expect(result1.RequeueAfter).To(Equal(requeueAfterStandard))
+
+ // Update ClusterExtension to Installed status
+ conditions := []interface{}{
+ map[string]interface{}{
+ "type": "Installed",
+ "status": "True",
+ "reason": "InstallSucceeded",
+ "message": "ClusterExtension installed successfully",
+ },
+ }
+ _ = unstructured.SetNestedSlice(clusterExtension.Object, conditions, "status", "conditions")
+ err := client.Update(context.TODO(), clusterExtension)
+ g.Expect(err).NotTo(HaveOccurred())
+
+ // Second reconciliation should proceed past operator wait
+ // and attempt to create FlowCollector (which will fail due to missing manifest)
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel2()
+
+ result2, err2 := r.Reconcile(ctx2, req)
+
+ // Should requeue after failing to read FlowCollector manifest
+ g.Expect(err2).ToNot(HaveOccurred())
+ g.Expect(result2.RequeueAfter).To(Equal(requeueAfterStandard))
+}
+
+// Performance/Stress Tests
+
+// TestReconcile_ConcurrentReconciliations tests that multiple concurrent
+// reconciliations don't cause issues
+func TestReconcile_ConcurrentReconciliations(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+ clusterExtension := createTestClusterExtension("netobserv-operator", true)
+ flowCollector := createTestFlowCollector(FlowCollectorName)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, crd, clusterExtension, flowCollector).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ r := &ReconcileObservability{
+ client: client,
+ status: newMockStatusManager(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Run 5 concurrent reconciliations
+ errChan := make(chan error, 5)
+ for i := 0; i < 5; i++ {
+ go func() {
+ _, err := r.Reconcile(context.TODO(), req)
+ errChan <- err
+ }()
+ }
+
+ // Wait for all to complete and collect errors
+ var unexpectedErrors []error
+ for i := 0; i < 5; i++ {
+ if err := <-errChan; err != nil {
+ // Filter out 409 conflict errors which are expected when multiple
+ // goroutines try to update the same resource status concurrently
+ if !errors.IsConflict(err) {
+ unexpectedErrors = append(unexpectedErrors, err)
+ }
+ }
+ }
+
+ // Assert no unexpected errors occurred (safe to do in main test goroutine)
+ g.Expect(unexpectedErrors).To(BeEmpty(), "All concurrent reconciliations should complete without unexpected errors")
+}
+
+// Status Manager Tests
+
+ // TestReconcile_StatusDegradedOnError tests that install errors do NOT set degraded status (optional feature; errors are requeued instead)
+func TestReconcile_StatusDegradedOnError(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, infra).
+ Build()
+
+ mockStatus := newMockStatusManager()
+ r := &ReconcileObservability{
+ client: client,
+ status: mockStatus,
+ featureGate: createEnabledFeatureGate(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Reconciliation should fail trying to install operator (manifest doesn't exist)
+ result, err := r.Reconcile(context.TODO(), req)
+
+ // Should not fail or set degraded status (errors are logged and requeued)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(result.RequeueAfter).To(Equal(requeueAfterOLM))
+
+ // Verify that status degraded was NOT called (optional feature shouldn't degrade operator)
+ g.Expect(len(mockStatus.degradedCalls)).To(Equal(0))
+}
+
+// TestReconcile_StatusNotDegradedOnSuccess tests that status is cleared on success
+func TestReconcile_StatusNotDegradedOnSuccess(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "InstallAndEnable")
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+ crd := createTestCRD("flowcollectors.flows.netobserv.io")
+ clusterExtension := createTestClusterExtension("netobserv-operator", true)
+ flowCollector := createTestFlowCollector(FlowCollectorName)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, infra, crd, clusterExtension, flowCollector).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ mockStatus := newMockStatusManager()
+ r := &ReconcileObservability{
+ client: client,
+ status: mockStatus,
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Reconciliation should succeed (FlowCollector already exists)
+ _, err := r.Reconcile(context.TODO(), req)
+
+ g.Expect(err).NotTo(HaveOccurred())
+
+ // Verify that status not degraded was called
+ g.Expect(len(mockStatus.notDegradedCalls)).To(BeNumerically(">", 0))
+ g.Expect(mockStatus.notDegradedCalls[0]).To(Equal(statusmanager.ObservabilityConfig))
+}
+
+// TestReconcile_StatusNotDegradedWhenDisabled tests that status is cleared when disabled
+func TestReconcile_StatusNotDegradedWhenDisabled(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ network := createTestNetwork("cluster", "DoNotInstall") // disabled
+ infra := createTestInfrastructure(configv1.HighlyAvailableTopologyMode)
+
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network, infra).
+ Build()
+
+ mockStatus := newMockStatusManager()
+ r := &ReconcileObservability{
+ client: client,
+ status: mockStatus,
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Reconciliation should succeed and skip installation
+ _, err := r.Reconcile(context.TODO(), req)
+
+ g.Expect(err).NotTo(HaveOccurred())
+
+ // Verify that status not degraded was called (feature is disabled)
+ g.Expect(len(mockStatus.notDegradedCalls)).To(Equal(1))
+ g.Expect(mockStatus.notDegradedCalls[0]).To(Equal(statusmanager.ObservabilityConfig))
+}
+
+// TestReconcile_StatusDegradedOnInfrastructureError tests that Infrastructure lookup failures don't cause degraded status
+func TestReconcile_StatusDegradedOnInfrastructureError(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ scheme := runtime.NewScheme()
+ _ = configv1.AddToScheme(scheme)
+
+ // Create network with no NetworkObservability field (will trigger SNO check which needs Infrastructure)
+ network := &configv1.Network{
+ ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+ Spec: configv1.NetworkSpec{
+ // NetworkObservability not set: will trigger SNO check
+ },
+ }
+
+ // Don't add Infrastructure object - this will cause Get to fail
+ client := fake.NewClientBuilder().WithScheme(scheme).
+ WithObjects(network).
+ WithStatusSubresource(&configv1.Network{}).
+ Build()
+
+ mockStatus := newMockStatusManager()
+ r := &ReconcileObservability{
+ client: client,
+ status: mockStatus,
+ featureGate: createEnabledFeatureGate(),
+ }
+
+ req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "cluster"}}
+
+ // Reconciliation should requeue when checking Infrastructure fails
+ result, err := r.Reconcile(context.TODO(), req)
+
+ // Should not fail or set degraded status (errors are logged and requeued)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(result.RequeueAfter).To(Equal(requeueAfterStandard))
+
+ // Verify that status degraded was NOT called (optional feature shouldn't degrade operator)
+ g.Expect(len(mockStatus.degradedCalls)).To(Equal(0))
+}
+
+func TestIsFeatureGateEnabled_NilFeatureGate(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ r := &ReconcileObservability{featureGate: nil}
+
+ // Should default to disabled when featureGate is nil
+ result := r.isFeatureGateEnabled()
+ g.Expect(result).To(BeFalse())
+}
+
+func TestIsFeatureGateEnabled_FeatureGateEnabled(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ // Create a feature gate with NetworkObservabilityInstall enabled
+ fg := featuregates.NewFeatureGate(
+ []configv1.FeatureGateName{"NetworkObservabilityInstall"},
+ []configv1.FeatureGateName{},
+ )
+
+ r := &ReconcileObservability{featureGate: fg}
+
+ result := r.isFeatureGateEnabled()
+ g.Expect(result).To(BeTrue())
+}
+
+func TestIsFeatureGateEnabled_FeatureGateDisabled(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ // Create a feature gate with NetworkObservabilityInstall disabled
+ fg := featuregates.NewFeatureGate(
+ []configv1.FeatureGateName{},
+ []configv1.FeatureGateName{"NetworkObservabilityInstall"},
+ )
+
+ r := &ReconcileObservability{featureGate: fg}
+
+ result := r.isFeatureGateEnabled()
+ g.Expect(result).To(BeFalse())
+}
+
+func TestIsFeatureGateEnabled_FeatureGateNotRegistered(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ // Create a feature gate without NetworkObservabilityInstall registered
+ fg := featuregates.NewFeatureGate(
+ []configv1.FeatureGateName{"SomeOtherFeature"},
+ []configv1.FeatureGateName{},
+ )
+
+ r := &ReconcileObservability{featureGate: fg}
+
+ // Should default to disabled when feature gate is not registered
+ result := r.isFeatureGateEnabled()
+ g.Expect(result).To(BeFalse())
+}
diff --git a/pkg/controller/statusmanager/status_manager.go b/pkg/controller/statusmanager/status_manager.go
index 4e46e1a83c..57ad8cdaff 100644
--- a/pkg/controller/statusmanager/status_manager.go
+++ b/pkg/controller/statusmanager/status_manager.go
@@ -57,6 +57,7 @@ const (
CertificateSigner
InfrastructureConfig
DashboardConfig
+ ObservabilityConfig
maxStatusLevel
)
diff --git a/sample-config.yaml b/sample-config.yaml
index 5e2e2f18d1..ff960840b8 100644
--- a/sample-config.yaml
+++ b/sample-config.yaml
@@ -10,3 +10,10 @@ spec:
hostPrefix: 23
defaultNetwork:
type: OVNKubernetes
+ networkObservability:
+ # installationPolicy controls Network Observability installation during cluster deployment (day-0).
+ # Valid values: "", "InstallAndEnable", "DoNotInstall"
+ # Default (empty or omitted): enabled on multi-node clusters, disabled on SNO
+ # "InstallAndEnable": explicitly enable (even on SNO)
+ # "DoNotInstall": explicitly disable
+ installationPolicy: InstallAndEnable
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
index fb8ed2fff7..8697dfee4f 100644
--- a/vendor/github.com/openshift/api/config/v1/types_network.go
+++ b/vendor/github.com/openshift/api/config/v1/types_network.go
@@ -86,6 +86,13 @@ type NetworkSpec struct {
//
// +optional
NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"`
+
+ // networkObservability is an optional field that configures network observability installation
+ // during cluster deployment (day-0).
+ // When omitted, network observability will be installed unless this is a SNO cluster.
+ // +openshift:enable:FeatureGate=NetworkObservabilityInstall
+ // +optional
+ NetworkObservability NetworkObservabilitySpec `json:"networkObservability,omitempty,omitzero"`
}
// NetworkStatus is the current network configuration.
@@ -304,3 +311,31 @@ type NetworkDiagnosticsTargetPlacement struct {
// +listType=atomic
Tolerations []corev1.Toleration `json:"tolerations"`
}
+
+// NetworkObservabilityInstallationPolicy is an enumeration of the available network observability installation policies
+// Valid values are "", "InstallAndEnable", "DoNotInstall".
+// +kubebuilder:validation:Enum="";InstallAndEnable;DoNotInstall
+type NetworkObservabilityInstallationPolicy string
+
+const (
+ // NetworkObservabilityNoOpinion means that the user has no opinion and the platform is left
+ // to choose reasonable defaults. The current default is to install and enable network observability.
+ // This is subject to change over time.
+ NetworkObservabilityNoOpinion NetworkObservabilityInstallationPolicy = ""
+ // NetworkObservabilityInstallAndEnable means that network observability should be installed and enabled during cluster deployment
+ NetworkObservabilityInstallAndEnable NetworkObservabilityInstallationPolicy = "InstallAndEnable"
+ // NetworkObservabilityDoNotInstall means that network observability should not be installed
+ NetworkObservabilityDoNotInstall NetworkObservabilityInstallationPolicy = "DoNotInstall"
+)
+
+// NetworkObservabilitySpec defines the configuration for network observability installation
+// +kubebuilder:validation:MinProperties=1
+type NetworkObservabilitySpec struct {
+ // installationPolicy controls whether network observability is installed during cluster deployment.
+ // Valid values are "", "InstallAndEnable" and "DoNotInstall".
+ // When set to "", network observability will be installed unless this is a SNO cluster.
+ // When set to "InstallAndEnable", network observability will be installed and enabled.
+ // When set to "DoNotInstall", network observability will not be installed.
+ // +optional
+ InstallationPolicy *NetworkObservabilityInstallationPolicy `json:"installationPolicy,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
index 30b85b78e9..388c714c67 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -4283,6 +4283,27 @@ func (in *NetworkMigration) DeepCopy() *NetworkMigration {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkObservabilitySpec) DeepCopyInto(out *NetworkObservabilitySpec) {
+ *out = *in
+ if in.InstallationPolicy != nil {
+ in, out := &in.InstallationPolicy, &out.InstallationPolicy
+ *out = new(NetworkObservabilityInstallationPolicy)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkObservabilitySpec.
+func (in *NetworkObservabilitySpec) DeepCopy() *NetworkObservabilitySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkObservabilitySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
*out = *in
@@ -4302,6 +4323,7 @@ func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
(*in).DeepCopyInto(*out)
}
in.NetworkDiagnostics.DeepCopyInto(&out.NetworkDiagnostics)
+ in.NetworkObservability.DeepCopyInto(&out.NetworkObservability)
return
}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml
index 4b768c3898..c9b0975f2a 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -445,7 +445,8 @@ networks.config.openshift.io:
CRDName: networks.config.openshift.io
Capability: ""
Category: ""
- FeatureGates: []
+ FeatureGates:
+ - NetworkObservabilityInstall
FilenameOperatorName: config-operator
FilenameOperatorOrdering: "01"
FilenameRunLevel: "0000_10"
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
index a30061c252..66705af5f2 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -2459,6 +2459,15 @@ func (NetworkMigration) SwaggerDoc() map[string]string {
return map_NetworkMigration
}
+var map_NetworkObservabilitySpec = map[string]string{
+ "": "NetworkObservabilitySpec defines the configuration for network observability installation",
+ "installationPolicy": "installationPolicy controls whether network observability is installed during cluster deployment. Valid values are \"\", \"InstallAndEnable\" and \"DoNotInstall\". When set to \"\", network observability will be installed unless this is a SNO cluster. When set to \"InstallAndEnable\", network observability will be installed and enabled. When set to \"DoNotInstall\", network observability will not be installed.",
+}
+
+func (NetworkObservabilitySpec) SwaggerDoc() map[string]string {
+ return map_NetworkObservabilitySpec
+}
+
var map_NetworkSpec = map[string]string{
"": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
"clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
@@ -2467,6 +2476,7 @@ var map_NetworkSpec = map[string]string{
"externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
"serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.",
"networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.",
+ "networkObservability": "networkObservability is an optional field that configures network observability installation during cluster deployment (day-0). When omitted, network observability will be installed unless this is a SNO cluster.",
}
func (NetworkSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md
index 1c9e3435c2..897b61a121 100644
--- a/vendor/github.com/openshift/api/features.md
+++ b/vendor/github.com/openshift/api/features.md
@@ -78,6 +78,7 @@
| MixedCPUsAllocation| | | Enabled | Enabled | | | Enabled | Enabled |
| MultiDiskSetup| | | Enabled | Enabled | | | Enabled | Enabled |
| MutatingAdmissionPolicy| | | Enabled | Enabled | | | Enabled | Enabled |
+| NetworkObservabilityInstall| | | Enabled | Enabled | | | Enabled | Enabled |
| NewOLM| | Enabled | | Enabled | | Enabled | | Enabled |
| NewOLMWebhookProviderOpenshiftServiceCA| | Enabled | | Enabled | | Enabled | | Enabled |
| NoOverlayMode| | | Enabled | Enabled | | | Enabled | Enabled |
diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go
index a9f4cda54e..f8aa26f38f 100644
--- a/vendor/github.com/openshift/api/features/features.go
+++ b/vendor/github.com/openshift/api/features/features.go
@@ -1020,4 +1020,12 @@ var (
enhancementPR("https://github.com/openshift/enhancements/pull/1910").
enable(inDevPreviewNoUpgrade(), inTechPreviewNoUpgrade()).
mustRegister()
+
+ FeatureGateNetworkObservabilityInstall = newFeatureGate("NetworkObservabilityInstall").
+ reportProblemsToJiraComponent("netobserv").
+ contactPerson("jtakvori").
+ productScope(ocpSpecific).
+ enhancementPR("https://github.com/openshift/enhancements/pull/1908").
+ enable(inDevPreviewNoUpgrade(), inTechPreviewNoUpgrade()).
+ mustRegister()
)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b00d1fd514..ed68970277 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -248,7 +248,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
github.com/onsi/gomega/types
-# github.com/openshift/api v0.0.0-20260320151444-324a1bcb9f55
+# github.com/openshift/api v0.0.0-20260320151444-324a1bcb9f55 => github.com/OlivierCazade/api v0.0.0-20260324144412-012c4cdbbb5b
## explicit; go 1.25.0
github.com/openshift/api
github.com/openshift/api/annotations
@@ -1677,12 +1677,15 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
# sigs.k8s.io/controller-runtime v0.23.1
## explicit; go 1.25.0
+sigs.k8s.io/controller-runtime
+sigs.k8s.io/controller-runtime/pkg/builder
sigs.k8s.io/controller-runtime/pkg/cache
sigs.k8s.io/controller-runtime/pkg/cache/internal
sigs.k8s.io/controller-runtime/pkg/certwatcher
sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics
sigs.k8s.io/controller-runtime/pkg/client
sigs.k8s.io/controller-runtime/pkg/client/apiutil
+sigs.k8s.io/controller-runtime/pkg/client/config
sigs.k8s.io/controller-runtime/pkg/client/fake
sigs.k8s.io/controller-runtime/pkg/client/interceptor
sigs.k8s.io/controller-runtime/pkg/cluster
@@ -1707,11 +1710,13 @@ sigs.k8s.io/controller-runtime/pkg/internal/syncs
sigs.k8s.io/controller-runtime/pkg/leaderelection
sigs.k8s.io/controller-runtime/pkg/log
sigs.k8s.io/controller-runtime/pkg/manager
+sigs.k8s.io/controller-runtime/pkg/manager/signals
sigs.k8s.io/controller-runtime/pkg/metrics
sigs.k8s.io/controller-runtime/pkg/metrics/server
sigs.k8s.io/controller-runtime/pkg/predicate
sigs.k8s.io/controller-runtime/pkg/reconcile
sigs.k8s.io/controller-runtime/pkg/recorder
+sigs.k8s.io/controller-runtime/pkg/scheme
sigs.k8s.io/controller-runtime/pkg/source
sigs.k8s.io/controller-runtime/pkg/webhook
sigs.k8s.io/controller-runtime/pkg/webhook/admission
diff --git a/vendor/sigs.k8s.io/controller-runtime/.gitignore b/vendor/sigs.k8s.io/controller-runtime/.gitignore
new file mode 100644
index 0000000000..2ddc5a8b87
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/.gitignore
@@ -0,0 +1,30 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# editor and IDE paraphernalia
+.idea
+*.swp
+*.swo
+*~
+
+# Vscode files
+.vscode
+
+# Tools binaries.
+hack/tools/bin
+
+# Release artifacts
+tools/setup-envtest/out
+
+junit-report.xml
+/artifacts
diff --git a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml
new file mode 100644
index 0000000000..5c86af65a3
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml
@@ -0,0 +1,209 @@
+version: "2"
+run:
+ go: "1.25"
+ timeout: 10m
+ allow-parallel-runners: true
+linters:
+ default: none
+ enable:
+ - asasalint
+ - asciicheck
+ - bidichk
+ - bodyclose
+ - copyloopvar
+ - depguard
+ - dogsled
+ - dupl
+ - errcheck
+ - errchkjson
+ - errorlint
+ - exhaustive
+ - forbidigo
+ - ginkgolinter
+ - goconst
+ - gocritic
+ - gocyclo
+ - godoclint
+ - goprintffuncname
+ - govet
+ - importas
+ - ineffassign
+ - iotamixing
+ - makezero
+ - misspell
+ - modernize
+ - nakedret
+ - nilerr
+ - nolintlint
+ - prealloc
+ - revive
+ - staticcheck
+ - tagliatelle
+ - unconvert
+ - unparam
+ - unused
+ - whitespace
+ settings:
+ depguard:
+ rules:
+ forbid-pkg-errors:
+ deny:
+ - pkg: sort
+ desc: Should be replaced with slices package
+ forbidigo:
+ forbid:
+ - pattern: context.Background
+ msg: Use ginkgos SpecContext or go testings t.Context instead
+ - pattern: context.TODO
+ msg: Use ginkgos SpecContext or go testings t.Context instead
+ govet:
+ disable:
+ - fieldalignment
+ - shadow
+ - buildtag
+ enable-all: true
+ importas:
+ alias:
+ - pkg: k8s.io/api/core/v1
+ alias: corev1
+ - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
+ alias: apiextensionsv1
+ - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+ alias: metav1
+ - pkg: k8s.io/apimachinery/pkg/api/errors
+ alias: apierrors
+ - pkg: k8s.io/apimachinery/pkg/util/errors
+ alias: kerrors
+ - pkg: sigs.k8s.io/controller-runtime
+ alias: ctrl
+ no-unaliased: true
+ modernize:
+ disable:
+ - omitzero
+ - fmtappendf
+ revive:
+ rules:
+ # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration
+ - name: blank-imports
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: dot-imports
+ - name: error-return
+ - name: error-strings
+ - name: error-naming
+ - name: exported
+ - name: if-return
+ - name: increment-decrement
+ - name: var-naming
+ - name: var-declaration
+ - name: range
+ - name: receiver-naming
+ - name: time-naming
+ - name: unexported-return
+ - name: indent-error-flow
+ - name: errorf
+ - name: superfluous-else
+ - name: unreachable-code
+ - name: redefines-builtin-id
+ #
+ # Rules in addition to the recommended configuration above.
+ #
+ - name: bool-literal-in-expr
+ - name: constant-logical-expr
+ exclusions:
+ generated: strict
+ paths:
+ - zz_generated.*\.go$
+ - .*conversion.*\.go$
+ rules:
+ - linters:
+ - forbidigo
+ path-except: _test\.go
+ - linters:
+ - gosec
+ text: 'G108: Profiling endpoint is automatically exposed on /debug/pprof'
+ - linters:
+ - revive
+ text: 'exported: exported method .*\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported'
+ - linters:
+ - errcheck
+ text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+ - linters:
+ - staticcheck
+ text: 'SA1019: .*The component config package has been deprecated and will be removed in a future release.'
+ # With Go 1.16, the new embed directive can be used with an un-named import,
+ # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us.
+ # This directive allows the embed package to be imported with an underscore everywhere.
+ - linters:
+ - revive
+ source: _ "embed"
+ # Exclude some packages or code to require comments, for example test code, or fake clients.
+ - linters:
+ - revive
+ text: exported (method|function|type|const) (.+) should have comment or be unexported
+ source: (func|type).*Fake.*
+ - linters:
+ - revive
+ path: fake_\.go
+ text: exported (method|function|type|const) (.+) should have comment or be unexported
+ # Disable unparam "always receives" which might not be really
+ # useful when building libraries.
+ - linters:
+ - unparam
+ text: always receives
+ # Dot imports for gomega and ginkgo are allowed
+ # within test files.
+ - path: _test\.go
+ text: should not use dot imports
+ - path: _test\.go
+ text: cyclomatic complexity
+ - path: _test\.go
+ text: 'G107: Potential HTTP request made with variable url'
+ # Append should be able to assign to a different var/slice.
+ - linters:
+ - gocritic
+ text: 'appendAssign: append result not assigned to the same slice'
+ - linters:
+ - gocritic
+ text: 'singleCaseSwitch: should rewrite switch statement to if statement'
+ # It considers all file access to a filename that comes from a variable problematic,
+ # which is naiv at best.
+ - linters:
+ - gosec
+ text: 'G304: Potential file inclusion via variable'
+ - linters:
+ - dupl
+ path: _test\.go
+ - linters:
+ - revive
+ path: .*/internal/.*
+ - linters:
+ - unused
+ # Seems to incorrectly trigger on the two implementations that are only
+ # used through an interface and not directly..?
+ # Likely same issue as https://github.com/dominikh/go-tools/issues/1616
+ path: pkg/controller/priorityqueue/metrics\.go
+ # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time.
+ # If it is decided they will not be addressed they should be moved above this comment.
+ - path: (.+)\.go$
+ text: Subprocess launch(ed with variable|ing should be audited)
+ - linters:
+ - gosec
+ path: (.+)\.go$
+ text: (G204|G104|G307)
+ - linters:
+ - staticcheck
+ path: (.+)\.go$
+ text: (ST1000|QF1008)
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: strict
+ paths:
+ - zz_generated.*\.go$
+ - .*conversion.*\.go$
diff --git a/vendor/sigs.k8s.io/controller-runtime/.gomodcheck.yaml b/vendor/sigs.k8s.io/controller-runtime/.gomodcheck.yaml
new file mode 100644
index 0000000000..3eaff8dc47
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/.gomodcheck.yaml
@@ -0,0 +1,21 @@
+upstreamRefs:
+ - k8s.io/api
+ - k8s.io/apiextensions-apiserver
+ - k8s.io/apimachinery
+ - k8s.io/apiserver
+ - k8s.io/client-go
+ - k8s.io/component-base
+ # k8s.io/klog/v2 -> conflicts with k/k deps
+ # k8s.io/utils -> conflicts with k/k deps
+
+excludedModules:
+ # Needs a newer version to fix https://github.com/kubernetes-sigs/controller-runtime/issues/3418
+ # This should not be needed by the time we update to 1.36
+ - sigs.k8s.io/structured-merge-diff/v6
+
+ # --- test dependencies:
+ - github.com/onsi/ginkgo/v2
+ - github.com/onsi/gomega
+
+ # --- We want a newer version with generics support for this
+ - github.com/google/btree
diff --git a/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md
new file mode 100644
index 0000000000..2c0ea1f667
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md
@@ -0,0 +1,19 @@
+# Contributing guidelines
+
+## Sign the CLA
+
+Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests.
+
+Please see https://git.k8s.io/community/CLA.md for more info
+
+## Contributing steps
+
+1. Submit an issue describing your proposed change to the repo in question.
+1. The [repo owners](OWNERS) will respond to your issue promptly.
+1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above).
+1. Fork the desired repo, develop and test your code changes.
+1. Submit a pull request.
+
+## Test locally
+
+Run the command `make test` to test the changes locally.
diff --git a/vendor/sigs.k8s.io/controller-runtime/FAQ.md b/vendor/sigs.k8s.io/controller-runtime/FAQ.md
new file mode 100644
index 0000000000..9c36c8112e
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/FAQ.md
@@ -0,0 +1,81 @@
+# FAQ
+
+### Q: How do I know which type of object a controller references?
+
+**A**: Each controller should only reconcile one object type. Other
+affected objects should be mapped to a single type of root object, using
+the `handler.EnqueueRequestForOwner` or `handler.EnqueueRequestsFromMapFunc` event
+handlers, and potentially indices. Then, your Reconcile method should
+attempt to reconcile *all* state for that given root object.
+
+### Q: How do I have different logic in my reconciler for different types of events (e.g. create, update, delete)?
+
+**A**: You should not. Reconcile functions should be idempotent, and
+should always reconcile state by reading all the state it needs, then
+writing updates. This allows your reconciler to correctly respond to
+generic events, adjust to skipped or coalesced events, and easily deal
+with application startup. The controller will enqueue reconcile requests
+for both old and new objects if a mapping changes, but it's your
+responsibility to make sure you have enough information to be able to clean
+up state that's no longer referenced.
+
+### Q: My cache might be stale if I read from a cache! How should I deal with that?
+
+**A**: There are several different approaches that can be taken, depending
+on your situation.
+
+- When you can, take advantage of optimistic locking: use deterministic
+ names for objects you create, so that the Kubernetes API server will
+ warn you if the object already exists. Many controllers in Kubernetes
+ take this approach: the StatefulSet controller appends a specific number
+ to each pod that it creates, while the Deployment controller hashes the
+ pod template spec and appends that.
+
+- In the few cases when you cannot take advantage of deterministic names
+ (e.g. when using generateName), it may be useful to track which
+ actions you took, and assume that they need to be repeated if they don't
+ occur after a given time (e.g. using a requeue result). This is what
+ the ReplicaSet controller does.
+
+In general, write your controller with the assumption that information
+will eventually be correct, but may be slightly out of date. Make sure
+that your reconcile function enforces the entire state of the world each
+time it runs. If none of this works for you, you can always construct
+a client that reads directly from the API server, but this is generally
+considered to be a last resort, and the two approaches above should
+generally cover most circumstances.
+
+### Q: Where's the fake client? How do I use it?
+
+**A**: The fake client
+[exists](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake),
+but we generally recommend using
+[envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment)
+to test against a real API server. In our experience, tests using fake
+clients gradually re-implement poorly-written impressions of a real API
+server, which leads to hard-to-maintain, complex test code.
+
+### Q: How should I write tests? Any suggestions for getting started?
+
+- Use the aforementioned
+ [envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment)
+ to spin up a real API server instead of trying to mock one out.
+
+- Structure your tests to check that the state of the world is as you
+ expect it, *not* that a particular set of API calls were made, when
+ working with Kubernetes APIs. This will allow you to more easily
+ refactor and improve the internals of your controllers without changing
+ your tests.
+
+- Remember that any time you're interacting with the API server, changes
+ may have some delay between write time and reconcile time.
+
+### Q: What are these errors about no Kind being registered for a type?
+
+**A**: You're probably missing a fully-set-up Scheme. Schemes record the
+mapping between Go types and group-version-kinds in Kubernetes. In
+general, your application should have its own Scheme containing the types
+from the API groups that it needs (be they Kubernetes types or your own).
+See the [scheme builder
+docs](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/scheme) for
+more information.
diff --git a/vendor/sigs.k8s.io/controller-runtime/Makefile b/vendor/sigs.k8s.io/controller-runtime/Makefile
new file mode 100644
index 0000000000..1c1fb7f429
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/Makefile
@@ -0,0 +1,214 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If you update this file, please follow
+# https://suva.sh/posts/well-documented-makefiles
+
+## --------------------------------------
+## General
+## --------------------------------------
+
+SHELL:=/usr/bin/env bash
+.DEFAULT_GOAL:=help
+
+#
+# Go.
+#
+GO_VERSION ?= 1.25.0
+
+# Use GOPROXY environment variable if set
+GOPROXY := $(shell go env GOPROXY)
+ifeq ($(GOPROXY),)
+GOPROXY := https://proxy.golang.org
+endif
+export GOPROXY
+
+# Active module mode, as we use go modules to manage dependencies
+export GO111MODULE=on
+
+# Hosts running SELinux need :z added to volume mounts
+SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0)
+
+ifeq ($(SELINUX_ENABLED),1)
+ DOCKER_VOL_OPTS?=:z
+endif
+
+# Tools.
+TOOLS_DIR := hack/tools
+TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/bin)
+GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/golangci-lint)
+GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff
+CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen
+ENVTEST_DIR := $(abspath tools/setup-envtest)
+SCRATCH_ENV_DIR := $(abspath examples/scratch-env)
+GO_INSTALL := ./hack/go-install.sh
+
+# The help will print out all targets with their descriptions organized below their categories. The categories are represented by `##@` and the target descriptions by `##`.
+# The awk command is responsible for reading the entire set of makefiles included in this invocation, looking for lines of the file as xyz: ## something, and then pretty-format the target and help. Then, if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info over the usage of ANSI control characters for terminal formatting: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info over awk command: http://linuxcommand.org/lc3_adv_awk.php
+.PHONY: help
+help: ## Display this help
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+## --------------------------------------
+## Testing
+## --------------------------------------
+
+.PHONY: test
+test: ## Run the script check-everything.sh which will check all.
+ TRACE=1 ./hack/check-everything.sh
+
+## --------------------------------------
+## Binaries
+## --------------------------------------
+
+GO_APIDIFF_VER := v0.8.3
+GO_APIDIFF_BIN := go-apidiff
+GO_APIDIFF := $(abspath $(TOOLS_BIN_DIR)/$(GO_APIDIFF_BIN)-$(GO_APIDIFF_VER))
+GO_APIDIFF_PKG := github.com/joelanford/go-apidiff
+
+$(GO_APIDIFF): # Build go-apidiff from tools folder.
+ GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GO_APIDIFF_PKG) $(GO_APIDIFF_BIN) $(GO_APIDIFF_VER)
+
+CONTROLLER_GEN_VER := v0.20.0
+CONTROLLER_GEN_BIN := controller-gen
+CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))
+CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen
+
+$(CONTROLLER_GEN): # Build controller-gen from tools folder.
+ GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER)
+
+GOLANGCI_LINT_BIN := golangci-lint
+GOLANGCI_LINT_VER := $(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version: | sed 's/.*version: //')
+GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER))
+GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/v2/cmd/golangci-lint
+
+$(GOLANGCI_LINT): # Build golangci-lint from tools folder.
+ GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER)
+
+GO_MOD_CHECK_DIR := $(abspath ./hack/tools/cmd/gomodcheck)
+GO_MOD_CHECK := $(abspath $(TOOLS_BIN_DIR)/gomodcheck)
+GO_MOD_CHECK_IGNORE := $(abspath .gomodcheck.yaml)
+.PHONY: $(GO_MOD_CHECK)
+$(GO_MOD_CHECK): # Build gomodcheck
+ go build -C $(GO_MOD_CHECK_DIR) -o $(GO_MOD_CHECK)
+
+## --------------------------------------
+## Linting
+## --------------------------------------
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT) ## Lint codebase
+ $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
+ cd tools/setup-envtest; $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
+
+.PHONY: lint-fix
+lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter.
+ GOLANGCI_LINT_EXTRA_ARGS=--fix $(MAKE) lint
+
+## --------------------------------------
+## Generate
+## --------------------------------------
+
+.PHONY: modules
+modules: ## Runs go mod to ensure modules are up to date.
+ go mod tidy
+ cd $(TOOLS_DIR); go mod tidy
+ cd $(ENVTEST_DIR); go mod tidy
+ cd $(SCRATCH_ENV_DIR); go mod tidy
+
+## --------------------------------------
+## Release
+## --------------------------------------
+
+RELEASE_DIR := tools/setup-envtest/out
+
+.PHONY: $(RELEASE_DIR)
+$(RELEASE_DIR):
+ mkdir -p $(RELEASE_DIR)/
+
+.PHONY: release
+release: clean-release $(RELEASE_DIR) ## Build release.
+ @if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi
+
+ # Build binaries first.
+ $(MAKE) release-binaries
+
+.PHONY: release-binaries
+release-binaries: ## Build release binaries.
+ RELEASE_BINARY=setup-envtest-linux-amd64 GOOS=linux GOARCH=amd64 $(MAKE) release-binary
+ RELEASE_BINARY=setup-envtest-linux-arm64 GOOS=linux GOARCH=arm64 $(MAKE) release-binary
+ RELEASE_BINARY=setup-envtest-linux-ppc64le GOOS=linux GOARCH=ppc64le $(MAKE) release-binary
+ RELEASE_BINARY=setup-envtest-linux-s390x GOOS=linux GOARCH=s390x $(MAKE) release-binary
+ RELEASE_BINARY=setup-envtest-darwin-amd64 GOOS=darwin GOARCH=amd64 $(MAKE) release-binary
+ RELEASE_BINARY=setup-envtest-darwin-arm64 GOOS=darwin GOARCH=arm64 $(MAKE) release-binary
+ RELEASE_BINARY=setup-envtest-windows-amd64.exe GOOS=windows GOARCH=amd64 $(MAKE) release-binary
+
+.PHONY: release-binary
+release-binary: $(RELEASE_DIR)
+ docker run \
+ --rm \
+ -e CGO_ENABLED=0 \
+ -e GOOS=$(GOOS) \
+ -e GOARCH=$(GOARCH) \
+ -e GOCACHE=/tmp/ \
+ --user $$(id -u):$$(id -g) \
+ -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \
+ -w /workspace/tools/setup-envtest \
+ golang:$(GO_VERSION) \
+ go build -a -trimpath -ldflags "-X 'sigs.k8s.io/controller-runtime/tools/setup-envtest/version.version=$(RELEASE_TAG)' -extldflags '-static'" \
+ -o ./out/$(RELEASE_BINARY) ./
+
+## --------------------------------------
+## Cleanup / Verification
+## --------------------------------------
+
+.PHONY: clean
+clean: ## Cleanup.
+ $(GOLANGCI_LINT) cache clean
+ $(MAKE) clean-bin
+
+.PHONY: clean-bin
+clean-bin: ## Remove all generated binaries.
+ rm -rf hack/tools/bin
+
+.PHONY: clean-release
+clean-release: ## Remove the release folder
+ rm -rf $(RELEASE_DIR)
+
+.PHONY: verify-modules
+verify-modules: modules $(GO_MOD_CHECK) ## Verify go modules are up to date
+ @if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(ENVTEST_DIR)/go.mod $(ENVTEST_DIR)/go.sum $(SCRATCH_ENV_DIR)/go.sum); then \
+ git diff; \
+ echo "go module files are out of date, please run 'make modules'"; exit 1; \
+ fi
+ $(GO_MOD_CHECK) $(GO_MOD_CHECK_IGNORE)
+
+APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main)
+
+.PHONY: apidiff
+verify-apidiff: $(GO_APIDIFF) ## Check for API differences
+ $(GO_APIDIFF) $(APIDIFF_OLD_COMMIT) --print-compatible
+
+## --------------------------------------
+## Helpers
+## --------------------------------------
+
+##@ helpers:
+
+go-version: ## Print the go version we use to compile our binaries and images
+ @echo $(GO_VERSION)
diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS b/vendor/sigs.k8s.io/controller-runtime/OWNERS
new file mode 100644
index 0000000000..9f2d296e4c
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
+
+approvers:
+ - controller-runtime-admins
+ - controller-runtime-maintainers
+ - controller-runtime-approvers
+reviewers:
+ - controller-runtime-admins
+ - controller-runtime-maintainers
+ - controller-runtime-approvers
+ - controller-runtime-reviewers
diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES
new file mode 100644
index 0000000000..47bf6eedf3
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES
@@ -0,0 +1,39 @@
+# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
+
+aliases:
+ # active folks who can be contacted to perform admin-related
+ # tasks on the repo, or otherwise approve any PRS.
+ controller-runtime-admins:
+ - alvaroaleman
+ - joelanford
+ - sbueringer
+ - vincepri
+
+ # non-admin folks who have write-access and can approve any PRs in the repo
+ controller-runtime-maintainers:
+ - alvaroaleman
+ - joelanford
+ - sbueringer
+ - vincepri
+
+ # non-admin folks who can approve any PRs in the repo
+ controller-runtime-approvers:
+ - fillzpp
+
+ # folks who can review and LGTM any PRs in the repo (doesn't
+ # include approvers & admins -- those count too via the OWNERS
+ # file)
+ controller-runtime-reviewers:
+ - varshaprasad96
+ - inteon
+ - JoelSpeed
+ - troy0820
+
+ # folks who may have context on ancient history,
+ # but are no longer directly involved
+ controller-runtime-emeritus-maintainers:
+ - directxman12
+ controller-runtime-emeritus-admins:
+ - droot
+ - mengqiy
+ - pwittrock
diff --git a/vendor/sigs.k8s.io/controller-runtime/README.md b/vendor/sigs.k8s.io/controller-runtime/README.md
new file mode 100644
index 0000000000..8549f4e880
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/README.md
@@ -0,0 +1,86 @@
+[](https://goreportcard.com/report/sigs.k8s.io/controller-runtime)
+[](https://pkg.go.dev/sigs.k8s.io/controller-runtime)
+
+# Kubernetes controller-runtime Project
+
+The Kubernetes controller-runtime Project is a set of go libraries for building
+Controllers. It is leveraged by [Kubebuilder](https://book.kubebuilder.io/) and
+[Operator SDK](https://github.com/operator-framework/operator-sdk). Both are
+a great place to start for new projects. See
+[Kubebuilder's Quick Start](https://book.kubebuilder.io/quick-start.html) to
+see how it can be used.
+
+Documentation:
+
+- [Package overview](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg)
+- [Basic controller using builder](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder)
+- [Creating a manager](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#example-New)
+- [Creating a controller](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#example-New)
+- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/main/examples)
+- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/main/designs)
+
+# Versioning, Maintenance, and Compatibility
+
+The full documentation can be found at [VERSIONING.md](VERSIONING.md), but TL;DR:
+
+Users:
+
+- We stick to a zero major version
+- We publish a minor version for each Kubernetes minor release and allow breaking changes between minor versions
+- We publish patch versions as needed and we don't allow breaking changes in them
+
+Contributors:
+
+- All code PR must be labeled with :bug: (patch fixes), :sparkles: (backwards-compatible features), or :warning: (breaking changes)
+- Breaking changes will find their way into the next major release, other changes will go into a semi-immediate patch or minor release
+- For a quick PR template suggesting the right information, use one of these PR templates:
+ * [Breaking Changes/Features](/.github/PULL_REQUEST_TEMPLATE/breaking_change.md)
+ * [Backwards-Compatible Features](/.github/PULL_REQUEST_TEMPLATE/compat_feature.md)
+ * [Bug fixes](/.github/PULL_REQUEST_TEMPLATE/bug_fix.md)
+ * [Documentation Changes](/.github/PULL_REQUEST_TEMPLATE/docs.md)
+ * [Test/Build/Other Changes](/.github/PULL_REQUEST_TEMPLATE/other.md)
+
+## Compatibility
+
+Every minor version of controller-runtime has been tested with a specific minor version of client-go. A controller-runtime minor version *may* be compatible with
+other client-go minor versions, but this is by chance and neither supported nor tested. In general, we create one minor version of controller-runtime
+for each minor version of client-go and other k8s.io/* dependencies.
+
+The minimum Go version of controller-runtime is the highest minimum Go version of our Go dependencies. Usually, this will
+be identical to the minimum Go version of the corresponding k8s.io/* dependencies.
+
+Compatible k8s.io/*, client-go and minimum Go versions can be looked up in our [go.mod](go.mod) file.
+
+| | k8s.io/*, client-go | minimum Go version |
+|----------|:-------------------:|:------------------:|
+| CR v0.22 | v0.34 | 1.24 |
+| CR v0.21 | v0.33 | 1.24 |
+| CR v0.20 | v0.32 | 1.23 |
+| CR v0.19 | v0.31 | 1.22 |
+| CR v0.18 | v0.30 | 1.22 |
+| CR v0.17 | v0.29 | 1.21 |
+| CR v0.16 | v0.28 | 1.20 |
+| CR v0.15 | v0.27 | 1.20 |
+
+## FAQ
+
+See [FAQ.md](FAQ.md)
+
+## Community, discussion, contribution, and support
+
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+
+- Slack channel: [#controller-runtime](https://kubernetes.slack.com/archives/C02MRBMN00Z)
+- Google Group: [kubebuilder@googlegroups.com](https://groups.google.com/forum/#!forum/kubebuilder)
+
+## Contributing
+
+Contributions are greatly appreciated. The maintainers actively manage the issues list, and try to highlight issues suitable for newcomers.
+The project follows the typical GitHub pull request model. See [CONTRIBUTING.md](CONTRIBUTING.md) for more details.
+Before starting any work, please either comment on an existing issue, or file a new one.
+
+## Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
diff --git a/vendor/sigs.k8s.io/controller-runtime/RELEASE.md b/vendor/sigs.k8s.io/controller-runtime/RELEASE.md
new file mode 100644
index 0000000000..2a857b976e
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/RELEASE.md
@@ -0,0 +1,51 @@
+# Release Process
+
+The Kubernetes controller-runtime Project is released on an as-needed basis. The process is as follows:
+
+**Note:** Releases are done from the `release-MAJOR.MINOR` branches. For PATCH releases it is not required
+to create a new branch you will just need to ensure that all bug fixes are cherry-picked into the respective
+`release-MAJOR.MINOR` branch. To know more about versioning check https://semver.org/.
+
+## How to do a release
+
+### Create the new branch and the release tag
+
+1. Create a new branch `git checkout -b release-` from main
+2. Push the new branch to the remote repository
+
+### Now, let's generate the changelog
+
+1. Create the changelog from the new branch `release-` (`git checkout release-`).
+You will need to use the [kubebuilder-release-tools][kubebuilder-release-tools] to generate the notes. See [here][release-notes-generation]
+
+> **Note**
+> - You will need to have checkout locally from the remote repository the previous branch
+> - Also, ensure that you fetch all tags from the remote `git fetch --all --tags`
+
+### Draft a new release from GitHub
+
+1. Create a new tag with the correct version from the new `release-` branch
+2. Add the changelog on it and publish. Now, the code source is released!
+
+### Add a new Prow test for the new release branch
+
+1. Create a new prow test under [github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime](https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime)
+for the new `release-` branch. (i.e. for the `0.11.0` release see the PR: https://github.com/kubernetes/test-infra/pull/25205)
+2. Ping the infra PR in the controller-runtime slack channel for reviews.
+
+### Announce the new release:
+
+1. Publish on the Slack channel the new release, i.e:
+
+````
+:announce: Controller-Runtime v0.12.0 has been released!
+This release includes a Kubernetes dependency bump to v1.24.
+For more info, see the release page: https://github.com/kubernetes-sigs/controller-runtime/releases.
+ :tada: Thanks to all our contributors!
+````
+
+2. An announcement email is sent to `kubebuilder@googlegroups.com` with the subject `[ANNOUNCE] Controller-Runtime $VERSION is released`
+
+[kubebuilder-release-tools]: https://github.com/kubernetes-sigs/kubebuilder-release-tools
+[release-notes-generation]: https://github.com/kubernetes-sigs/kubebuilder-release-tools/blob/master/README.md#release-notes-generation
+[release-process]: https://github.com/kubernetes-sigs/kubebuilder/blob/master/VERSIONING.md#releasing
diff --git a/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS
new file mode 100644
index 0000000000..9c5241c6b4
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS
@@ -0,0 +1,15 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+alvaroaleman
+sbueringer
+vincepri
diff --git a/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md
new file mode 100644
index 0000000000..97e091fd48
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md
@@ -0,0 +1,169 @@
+Logging Guidelines
+==================
+
+controller-runtime uses a kind of logging called *structured logging*. If
+you've used a library like Zap or logrus before, you'll be familiar with
+the concepts we use. If you've only used a logging library like the "log"
+package (in the Go standard library) or "glog" (in Kubernetes), you'll
+need to adjust how you think about logging a bit.
+
+### Getting Started With Structured Logging
+
+With structured logging, we associate a *constant* log message with some
+variable key-value pairs. For instance, suppose we wanted to log that we
+were starting reconciliation on a pod. In the Go standard library logger,
+we might write:
+
+```go
+log.Printf("starting reconciliation for pod %s/%s", podNamespace, podName)
+```
+
+In controller-runtime, we'd instead write:
+
+```go
+logger.Info("starting reconciliation", "pod", req.NamespacedName)
+```
+
+or even write
+
+```go
+func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Response, error) {
+ logger := logger.WithValues("pod", req.NamespacedName)
+ // do some stuff
+ logger.Info("starting reconciliation")
+}
+```
+
+Notice how we've broken out the information that we want to convey into
+a constant message (`"starting reconciliation"`) and some key-value pairs
+that convey variable information (`"pod", req.NamespacedName`). We've
+thereby added "structure" to our logs, which makes them easier to save
+and search later, as well as correlate with metrics and events.
+
+All of controller-runtime's logging is done via
+[logr](https://github.com/go-logr/logr), a generic interface for
+structured logging. You can use whichever logging library you want to
+implement the actual mechanics of the logging. controller-runtime
+provides some helpers to make it easy to use
+[Zap](https://go.uber.org/zap) as the implementation.
+
+You can configure the logging implementation using
+`"sigs.k8s.io/controller-runtime/pkg/log".SetLogger`. That
+package also contains the convenience functions for setting up Zap.
+
+You can get a handle to the "root" logger using
+`"sigs.k8s.io/controller-runtime/pkg/log".Log`, and can then call
+`WithName` to create individual named loggers. You can call `WithName`
+repeatedly to chain names together:
+
+```go
+logger := log.Log.WithName("controller").WithName("replicaset")
+// in reconcile...
+logger = logger.WithValues("replicaset", req.NamespacedName)
+// later on in reconcile...
+logger.Info("doing things with pods", "pod", newPod)
+```
+
+As seen above, you can also call `WithValue` to create a new sub-logger
+that always attaches some key-value pairs to a logger.
+
+Finally, you can use `V(1)` to mark a particular log line as "debug" logs:
+
+```go
+logger.V(1).Info("this is particularly verbose!", "state of the world",
+allKubernetesObjectsEverywhere)
+```
+
+While it's possible to use higher log levels, it's recommended that you
+stick with `V(1)` or `V(0)` (which is equivalent to not specifying `V`),
+and then filter later based on key-value pairs or messages; different
+numbers tend to lose meaning easily over time, and you'll be left
+wondering why particular logs lines are at `V(5)` instead of `V(7)`.
+
+## Logging errors
+
+Errors should *always* be logged with `log.Error`, which allows logr
+implementations to provide special handling of errors (for instance,
+providing stack traces in debug mode).
+
+It's acceptable to call `log.Error` with a nil error object. This
+conveys that an error occurred in some capacity, but that no actual
+`error` object was involved.
+
+Errors returned by the `Reconcile` implementation of the `Reconciler` interface are commonly logged as a `Reconciler error`.
+It's a developer choice to create an additional error log in the `Reconcile` implementation so a more specific file name and line for the error are returned.
+
+## Logging messages
+
+- Don't put variable content in your messages -- use key-value pairs for
+ that. Never use `fmt.Sprintf` in your message.
+
+- Try to match the terminology in your messages with your key-value pairs
+ -- for instance, if you have a key-value pair `api version`, use the
+ term `APIVersion` instead of `GroupVersion` in your message.
+
+## Logging Kubernetes Objects
+
+Kubernetes objects should be logged directly, like `log.Info("this is
+a Kubernetes object", "pod", somePod)`. controller-runtime provides
+a special encoder for Zap that will transform Kubernetes objects into
+`name, namespace, apiVersion, kind` objects, when available and not in
+development mode. Other logr implementations should implement similar
+logic.
+
+## Logging Structured Values (Key-Value pairs)
+
+- Use lower-case, space separated keys. For example `object` for objects,
+ `api version` for `APIVersion`
+
+- Be consistent across your application, and with controller-runtime when
+ possible.
+
+- Try to be brief but descriptive.
+
+- Match terminology in keys with terminology in the message.
+
+- Be careful logging non-Kubernetes objects verbatim if they're very
+ large.
+
+### Groups, Versions, and Kinds
+
+- Kinds should not be logged alone (they're meaningless alone). Use
+ a `GroupKind` object to log them instead, or a `GroupVersionKind` when
+ version is relevant.
+
+- If you need to log an API version string, use `api version` as the key
+ (formatted as with a `GroupVersion`, or as received directly from API
+ discovery).
+
+### Objects and Types
+
+- If code works with a generic Kubernetes `runtime.Object`, use the
+ `object` key. For specific objects, prefer the resource name as the key
+ (e.g. `pod` for `v1.Pod` objects).
+
+- For non-Kubernetes objects, the `object` key may also be used, if you
+ accept a generic interface.
+
+- When logging a raw type, log it using the `type` key, with a value of
+ `fmt.Sprintf("%T", typ)`
+
+- If there's specific context around a type, the key may be more specific,
+ but should end with `type` -- for instance, `OwnerType` should be logged
+ as `owner` in the context of `log.Error(err, "Could not get ObjectKinds
+ for OwnerType", `owner type`, fmt.Sprintf("%T", ownerType))`. When possible, favor
+ communicating kind instead.
+
+### Multiple things
+
+- When logging multiple things, simply pluralize the key.
+
+### controller-runtime Specifics
+
+- Reconcile requests should be logged as `request`, although normal code
+ should favor logging the key.
+
+- Reconcile keys should be logged as with the same key as if you were
+ logging the object directly (e.g. `log.Info("reconciling pod", "pod",
+ req.NamespacedName)`). This ends up having a similar effect to logging
+ the object directly.
diff --git a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md
new file mode 100644
index 0000000000..7ad6b142cc
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md
@@ -0,0 +1,40 @@
+# Versioning and Branching in controller-runtime
+
+We follow the [common KubeBuilder versioning guidelines][guidelines], and
+use the corresponding tooling.
+
+For the purposes of the aforementioned guidelines, controller-runtime
+counts as a "library project", but otherwise follows the guidelines
+exactly.
+
+We stick to a major version of zero and create a minor version for
+each Kubernetes minor version and we allow breaking changes in our
+minor versions. We create patch releases as needed and don't allow
+breaking changes in them.
+
+Publishing a non-zero major version is pointless for us, as the k8s.io/*
+libraries we heavily depend on do breaking changes but use the same
+versioning scheme as described above. Consequently, a project can only
+ever depend on one controller-runtime version.
+
+[guidelines]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md
+
+## Compatibility and Release Support
+
+For release branches, we generally tend to support backporting one (1)
+major release (`release-{X-1}` or `release-0.{Y-1}`), but may go back
+further if the need arises and is very pressing (e.g. security updates).
+
+### Dependency Support
+
+Note the [guidelines on dependency versions][dep-versions]. Particularly:
+
+- We **DO** guarantee Kubernetes REST API compatibility -- if a given
+ version of controller-runtime stops working with what should be
+ a supported version of Kubernetes, this is almost certainly a bug.
+
+- We **DO NOT** guarantee any particular compatibility matrix between
+ kubernetes library dependencies (client-go, apimachinery, etc); Such
+ compatibility is infeasible due to the way those libraries are versioned.
+
+[dep-versions]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md#kubernetes-version-compatibility
diff --git a/vendor/sigs.k8s.io/controller-runtime/alias.go b/vendor/sigs.k8s.io/controller-runtime/alias.go
new file mode 100644
index 0000000000..e2ac45a5e0
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/alias.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllerruntime
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/builder"
+ "sigs.k8s.io/controller-runtime/pkg/client/config"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/manager/signals"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+// Builder builds an Application ControllerManagedBy (e.g. Operator) and returns a manager.Manager to start it.
+type Builder = builder.Builder
+
+// Request contains the information necessary to reconcile a Kubernetes object. This includes the
+// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about
+// any specific Event or the object contents itself.
+type Request = reconcile.Request
+
+// Result contains the result of a Reconciler invocation.
+type Result = reconcile.Result
+
+// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables.
+// A Manager is required to create Controllers.
+type Manager = manager.Manager
+
+// Options are the arguments for creating a new Manager.
+type Options = manager.Options
+
+// SchemeBuilder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds.
+type SchemeBuilder = scheme.Builder
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+type GroupVersion = schema.GroupVersion
+
+// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying
+// concepts during lookup stages without having partially valid types.
+type GroupResource = schema.GroupResource
+
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
+//
+// +k8s:deepcopy-gen=false
+type TypeMeta = metav1.TypeMeta
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+type ObjectMeta = metav1.ObjectMeta
+
+var (
+ // RegisterFlags registers flag variables to the given FlagSet if not already registered.
+ // It uses the default command line FlagSet, if none is provided. Currently, it only registers the kubeconfig flag.
+ RegisterFlags = config.RegisterFlags
+
+ // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver.
+ // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
+ // in cluster and use the cluster provided kubeconfig.
+ //
+	// The returned `*rest.Config` has client-side ratelimiting disabled as we can rely on API priority and
+ // fairness. Set its QPS to a value equal or bigger than 0 to re-enable it.
+ //
+ // Will log an error and exit if there is an error creating the rest.Config.
+ GetConfigOrDie = config.GetConfigOrDie
+
+ // GetConfig creates a *rest.Config for talking to a Kubernetes apiserver.
+ // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
+ // in cluster and use the cluster provided kubeconfig.
+ //
+	// The returned `*rest.Config` has client-side ratelimiting disabled as we can rely on API priority and
+ // fairness. Set its QPS to a value equal or bigger than 0 to re-enable it.
+ //
+ // Config precedence
+ //
+ // * --kubeconfig flag pointing at a file
+ //
+ // * KUBECONFIG environment variable pointing at a file
+ //
+ // * In-cluster config if running in cluster
+ //
+ // * $HOME/.kube/config if exists.
+ GetConfig = config.GetConfig
+
+ // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager.
+ NewControllerManagedBy = builder.ControllerManagedBy
+
+ // NewManager returns a new Manager for creating Controllers.
+ // Note that if ContentType in the given config is not set, "application/vnd.kubernetes.protobuf"
+ // will be used for all built-in resources of Kubernetes, and "application/json" is for other types
+ // including all CRD resources.
+ NewManager = manager.New
+
+ // CreateOrPatch creates or patches the given object obj in the Kubernetes
+ // cluster. The object's desired state should be reconciled with the existing
+ // state using the passed in ReconcileFn. obj must be a struct pointer so that
+ // obj can be patched with the content returned by the Server.
+ //
+ // It returns the executed operation and an error.
+ CreateOrPatch = controllerutil.CreateOrPatch
+
+ // CreateOrUpdate creates or updates the given object obj in the Kubernetes
+ // cluster. The object's desired state should be reconciled with the existing
+ // state using the passed in ReconcileFn. obj must be a struct pointer so that
+ // obj can be updated with the content returned by the Server.
+ //
+ // It returns the executed operation and an error.
+ CreateOrUpdate = controllerutil.CreateOrUpdate
+
+ // SetControllerReference sets owner as a Controller OwnerReference on owned.
+ // This is used for garbage collection of the owned object and for
+ // reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner).
+ // Since only one OwnerReference can be a controller, it returns an error if
+ // there is another OwnerReference with Controller flag set.
+ SetControllerReference = controllerutil.SetControllerReference
+
+ // SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned
+ // which is canceled on one of these signals. If a second signal is caught, the program
+ // is terminated with exit code 1.
+ SetupSignalHandler = signals.SetupSignalHandler
+
+ // Log is the base logger used by controller-runtime. It delegates
+ // to another logr.Logger. You *must* call SetLogger to
+ // get any actual logging.
+ Log = log.Log
+
+ // LoggerFrom returns a logger with predefined values from a context.Context.
+ // The logger, when used with controllers, can be expected to contain basic information about the object
+ // that's being reconciled like:
+ // - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller.
+ // - `name` and `namespace` from the reconciliation request.
+ //
+ // This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface.
+ LoggerFrom = log.FromContext
+
+ // LoggerInto takes a context and sets the logger as one of its keys.
+ //
+ // This is meant to be used in reconcilers to enrich the logger within a context with additional values.
+ LoggerInto = log.IntoContext
+
+ // SetLogger sets a concrete logging implementation for all deferred Loggers.
+ SetLogger = log.SetLogger
+)
+
+// NewWebhookManagedBy returns a new webhook builder for the provided type T.
+func NewWebhookManagedBy[T runtime.Object](mgr manager.Manager, obj T) *builder.WebhookBuilder[T] {
+ return builder.WebhookManagedBy(mgr, obj)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md b/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md
new file mode 100644
index 0000000000..0d15c00cf3
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/sigs.k8s.io/controller-runtime/doc.go b/vendor/sigs.k8s.io/controller-runtime/doc.go
new file mode 100644
index 0000000000..75d1d908c5
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/doc.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package controllerruntime provides tools to construct Kubernetes-style
+// controllers that manipulate both Kubernetes CRDs and aggregated/built-in
+// Kubernetes APIs.
+//
+// It defines easy helpers for the common use cases when building CRDs, built
+// on top of customizable layers of abstraction. Common cases should be easy,
+// and uncommon cases should be possible. In general, controller-runtime tries
+// to guide users towards Kubernetes controller best-practices.
+//
+// # Getting Started
+//
+// The main entrypoint for controller-runtime is this root package, which
+// contains all of the common types needed to get started building controllers:
+//
+// import (
+// ctrl "sigs.k8s.io/controller-runtime"
+// )
+//
+// The examples in this package walk through a basic controller setup. The
+// kubebuilder book (https://book.kubebuilder.io) has some more in-depth
+// walkthroughs.
+//
+// controller-runtime favors structs with sane defaults over constructors, so
+// it's fairly common to see structs being used directly in controller-runtime.
+//
+// # Organization
+//
+// A brief-ish walkthrough of the layout of this library can be found below. Each
+// package contains more information about how to use it.
+//
+// Frequently asked questions about using controller-runtime and designing
+// controllers can be found at
+// https://github.com/kubernetes-sigs/controller-runtime/blob/main/FAQ.md.
+//
+// # Managers
+//
+// Every controller and webhook is ultimately run by a Manager (pkg/manager). A
+// manager is responsible for running controllers and webhooks, and setting up
+// common dependencies, like shared caches and clients, as
+// well as managing leader election (pkg/leaderelection). Managers are
+// generally configured to gracefully shut down controllers on pod termination
+// by wiring up a signal handler (pkg/manager/signals).
+//
+// # Controllers
+//
+// Controllers (pkg/controller) use events (pkg/event) to eventually trigger
+// reconcile requests. They may be constructed manually, but are often
+// constructed with a Builder (pkg/builder), which eases the wiring of event
+// sources (pkg/source), like Kubernetes API object changes, to event handlers
+// (pkg/handler), like "enqueue a reconcile request for the object owner".
+// Predicates (pkg/predicate) can be used to filter which events actually
+// trigger reconciles. There are pre-written utilities for the common cases, and
+// interfaces and helpers for advanced cases.
+//
+// # Reconcilers
+//
+// Controller logic is implemented in terms of Reconcilers (pkg/reconcile). A
+// Reconciler implements a function which takes a reconcile Request containing
+// the name and namespace of the object to reconcile, reconciles the object,
+// and returns a Result or an error indicating whether to requeue for a
+// second round of processing.
+//
+// # Clients and Caches
+//
+// Reconcilers use Clients (pkg/client) to access API objects. The default
+// client provided by the manager reads from a local shared cache (pkg/cache)
+// and writes directly to the API server, but clients can be constructed that
+// only talk to the API server, without a cache. The Cache will auto-populate
+// with watched objects, as well as when other structured objects are
+// requested. The default split client does not promise to invalidate the cache
+// during writes (nor does it promise sequential create/get coherence), and code
+// should not assume a get immediately following a create/update will return
+// the updated resource. Caches may also have indexes, which can be created via
+// a FieldIndexer (pkg/client) obtained from the manager. Indexes can be used to
+// quickly and easily look up all objects with certain fields set. Reconcilers
+// may retrieve event recorders (pkg/recorder) to emit events using the
+// manager.
+//
+// # Schemes
+//
+// Clients, Caches, and many other things in Kubernetes use Schemes
+// (pkg/scheme) to associate Go types to Kubernetes API Kinds
+// (Group-Version-Kinds, to be specific).
+//
+// # Webhooks
+//
+// Similarly, webhooks (pkg/webhook/admission) may be implemented directly, but
+// are often constructed using a builder (pkg/webhook/admission/builder). They
+// are run via a server (pkg/webhook) which is managed by a Manager.
+//
+// # Logging and Metrics
+//
+// Logging (pkg/log) in controller-runtime is done via structured logs, using a
+// log set of interfaces called logr
+// (https://pkg.go.dev/github.com/go-logr/logr). While controller-runtime
+// provides easy setup for using Zap (https://go.uber.org/zap, pkg/log/zap),
+// you can provide any implementation of logr as the base logger for
+// controller-runtime.
+//
+// Metrics (pkg/metrics) provided by controller-runtime are registered into a
+// controller-runtime-specific Prometheus metrics registry. The manager can
+// serve these by an HTTP endpoint, and additional metrics may be registered to
+// this Registry as normal.
+//
+// # Testing
+//
+// You can easily build integration and unit tests for your controllers and
+// webhooks using the test Environment (pkg/envtest). This will automatically
+// stand up a copy of etcd and kube-apiserver, and provide the correct options
+// to connect to the API server. It's designed to work well with the Ginkgo
+// testing framework, but should work with any testing setup.
+package controllerruntime
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go
new file mode 100644
index 0000000000..840e27b679
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go
@@ -0,0 +1,466 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/go-logr/logr"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/klog/v2"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// project represents other forms that we can use to
+// send/receive a given resource (metadata-only, unstructured, etc).
+type objectProjection int
+
+const (
+ // projectAsNormal doesn't change the object from the form given.
+ projectAsNormal objectProjection = iota
+ // projectAsMetadata turns this into a metadata-only watch.
+ projectAsMetadata
+)
+
+// Builder builds a Controller.
+type Builder = TypedBuilder[reconcile.Request]
+
+// TypedBuilder builds a Controller. The request is the request type
+// that is passed to the workqueue and then to the Reconciler.
+// The workqueue de-duplicates identical requests.
+type TypedBuilder[request comparable] struct {
+ forInput ForInput
+ ownsInput []OwnsInput
+ rawSources []source.TypedSource[request]
+ watchesInput []WatchesInput[request]
+ mgr manager.Manager
+ globalPredicates []predicate.Predicate
+ ctrl controller.TypedController[request]
+ ctrlOptions controller.TypedOptions[request]
+ name string
+ newController func(name string, mgr manager.Manager, options controller.TypedOptions[request]) (controller.TypedController[request], error)
+}
+
+// ControllerManagedBy returns a new controller builder that will be started by the provided Manager.
+func ControllerManagedBy(m manager.Manager) *Builder {
+ return TypedControllerManagedBy[reconcile.Request](m)
+}
+
+// TypedControllerManagedBy returns a new typed controller builder that will be started by the provided Manager.
+func TypedControllerManagedBy[request comparable](m manager.Manager) *TypedBuilder[request] {
+ return &TypedBuilder[request]{mgr: m}
+}
+
+// ForInput represents the information set by the For method.
+type ForInput struct {
+ object client.Object
+ predicates []predicate.Predicate
+ objectProjection objectProjection
+ err error
+}
+
+// For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete /
+// update events by *reconciling the object*.
+//
+// This is the equivalent of calling
+// Watches(source.Kind(cache, &Type{}, &handler.EnqueueRequestForObject{})).
+func (blder *TypedBuilder[request]) For(object client.Object, opts ...ForOption) *TypedBuilder[request] {
+ if blder.forInput.object != nil {
+ blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation")
+ return blder
+ }
+ input := ForInput{object: object}
+ for _, opt := range opts {
+ opt.ApplyToFor(&input)
+ }
+
+ blder.forInput = input
+ return blder
+}
+
+// OwnsInput represents the information set by Owns method.
+type OwnsInput struct {
+ matchEveryOwner bool
+ object client.Object
+ predicates []predicate.Predicate
+ objectProjection objectProjection
+}
+
+// Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to
+// create / delete / update events by *reconciling the owner object*.
+//
+// The default behavior reconciles only the first controller-type OwnerReference of the given type.
+// Use Owns(object, builder.MatchEveryOwner) to reconcile all owners.
+//
+// By default, this is the equivalent of calling
+// Watches(source.Kind(cache, &Type{}, handler.EnqueueRequestForOwner([...], &OwnerType{}, OnlyControllerOwner()))).
+func (blder *TypedBuilder[request]) Owns(object client.Object, opts ...OwnsOption) *TypedBuilder[request] {
+ input := OwnsInput{object: object}
+ for _, opt := range opts {
+ opt.ApplyToOwns(&input)
+ }
+
+ blder.ownsInput = append(blder.ownsInput, input)
+ return blder
+}
+
+type untypedWatchesInput interface {
+ setPredicates([]predicate.Predicate)
+ setObjectProjection(objectProjection)
+}
+
+// WatchesInput represents the information set by Watches method.
+type WatchesInput[request comparable] struct {
+ obj client.Object
+ handler handler.TypedEventHandler[client.Object, request]
+ predicates []predicate.Predicate
+ objectProjection objectProjection
+}
+
+func (w *WatchesInput[request]) setPredicates(predicates []predicate.Predicate) {
+ w.predicates = predicates
+}
+
+func (w *WatchesInput[request]) setObjectProjection(objectProjection objectProjection) {
+ w.objectProjection = objectProjection
+}
+
+// Watches defines the type of Object to watch, and configures the ControllerManagedBy to respond to create / delete /
+// update events by *reconciling the object* with the given EventHandler.
+//
+// This is the equivalent of calling
+// WatchesRawSource(source.Kind(cache, object, eventHandler, predicates...)).
+func (blder *TypedBuilder[request]) Watches(
+ object client.Object,
+ eventHandler handler.TypedEventHandler[client.Object, request],
+ opts ...WatchesOption,
+) *TypedBuilder[request] {
+ input := WatchesInput[request]{
+ obj: object,
+ handler: eventHandler,
+ }
+ for _, opt := range opts {
+ opt.ApplyToWatches(&input)
+ }
+
+ blder.watchesInput = append(blder.watchesInput, input)
+
+ return blder
+}
+
+// WatchesMetadata is the same as Watches, but forces the internal cache to only watch PartialObjectMetadata.
+//
+// This is useful when watching lots of objects, really big objects, or objects for which you only know
+// the GVK, but not the structure. You'll need to pass metav1.PartialObjectMetadata to the client
+// when fetching objects in your reconciler, otherwise you'll end up with a duplicate structured or unstructured cache.
+//
+// When watching a resource with metadata only, for example the v1.Pod, you should not Get and List using the v1.Pod type.
+// Instead, you should use the special metav1.PartialObjectMetadata type.
+//
+// ❌ Incorrect:
+//
+// pod := &v1.Pod{}
+// mgr.GetClient().Get(ctx, nsAndName, pod)
+//
+// ✅ Correct:
+//
+// pod := &metav1.PartialObjectMetadata{}
+// pod.SetGroupVersionKind(schema.GroupVersionKind{
+// Group: "",
+// Version: "v1",
+// Kind: "Pod",
+// })
+// mgr.GetClient().Get(ctx, nsAndName, pod)
+//
+// In the first case, controller-runtime will create another cache for the
+// concrete type on top of the metadata cache; this increases memory
+// consumption and leads to race conditions as caches are not in sync.
+func (blder *TypedBuilder[request]) WatchesMetadata(
+ object client.Object,
+ eventHandler handler.TypedEventHandler[client.Object, request],
+ opts ...WatchesOption,
+) *TypedBuilder[request] {
+ opts = append(opts, OnlyMetadata)
+ return blder.Watches(object, eventHandler, opts...)
+}
+
+// WatchesRawSource exposes the lower-level ControllerManagedBy Watches functions through the builder.
+//
+// WatchesRawSource does not respect predicates configured through WithEventFilter.
+//
+// WatchesRawSource makes it possible to use typed handlers and predicates with `source.Kind` as well as custom source implementations.
+func (blder *TypedBuilder[request]) WatchesRawSource(src source.TypedSource[request]) *TypedBuilder[request] {
+ blder.rawSources = append(blder.rawSources, src)
+
+ return blder
+}
+
+// WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually
+// trigger reconciliations. For example, filtering on whether the resource version has changed.
+// Given predicate is added for all watched objects and thus must be able to deal with the type
+// of all watched objects.
+//
+// Defaults to the empty list.
+func (blder *TypedBuilder[request]) WithEventFilter(p predicate.Predicate) *TypedBuilder[request] {
+ blder.globalPredicates = append(blder.globalPredicates, p)
+ return blder
+}
+
+// WithOptions overrides the controller options used in doController. Defaults to empty.
+func (blder *TypedBuilder[request]) WithOptions(options controller.TypedOptions[request]) *TypedBuilder[request] {
+ blder.ctrlOptions = options
+ return blder
+}
+
+// WithLogConstructor overrides the controller options's LogConstructor.
+func (blder *TypedBuilder[request]) WithLogConstructor(logConstructor func(*request) logr.Logger) *TypedBuilder[request] {
+ blder.ctrlOptions.LogConstructor = logConstructor
+ return blder
+}
+
+// Named sets the name of the controller to the given name. The name shows up
+// in metrics, among other things, and thus should be a prometheus compatible name
+// (underscores and alphanumeric characters only).
+//
+// By default, controllers are named using the lowercase version of their kind.
+//
+// The name must be unique as it is used to identify the controller in metrics and logs.
+func (blder *TypedBuilder[request]) Named(name string) *TypedBuilder[request] {
+ blder.name = name
+ return blder
+}
+
+// Complete builds the Application Controller.
+func (blder *TypedBuilder[request]) Complete(r reconcile.TypedReconciler[request]) error {
+ _, err := blder.Build(r)
+ return err
+}
+
+// Build builds the Application Controller and returns the Controller it created.
+func (blder *TypedBuilder[request]) Build(r reconcile.TypedReconciler[request]) (controller.TypedController[request], error) {
+ if r == nil {
+ return nil, fmt.Errorf("must provide a non-nil Reconciler")
+ }
+ if blder.mgr == nil {
+ return nil, fmt.Errorf("must provide a non-nil Manager")
+ }
+ if blder.forInput.err != nil {
+ return nil, blder.forInput.err
+ }
+
+ // Set the ControllerManagedBy
+ if err := blder.doController(r); err != nil {
+ return nil, err
+ }
+
+ // Set the Watch
+ if err := blder.doWatch(); err != nil {
+ return nil, err
+ }
+
+ return blder.ctrl, nil
+}
+
+func (blder *TypedBuilder[request]) project(obj client.Object, proj objectProjection) (client.Object, error) {
+ switch proj {
+ case projectAsNormal:
+ return obj, nil
+ case projectAsMetadata:
+ metaObj := &metav1.PartialObjectMetadata{}
+ gvk, err := apiutil.GVKForObject(obj, blder.mgr.GetScheme())
+ if err != nil {
+ return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err)
+ }
+ metaObj.SetGroupVersionKind(gvk)
+ return metaObj, nil
+ default:
+ panic(fmt.Sprintf("unexpected projection type %v on type %T, should not be possible since this is an internal field", proj, obj))
+ }
+}
+
+func (blder *TypedBuilder[request]) doWatch() error {
+ // Reconcile type
+ if blder.forInput.object != nil {
+ obj, err := blder.project(blder.forInput.object, blder.forInput.objectProjection)
+ if err != nil {
+ return err
+ }
+
+ if reflect.TypeFor[request]() != reflect.TypeFor[reconcile.Request]() {
+ return fmt.Errorf("For() can only be used with reconcile.Request, got %T", *new(request))
+ }
+
+ var hdler handler.TypedEventHandler[client.Object, request]
+ reflect.ValueOf(&hdler).Elem().Set(reflect.ValueOf(&handler.EnqueueRequestForObject{}))
+ allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...)
+ allPredicates = append(allPredicates, blder.forInput.predicates...)
+ src := source.TypedKind(blder.mgr.GetCache(), obj, hdler, allPredicates...)
+ if err := blder.ctrl.Watch(src); err != nil {
+ return err
+ }
+ }
+
+ // Watches the managed types
+ if len(blder.ownsInput) > 0 && blder.forInput.object == nil {
+ return errors.New("Owns() can only be used together with For()")
+ }
+ for _, own := range blder.ownsInput {
+ obj, err := blder.project(own.object, own.objectProjection)
+ if err != nil {
+ return err
+ }
+ opts := []handler.OwnerOption{}
+ if !own.matchEveryOwner {
+ opts = append(opts, handler.OnlyControllerOwner())
+ }
+
+ var hdler handler.TypedEventHandler[client.Object, request]
+ reflect.ValueOf(&hdler).Elem().Set(reflect.ValueOf(handler.EnqueueRequestForOwner(
+ blder.mgr.GetScheme(), blder.mgr.GetRESTMapper(),
+ blder.forInput.object,
+ opts...,
+ )))
+ allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...)
+ allPredicates = append(allPredicates, own.predicates...)
+ src := source.TypedKind(blder.mgr.GetCache(), obj, hdler, allPredicates...)
+ if err := blder.ctrl.Watch(src); err != nil {
+ return err
+ }
+ }
+
+ // Do the watch requests
+ if len(blder.watchesInput) == 0 && blder.forInput.object == nil && len(blder.rawSources) == 0 {
+ return errors.New("there are no watches configured, controller will never get triggered. Use For(), Owns(), Watches() or WatchesRawSource() to set them up")
+ }
+ for _, w := range blder.watchesInput {
+ projected, err := blder.project(w.obj, w.objectProjection)
+ if err != nil {
+ return fmt.Errorf("failed to project for %T: %w", w.obj, err)
+ }
+ allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...)
+ allPredicates = append(allPredicates, w.predicates...)
+ if err := blder.ctrl.Watch(source.TypedKind(blder.mgr.GetCache(), projected, w.handler, allPredicates...)); err != nil {
+ return err
+ }
+ }
+ for _, src := range blder.rawSources {
+ if err := blder.ctrl.Watch(src); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (blder *TypedBuilder[request]) getControllerName(gvk schema.GroupVersionKind, hasGVK bool) (string, error) {
+ if blder.name != "" {
+ return blder.name, nil
+ }
+ if !hasGVK {
+ return "", errors.New("one of For() or Named() must be called")
+ }
+ return strings.ToLower(gvk.Kind), nil
+}
+
+func (blder *TypedBuilder[request]) doController(r reconcile.TypedReconciler[request]) error {
+ globalOpts := blder.mgr.GetControllerOptions()
+
+ ctrlOptions := blder.ctrlOptions
+ if ctrlOptions.Reconciler != nil && r != nil {
+ return errors.New("reconciler was set via WithOptions() and via Build() or Complete()")
+ }
+ if ctrlOptions.Reconciler == nil {
+ ctrlOptions.Reconciler = r
+ }
+
+ // Retrieve the GVK from the object we're reconciling
+ // to pre-populate logger information, and to optionally generate a default name.
+ var gvk schema.GroupVersionKind
+ hasGVK := blder.forInput.object != nil
+ if hasGVK {
+ var err error
+ gvk, err = apiutil.GVKForObject(blder.forInput.object, blder.mgr.GetScheme())
+ if err != nil {
+ return err
+ }
+ }
+
+ // Setup concurrency.
+ if ctrlOptions.MaxConcurrentReconciles == 0 && hasGVK {
+ groupKind := gvk.GroupKind().String()
+
+ if concurrency, ok := globalOpts.GroupKindConcurrency[groupKind]; ok && concurrency > 0 {
+ ctrlOptions.MaxConcurrentReconciles = concurrency
+ }
+ }
+
+ // Setup cache sync timeout.
+ if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout > 0 {
+ ctrlOptions.CacheSyncTimeout = globalOpts.CacheSyncTimeout
+ }
+
+ controllerName, err := blder.getControllerName(gvk, hasGVK)
+ if err != nil {
+ return err
+ }
+
+ // Setup the logger.
+ if ctrlOptions.LogConstructor == nil {
+ log := blder.mgr.GetLogger().WithValues(
+ "controller", controllerName,
+ )
+ if hasGVK {
+ log = log.WithValues(
+ "controllerGroup", gvk.Group,
+ "controllerKind", gvk.Kind,
+ )
+ }
+
+ ctrlOptions.LogConstructor = func(in *request) logr.Logger {
+ log := log
+
+ if req, ok := any(in).(*reconcile.Request); ok && req != nil {
+ if hasGVK {
+ log = log.WithValues(gvk.Kind, klog.KRef(req.Namespace, req.Name))
+ }
+ log = log.WithValues(
+ "namespace", req.Namespace, "name", req.Name,
+ )
+ }
+ return log
+ }
+ }
+
+ if blder.newController == nil {
+ blder.newController = controller.NewTyped[request]
+ }
+
+ // Build the controller and return.
+ blder.ctrl, err = blder.newController(controllerName, blder.mgr, ctrlOptions)
+ return err
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go
new file mode 100644
index 0000000000..e4df1b709f
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package builder wraps other controller-runtime libraries and exposes simple
+// patterns for building common Controllers.
+//
+// Projects built with the builder package can trivially be rebased on top of the underlying
+// packages if the project requires more customized behavior in the future.
+package builder
+
+import (
+ logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
+)
+
+var log = logf.RuntimeLog.WithName("builder")
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go
new file mode 100644
index 0000000000..b907b5d020
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+// {{{ "Functional" Option Interfaces
+
+// ForOption is some configuration that modifies options for a For request.
+type ForOption interface {
+ // ApplyToFor applies this configuration to the given for input.
+ ApplyToFor(*ForInput)
+}
+
+// OwnsOption is some configuration that modifies options for an owns request.
+type OwnsOption interface {
+ // ApplyToOwns applies this configuration to the given owns input.
+ ApplyToOwns(*OwnsInput)
+}
+
+// WatchesOption is some configuration that modifies options for a watches request.
+type WatchesOption interface {
+ // ApplyToWatches applies this configuration to the given watches options.
+ ApplyToWatches(untypedWatchesInput)
+}
+
+// }}}
+
+// {{{ Multi-Type Options
+
+// WithPredicates sets the given predicates list.
+func WithPredicates(predicates ...predicate.Predicate) Predicates {
+ return Predicates{
+ predicates: predicates,
+ }
+}
+
+// Predicates filters events before enqueuing the keys.
+type Predicates struct {
+ predicates []predicate.Predicate
+}
+
+// ApplyToFor applies this configuration to the given ForInput options.
+func (w Predicates) ApplyToFor(opts *ForInput) {
+ opts.predicates = w.predicates
+}
+
+// ApplyToOwns applies this configuration to the given OwnsInput options.
+func (w Predicates) ApplyToOwns(opts *OwnsInput) {
+ opts.predicates = w.predicates
+}
+
+// ApplyToWatches applies this configuration to the given WatchesInput options.
+func (w Predicates) ApplyToWatches(opts untypedWatchesInput) {
+ opts.setPredicates(w.predicates)
+}
+
+var _ ForOption = &Predicates{}
+var _ OwnsOption = &Predicates{}
+var _ WatchesOption = &Predicates{}
+
+// }}}
+
+// {{{ For & Owns Dual-Type options
+
+// projectAs configures the projection on the input.
+// Currently only OnlyMetadata is supported. We might want to expand
+// this to arbitrary non-special local projections in the future.
+type projectAs objectProjection
+
+// ApplyToFor applies this configuration to the given ForInput options.
+func (p projectAs) ApplyToFor(opts *ForInput) {
+ opts.objectProjection = objectProjection(p)
+}
+
+// ApplyToOwns applies this configuration to the given OwnsInput options.
+func (p projectAs) ApplyToOwns(opts *OwnsInput) {
+ opts.objectProjection = objectProjection(p)
+}
+
+// ApplyToWatches applies this configuration to the given WatchesInput options.
+func (p projectAs) ApplyToWatches(opts untypedWatchesInput) {
+ opts.setObjectProjection(objectProjection(p))
+}
+
+var (
+ // OnlyMetadata tells the controller to *only* cache metadata, and to watch
+ // the API server in metadata-only form. This is useful when watching
+ // lots of objects, really big objects, or objects for which you only know
+ // the GVK, but not the structure. You'll need to pass
+ // metav1.PartialObjectMetadata to the client when fetching objects in your
+ // reconciler, otherwise you'll end up with a duplicate structured or
+ // unstructured cache.
+ //
+ // When watching a resource with OnlyMetadata, for example the v1.Pod, you
+ // should not Get and List using the v1.Pod type. Instead, you should use
+ // the special metav1.PartialObjectMetadata type.
+ //
+ // ❌ Incorrect:
+ //
+ // pod := &v1.Pod{}
+ // mgr.GetClient().Get(ctx, nsAndName, pod)
+ //
+ // ✅ Correct:
+ //
+ // pod := &metav1.PartialObjectMetadata{}
+ // pod.SetGroupVersionKind(schema.GroupVersionKind{
+ // Group: "",
+ // Version: "v1",
+ // Kind: "Pod",
+ // })
+ // mgr.GetClient().Get(ctx, nsAndName, pod)
+ //
+ // In the first case, controller-runtime will create another cache for the
+ // concrete type on top of the metadata cache; this increases memory
+ // consumption and leads to race conditions as caches are not in sync.
+ OnlyMetadata = projectAs(projectAsMetadata)
+
+ _ ForOption = OnlyMetadata
+ _ OwnsOption = OnlyMetadata
+ _ WatchesOption = OnlyMetadata
+)
+
+// }}}
+
+// MatchEveryOwner determines whether the watch should be filtered based on
+// controller ownership. As in, when the OwnerReference.Controller field is set.
+//
+// If passed as an option,
+// the handler receives notification for every owner of the object with the given type.
+// If unset (default), the handler receives notification only for the first
+// OwnerReference with `Controller: true`.
+var MatchEveryOwner = &matchEveryOwner{}
+
+type matchEveryOwner struct{}
+
+// ApplyToOwns applies this configuration to the given OwnsInput options.
+func (o matchEveryOwner) ApplyToOwns(opts *OwnsInput) {
+ opts.matchEveryOwner = true
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go
new file mode 100644
index 0000000000..d9c57c5e8b
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go
@@ -0,0 +1,386 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/go-logr/logr"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/rest"
+ "k8s.io/klog/v2"
+
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/conversion"
+)
+
+// WebhookBuilder builds a Webhook.
+type WebhookBuilder[T runtime.Object] struct {
+ apiType runtime.Object
+ customDefaulter admission.CustomDefaulter //nolint:staticcheck
+ defaulter admission.Defaulter[T]
+ customDefaulterOpts []admission.DefaulterOption
+ customValidator admission.CustomValidator //nolint:staticcheck
+ validator admission.Validator[T]
+ customPath string
+ customValidatorCustomPath string
+ customDefaulterCustomPath string
+ converterConstructor func(*runtime.Scheme) (conversion.Converter, error)
+ gvk schema.GroupVersionKind
+ mgr manager.Manager
+ config *rest.Config
+ recoverPanic *bool
+ logConstructor func(base logr.Logger, req *admission.Request) logr.Logger
+ contextFunc func(context.Context, *http.Request) context.Context
+ err error
+}
+
+// WebhookManagedBy returns a new webhook builder.
+func WebhookManagedBy[T runtime.Object](m manager.Manager, object T) *WebhookBuilder[T] {
+ return &WebhookBuilder[T]{mgr: m, apiType: object}
+}
+
+// WithCustomDefaulter takes an admission.CustomDefaulter interface, a MutatingWebhook with the provided opts (admission.DefaulterOption)
+// will be wired for this type.
+//
+// Deprecated: Use WithDefaulter instead.
+func (blder *WebhookBuilder[T]) WithCustomDefaulter(defaulter admission.CustomDefaulter, opts ...admission.DefaulterOption) *WebhookBuilder[T] {
+ blder.customDefaulter = defaulter
+ blder.customDefaulterOpts = opts
+ return blder
+}
+
+// WithDefaulter sets up the provided admission.Defaulter in a defaulting webhook.
+func (blder *WebhookBuilder[T]) WithDefaulter(defaulter admission.Defaulter[T], opts ...admission.DefaulterOption) *WebhookBuilder[T] {
+ blder.defaulter = defaulter
+ blder.customDefaulterOpts = opts
+ return blder
+}
+
+// WithCustomValidator takes a admission.CustomValidator interface, a ValidatingWebhook will be wired for this type.
+//
+// Deprecated: Use WithValidator instead.
+func (blder *WebhookBuilder[T]) WithCustomValidator(validator admission.CustomValidator) *WebhookBuilder[T] {
+ blder.customValidator = validator
+ return blder
+}
+
+// WithValidator sets up the provided admission.Validator in a validating webhook.
+func (blder *WebhookBuilder[T]) WithValidator(validator admission.Validator[T]) *WebhookBuilder[T] {
+ blder.validator = validator
+ return blder
+}
+
+// WithConverter takes a func that constructs a converter.Converter.
+// The Converter will then be used by the conversion endpoint for the type passed into NewWebhookManagedBy()
+func (blder *WebhookBuilder[T]) WithConverter(converterConstructor func(*runtime.Scheme) (conversion.Converter, error)) *WebhookBuilder[T] {
+ blder.converterConstructor = converterConstructor
+ return blder
+}
+
+// WithLogConstructor overrides the webhook's LogConstructor.
+func (blder *WebhookBuilder[T]) WithLogConstructor(logConstructor func(base logr.Logger, req *admission.Request) logr.Logger) *WebhookBuilder[T] {
+ blder.logConstructor = logConstructor
+ return blder
+}
+
+// WithContextFunc overrides the webhook's WithContextFunc.
+func (blder *WebhookBuilder[T]) WithContextFunc(contextFunc func(context.Context, *http.Request) context.Context) *WebhookBuilder[T] {
+ blder.contextFunc = contextFunc
+ return blder
+}
+
+// RecoverPanic indicates whether panics caused by the webhook should be recovered.
+// Defaults to true.
+func (blder *WebhookBuilder[T]) RecoverPanic(recoverPanic bool) *WebhookBuilder[T] {
+ blder.recoverPanic = &recoverPanic
+ return blder
+}
+
+// WithCustomPath overrides the webhook's default path by the customPath
+//
+// Deprecated: WithCustomPath should not be used anymore.
+// Please use WithValidatorCustomPath or WithDefaulterCustomPath instead.
+func (blder *WebhookBuilder[T]) WithCustomPath(customPath string) *WebhookBuilder[T] {
+ blder.customPath = customPath
+ return blder
+}
+
+// WithValidatorCustomPath overrides the path of the Validator.
+func (blder *WebhookBuilder[T]) WithValidatorCustomPath(customPath string) *WebhookBuilder[T] {
+ blder.customValidatorCustomPath = customPath
+ return blder
+}
+
+// WithDefaulterCustomPath overrides the path of the Defaulter.
+func (blder *WebhookBuilder[T]) WithDefaulterCustomPath(customPath string) *WebhookBuilder[T] {
+ blder.customDefaulterCustomPath = customPath
+ return blder
+}
+
+// Complete builds the webhook.
+func (blder *WebhookBuilder[T]) Complete() error {
+ // Set the Config
+ blder.loadRestConfig()
+
+ // Configure the default LogConstructor
+ blder.setLogConstructor()
+
+ // Set the Webhook if needed
+ return blder.registerWebhooks()
+}
+
+func (blder *WebhookBuilder[T]) loadRestConfig() {
+ if blder.config == nil {
+ blder.config = blder.mgr.GetConfig()
+ }
+}
+
+func (blder *WebhookBuilder[T]) setLogConstructor() {
+ if blder.logConstructor == nil {
+ blder.logConstructor = func(base logr.Logger, req *admission.Request) logr.Logger {
+ log := base.WithValues(
+ "webhookGroup", blder.gvk.Group,
+ "webhookKind", blder.gvk.Kind,
+ )
+ if req != nil {
+ return log.WithValues(
+ blder.gvk.Kind, klog.KRef(req.Namespace, req.Name),
+ "namespace", req.Namespace, "name", req.Name,
+ "resource", req.Resource, "user", req.UserInfo.Username,
+ "requestID", req.UID,
+ )
+ }
+ return log
+ }
+ }
+}
+
+func (blder *WebhookBuilder[T]) isThereCustomPathConflict() bool {
+ return (blder.customPath != "" && blder.customDefaulter != nil && blder.customValidator != nil) || (blder.customPath != "" && blder.customDefaulterCustomPath != "") || (blder.customPath != "" && blder.customValidatorCustomPath != "")
+}
+
+func (blder *WebhookBuilder[T]) registerWebhooks() error {
+ typ, err := blder.getType()
+ if err != nil {
+ return err
+ }
+
+ blder.gvk, err = apiutil.GVKForObject(typ, blder.mgr.GetScheme())
+ if err != nil {
+ return err
+ }
+
+ if blder.isThereCustomPathConflict() {
+ return errors.New("only one of CustomDefaulter or CustomValidator should be set when using WithCustomPath. Otherwise, WithDefaulterCustomPath() and WithValidatorCustomPath() should be used")
+ }
+ if blder.customPath != "" {
+ // isThereCustomPathConflict() already checks for potential conflicts.
+ // Since we are sure that only one of customDefaulter or customValidator will be used,
+ // we can set both customDefaulterCustomPath and validatingCustomPath.
+ blder.customDefaulterCustomPath = blder.customPath
+ blder.customValidatorCustomPath = blder.customPath
+ }
+
+ // Register webhook(s) for type
+ err = blder.registerDefaultingWebhook()
+ if err != nil {
+ return err
+ }
+
+ err = blder.registerValidatingWebhook()
+ if err != nil {
+ return err
+ }
+
+ err = blder.registerConversionWebhook()
+ if err != nil {
+ return err
+ }
+ return blder.err
+}
+
+// registerDefaultingWebhook registers a defaulting webhook if necessary.
+func (blder *WebhookBuilder[T]) registerDefaultingWebhook() error {
+ mwh, err := blder.getDefaultingWebhook()
+ if err != nil {
+ return err
+ }
+ if mwh != nil {
+ mwh.LogConstructor = blder.logConstructor
+ mwh.WithContextFunc = blder.contextFunc
+ path := generateMutatePath(blder.gvk)
+ if blder.customDefaulterCustomPath != "" {
+ generatedCustomPath, err := generateCustomPath(blder.customDefaulterCustomPath)
+ if err != nil {
+ return err
+ }
+ path = generatedCustomPath
+ }
+
+ // Checking if the path is already registered.
+ // If so, just skip it.
+ if !blder.isAlreadyHandled(path) {
+ log.Info("Registering a mutating webhook",
+ "GVK", blder.gvk,
+ "path", path)
+ blder.mgr.GetWebhookServer().Register(path, mwh)
+ }
+ }
+
+ return nil
+}
+
+func (blder *WebhookBuilder[T]) getDefaultingWebhook() (*admission.Webhook, error) {
+ var w *admission.Webhook
+ if blder.defaulter != nil {
+ if blder.customDefaulter != nil {
+ return nil, errors.New("only one of Defaulter or CustomDefaulter can be set")
+ }
+ w = admission.WithDefaulter(blder.mgr.GetScheme(), blder.defaulter, blder.customDefaulterOpts...)
+ } else if blder.customDefaulter != nil {
+ w = admission.WithCustomDefaulter(blder.mgr.GetScheme(), blder.apiType, blder.customDefaulter, blder.customDefaulterOpts...)
+ }
+ if w != nil && blder.recoverPanic != nil {
+ w = w.WithRecoverPanic(*blder.recoverPanic)
+ }
+ return w, nil
+}
+
+// registerValidatingWebhook registers a validating webhook if necessary.
+func (blder *WebhookBuilder[T]) registerValidatingWebhook() error {
+ vwh, err := blder.getValidatingWebhook()
+ if err != nil {
+ return err
+ }
+ if vwh != nil {
+ vwh.LogConstructor = blder.logConstructor
+ vwh.WithContextFunc = blder.contextFunc
+ path := generateValidatePath(blder.gvk)
+ if blder.customValidatorCustomPath != "" {
+ generatedCustomPath, err := generateCustomPath(blder.customValidatorCustomPath)
+ if err != nil {
+ return err
+ }
+ path = generatedCustomPath
+ }
+
+ // Checking if the path is already registered.
+ // If so, just skip it.
+ if !blder.isAlreadyHandled(path) {
+ log.Info("Registering a validating webhook",
+ "GVK", blder.gvk,
+ "path", path)
+ blder.mgr.GetWebhookServer().Register(path, vwh)
+ }
+ }
+
+ return nil
+}
+
+func (blder *WebhookBuilder[T]) getValidatingWebhook() (*admission.Webhook, error) {
+ var w *admission.Webhook
+ if blder.validator != nil {
+ if blder.customValidator != nil {
+ return nil, errors.New("only one of Validator or CustomValidator can be set")
+ }
+ w = admission.WithValidator(blder.mgr.GetScheme(), blder.validator)
+ } else if blder.customValidator != nil {
+ //nolint:staticcheck
+ w = admission.WithCustomValidator(blder.mgr.GetScheme(), blder.apiType, blder.customValidator)
+ }
+ if w != nil && blder.recoverPanic != nil {
+ w = w.WithRecoverPanic(*blder.recoverPanic)
+ }
+ return w, nil
+}
+
+func (blder *WebhookBuilder[T]) registerConversionWebhook() error {
+ if blder.converterConstructor != nil {
+ converter, err := blder.converterConstructor(blder.mgr.GetScheme())
+ if err != nil {
+ return err
+ }
+
+ if err := blder.mgr.GetConverterRegistry().RegisterConverter(blder.gvk.GroupKind(), converter); err != nil {
+ return err
+ }
+ } else {
+ ok, err := conversion.IsConvertible(blder.mgr.GetScheme(), blder.apiType)
+ if err != nil {
+ log.Error(err, "conversion check failed", "GVK", blder.gvk)
+ return err
+ }
+ if !ok {
+ return nil
+ }
+ }
+
+ if !blder.isAlreadyHandled("/convert") {
+ blder.mgr.GetWebhookServer().Register("/convert", conversion.NewWebhookHandler(blder.mgr.GetScheme(), blder.mgr.GetConverterRegistry()))
+ }
+ log.Info("Conversion webhook enabled", "GVK", blder.gvk)
+
+ return nil
+}
+
+func (blder *WebhookBuilder[T]) getType() (runtime.Object, error) {
+ if blder.apiType != nil {
+ return blder.apiType, nil
+ }
+ return nil, errors.New("NewWebhookManagedBy() must be called with a valid object")
+}
+
+func (blder *WebhookBuilder[T]) isAlreadyHandled(path string) bool {
+ if blder.mgr.GetWebhookServer().WebhookMux() == nil {
+ return false
+ }
+ h, p := blder.mgr.GetWebhookServer().WebhookMux().Handler(&http.Request{URL: &url.URL{Path: path}})
+ if p == path && h != nil {
+ return true
+ }
+ return false
+}
+
+func generateMutatePath(gvk schema.GroupVersionKind) string {
+ return "/mutate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" +
+ gvk.Version + "-" + strings.ToLower(gvk.Kind)
+}
+
+func generateValidatePath(gvk schema.GroupVersionKind) string {
+ return "/validate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" +
+ gvk.Version + "-" + strings.ToLower(gvk.Kind)
+}
+
+const webhookPathStringValidation = `^((/[a-zA-Z0-9-_]+)+|/)$`
+
+var validWebhookPathRegex = regexp.MustCompile(webhookPathStringValidation)
+
+func generateCustomPath(customPath string) (string, error) {
+ if !validWebhookPathRegex.MatchString(customPath) {
+ return "", errors.New("customPath \"" + customPath + "\" does not match this regex: " + webhookPathStringValidation)
+ }
+ return customPath, nil
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go
new file mode 100644
index 0000000000..1c39f4d854
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
+)
+
+// KubeconfigFlagName is the name of the kubeconfig flag
+const KubeconfigFlagName = "kubeconfig"
+
+var (
+ kubeconfig string
+ log = logf.RuntimeLog.WithName("client").WithName("config")
+)
+
+// init registers the "kubeconfig" flag to the default command line FlagSet.
+// TODO: This should be removed, as it potentially leads to redefined flag errors for users, if they already
+// have registered the "kubeconfig" flag to the command line FlagSet in other parts of their code.
+func init() {
+ RegisterFlags(flag.CommandLine)
+}
+
+// RegisterFlags registers flag variables to the given FlagSet if not already registered.
+// It uses the default command line FlagSet, if none is provided. Currently, it only registers the kubeconfig flag.
+func RegisterFlags(fs *flag.FlagSet) {
+ if fs == nil {
+ fs = flag.CommandLine
+ }
+ if f := fs.Lookup(KubeconfigFlagName); f != nil {
+ kubeconfig = f.Value.String()
+ } else {
+ fs.StringVar(&kubeconfig, KubeconfigFlagName, "", "Paths to a kubeconfig. Only required if out-of-cluster.")
+ }
+}
+
+// GetConfig creates a *rest.Config for talking to a Kubernetes API server.
+// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
+// in cluster and use the cluster provided kubeconfig.
+//
+// The returned `*rest.Config` has client-side ratelimiting disabled as we can rely on API priority and
+// fairness. Set its QPS to a value equal or bigger than 0 to re-enable it.
+//
+// Config precedence:
+//
+// * --kubeconfig flag pointing at a file
+//
+// * KUBECONFIG environment variable pointing at a file
+//
+// * In-cluster config if running in cluster
+//
+// * $HOME/.kube/config if exists.
+func GetConfig() (*rest.Config, error) {
+ return GetConfigWithContext("")
+}
+
+// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context.
+// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
+// in cluster and use the cluster provided kubeconfig.
+//
+// The returned `*rest.Config` has client-side ratelimiting disabled as we can rely on API priority and
+// fairness. Set its QPS to a value equal or bigger than 0 to re-enable it.
+//
+// Config precedence:
+//
+// * --kubeconfig flag pointing at a file
+//
+// * KUBECONFIG environment variable pointing at a file
+//
+// * In-cluster config if running in cluster
+//
+// * $HOME/.kube/config if exists.
+func GetConfigWithContext(context string) (*rest.Config, error) {
+ cfg, err := loadConfig(context)
+ if err != nil {
+ return nil, err
+ }
+ if cfg.QPS == 0.0 {
+ // Disable client-side ratelimiter by default, we can rely on
+ // API priority and fairness
+ cfg.QPS = -1
+ }
+ return cfg, nil
+}
+
+// loadInClusterConfig is a function used to load the in-cluster
+// Kubernetes client config. This variable makes it possible to
+// test the precedence of loading the config.
+var loadInClusterConfig = rest.InClusterConfig
+
+// loadConfig loads a REST Config as per the rules specified in GetConfig.
+func loadConfig(context string) (config *rest.Config, configErr error) {
+ // If a flag is specified with the config location, use that
+ if len(kubeconfig) > 0 {
+ return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context)
+ }
+
+ // If the recommended kubeconfig env variable is not specified,
+ // try the in-cluster config.
+ kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
+ if len(kubeconfigPath) == 0 {
+ c, err := loadInClusterConfig()
+ if err == nil {
+ return c, nil
+ }
+
+ defer func() {
+ if configErr != nil {
+ log.Error(err, "unable to load in-cluster config")
+ }
+ }()
+ }
+
+ // If the recommended kubeconfig env variable is set, or there
+ // is no in-cluster config, try the default recommended locations.
+ //
+ // NOTE: For default config file locations, upstream only checks
+ // $HOME for the user's home directory, but we can also try
+ // os/user.HomeDir when $HOME is unset.
+ //
+ // TODO(jlanford): could this be done upstream?
+ loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+ if _, ok := os.LookupEnv("HOME"); !ok {
+ u, err := user.Current()
+ if err != nil {
+ return nil, fmt.Errorf("could not get current user: %w", err)
+ }
+ loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName))
+ }
+
+ return loadConfigWithContext("", loadingRules, context)
+}
+
+func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) {
+ return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+ loader,
+ &clientcmd.ConfigOverrides{
+ ClusterInfo: clientcmdapi.Cluster{
+ Server: apiServerURL,
+ },
+ CurrentContext: context,
+ }).ClientConfig()
+}
+
+// GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver.
+// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
+// in cluster and use the cluster provided kubeconfig.
+//
+// The returned `*rest.Config` has client-side ratelimiting disabled as we can rely on API priority and
+// fairness. Set its QPS to a value equal or bigger than 0 to re-enable it.
+//
+// Will log an error and exit if there is an error creating the rest.Config.
+func GetConfigOrDie() *rest.Config {
+ config, err := GetConfig()
+ if err != nil {
+ log.Error(err, "unable to get kubeconfig")
+ os.Exit(1)
+ }
+ return config
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go
new file mode 100644
index 0000000000..796c9cf590
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package config contains libraries for initializing REST configs for talking to the Kubernetes API
+package config
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go
new file mode 100644
index 0000000000..737cc7eff2
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package signals contains libraries for handling signals to gracefully
+// shutdown the manager in combination with Kubernetes pod graceful termination
+// policy.
+package signals
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go
new file mode 100644
index 0000000000..a79cfb42df
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signals
+
+import (
+ "context"
+ "os"
+ "os/signal"
+)
+
+var onlyOneSignalHandler = make(chan struct{})
+
+// SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned
+// which is canceled on one of these signals. If a second signal is caught, the program
+// is terminated with exit code 1.
+func SetupSignalHandler() context.Context {
+ close(onlyOneSignalHandler) // panics when called twice
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ c := make(chan os.Signal, 2)
+ signal.Notify(c, shutdownSignals...)
+ go func() {
+ <-c
+ cancel()
+ <-c
+ os.Exit(1) // second signal. Exit directly.
+ }()
+
+ return ctx
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go
new file mode 100644
index 0000000000..2b24faa428
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go
@@ -0,0 +1,26 @@
+//go:build !windows
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signals
+
+import (
+ "os"
+ "syscall"
+)
+
+var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go
new file mode 100644
index 0000000000..4907d573fe
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signals
+
+import (
+ "os"
+)
+
+var shutdownSignals = []os.Signal{os.Interrupt}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
new file mode 100644
index 0000000000..55ebe21773
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package scheme contains utilities for gradually building Schemes,
+// which contain information associating Go types with Kubernetes
+// groups, versions, and kinds.
+//
+// Each API group should define a utility function
+// called AddToScheme for adding its types to a Scheme:
+//
+// // in package myapigroupv1...
+// var (
+// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
+// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+// AddToScheme = SchemeBuilder.AddToScheme
+// )
+//
+// func init() {
+// SchemeBuilder.Register(&MyType{}, &MyTypeList)
+// }
+// var (
+// scheme *runtime.Scheme = runtime.NewScheme()
+// )
+//
+// This also true of the built-in Kubernetes types. Then, in the entrypoint for
+// your manager, assemble the scheme containing exactly the types you need,
+// panicing if scheme registration failed. For instance, if our controller needs
+// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1:
+//
+// func init() {
+// utilruntime.Must(myapigroupv1.AddToScheme(scheme))
+// utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
+// }
+//
+// func main() {
+// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
+// Scheme: scheme,
+// })
+// // ...
+// }
+package scheme
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds.
+type Builder struct {
+ GroupVersion schema.GroupVersion
+ runtime.SchemeBuilder
+}
+
+// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld.
+func (bld *Builder) Register(object ...runtime.Object) *Builder {
+ bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(bld.GroupVersion, object...)
+ metav1.AddToGroupVersion(scheme, bld.GroupVersion)
+ return nil
+ })
+ return bld
+}
+
+// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld.
+func (bld *Builder) RegisterAll(b *Builder) *Builder {
+ bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...)
+ return bld
+}
+
+// AddToScheme adds all registered types to s.
+func (bld *Builder) AddToScheme(s *runtime.Scheme) error {
+ return bld.SchemeBuilder.AddToScheme(s)
+}
+
+// Build returns a new Scheme containing the registered types.
+func (bld *Builder) Build() (*runtime.Scheme, error) {
+ s := runtime.NewScheme()
+ return s, bld.AddToScheme(s)
+}