You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
only report Progressing for active network rollouts
Keep pod-based Progressing tied to an actual CNO rollout instead of
temporary unavailability during node reboot churn. Persist the rollout
generation in status manager state so Progressing stays true until the
rollout is both observed and fully available.
For machine config status, stop treating generic MCP node convergence as
a CNO rollout signal. Check whether the CNO machine config is still
present in the pool's rendered source list so routine MCO updates do not
flip network Progressing to true.
Signed-off-by: Jamo Luhrsen <jluhrsen@gmail.com>
Co-authored-by: Claude Code and Codex
progressing = append(progressing, fmt.Sprintf("DaemonSet %q update is rolling out (%d out of %d updated)", dsName.String(), ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled))
98
119
dsProgressing = true
99
120
} else if ds.Status.NumberUnavailable > 0 {
100
-
progressing = append(progressing, fmt.Sprintf("DaemonSet %q is not available (awaiting %d nodes)", dsName.String(), ds.Status.NumberUnavailable))
101
-
dsProgressing = true
102
-
// Check for any pods in CrashLoopBackOff state and mark the operator as degraded if so.
121
+
if dsRolloutActive {
122
+
progressing = append(progressing, fmt.Sprintf("DaemonSet %q is not available (awaiting %d nodes)", dsName.String(), ds.Status.NumberUnavailable))
progressing = append(progressing, fmt.Sprintf("StatefulSet %q update is rolling out (%d out of %d updated)", ssName.String(), ss.Status.UpdatedReplicas, ss.Status.Replicas))
progressing = append(progressing, fmt.Sprintf("StatefulSet %q is not available (awaiting %d nodes)", ssName.String(), (ss.Status.Replicas-ss.Status.ReadyReplicas)))
157
-
ssProgressing = true
194
+
if ssRolloutActive {
195
+
progressing = append(progressing, fmt.Sprintf("StatefulSet %q is not available (awaiting %d nodes)", ssName.String(), (ss.Status.Replicas-ss.Status.ReadyReplicas)))
196
+
ssProgressing = true
197
+
}
158
198
// Check for any pods in CrashLoopBackOff state and mark the operator as degraded if so.
progressing = append(progressing, fmt.Sprintf("Deployment %q update is rolling out (%d out of %d updated)", depName.String(), dep.Status.UpdatedReplicas, dep.Status.Replicas))
209
266
depProgressing = true
210
267
} else if dep.Status.UnavailableReplicas > 0 {
211
-
progressing = append(progressing, fmt.Sprintf("Deployment %q is not available (awaiting %d nodes)", depName.String(), dep.Status.UnavailableReplicas))
212
-
depProgressing = true
268
+
if depRolloutActive {
269
+
progressing = append(progressing, fmt.Sprintf("Deployment %q is not available (awaiting %d nodes)", depName.String(), dep.Status.UnavailableReplicas))
270
+
depProgressing = true
271
+
}
213
272
// Check for any pods in CrashLoopBackOff state and mark the operator as degraded if so.
0 commit comments