@@ -105,7 +105,13 @@ func (r *NodeForceDrainReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		}
 		drainingNodes = append(drainingNodes, node)
 	}
-	l.Info("Draining nodes", "nodes", drainingNodes)
+	if len(drainingNodes) > 0 {
+		nodeNames := make([]string, 0, len(drainingNodes))
+		for _, node := range drainingNodes {
+			nodeNames = append(nodeNames, node.Name)
+		}
+		l.Info("Found draining nodes", "nodes", nodeNames)
+	}
 
 	// Update the last observed node drains for nodes that started draining.
 	statusChanged := false
@@ -136,7 +142,7 @@ func (r *NodeForceDrainReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 			if timeUntilNextDrain == 0 || timeUntilDrain < timeUntilNextDrain {
 				timeUntilNextDrain = timeUntilDrain
 			}
-			l.Info("Node is still in grace period", "node", node.Name, "lastObservedNodeDrain", ld)
+			l.Info("Node is still in grace period", "node", node.Name, "lastObservedNodeDrain", ld, "nodeDrainGracePeriod", fd.Spec.NodeDrainGracePeriod, "timeUntilDrain", timeUntilDrain)
 			continue
 		}
 		l.Info("Node is out of grace period", "node", node.Name, "lastObservedNodeDrain", ld, "nodeDrainGracePeriod", fd.Spec.NodeDrainGracePeriod)
@@ -247,7 +253,7 @@ func (r *NodeForceDrainReconciler) forceDrainNode(ctx context.Context, node core
 		if pod.DeletionTimestamp != nil {
 			continue
 		}
-		l.Info("Deleting pod", "pod", pod.Name)
+		l.Info("Deleting pod", "pod", pod.Name, "podNamespace", pod.Namespace)
 		attemptedDeletion = true
 		if err := r.Delete(ctx, &pod); err != nil {
 			deletionErrs = append(deletionErrs, err)
@@ -271,6 +277,7 @@ func (r *NodeForceDrainReconciler) forceDeletePodsOnNode(ctx context.Context, no
 	var timeUntilNextForceDelete time.Duration
 	deletionErrs := make([]error, 0, len(pods))
 	for _, pod := range pods {
+		l := l.WithValues("pod", pod.Name, "podNamespace", pod.Namespace)
 		if pod.DeletionTimestamp == nil {
 			continue
 		}
@@ -281,11 +288,11 @@ func (r *NodeForceDrainReconciler) forceDeletePodsOnNode(ctx context.Context, no
 			if timeUntilNextForceDelete == 0 || timeUntilForceDelete < timeUntilNextForceDelete {
 				timeUntilNextForceDelete = timeUntilForceDelete
 			}
-			l.Info("Pod is still in grace period", "pod", pod.Name, "deletionTimestamp", pod.DeletionTimestamp, "gracePeriod", gracePeriod)
+			l.Info("Pod is still in grace period", "deletionTimestamp", pod.DeletionTimestamp, "gracePeriod", gracePeriod, "timeUntilForceDelete", timeUntilForceDelete)
 			continue
 		}
 
-		l.Info("Force deleting pod", "pod", pod.Name)
+		l.Info("Force deleting pod")
 		if err := r.Delete(ctx, &pod, &client.DeleteOptions{
 			GracePeriodSeconds: ptr.To(int64(0)),
 		}); err != nil {
@@ -309,13 +316,18 @@ func (r *NodeForceDrainReconciler) getDeletionCandidatePodsForNode(ctx context.C
 
 	filteredPods := make([]corev1.Pod, 0, len(pods.Items))
 	for _, pod := range pods.Items {
+		l := l.WithValues("pod", pod.Name, "podNamespace", pod.Namespace)
 		controlledByActiveDaemonSet, err := r.podIsControlledByExistingDaemonSet(ctx, pod)
 		if err != nil {
-			l.Error(err, "Failed to check if pod is controlled by active DaemonSet", "pod", pod.Name)
+			l.Error(err, "Failed to check if pod is controlled by active DaemonSet")
 			continue
 		}
 		if controlledByActiveDaemonSet {
-			l.Info("Pod is controlled by active DaemonSet. Skipping", "pod", pod.Name)
+			l.Info("Pod is controlled by active DaemonSet. Skipping")
+			continue
+		}
+		if r.podIsStatic(pod) {
+			l.Info("Pod is static. Skipping")
 			continue
 		}
 
@@ -325,6 +337,19 @@ func (r *NodeForceDrainReconciler) getDeletionCandidatePodsForNode(ctx context.C
 	return filteredPods, nil
 }
 
+// podIsStatic returns true if the pod is a static pod.
+// https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/
+// Such pods are directly managed by the kubelet and should not be deleted.
+// The check is based on the pod's controller being the node itself.
+func (r *NodeForceDrainReconciler) podIsStatic(pod corev1.Pod) bool {
+	controllerRef := metav1.GetControllerOf(&pod)
+	if controllerRef == nil {
+		return false
+	}
+
+	return controllerRef.APIVersion == "v1" && controllerRef.Kind == "Node"
+}
+
 // podIsControlledByExistingDaemonSet returns true if the pod is controlled by an existing DaemonSet.
 // This is determined by checking if the pod's controller is a DaemonSet and if the DaemonSet exists in the API.
 func (r *NodeForceDrainReconciler) podIsControlledByExistingDaemonSet(ctx context.Context, pod corev1.Pod) (bool, error) {
@@ -345,7 +370,7 @@ func (r *NodeForceDrainReconciler) podIsControlledByExistingDaemonSet(ctx contex
 	// Edge case: Pod was orphaned
 	// See https://github.com/kubernetes/kubectl/blob/442e3d141a35703b7637f41339b9f73cad005c47/pkg/drain/filters.go#L174
 	if apierrors.IsNotFound(err) {
-		l.Info("No daemon set found for pod", "daemonSet", controllerRef.Name, "pod", pod.Name, "namespace", pod.Namespace)
+		l.Info("No daemon set found for pod", "daemonSet", controllerRef.Name, "pod", pod.Name, "podNamespace", pod.Namespace)
 		return false, nil
 	}
 	return false, fmt.Errorf("failed to get DaemonSet %s: %w", controllerRef.Name, err)
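For context, the force delete in `forceDeletePodsOnNode` boils down to a delete call with a zero grace period, which asks the API server to remove the pod object immediately rather than waiting for the kubelet to confirm termination. A minimal sketch of that call, assuming a controller-runtime `client.Client`; the `forceDelete` helper name is illustrative, not part of this change:

```go
package forcedrain

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// forceDelete deletes a pod with GracePeriodSeconds set to 0. The API server
// then removes the pod object immediately, which is what allows cleanup of
// pods stuck terminating on an unreachable node; note the container may keep
// running on that node until its kubelet comes back.
func forceDelete(ctx context.Context, c client.Client, pod *corev1.Pod) error {
	return c.Delete(ctx, pod, &client.DeleteOptions{
		GracePeriodSeconds: ptr.To(int64(0)),
	})
}
```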
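The new `podIsStatic` check relies on how the kubelet publishes static pods: it creates a mirror pod in the API that is owned by the `v1/Node` it runs on, so the pod's controller reference points at the node itself. A runnable sketch of that behavior; the pod and node names are made up:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// podIsStatic mirrors the method added in this commit: a pod counts as
// static if its controlling owner reference is the Node it runs on.
func podIsStatic(pod corev1.Pod) bool {
	controllerRef := metav1.GetControllerOf(&pod)
	if controllerRef == nil {
		return false
	}
	return controllerRef.APIVersion == "v1" && controllerRef.Kind == "Node"
}

func main() {
	// A kubelet mirror pod for a static pod carries an owner reference to its node.
	mirrorPod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "etcd-node-a",
			Namespace: "kube-system",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "v1",
				Kind:       "Node",
				Name:       "node-a",
				Controller: ptr.To(true),
			}},
		},
	}
	fmt.Println(podIsStatic(mirrorPod)) // true: skipped, the kubelet owns it
}
```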