diff --git a/cluster-autoscaler/simulator/drain.go b/cluster-autoscaler/simulator/drain.go
index 1e9b62f2653e..34f81b0ce34d 100644
--- a/cluster-autoscaler/simulator/drain.go
+++ b/cluster-autoscaler/simulator/drain.go
@@ -17,13 +17,10 @@ limitations under the License.
 package simulator
 
 import (
-	"fmt"
 	"time"
 
 	apiv1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
@@ -90,6 +87,10 @@ func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions NodeDele
 				drainPods = append(drainPods, pod)
 			}
 		case drainability.BlockDrain:
+			// TODO(reviewer note): What is the consequence of returning a pod when
+			// the error was not caused by the pod (e.g. lister failed)?
+			// If this matters, one option is for drainability rules to return a
+			// value indicating which pod caused the condition.
 			blockingPod = &drain.BlockingPod{
 				Pod:    pod,
 				Reason: status.BlockingReason,
@@ -113,28 +114,6 @@ func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions NodeDele
 
 	if err != nil {
 		return pods, daemonSetPods, blockingPod, err
 	}
-	if pdbBlockingPod, err := checkPdbs(pods, pdbs); err != nil {
-		return []*apiv1.Pod{}, []*apiv1.Pod{}, pdbBlockingPod, err
-	}
 	return pods, daemonSetPods, nil, nil
 }
-
-func checkPdbs(pods []*apiv1.Pod, pdbs []*policyv1.PodDisruptionBudget) (*drain.BlockingPod, error) {
-	// TODO: remove it after deprecating legacy scale down.
-	// RemainingPdbTracker.CanRemovePods() to replace this function.
-	for _, pdb := range pdbs {
-		selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
-		if err != nil {
-			return nil, err
-		}
-		for _, pod := range pods {
-			if pod.Namespace == pdb.Namespace && selector.Matches(labels.Set(pod.Labels)) {
-				if pdb.Status.DisruptionsAllowed < 1 {
-					return &drain.BlockingPod{Pod: pod, Reason: drain.NotEnoughPdb}, fmt.Errorf("not enough pod disruption budget to move %s/%s", pod.Namespace, pod.Name)
-				}
-			}
-		}
-	}
-	return nil, nil
-}
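Note on the TODO above: as written, a BlockDrain status always attributes the block to the pod currently being evaluated, even when the underlying failure (for example a lister error) has nothing to do with that pod. A minimal sketch of the option the note suggests, assuming a hypothetical CausingPod field that the real drainability.Status does not have:

package sketch // illustrative only; not part of this change

import (
	apiv1 "k8s.io/api/core/v1"
)

// OutcomeType stands in for drainability.OutcomeType in this sketch.
type OutcomeType int

const (
	UndefinedOutcome OutcomeType = iota
	DrainOk
	BlockDrain
)

// Status extends the shape of drainability.Status with a hypothetical
// CausingPod field, so rules can say which pod (if any) produced a
// BlockDrain outcome.
type Status struct {
	Outcome    OutcomeType
	CausingPod *apiv1.Pod // nil when the failure is not pod-specific, e.g. a lister error
}

// blockingPodFor returns the pod to report as blocking, or nil when no
// single pod caused the condition, so callers stop blaming an innocent pod.
func blockingPodFor(s Status) *apiv1.Pod {
	if s.Outcome == BlockDrain {
		return s.CausingPod
	}
	return nil
}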
diff --git a/cluster-autoscaler/simulator/drainability/rules/pdb/pdb.go b/cluster-autoscaler/simulator/drainability/rules/pdb/pdb.go
new file mode 100644
index 000000000000..d795324b9766
--- /dev/null
+++ b/cluster-autoscaler/simulator/drainability/rules/pdb/pdb.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pdb
+
+import (
+	"fmt"
+
+	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+)
+
+// Rule is a drainability rule on how to handle pods with pdbs.
+type Rule struct{}
+
+// New creates a new Rule.
+func New() *Rule {
+	return &Rule{}
+}
+
+// Drainable decides how to handle pods with pdbs on node drain.
+func (Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+	if drainCtx == nil {
+		return drainability.NewUndefinedStatus()
+	}
+	// TODO: Replace this logic with RemainingPdbTracker.CanRemovePods() after
+	// deprecating legacy scale down. Depending on the implementation, this may
+	// require adding information to the DrainContext, such as the slice of pods
+	// and a flag to prevent duplicate checks.
+	for _, pdb := range drainCtx.Pdbs {
+		selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
+		if err != nil {
+			return drainability.NewBlockedStatus(drain.UnexpectedError, fmt.Errorf("failed to convert label selector: %v", err))
+		}
+
+		if pod.Namespace == pdb.Namespace && selector.Matches(labels.Set(pod.Labels)) && pdb.Status.DisruptionsAllowed < 1 {
+			return drainability.NewBlockedStatus(drain.NotEnoughPdb, fmt.Errorf("not enough pod disruption budget to move %s/%s", pod.Namespace, pod.Name))
+		}
+	}
+	return drainability.NewUndefinedStatus()
+}
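For illustration, a minimal sketch of evaluating the new rule directly. It uses only types and fields referenced in this diff (DrainContext.Pdbs, Status.Outcome, pdb.New), but the program itself is illustrative, not part of the change:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/pdb"
)

func main() {
	// A PDB in namespace "ns" that matches app=web and currently allows no
	// disruptions. The rule only reads Spec.Selector and
	// Status.DisruptionsAllowed.
	budget := &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		},
		Status: policyv1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
	}
	pod := &apiv1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "web-0",
		Namespace: "ns",
		Labels:    map[string]string{"app": "web"},
	}}

	status := pdb.New().Drainable(&drainability.DrainContext{
		Pdbs: []*policyv1.PodDisruptionBudget{budget},
	}, pod)
	fmt.Println(status.Outcome == drainability.BlockDrain) // true: no disruptions allowed
}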
diff --git a/cluster-autoscaler/simulator/drainability/rules/pdb/pdb_test.go b/cluster-autoscaler/simulator/drainability/rules/pdb/pdb_test.go
new file mode 100644
index 000000000000..ae38c75ccb71
--- /dev/null
+++ b/cluster-autoscaler/simulator/drainability/rules/pdb/pdb_test.go
@@ -0,0 +1,159 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pdb
+
+import (
+	"testing"
+
+	apiv1 "k8s.io/api/core/v1"
+	policyv1 "k8s.io/api/policy/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+)
+
+func TestDrainable(t *testing.T) {
+	one := intstr.FromInt(1)
+
+	testCases := []struct {
+		desc        string
+		pod         *apiv1.Pod
+		drainCtx    *drainability.DrainContext
+		wantOutcome drainability.OutcomeType
+		wantReason  drain.BlockingPodReason
+	}{
+		{
+			desc: "no context",
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{},
+			},
+		},
+		{
+			desc:     "no pdbs",
+			pod:      &apiv1.Pod{},
+			drainCtx: &drainability.DrainContext{},
+		},
+		{
+			desc: "no matching pdbs",
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "happy",
+					Namespace: "good",
+					Labels: map[string]string{
+						"label": "true",
+					},
+				},
+			},
+			drainCtx: &drainability.DrainContext{
+				Pdbs: []*policyv1.PodDisruptionBudget{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Namespace: "bad",
+						},
+						Spec: policyv1.PodDisruptionBudgetSpec{
+							MinAvailable: &one,
+							Selector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{
+									"label": "true",
+								},
+							},
+						},
+					},
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Namespace: "good",
+						},
+						Spec: policyv1.PodDisruptionBudgetSpec{
+							MinAvailable: &one,
+							Selector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{
+									"label": "false",
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			desc: "pdb prevents scale-down",
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "sad",
+					Namespace: "good",
+					Labels: map[string]string{
+						"label": "true",
+					},
+				},
+			},
+			drainCtx: &drainability.DrainContext{
+				Pdbs: []*policyv1.PodDisruptionBudget{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Namespace: "bad",
+						},
+						Spec: policyv1.PodDisruptionBudgetSpec{
+							MinAvailable: &one,
+							Selector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{
+									"label": "true",
+								},
+							},
+						},
+					},
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Namespace: "good",
+						},
+						Spec: policyv1.PodDisruptionBudgetSpec{
+							MinAvailable: &one,
+							Selector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{
+									"label": "true",
+								},
+							},
+						},
+					},
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Namespace: "good",
+						},
+						Spec: policyv1.PodDisruptionBudgetSpec{
+							MinAvailable: &one,
+							Selector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{
+									"label": "false",
+								},
+							},
+						},
+					},
+				},
+			},
+			wantOutcome: drainability.BlockDrain,
+			wantReason:  drain.NotEnoughPdb,
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			got := New().Drainable(tc.drainCtx, tc.pod)
+			if got.Outcome != tc.wantOutcome || got.BlockingReason != tc.wantReason {
+				t.Errorf("Rule.Drainable(%s) = (outcome: %v, reason: %v), want (outcome: %v, reason: %v)", tc.pod.Name, got.Outcome, got.BlockingReason, tc.wantOutcome, tc.wantReason)
+			}
+		})
+	}
+}
diff --git a/cluster-autoscaler/simulator/drainability/rules/rules.go b/cluster-autoscaler/simulator/drainability/rules/rules.go
index cd9118931652..b99219cdd652 100644
--- a/cluster-autoscaler/simulator/drainability/rules/rules.go
+++ b/cluster-autoscaler/simulator/drainability/rules/rules.go
@@ -20,6 +20,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/mirror"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/pdb"
 )
 
 // Rule determines whether a given pod can be drained or not.
@@ -35,6 +36,7 @@ type Rule interface {
 
 func Default() Rules {
 	return []Rule{
 		mirror.New(),
+		pdb.New(),
 	}
 }
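The Rules slice returned by Default() is consumed by the simulator; the evaluation loop itself lives in rules.go and is unchanged by this diff. A sketch of the assumed semantics, where the first rule returning a non-undefined status decides the outcome (inferred from the switch on status.Outcome in drain.go above, not spelled out in this diff):

package sketch // illustrative only

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
)

// drainable walks the rule list in order. The first rule with a definitive
// opinion (DrainOk or BlockDrain) wins; UndefinedOutcome means "no opinion,
// ask the next rule". These semantics are an assumption of this sketch.
func drainable(rs rules.Rules, drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
	for _, r := range rs {
		if status := r.Drainable(drainCtx, pod); status.Outcome != drainability.UndefinedOutcome {
			return status
		}
	}
	return drainability.NewUndefinedStatus()
}

With this change, evaluating rules.Default() against a pod covered by an exhausted PodDisruptionBudget yields a BlockDrain status with reason drain.NotEnoughPdb, replacing the separate checkPdbs pass removed from drain.go.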