mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-28 14:41:10 +01:00)
Migrate RemovePodsHavingTooManyRestarts to plugin
@@ -57,3 +57,15 @@ type RemovePodsViolatingNodeAffinityArgs struct {
     LabelSelector *metav1.LabelSelector
     NodeAffinityType []string
 }
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RemovePodsHavingTooManyRestartsArgs holds arguments used to configure RemovePodsHavingTooManyRestarts plugin.
+type RemovePodsHavingTooManyRestartsArgs struct {
+    metav1.TypeMeta
+
+    Namespaces *api.Namespaces
+    LabelSelector *metav1.LabelSelector
+    PodRestartThreshold int32
+    IncludingInitContainers bool
+}
@@ -18,9 +18,9 @@ package validation

 import (
     "fmt"
-    utilerrors "k8s.io/apimachinery/pkg/util/errors"

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    utilerrors "k8s.io/apimachinery/pkg/util/errors"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 )
@@ -41,6 +41,15 @@ func ValidateRemoveFailedPodsArgs(args *componentconfig.RemoveFailedPodsArgs) er
     )
 }

+// ValidateRemovePodsHavingTooManyRestartsArgs validates RemovePodsHavingTooManyRestarts arguments
+func ValidateRemovePodsHavingTooManyRestartsArgs(args *componentconfig.RemovePodsHavingTooManyRestartsArgs) error {
+    return errorsAggregate(
+        validateNamespaceArgs(args.Namespaces),
+        validateLabelSelectorArgs(args.LabelSelector),
+        validatePodRestartThreshold(args.PodRestartThreshold),
+    )
+}
+
 // errorsAggregate converts all arg validation errors to a single error interface.
 // if no errors, it will return nil.
 func errorsAggregate(errors ...error) error {
@@ -76,6 +85,12 @@ func ValidateRemovePodsViolatingNodeAffinityArgs(args *componentconfig.RemovePod
     if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
         return fmt.Errorf("only one of Include/Exclude namespaces can be set")
     }
+    return nil
+}

+func validatePodRestartThreshold(podRestartThreshold int32) error {
+    if podRestartThreshold < 1 {
+        return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
+    }
     return nil
 }
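For illustration only, not part of the commit: a minimal sketch of how the argument validation added above could be exercised. The type, function, and package paths are taken from the hunks above; the standalone main wrapper is an assumption, as is the expectation that nil Namespaces and LabelSelector arguments pass their respective checks.

package main

import (
    "fmt"

    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation"
)

func main() {
    // A zero threshold should be rejected by validatePodRestartThreshold above.
    bad := &componentconfig.RemovePodsHavingTooManyRestartsArgs{
        PodRestartThreshold:     0,
        IncludingInitContainers: true,
    }
    if err := validation.ValidateRemovePodsHavingTooManyRestartsArgs(bad); err != nil {
        fmt.Println("rejected:", err) // expected: "PodsHavingTooManyRestarts threshold not set"
    }

    // A positive threshold with no namespace or label-selector constraints is assumed to pass.
    good := &componentconfig.RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 100}
    fmt.Println("valid:", validation.ValidateRemovePodsHavingTooManyRestartsArgs(good) == nil)
}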
@@ -104,6 +104,41 @@ func (in *RemoveFailedPodsArgs) DeepCopyObject() runtime.Object {
     return nil
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyInto(out *RemovePodsHavingTooManyRestartsArgs) {
+    *out = *in
+    out.TypeMeta = in.TypeMeta
+    if in.Namespaces != nil {
+        in, out := &in.Namespaces, &out.Namespaces
+        *out = new(api.Namespaces)
+        (*in).DeepCopyInto(*out)
+    }
+    if in.LabelSelector != nil {
+        in, out := &in.LabelSelector, &out.LabelSelector
+        *out = new(v1.LabelSelector)
+        (*in).DeepCopyInto(*out)
+    }
+    return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsHavingTooManyRestartsArgs.
+func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopy() *RemovePodsHavingTooManyRestartsArgs {
+    if in == nil {
+        return nil
+    }
+    out := new(RemovePodsHavingTooManyRestartsArgs)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyObject() runtime.Object {
+    if c := in.DeepCopy(); c != nil {
+        return c
+    }
+    return nil
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopyInto(out *RemovePodsViolatingNodeAffinityArgs) {
     *out = *in
@@ -251,7 +251,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
         "RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
         "RemovePodsViolatingNodeAffinity": nil,
         "RemovePodsViolatingNodeTaints": nil,
-        "RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
+        "RemovePodsHavingTooManyRestarts": nil,
         "PodLifeTime": strategies.PodLifeTime,
         "RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
         "RemoveFailedPods": nil,
@@ -1,114 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package strategies
-
-import (
-    "context"
-    "fmt"
-
-    v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/util/sets"
-    clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/klog/v2"
-
-    "sigs.k8s.io/descheduler/pkg/api"
-    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
-    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-)
-
-func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
-    if params == nil || params.PodsHavingTooManyRestarts == nil || params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
-        return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
-    }
-
-    // At most one of include/exclude can be set
-    if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
-        return fmt.Errorf("only one of Include/Exclude namespaces can be set")
-    }
-    if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
-        return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
-    }
-
-    return nil
-}
-
-// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
-// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
-// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
-    if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
-        klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
-        return
-    }
-
-    var includedNamespaces, excludedNamespaces sets.String
-    if strategy.Params.Namespaces != nil {
-        includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
-        excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
-    }
-
-    podFilter, err := podutil.NewOptions().
-        WithFilter(evictorFilter.Filter).
-        WithNamespaces(includedNamespaces).
-        WithoutNamespaces(excludedNamespaces).
-        WithLabelSelector(strategy.Params.LabelSelector).
-        BuildFilterFunc()
-    if err != nil {
-        klog.ErrorS(err, "Error initializing pod filter function")
-        return
-    }
-
-loop:
-    for _, node := range nodes {
-        klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
-        pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
-        if err != nil {
-            klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
-            continue
-        }
-
-        for i, pod := range pods {
-            restarts, initRestarts := calcContainerRestarts(pod)
-            if strategy.Params.PodsHavingTooManyRestarts.IncludingInitContainers {
-                if restarts+initRestarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
-                    continue
-                }
-            } else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
-                continue
-            }
-            podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{})
-            if podEvictor.NodeLimitExceeded(node) {
-                continue loop
-            }
-        }
-    }
-}
-
-// calcContainerRestarts get container restarts and init container restarts.
-func calcContainerRestarts(pod *v1.Pod) (int32, int32) {
-    var restarts, initRestarts int32
-
-    for _, cs := range pod.Status.ContainerStatuses {
-        restarts += cs.RestartCount
-    }
-
-    for _, cs := range pod.Status.InitContainerStatuses {
-        initRestarts += cs.RestartCount
-    }
-
-    return restarts, initRestarts
-}
@@ -26,6 +26,7 @@ import (
     "sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation"
     "sigs.k8s.io/descheduler/pkg/framework"
     "sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
+    "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
     "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
     "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
 )
@@ -103,4 +104,29 @@ var pluginsMap = map[string]func(ctx context.Context, nodes []*v1.Node, params *
             klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodsviolatingnodeaffinity.PluginName)
         }
     },
+    "RemovePodsHavingTooManyRestarts": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
+        tooManyRestartsParams := params.PodsHavingTooManyRestarts
+        if tooManyRestartsParams == nil {
+            tooManyRestartsParams = &api.PodsHavingTooManyRestarts{}
+        }
+        args := &componentconfig.RemovePodsHavingTooManyRestartsArgs{
+            Namespaces: params.Namespaces,
+            LabelSelector: params.LabelSelector,
+            PodRestartThreshold: tooManyRestartsParams.PodRestartThreshold,
+            IncludingInitContainers: tooManyRestartsParams.IncludingInitContainers,
+        }
+        if err := validation.ValidateRemovePodsHavingTooManyRestartsArgs(args); err != nil {
+            klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodshavingtoomanyrestarts.PluginName)
+            return
+        }
+        pg, err := removepodshavingtoomanyrestarts.New(args, handle)
+        if err != nil {
+            klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodshavingtoomanyrestarts.PluginName)
+            return
+        }
+        status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
+        if status != nil && status.Err != nil {
+            klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodshavingtoomanyrestarts.PluginName)
+        }
+    },
 }
@@ -0,0 +1,138 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package removepodshavingtoomanyrestarts
+
+import (
+    "context"
+    "fmt"
+
+    v1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/sets"
+    "k8s.io/klog/v2"
+
+    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
+    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
+    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+    "sigs.k8s.io/descheduler/pkg/framework"
+)
+
+const PluginName = "RemovePodsHavingTooManyRestarts"
+
+// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
+// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
+// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
+type RemovePodsHavingTooManyRestarts struct {
+    handle framework.Handle
+    args *componentconfig.RemovePodsHavingTooManyRestartsArgs
+    podFilter podutil.FilterFunc
+}
+
+var (
+    _ framework.Plugin = &RemovePodsHavingTooManyRestarts{}
+    _ framework.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}
+)
+
+// New builds plugin from its arguments while passing a handle
+func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+    tooManyRestartsArgs, ok := args.(*componentconfig.RemovePodsHavingTooManyRestartsArgs)
+    if !ok {
+        return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
+    }
+
+    var includedNamespaces, excludedNamespaces sets.String
+    if tooManyRestartsArgs.Namespaces != nil {
+        includedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Include...)
+        excludedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Exclude...)
+    }
+
+    podFilter, err := podutil.NewOptions().
+        WithFilter(handle.Evictor().Filter).
+        WithNamespaces(includedNamespaces).
+        WithoutNamespaces(excludedNamespaces).
+        WithLabelSelector(tooManyRestartsArgs.LabelSelector).
+        BuildFilterFunc()
+    if err != nil {
+        return nil, fmt.Errorf("error initializing pod filter function: %v", err)
+    }
+
+    podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
+        if err := validateCanEvict(pod, tooManyRestartsArgs); err != nil {
+            klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
+            return false
+        }
+        return true
+    })
+
+    return &RemovePodsHavingTooManyRestarts{
+        handle: handle,
+        args: tooManyRestartsArgs,
+        podFilter: podFilter,
+    }, nil
+}
+
+// Name retrieves the plugin name
+func (d *RemovePodsHavingTooManyRestarts) Name() string {
+    return PluginName
+}
+
+// Deschedule extension point implementation for the plugin
+func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
+    for _, node := range nodes {
+        klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
+        pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
+        if err != nil {
+            // no pods evicted as error encountered retrieving evictable Pods
+            return &framework.Status{
+                Err: fmt.Errorf("error listing pods on a node: %v", err),
+            }
+        }
+        totalPods := len(pods)
+        for i := 0; i < totalPods; i++ {
+            d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{})
+            if d.handle.Evictor().NodeLimitExceeded(node) {
+                break
+            }
+        }
+    }
+    return nil
+}
+
+// validateCanEvict looks at tooManyRestartsArgs to see if pod can be evicted given the args.
+func validateCanEvict(pod *v1.Pod, tooManyRestartsArgs *componentconfig.RemovePodsHavingTooManyRestartsArgs) error {
+    var err error
+
+    restarts := calcContainerRestartsFromStatuses(pod.Status.ContainerStatuses)
+    if tooManyRestartsArgs.IncludingInitContainers {
+        restarts += calcContainerRestartsFromStatuses(pod.Status.InitContainerStatuses)
+    }
+
+    if restarts < tooManyRestartsArgs.PodRestartThreshold {
+        err = fmt.Errorf("number of container restarts (%v) not exceeding the threshold", restarts)
+    }
+
+    return err
+}
+
+// calcContainerRestartsFromStatuses get container restarts from container statuses.
+func calcContainerRestartsFromStatuses(statuses []v1.ContainerStatus) int32 {
+    var restarts int32
+    for _, cs := range statuses {
+        restarts += cs.RestartCount
+    }
+    return restarts
+}
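For illustration only, not part of the commit: a self-contained sketch of the eviction rule enforced by validateCanEvict and calcContainerRestartsFromStatuses above, where init-container restarts count toward the threshold only when IncludingInitContainers is set. The local sumRestarts helper and the example restart counts are assumptions made so the snippet can run on its own.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// sumRestarts mirrors the restart-summing loop of calcContainerRestartsFromStatuses above.
func sumRestarts(statuses []v1.ContainerStatus) int32 {
    var restarts int32
    for _, cs := range statuses {
        restarts += cs.RestartCount
    }
    return restarts
}

func main() {
    pod := v1.Pod{
        Status: v1.PodStatus{
            ContainerStatuses:     []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 4}},
            InitContainerStatuses: []v1.ContainerStatus{{RestartCount: 2}},
        },
    }

    const threshold int32 = 8

    restarts := sumRestarts(pod.Status.ContainerStatuses) // 3 + 4 = 7
    fmt.Println("without init containers, evictable:", restarts >= threshold) // false: 7 < 8

    restarts += sumRestarts(pod.Status.InitContainerStatuses) // 7 + 2 = 9
    fmt.Println("with init containers, evictable:", restarts >= threshold) // true: 9 >= 8
}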
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package strategies
+package removepodshavingtoomanyrestarts

 import (
     "context"
@@ -29,9 +29,11 @@ import (
     "k8s.io/client-go/kubernetes/fake"
     "k8s.io/client-go/tools/events"

-    "sigs.k8s.io/descheduler/pkg/api"
+    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+    "sigs.k8s.io/descheduler/pkg/framework"
+    frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
     "sigs.k8s.io/descheduler/test"
 )

@@ -103,16 +105,13 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {

     pods := append(append(initPods(node1), test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, nil)), test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, nil))

-    createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32, nodeFit bool) api.DeschedulerStrategy {
-        return api.DeschedulerStrategy{
-            Enabled: enabled,
-            Params: &api.StrategyParameters{
-                PodsHavingTooManyRestarts: &api.PodsHavingTooManyRestarts{
-                    PodRestartThreshold: restartThresholds,
+    createRemovePodsHavingTooManyRestartsAgrs := func(
+        podRestartThresholds int32,
+        includingInitContainers bool,
+    ) componentconfig.RemovePodsHavingTooManyRestartsArgs {
+        return componentconfig.RemovePodsHavingTooManyRestartsArgs{
+            PodRestartThreshold: podRestartThresholds,
             IncludingInitContainers: includingInitContainers,
-                },
-                NodeFit: nodeFit,
-            },
         }
     }

@@ -121,100 +120,106 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
     tests := []struct {
         description string
         nodes []*v1.Node
-        strategy api.DeschedulerStrategy
+        args componentconfig.RemovePodsHavingTooManyRestartsArgs
         expectedEvictedPodCount uint
         maxPodsToEvictPerNode *uint
         maxNoOfPodsToEvictPerNamespace *uint
+        nodeFit bool
     }{
         {
             description: "All pods have total restarts under threshold, no pod evictions",
-            strategy: createStrategy(true, true, 10000, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 0,
         },
         {
             description: "Some pods have total restarts bigger than threshold",
-            strategy: createStrategy(true, true, 1, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 6,
         },
         {
             description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
-            strategy: createStrategy(true, true, 1*25, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 6,
         },
         {
             description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
-            strategy: createStrategy(true, false, 1*25, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
            nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 5,
         },
         {
             description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
-            strategy: createStrategy(true, true, 1*20, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 6,
         },
         {
             description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
-            strategy: createStrategy(true, false, 1*20, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 6,
         },
         {
             description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
-            strategy: createStrategy(true, true, 5*25+1, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 1,
         },
         {
             description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
-            strategy: createStrategy(true, false, 5*20+1, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 1,
+            nodeFit: false,
         },
         {
             description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
-            strategy: createStrategy(true, true, 1, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 3,
             maxPodsToEvictPerNode: &uint3,
         },
         {
             description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
-            strategy: createStrategy(true, true, 1, false),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
             nodes: []*v1.Node{node1},
             expectedEvictedPodCount: 3,
             maxNoOfPodsToEvictPerNamespace: &uint3,
         },
         {
             description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions",
-            strategy: createStrategy(true, true, 1, true),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
             nodes: []*v1.Node{node1, node2},
             expectedEvictedPodCount: 0,
             maxPodsToEvictPerNode: &uint3,
+            nodeFit: true,
         },
         {
             description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
-            strategy: createStrategy(true, true, 1, true),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
             nodes: []*v1.Node{node1, node3},
             expectedEvictedPodCount: 0,
             maxPodsToEvictPerNode: &uint3,
+            nodeFit: true,
         },
         {
             description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
-            strategy: createStrategy(true, true, 1, true),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
             nodes: []*v1.Node{node1, node4},
             expectedEvictedPodCount: 0,
             maxPodsToEvictPerNode: &uint3,
+            nodeFit: true,
         },
         {
             description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
-            strategy: createStrategy(true, true, 1, true),
+            args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
             nodes: []*v1.Node{node1, node5},
             expectedEvictedPodCount: 3,
             maxPodsToEvictPerNode: &uint3,
+            nodeFit: true,
         },
     }

@@ -264,10 +269,23 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
                 false,
                 false,
                 false,
-                evictions.WithNodeFit(tc.strategy.Params.NodeFit),
+                evictions.WithNodeFit(tc.nodeFit),
             )

-            RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
+            plugin, err := New(
+                &tc.args,
+                &frameworkfake.HandleImpl{
+                    ClientsetImpl: fakeClient,
+                    PodEvictorImpl: podEvictor,
+                    EvictorFilterImpl: evictorFilter,
+                    SharedInformerFactoryImpl: sharedInformerFactory,
+                    GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+                })
+            if err != nil {
+                t.Fatalf("Unable to initialize the plugin: %v", err)
+            }
+
+            plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
             actualEvictedPodCount := podEvictor.TotalEvicted()
             if actualEvictedPodCount != tc.expectedEvictedPodCount {
                 t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -30,16 +30,19 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/tools/events"
     "k8s.io/utils/pointer"
-    deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
+
+    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
-    "sigs.k8s.io/descheduler/pkg/descheduler/strategies"
+    "sigs.k8s.io/descheduler/pkg/framework"
+    frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
+    "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
 )

 func TestTooManyRestarts(t *testing.T) {
     ctx := context.Background()

-    clientSet, _, _, getPodsAssignedToNode, stopCh := initializeClient(t)
+    clientSet, sharedInformerFactory, _, getPodsAssignedToNode, stopCh := initializeClient(t)
     defer close(stopCh)

     nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -107,22 +110,29 @@ func TestTooManyRestarts(t *testing.T) {
         t.Fatal("Pod restart count not as expected")
     }

+    createRemovePodsHavingTooManyRestartsAgrs := func(
+        podRestartThresholds int32,
+        includingInitContainers bool,
+    ) componentconfig.RemovePodsHavingTooManyRestartsArgs {
+        return componentconfig.RemovePodsHavingTooManyRestartsArgs{
+            PodRestartThreshold: podRestartThresholds,
+            IncludingInitContainers: includingInitContainers,
+        }
+    }
+
     tests := []struct {
         name string
-        podRestartThreshold int32
-        includingInitContainers bool
+        args componentconfig.RemovePodsHavingTooManyRestartsArgs
         expectedEvictedPodCount uint
     }{
         {
             name: "test-no-evictions",
-            podRestartThreshold: int32(10000),
-            includingInitContainers: true,
+            args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
             expectedEvictedPodCount: 0,
         },
         {
             name: "test-one-evictions",
-            podRestartThreshold: int32(4),
-            includingInitContainers: true,
+            args: createRemovePodsHavingTooManyRestartsAgrs(4, true),
             expectedEvictedPodCount: 4,
         },
     }
@@ -146,32 +156,32 @@ func TestTooManyRestarts(t *testing.T) {
                 eventRecorder,
             )

+            evictorFilter := evictions.NewEvictorFilter(
+                nodes,
+                getPodsAssignedToNode,
+                true,
+                false,
+                false,
+                false,
+            )
+
+            plugin, err := removepodshavingtoomanyrestarts.New(
+                &tc.args,
+                &frameworkfake.HandleImpl{
+                    ClientsetImpl: clientSet,
+                    PodEvictorImpl: podEvictor,
+                    EvictorFilterImpl: evictorFilter,
+                    SharedInformerFactoryImpl: sharedInformerFactory,
+                    GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
+                })
+            if err != nil {
+                t.Fatalf("Unable to initialize the plugin: %v", err)
+            }
+
             // Run RemovePodsHavingTooManyRestarts strategy
             t.Log("Running RemovePodsHavingTooManyRestarts strategy")
-            strategies.RemovePodsHavingTooManyRestarts(
-                ctx,
-                clientSet,
-                deschedulerapi.DeschedulerStrategy{
-                    Enabled: true,
-                    Params: &deschedulerapi.StrategyParameters{
-                        PodsHavingTooManyRestarts: &deschedulerapi.PodsHavingTooManyRestarts{
-                            PodRestartThreshold: tc.podRestartThreshold,
-                            IncludingInitContainers: tc.includingInitContainers,
-                        },
-                    },
-                },
-                workerNodes,
-                podEvictor,
-                evictions.NewEvictorFilter(
-                    nodes,
-                    getPodsAssignedToNode,
-                    true,
-                    false,
-                    false,
-                    false,
-                ),
-                getPodsAssignedToNode,
-            )
+            plugin.(framework.DeschedulePlugin).Deschedule(ctx, workerNodes)
+            t.Logf("Finished RemoveFailedPods strategy for %s", tc.name)

             waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)
             actualEvictedPodCount := podEvictor.TotalEvicted()