Split pod evictor and evictor filter
@@ -45,6 +45,7 @@ import (
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
+	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func Run(ctx context.Context, rs *options.DeschedulerServer) error {
@@ -87,7 +88,7 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	return runFn()
 }
 
-type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
+type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
 
 func cachedClient(
 	realClient clientset.Interface,
@@ -283,18 +284,25 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 		deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
 		deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
 		nodes,
-		getPodsAssignedToNode,
-		evictLocalStoragePods,
-		evictSystemCriticalPods,
-		ignorePvcPods,
-		evictBarePods,
 		!rs.DisableMetrics,
 	)
 
 	for name, strategy := range deschedulerPolicy.Strategies {
 		if f, ok := strategyFuncs[name]; ok {
 			if strategy.Enabled {
-				f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
+				nodeFit := false
+				if name != "PodLifeTime" {
+					if strategy.Params != nil {
+						nodeFit = strategy.Params.NodeFit
+					}
+				}
+				thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, rs.Client, strategy.Params)
+				if err != nil {
+					klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
+					continue
+				}
+				evictorFilter := evictions.NewEvictorFilter(nodes, getPodsAssignedToNode, evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods, evictions.WithNodeFit(nodeFit), evictions.WithPriorityThreshold(thresholdPriority))
+				f(ctx, rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			}
 		} else {
 			klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
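The three hunks above carry the core of the change: the eviction-policy knobs (local storage, system-critical, PVC, bare pods) leave NewPodEvictor, and RunDeschedulerStrategies now builds a fresh EvictorFilter per enabled strategy. A condensed sketch of that loop, assuming the signatures shown in this diff; the helper name runEnabledStrategies and its parameter list are illustrative, not part of the commit:

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
)

// Same shape as the strategyFunction type in the hunk above, restated so the
// sketch is self-contained (kubernetes.Interface is what the repo imports as
// clientset.Interface).
type strategyFunction func(ctx context.Context, client kubernetes.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)

// runEnabledStrategies condenses the loop above: one shared PodEvictor for
// counting and performing evictions, one freshly built EvictorFilter per
// enabled strategy.
func runEnabledStrategies(
	ctx context.Context,
	client kubernetes.Interface,
	strategies map[api.StrategyName]api.DeschedulerStrategy,
	strategyFuncs map[api.StrategyName]strategyFunction,
	nodes []*v1.Node,
	podEvictor *evictions.PodEvictor,
	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
	evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods bool,
) {
	for name, strategy := range strategies {
		f, ok := strategyFuncs[name]
		if !ok || !strategy.Enabled {
			continue
		}
		// PodLifeTime has no target node for the evicted pod, so NodeFit stays off for it.
		nodeFit := false
		if name != "PodLifeTime" && strategy.Params != nil {
			nodeFit = strategy.Params.NodeFit
		}
		thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
		if err != nil {
			klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
			continue
		}
		evictorFilter := evictions.NewEvictorFilter(
			nodes, getPodsAssignedToNode,
			evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods,
			evictions.WithNodeFit(nodeFit),
			evictions.WithPriorityThreshold(thresholdPriority),
		)
		f(ctx, client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
	}
}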
@@ -51,17 +51,12 @@ type namespacePodEvictCount map[string]uint
 type PodEvictor struct {
 	client                     clientset.Interface
 	nodes                      []*v1.Node
-	nodeIndexer                podutil.GetPodsAssignedToNodeFunc
 	policyGroupVersion         string
 	dryRun                     bool
 	maxPodsToEvictPerNode      *uint
 	maxPodsToEvictPerNamespace *uint
 	nodepodCount               nodePodEvictedCount
 	namespacePodCount          namespacePodEvictCount
-	evictFailedBarePods        bool
-	evictLocalStoragePods      bool
-	evictSystemCriticalPods    bool
-	ignorePvcPods              bool
 	metricsEnabled             bool
 }
 
@@ -72,11 +67,6 @@ func NewPodEvictor(
 	maxPodsToEvictPerNode *uint,
 	maxPodsToEvictPerNamespace *uint,
 	nodes []*v1.Node,
-	nodeIndexer podutil.GetPodsAssignedToNodeFunc,
-	evictLocalStoragePods bool,
-	evictSystemCriticalPods bool,
-	ignorePvcPods bool,
-	evictFailedBarePods bool,
 	metricsEnabled bool,
 ) *PodEvictor {
 	var nodePodCount = make(nodePodEvictedCount)
@@ -89,17 +79,12 @@ func NewPodEvictor(
 	return &PodEvictor{
 		client:                     client,
 		nodes:                      nodes,
-		nodeIndexer:                nodeIndexer,
 		policyGroupVersion:         policyGroupVersion,
 		dryRun:                     dryRun,
 		maxPodsToEvictPerNode:      maxPodsToEvictPerNode,
 		maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
 		nodepodCount:               nodePodCount,
 		namespacePodCount:          namespacePodCount,
-		evictLocalStoragePods:      evictLocalStoragePods,
-		evictSystemCriticalPods:    evictSystemCriticalPods,
-		evictFailedBarePods:        evictFailedBarePods,
-		ignorePvcPods:              ignorePvcPods,
 		metricsEnabled:             metricsEnabled,
 	}
 }
@@ -230,21 +215,26 @@ func WithLabelSelector(labelSelector labels.Selector) func(opts *Options) {
 
 type constraint func(pod *v1.Pod) error
 
-type evictable struct {
+type EvictorFilter struct {
 	constraints []constraint
 }
 
-// Evictable provides an implementation of IsEvictable(IsEvictable(pod *v1.Pod) bool).
-// The method accepts a list of options which allow to extend constraints
-// which decides when a pod is considered evictable.
-func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
+func NewEvictorFilter(
+	nodes []*v1.Node,
+	nodeIndexer podutil.GetPodsAssignedToNodeFunc,
+	evictLocalStoragePods bool,
+	evictSystemCriticalPods bool,
+	ignorePvcPods bool,
+	evictFailedBarePods bool,
+	opts ...func(opts *Options),
+) *EvictorFilter {
 	options := &Options{}
 	for _, opt := range opts {
 		opt(options)
 	}
 
-	ev := &evictable{}
-	if pe.evictFailedBarePods {
+	ev := &EvictorFilter{}
+	if evictFailedBarePods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			ownerRefList := podutil.OwnerRef(pod)
 			// Enable evictFailedBarePods to evict bare pods in failed phase
@@ -263,7 +253,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
 			return nil
 		})
 	}
-	if !pe.evictSystemCriticalPods {
+	if !evictSystemCriticalPods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			// Moved from IsEvictable function to allow for disabling
 			if utils.IsCriticalPriorityPod(pod) {
@@ -281,7 +271,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
 			})
 		}
 	}
-	if !pe.evictLocalStoragePods {
+	if !evictLocalStoragePods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			if utils.IsPodWithLocalStorage(pod) {
 				return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
@@ -289,7 +279,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
 			return nil
 		})
 	}
-	if pe.ignorePvcPods {
+	if ignorePvcPods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			if utils.IsPodWithPVC(pod) {
 				return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
@@ -299,7 +289,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
 	}
 	if options.nodeFit {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
-			if !nodeutil.PodFitsAnyOtherNode(pe.nodeIndexer, pod, pe.nodes) {
+			if !nodeutil.PodFitsAnyOtherNode(nodeIndexer, pod, nodes) {
 				return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
 			}
 			return nil
@@ -318,7 +308,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
 }
 
 // IsEvictable decides when a pod is evictable
-func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
+func (ef *EvictorFilter) Filter(pod *v1.Pod) bool {
 	checkErrs := []error{}
 
 	ownerRefList := podutil.OwnerRef(pod)
@@ -338,7 +328,7 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
 		checkErrs = append(checkErrs, fmt.Errorf("pod is terminating"))
 	}
 
-	for _, c := range ev.constraints {
+	for _, c := range ef.constraints {
 		if err := c(pod); err != nil {
 			checkErrs = append(checkErrs, err)
 		}
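NewEvictorFilter keeps the constraint-chain design of the old Evictable: each disabled knob appends a func(pod) error, and Filter accepts a pod only when every constraint passes. A toy, self-contained version of the pattern (stand-in types, not the real descheduler API):

package main

import "fmt"

type Pod struct {
	Name     string
	Critical bool
}

type constraint func(Pod) error

// EvictorFilter mirrors the struct above: the policy is frozen into a
// constraint list at construction time.
type EvictorFilter struct {
	constraints []constraint
}

func NewEvictorFilter(evictSystemCriticalPods bool) *EvictorFilter {
	ef := &EvictorFilter{}
	if !evictSystemCriticalPods {
		ef.constraints = append(ef.constraints, func(p Pod) error {
			if p.Critical {
				return fmt.Errorf("pod is critical and evictSystemCriticalPods is disabled")
			}
			return nil
		})
	}
	return ef
}

// Filter is the only method strategies see: all-or-nothing over the chain.
func (ef *EvictorFilter) Filter(p Pod) bool {
	for _, c := range ef.constraints {
		if err := c(p); err != nil {
			return false
		}
	}
	return true
}

func main() {
	ef := NewEvictorFilter(false)
	fmt.Println(ef.Filter(Pod{Name: "web-1"}))                      // true
	fmt.Println(ef.Filter(Pod{Name: "kube-proxy", Critical: true})) // false
}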
@@ -21,7 +21,6 @@ import (
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
-	policyv1 "k8s.io/api/policy/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/informers"
@@ -639,19 +638,6 @@ func TestIsEvictable(t *testing.T) {
 			sharedInformerFactory.Start(ctx.Done())
 			sharedInformerFactory.WaitForCacheSync(ctx.Done())
 
-			podEvictor := &PodEvictor{
-				client:                     fakeClient,
-				nodes:                      nodes,
-				nodeIndexer:                getPodsAssignedToNode,
-				policyGroupVersion:         policyv1.SchemeGroupVersion.String(),
-				dryRun:                     false,
-				maxPodsToEvictPerNode:      nil,
-				maxPodsToEvictPerNamespace: nil,
-				evictLocalStoragePods:      test.evictLocalStoragePods,
-				evictSystemCriticalPods:    test.evictSystemCriticalPods,
-				evictFailedBarePods:        test.evictFailedBarePods,
-			}
-
 			var opts []func(opts *Options)
 			if test.priorityThreshold != nil {
 				opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
@@ -659,9 +645,18 @@ func TestIsEvictable(t *testing.T) {
 			if test.nodeFit {
 				opts = append(opts, WithNodeFit(true))
 			}
-			evictable := podEvictor.Evictable(opts...)
 
-			result := evictable.IsEvictable(test.pods[0])
+			evictorFilter := NewEvictorFilter(
+				nodes,
+				getPodsAssignedToNode,
+				test.evictLocalStoragePods,
+				test.evictSystemCriticalPods,
+				false,
+				test.evictFailedBarePods,
+				opts...,
+			)
+
+			result := evictorFilter.Filter(test.pods[0])
 			if result != test.result {
 				t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
 			}
@@ -66,17 +66,13 @@ func RemoveDuplicatePods(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	evictorFilter *evictions.EvictorFilter,
 	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
 		return
 	}
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
 
 	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params != nil && strategy.Params.Namespaces != nil {
@@ -84,20 +80,13 @@ func RemoveDuplicatePods(
 		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
 	ownerKeyOccurence := make(map[podOwner]int32)
 	nodeCount := 0
 	nodeMap := make(map[string]*v1.Node)
 
 	podFilter, err := podutil.NewOptions().
-		WithFilter(evictable.IsEvictable).
+		WithFilter(evictorFilter.Filter).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		BuildFilterFunc()
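On the strategy side the change is mechanical: the local evictable := podEvictor.Evictable(...) setup disappears, and the injected filter is threaded into the pod-listing options as a plain predicate. A stand-in sketch of that AND-composition, mirroring what podutil.WrapFilterFuncs and WithFilter do (illustrative code, not the real podutil package):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type FilterFunc func(*v1.Pod) bool

// wrapFilters ANDs predicates together, in the spirit of podutil.WrapFilterFuncs.
func wrapFilters(filters ...FilterFunc) FilterFunc {
	return func(pod *v1.Pod) bool {
		for _, f := range filters {
			if !f(pod) {
				return false
			}
		}
		return true
	}
}

func main() {
	inNamespace := func(ns string) FilterFunc {
		return func(pod *v1.Pod) bool { return pod.Namespace == ns }
	}
	// Stand-in for evictorFilter.Filter: any func(*v1.Pod) bool slots in.
	evictable := func(pod *v1.Pod) bool { return pod.Annotations["critical"] == "" }

	podFilter := wrapFilters(evictable, inNamespace("default"))

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}}
	fmt.Println(podFilter(pod)) // true
}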
@@ -319,16 +319,26 @@ func TestFindDuplicatePods(t *testing.T) {
 			false,
 			nil,
 			nil,
+			testCase.nodes,
+			false,
+		)
+
+		nodeFit := false
+		if testCase.strategy.Params != nil {
+			nodeFit = testCase.strategy.Params.NodeFit
+		}
+
+		evictorFilter := evictions.NewEvictorFilter(
 			testCase.nodes,
 			getPodsAssignedToNode,
 			false,
 			false,
 			false,
 			false,
-			false,
+			evictions.WithNodeFit(nodeFit),
 		)
 
-		RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
+		RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 		podsEvicted := podEvictor.TotalEvicted()
 		if podsEvicted != testCase.expectedEvictedPodCount {
 			t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -748,15 +758,19 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 			nil,
 			nil,
 			testCase.nodes,
-			getPodsAssignedToNode,
 			false,
+		)
+
+		evictorFilter := evictions.NewEvictorFilter(
+			testCase.nodes,
+			getPodsAssignedToNode,
 			false,
 			false,
 			false,
 			false,
 		)
 
-		RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
+		RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 		podsEvicted := podEvictor.TotalEvicted()
 		if podsEvicted != testCase.expectedEvictedPodCount {
 			t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -33,6 +33,7 @@ func RemoveFailedPods(
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	evictorFilter *evictions.EvictorFilter,
 	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
@@ -41,19 +42,13 @@ func RemoveFailedPods(
 		return
 	}
 
-	evictable := podEvictor.Evictable(
-		evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
-		evictions.WithNodeFit(strategyParams.NodeFit),
-		evictions.WithLabelSelector(strategyParams.LabelSelector),
-	)
-
 	var labelSelector *metav1.LabelSelector
 	if strategy.Params != nil {
 		labelSelector = strategy.Params.LabelSelector
 	}
 
 	podFilter, err := podutil.NewOptions().
-		WithFilter(evictable.IsEvictable).
+		WithFilter(evictorFilter.Filter).
 		WithNamespaces(strategyParams.IncludedNamespaces).
 		WithoutNamespaces(strategyParams.ExcludedNamespaces).
 		WithLabelSelector(labelSelector).
@@ -46,7 +46,7 @@ func TestRemoveFailedPods(t *testing.T) {
 	}{
 		{
 			description: "default empty strategy, 0 failures, 0 evictions",
-			strategy:    api.DeschedulerStrategy{},
+			strategy:    api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: false}},
 			nodes:       []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
 			expectedEvictedPodCount: 0,
 			pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
@@ -274,16 +274,21 @@ func TestRemoveFailedPods(t *testing.T) {
 			false,
 			nil,
 			nil,
+			tc.nodes,
+			false,
+		)
+
+		evictorFilter := evictions.NewEvictorFilter(
 			tc.nodes,
 			getPodsAssignedToNode,
 			false,
 			false,
 			false,
 			false,
-			false,
+			evictions.WithNodeFit(tc.strategy.Params.NodeFit),
 		)
 
-		RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+		RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -29,7 +29,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
@@ -48,16 +47,11 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
 }
 
 // RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
-func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
 		return
 	}
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
 
 	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
@@ -65,13 +59,6 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	podFilter, err := podutil.NewOptions().
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
@@ -94,7 +81,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
 			node.Name,
 			getPodsAssignedToNode,
 			podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
-				return evictable.IsEvictable(pod) &&
+				return evictorFilter.Filter(pod) &&
 					!nodeutil.PodFitsCurrentNode(getPodsAssignedToNode, pod, node) &&
 					nodeutil.PodFitsAnyNode(getPodsAssignedToNode, pod, nodes)
 			}),
@@ -221,16 +221,26 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			false,
 			tc.maxPodsToEvictPerNode,
 			tc.maxNoOfPodsToEvictPerNamespace,
+			tc.nodes,
+			false,
+		)
+
+		nodeFit := false
+		if tc.strategy.Params != nil {
+			nodeFit = tc.strategy.Params.NodeFit
+		}
+
+		evictorFilter := evictions.NewEvictorFilter(
 			tc.nodes,
 			getPodsAssignedToNode,
 			false,
 			false,
 			false,
 			false,
-			false,
+			evictions.WithNodeFit(nodeFit),
 		)
 
-		RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+		RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -49,7 +49,7 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
 }
 
 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
-func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
 		return
@@ -68,21 +68,8 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
 		labelSelector = strategy.Params.LabelSelector
 	}
 
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	podFilter, err := podutil.NewOptions().
-		WithFilter(evictable.IsEvictable).
+		WithFilter(evictorFilter.Filter).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(labelSelector).
@@ -338,11 +338,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			tc.maxPodsToEvictPerNode,
 			tc.maxNoOfPodsToEvictPerNamespace,
 			tc.nodes,
-			getPodsAssignedToNode,
-			tc.evictLocalStoragePods,
-			tc.evictSystemCriticalPods,
-			false,
-			false,
 			false,
 		)
 
@@ -354,7 +349,17 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			},
 		}
 
-		RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+		evictorFilter := evictions.NewEvictorFilter(
+			tc.nodes,
+			getPodsAssignedToNode,
+			tc.evictLocalStoragePods,
+			tc.evictSystemCriticalPods,
+			false,
+			false,
+			evictions.WithNodeFit(tc.nodeFit),
+		)
+
+		RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
@@ -29,28 +29,16 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 // HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy.
 // Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
-func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
 		return
 	}
 
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
 	thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
 	targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
 	if err := validateHighUtilizationStrategyConfig(thresholds, targetThresholds); err != nil {
@@ -108,8 +96,6 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
 		return
 	}
 
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	// stop if the total available usage has dropped to zero - no more pods can be scheduled
 	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
 		for name := range totalAvailableUsage {
@@ -129,7 +115,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
 		sourceNodes,
 		highNodes,
 		podEvictor,
-		evictable.IsEvictable,
+		evictorFilter.Filter,
 		resourceNames,
 		"HighNodeUtilization",
 		continueEvictionCond)
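Both utilization strategies hand the filter downward as a bare function value rather than an interface: evictorFilter.Filter is a Go method value, bound to its receiver and assignable to any func with a matching signature. A minimal standalone illustration of that idiom:

package main

import "fmt"

type EvictorFilter struct{ allow map[string]bool }

func (ef *EvictorFilter) Filter(name string) bool { return ef.allow[name] }

// evictCandidates takes any predicate, not a concrete filter type.
func evictCandidates(pods []string, isEvictable func(string) bool) []string {
	var out []string
	for _, p := range pods {
		if isEvictable(p) {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	ef := &EvictorFilter{allow: map[string]bool{"web-1": true}}
	// Method value: binds ef as the receiver and matches func(string) bool.
	fmt.Println(evictCandidates([]string{"web-1", "db-0"}, ef.Filter)) // [web-1]
}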
@@ -507,11 +507,6 @@ func TestHighNodeUtilization(t *testing.T) {
 			nil,
 			nil,
 			testCase.nodes,
-			getPodsAssignedToNode,
-			false,
-			false,
-			false,
-			false,
 			false,
 		)
 
@@ -524,7 +519,18 @@ func TestHighNodeUtilization(t *testing.T) {
 				NodeFit: true,
 			},
 		}
-		HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
+
+		evictorFilter := evictions.NewEvictorFilter(
+			testCase.nodes,
+			getPodsAssignedToNode,
+			false,
+			false,
+			false,
+			false,
+			evictions.WithNodeFit(strategy.Params.NodeFit),
+		)
+
+		HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 
 		podsEvicted := podEvictor.TotalEvicted()
 		if testCase.expectedPodsEvicted != podsEvicted {
@@ -713,15 +719,19 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 		&item.evictionsExpected,
 		nil,
 		item.nodes,
-		getPodsAssignedToNode,
 		false,
+	)
+
+	evictorFilter := evictions.NewEvictorFilter(
+		item.nodes,
+		getPodsAssignedToNode,
 		false,
 		false,
 		false,
 		false,
 	)
 
-	HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)
+	HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 
 	if item.evictionsExpected != podEvictor.TotalEvicted() {
 		t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
@@ -29,27 +29,17 @@ import (
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 // LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
 // to calculate nodes' utilization and not the actual resource usage.
-func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
 	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
 		return
 	}
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
 
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
 	useDeviationThresholds := strategy.Params.NodeResourceUtilizationThresholds.UseDeviationThresholds
 	thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
 	targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
@@ -152,8 +142,6 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 		return
 	}
 
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
 	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
 		if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
@@ -176,7 +164,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
 		sourceNodes,
 		lowNodes,
 		podEvictor,
-		evictable.IsEvictable,
+		evictorFilter.Filter,
 		resourceNames,
 		"LowNodeUtilization",
 		continueEvictionCond)
@@ -772,11 +772,6 @@ func TestLowNodeUtilization(t *testing.T) {
 			nil,
 			nil,
 			test.nodes,
-			getPodsAssignedToNode,
-			false,
-			false,
-			false,
-			false,
 			false,
 		)
 
@@ -791,7 +786,18 @@ func TestLowNodeUtilization(t *testing.T) {
 				NodeFit: true,
 			},
 		}
-		LowNodeUtilization(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)
+
+		evictorFilter := evictions.NewEvictorFilter(
+			test.nodes,
+			getPodsAssignedToNode,
+			false,
+			false,
+			false,
+			false,
+			evictions.WithNodeFit(strategy.Params.NodeFit),
+		)
+
+		LowNodeUtilization(ctx, fakeClient, strategy, test.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 
 		podsEvicted := podEvictor.TotalEvicted()
 		if test.expectedPodsEvicted != podsEvicted {
@@ -1087,15 +1093,19 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 		&item.evictionsExpected,
 		nil,
 		item.nodes,
-		getPodsAssignedToNode,
 		false,
+	)
+
+	evictorFilter := evictions.NewEvictorFilter(
+		item.nodes,
+		getPodsAssignedToNode,
 		false,
 		false,
 		false,
 		false,
 	)
 
-	LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)
+	LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 
 	if item.evictionsExpected != podEvictor.TotalEvicted() {
 		t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
@@ -49,7 +49,7 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
 }
 
 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
-func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
 		return
@@ -65,19 +65,6 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 		labelSelector = strategy.Params.LabelSelector
 	}
 
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	podFilter, err := podutil.NewOptions().
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
@@ -98,7 +85,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
 		podutil.SortPodsBasedOnPriorityLowToHigh(pods)
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
-			if checkPodsWithAntiAffinityExist(pods[i], pods) && evictable.IsEvictable(pods[i]) {
+			if checkPodsWithAntiAffinityExist(pods[i], pods) && evictorFilter.Filter(pods[i]) {
 				success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
 				if err != nil {
 					klog.ErrorS(err, "Error evicting pod")
@@ -218,11 +218,6 @@ func TestPodAntiAffinity(t *testing.T) {
 			test.maxPodsToEvictPerNode,
 			test.maxNoOfPodsToEvictPerNamespace,
 			test.nodes,
-			getPodsAssignedToNode,
-			false,
-			false,
-			false,
-			false,
 			false,
 		)
 		strategy := api.DeschedulerStrategy{
@@ -231,7 +226,17 @@ func TestPodAntiAffinity(t *testing.T) {
 			},
 		}
 
-		RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)
+		evictorFilter := evictions.NewEvictorFilter(
+			test.nodes,
+			getPodsAssignedToNode,
+			false,
+			false,
+			false,
+			false,
+			evictions.WithNodeFit(test.nodeFit),
+		)
+
+		RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 		podsEvicted := podEvictor.TotalEvicted()
 		if podsEvicted != test.expectedEvictedPodCount {
 			t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
@@ -29,7 +29,6 @@ import (
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
 func validatePodLifeTimeParams(params *api.StrategyParameters) error {
@@ -57,32 +56,24 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
 }
 
 // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
-func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validatePodLifeTimeParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid PodLifeTime parameters")
 		return
 	}
 
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
 	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
 		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
 		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
-
-	filter := evictable.IsEvictable
+	filter := evictorFilter.Filter
 	if strategy.Params.PodLifeTime.PodStatusPhases != nil {
 		filter = func(pod *v1.Pod) bool {
 			for _, phase := range strategy.Params.PodLifeTime.PodStatusPhases {
 				if string(pod.Status.Phase) == phase {
-					return evictable.IsEvictable(pod)
+					return evictorFilter.Filter(pod)
 				}
 			}
 			return false
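PodLifeTime is the one strategy that post-processes the injected filter: when PodStatusPhases is set, it reassigns the filter variable to a wrapper that checks the pod phase before deferring to the original. A stand-in sketch of that wrapping, with a toy pod type instead of *v1.Pod:

package main

import "fmt"

type pod struct {
	name  string
	phase string
}

func main() {
	baseFilter := func(p pod) bool { return p.name != "protected" }
	phases := []string{"Running", "Pending"}

	filter := baseFilter
	if phases != nil {
		filter = func(p pod) bool {
			for _, phase := range phases {
				if p.phase == phase {
					return baseFilter(p) // phase matched: defer to the base filter
				}
			}
			return false // phase not listed: never a candidate
		}
	}

	fmt.Println(filter(pod{"web", "Running"}))   // true
	fmt.Println(filter(pod{"web", "Succeeded"})) // false
}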
@@ -339,16 +339,20 @@ func TestPodLifeTime(t *testing.T) {
 			false,
 			tc.maxPodsToEvictPerNode,
 			tc.maxPodsToEvictPerNamespace,
+			tc.nodes,
+			false,
+		)
+
+		evictorFilter := evictions.NewEvictorFilter(
 			tc.nodes,
 			getPodsAssignedToNode,
 			false,
 			false,
 			tc.ignorePvcPods,
 			false,
-			false,
 		)
 
-		PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+		PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 		podsEvicted := podEvictor.TotalEvicted()
 		if podsEvicted != tc.expectedEvictedPodCount {
 			t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
@@ -28,7 +28,6 @@ import (
|
|||||||
"sigs.k8s.io/descheduler/pkg/api"
|
"sigs.k8s.io/descheduler/pkg/api"
|
||||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||||
"sigs.k8s.io/descheduler/pkg/utils"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
|
func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
|
||||||
@@ -50,33 +49,20 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
 // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
+func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
 	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
 		klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
 		return
 	}
 
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
-		return
-	}
-
 	var includedNamespaces, excludedNamespaces sets.String
 	if strategy.Params.Namespaces != nil {
 		includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
 		excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
 	}
 
-	nodeFit := false
-	if strategy.Params != nil {
-		nodeFit = strategy.Params.NodeFit
-	}
-
-	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
-
 	podFilter, err := podutil.NewOptions().
-		WithFilter(evictable.IsEvictable).
+		WithFilter(evictorFilter.Filter).
 		WithNamespaces(includedNamespaces).
 		WithoutNamespaces(excludedNamespaces).
 		WithLabelSelector(strategy.Params.LabelSelector).
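With this hunk, a strategy no longer resolves its own threshold priority or node-fit setting; it simply composes the injected filter with namespace and label scoping. A condensed sketch of the remaining pattern follows; variable names mirror the diff, and includedNamespaces/excludedNamespaces are assumed to have been built from strategy.Params as above.

// Sketch: the filtering boilerplate a strategy keeps after the refactor.
podFilter, err := podutil.NewOptions().
	WithFilter(evictorFilter.Filter).                 // shared evictability policy
	WithNamespaces(includedNamespaces).               // include list from params
	WithoutNamespaces(excludedNamespaces).            // exclude list from params
	WithLabelSelector(strategy.Params.LabelSelector). // optional label scoping
	BuildFilterFunc()
if err != nil {
	klog.ErrorS(err, "Error initializing pod filter function")
	return
}
_ = podFilter // consumed by the strategy's pod listing, elided here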
@@ -249,16 +249,21 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 				false,
 				tc.maxPodsToEvictPerNode,
 				tc.maxNoOfPodsToEvictPerNamespace,
+				tc.nodes,
+				false,
+			)
+
+			evictorFilter := evictions.NewEvictorFilter(
 				tc.nodes,
 				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
-				false,
+				evictions.WithNodeFit(tc.strategy.Params.NodeFit),
 			)
 
-			RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+			RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			actualEvictedPodCount := podEvictor.TotalEvicted()
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -46,12 +46,14 @@ type topology struct {
 	pods []*v1.Pod
 }
 
+// nolint: gocyclo
 func RemovePodsViolatingTopologySpreadConstraint(
 	ctx context.Context,
 	client clientset.Interface,
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	podEvictor *evictions.PodEvictor,
+	evictorFilter *evictions.EvictorFilter,
 	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
 ) {
 	strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
@@ -60,11 +62,13 @@ func RemovePodsViolatingTopologySpreadConstraint(
 		return
 	}
 
-	evictable := podEvictor.Evictable(
-		evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
-		evictions.WithNodeFit(strategyParams.NodeFit),
-		evictions.WithLabelSelector(strategyParams.LabelSelector),
-	)
+	isEvictable := evictorFilter.Filter
+	if strategyParams.LabelSelector != nil && !strategyParams.LabelSelector.Empty() {
+		isEvictable = podutil.WrapFilterFuncs(isEvictable, func(pod *v1.Pod) bool {
+			return strategyParams.LabelSelector.Matches(labels.Set(pod.Labels))
+		})
+	}
 
 	nodeMap := make(map[string]*v1.Node, len(nodes))
 	for _, node := range nodes {
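The hunk above also shows how a strategy layers extra predicates on top of the shared filter: podutil.WrapFilterFuncs chains filter functions, presumably so that a pod must pass every wrapped predicate. A small sketch under that assumption; the selector variable here is hypothetical.

// Sketch: AND-composing the shared evictability filter with a label
// selector, assuming WrapFilterFuncs requires all wrapped funcs to pass.
isEvictable := evictorFilter.Filter
if selector != nil && !selector.Empty() {
	isEvictable = podutil.WrapFilterFuncs(isEvictable, func(pod *v1.Pod) bool {
		return selector.Matches(labels.Set(pod.Labels))
	})
}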
@@ -168,12 +172,12 @@ func RemovePodsViolatingTopologySpreadConstraint(
 				klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
 				continue
 			}
-			balanceDomains(client, getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodes)
+			balanceDomains(client, getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictorFilter.Filter, nodes)
 		}
 	}
 
 	for pod := range podsForEviction {
-		if !evictable.IsEvictable(pod) {
+		if !isEvictable(pod) {
 			continue
 		}
 		if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil {
@@ -234,6 +238,7 @@ func balanceDomains(
+
 	idealAvg := sumPods / float64(len(constraintTopologies))
 	sortedDomains := sortDomains(constraintTopologies, isEvictable)
 
 	// i is the index for belowOrEqualAvg
 	// j is the index for aboveAvg
 	i := 0
@@ -970,15 +970,26 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				false,
 				nil,
 				nil,
+				tc.nodes,
+				false,
+			)
+
+			nodeFit := false
+			if tc.strategy.Params != nil {
+				nodeFit = tc.strategy.Params.NodeFit
+			}
+
+			evictorFilter := evictions.NewEvictorFilter(
 				tc.nodes,
 				getPodsAssignedToNode,
 				false,
 				false,
 				false,
 				false,
-				false,
+				evictions.WithNodeFit(nodeFit),
 			)
-			RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+
+			RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != tc.expectedEvictedCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
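The test hunk above also demonstrates the filter's variadic options: the six positional arguments fix the baseline policy, and WithNodeFit/WithPriorityThreshold opt into additional checks. A sketch combining both options, with placeholder values that are not repository defaults:

// Sketch: both option constructors appear in this commit; concrete
// values here are placeholders.
evictorFilter := evictions.NewEvictorFilter(
	nodes,
	getPodsAssignedToNode,
	false, // evictLocalStoragePods
	false, // evictSystemCriticalPods
	false, // ignorePvcPods
	false, // evictBarePods
	evictions.WithNodeFit(true),                        // also require a node the pod fits on
	evictions.WithPriorityThreshold(thresholdPriority), // spare pods at or above this priority
)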
@@ -145,11 +145,6 @@ func TestRemoveDuplicates(t *testing.T) {
 				nil,
 				nil,
 				nodes,
-				getPodsAssignedToNode,
-				true,
-				false,
-				false,
-				false,
 				false,
 			)
 
@@ -165,6 +160,14 @@ func TestRemoveDuplicates(t *testing.T) {
 				},
 				workerNodes,
 				podEvictor,
+				evictions.NewEvictorFilter(
+					nodes,
+					getPodsAssignedToNode,
+					true,
+					false,
+					false,
+					false,
+				),
 				getPodsAssignedToNode,
 			)
 
@@ -15,6 +15,7 @@ import (
 	"k8s.io/utils/pointer"
 
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 )
 
@@ -96,6 +97,14 @@ func TestFailedPods(t *testing.T) {
 				},
 				nodes,
 				podEvictor,
+				evictions.NewEvictorFilter(
+					nodes,
+					getPodsAssignedToNode,
+					true,
+					false,
+					false,
+					false,
+				),
 				getPodsAssignedToNode,
 			)
 			t.Logf("Finished RemoveFailedPods strategy for %s", name)
@@ -177,10 +177,7 @@ func runPodLifetimeStrategy(
 	}
 
 	maxPodLifeTimeSeconds := uint(1)
-	strategies.PodLifeTime(
-		ctx,
-		clientset,
-		deschedulerapi.DeschedulerStrategy{
+	strategy := deschedulerapi.DeschedulerStrategy{
 		Enabled: true,
 		Params: &deschedulerapi.StrategyParameters{
 			PodLifeTime: &deschedulerapi.PodLifeTime{MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds},
@@ -189,7 +186,17 @@ func runPodLifetimeStrategy(
 			ThresholdPriorityClassName: priorityClass,
 			LabelSelector: labelSelector,
 		},
-	},
+	}
+
+	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, clientset, strategy.Params)
+	if err != nil {
+		t.Fatalf("Failed to get threshold priority from strategy's params")
+	}
+
+	strategies.PodLifeTime(
+		ctx,
+		clientset,
+		strategy,
 		nodes,
 		evictions.NewPodEvictor(
 			clientset,
@@ -197,13 +204,17 @@ func runPodLifetimeStrategy(
 			false,
 			nil,
 			maxPodsToEvictPerNamespace,
+			nodes,
+			false,
+		),
+		evictions.NewEvictorFilter(
 			nodes,
 			getPodsAssignedToNode,
 			false,
 			evictCritical,
 			false,
 			false,
-			false,
+			evictions.WithPriorityThreshold(thresholdPriority),
 		),
 		getPodsAssignedToNode,
 	)
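Because strategies no longer resolve the priority threshold themselves, the e2e helper above has to do it before constructing the filter. Below is a sketch of that two-step, assuming GetPriorityFromStrategyParams behaves as its call sites in this diff suggest (turning a ThresholdPriorityClassName or explicit value into a number).

// Sketch: resolve the numeric threshold, then bake it into the filter.
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, clientset, strategy.Params)
if err != nil {
	t.Fatalf("Failed to get threshold priority from strategy's params")
}
evictorFilter := evictions.NewEvictorFilter(
	nodes,
	getPodsAssignedToNode,
	false,         // evictLocalStoragePods
	evictCritical, // evictSystemCriticalPods, toggled per test
	false,         // ignorePvcPods
	false,         // evictBarePods
	evictions.WithPriorityThreshold(thresholdPriority),
)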
@@ -326,7 +337,16 @@ func TestLowNodeUtilization(t *testing.T) {
 	// Run LowNodeUtilization strategy
 	podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
 
-	podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
+	evictorFilter := evictions.NewEvictorFilter(
+		nodes,
+		getPodsAssignedToNode,
+		true,
+		false,
+		false,
+		false,
+	)
+
+	podFilter, err := podutil.NewOptions().WithFilter(evictorFilter.Filter).BuildFilterFunc()
 	if err != nil {
 		t.Errorf("Error initializing pod filter function, %v", err)
 	}
@@ -356,12 +376,13 @@ func TestLowNodeUtilization(t *testing.T) {
 		},
 		workerNodes,
 		podEvictor,
+		evictorFilter,
 		getPodsAssignedToNode,
 	)
 
 	waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
 
-	podFilter, err = podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
+	podFilter, err = podutil.NewOptions().WithFilter(evictorFilter.Filter).BuildFilterFunc()
 	if err != nil {
 		t.Errorf("Error initializing pod filter function, %v", err)
 	}
@@ -1410,11 +1431,6 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAs
 		nil,
 		nil,
 		nodes,
-		getPodsAssignedToNode,
-		true,
-		false,
-		false,
-		false,
 		false,
 	)
 }
@@ -138,13 +138,9 @@ func TestTooManyRestarts(t *testing.T) {
 		nil,
 		nil,
 		nodes,
-		getPodsAssignedToNode,
-		true,
-		false,
-		false,
-		false,
 		false,
 	)
 
 	// Run RemovePodsHavingTooManyRestarts strategy
 	t.Log("Running RemovePodsHavingTooManyRestarts strategy")
 	strategies.RemovePodsHavingTooManyRestarts(
@@ -161,6 +157,14 @@ func TestTooManyRestarts(t *testing.T) {
 		},
 		workerNodes,
 		podEvictor,
+		evictions.NewEvictorFilter(
+			nodes,
+			getPodsAssignedToNode,
+			true,
+			false,
+			false,
+			false,
+		),
 		getPodsAssignedToNode,
 	)
 
@@ -11,6 +11,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
 )
 
@@ -92,6 +93,14 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 				nodes,
 				podEvictor,
+				evictions.NewEvictorFilter(
+					nodes,
+					getPodsAssignedToNode,
+					true,
+					false,
+					false,
+					false,
+				),
 				getPodsAssignedToNode,
 			)
 			t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)