Mirror of https://github.com/kubernetes-sigs/descheduler.git
HighNodeUtilization: add NodeFit feature
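
The diff below reads a new NodeFit flag off the strategy parameters and passes it to the pod evictor, so HighNodeUtilization only evicts pods that could plausibly land on another node. As a rough sketch of how a caller opts in, using the parameter names visible in the test diff further down (the surrounding types and the helper function here are assumptions, not part of this commit):

    package example

    import (
        v1 "k8s.io/api/core/v1"

        "sigs.k8s.io/descheduler/pkg/api"
    )

    // exampleStrategy builds HighNodeUtilization parameters with NodeFit enabled:
    // nodes below 30% CPU and pod-count utilization are candidates for draining,
    // and a pod is only evicted if it fits somewhere else.
    func exampleStrategy() api.DeschedulerStrategy {
        return api.DeschedulerStrategy{
            Enabled: true,
            Params: &api.StrategyParameters{
                NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
                    Thresholds: api.ResourceThresholds{
                        v1.ResourceCPU:  30,
                        v1.ResourcePods: 30,
                    },
                },
                NodeFit: true, // the flag this commit introduces for the strategy
            },
        }
    }
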
@@ -19,6 +19,7 @@ package nodeutilization
 import (
     "context"
     "fmt"

     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     clientset "k8s.io/client-go/kubernetes"
@@ -36,6 +37,12 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
         klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
         return
     }
+
+    nodeFit := false
+    if strategy.Params != nil {
+        nodeFit = strategy.Params.NodeFit
+    }
+
     thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
     if err != nil {
         klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
@@ -68,13 +75,13 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate

     // log message in one line
     keysAndValues := []interface{}{
-        "CPU", targetThresholds[v1.ResourceCPU],
-        "Mem", targetThresholds[v1.ResourceMemory],
-        "Pods", targetThresholds[v1.ResourcePods],
+        "CPU", thresholds[v1.ResourceCPU],
+        "Mem", thresholds[v1.ResourceMemory],
+        "Pods", thresholds[v1.ResourcePods],
     }
-    for name := range targetThresholds {
+    for name := range thresholds {
         if !isBasicResource(name) {
-            keysAndValues = append(keysAndValues, string(name), int64(targetThresholds[name]))
+            keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
         }
     }

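
The keysAndValues slice above exists so the thresholds come out as a single structured log line rather than one line per resource. A minimal sketch of the idea, assuming the slice is fed to klog's structured logger (the message text and the standalone helper are assumptions; the actual log call is outside this hunk):

    package example

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/klog/v2"

        "sigs.k8s.io/descheduler/pkg/api"
    )

    // logThresholds emits the configured thresholds as one structured log entry.
    func logThresholds(thresholds api.ResourceThresholds) {
        keysAndValues := []interface{}{
            "CPU", thresholds[v1.ResourceCPU],
            "Mem", thresholds[v1.ResourceMemory],
            "Pods", thresholds[v1.ResourcePods],
        }
        klog.InfoS("Criteria for underutilized nodes", keysAndValues...)
    }
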
@@ -98,7 +105,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
         return
     }

-    evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
+    evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

     // stop if the total available usage has dropped to zero - no more pods can be scheduled
     continueEvictionCond := func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
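
Conceptually, the WithNodeFit option above makes a pod non-evictable unless some other node could host it. A much-simplified sketch of that check, narrowed to the nodeSelector case exercised by the new test cases below (the helper name and the reduction to label matching are assumptions; the real check also considers taints, resource requests, and node readiness):

    package example

    import (
        v1 "k8s.io/api/core/v1"
    )

    // fitsOnAnotherNode is a hypothetical, simplified stand-in for the node-fit
    // check: the pod only counts as evictable if at least one node other than
    // its current one satisfies its nodeSelector.
    func fitsOnAnotherNode(pod *v1.Pod, nodes []*v1.Node) bool {
        for _, node := range nodes {
            if node.Name == pod.Spec.NodeName {
                continue // skip the node the pod is already running on
            }
            matches := true
            for key, value := range pod.Spec.NodeSelector {
                if node.Labels[key] != value {
                    matches = false
                    break
                }
            }
            if matches {
                return true
            }
        }
        return false
    }

In the new tests, pod p5 on the underutilized node selects datacenter=west: when the other node carries that label the strategy may evict it (expectedPodsEvicted: 1), and when no other node matches it is left in place (expectedPodsEvicted: 0).
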
@@ -19,6 +19,9 @@ package nodeutilization
 import (
     "context"
     "fmt"
+    "strings"
+    "testing"

     v1 "k8s.io/api/core/v1"
     "k8s.io/api/policy/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
@@ -29,8 +32,6 @@ import (
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"
-    "strings"
-    "testing"
 )

 func TestHighNodeUtilization(t *testing.T) {
@@ -39,6 +40,9 @@ func TestHighNodeUtilization(t *testing.T) {
     n2NodeName := "n2"
     n3NodeName := "n3"

+    nodeSelectorKey := "datacenter"
+    nodeSelectorValue := "west"
+
     testCases := []struct {
         name       string
         thresholds api.ResourceThresholds
@@ -445,6 +449,78 @@ func TestHighNodeUtilization(t *testing.T) {
             maxPodsToEvictPerNode: 0,
             expectedPodsEvicted:   0,
         },
+        {
+            name: "Other node match pod node selector",
+            thresholds: api.ResourceThresholds{
+                v1.ResourceCPU:  30,
+                v1.ResourcePods: 30,
+            },
+            nodes: map[string]*v1.Node{
+                n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
+                    node.ObjectMeta.Labels = map[string]string{
+                        nodeSelectorKey: nodeSelectorValue,
+                    }
+                }),
+                n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+            },
+            pods: map[string]*v1.PodList{
+                n1NodeName: {
+                    Items: []v1.Pod{
+                        *test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
+                        *test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
+                        *test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
+                        *test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
+                    },
+                },
+                n2NodeName: {
+                    Items: []v1.Pod{
+                        *test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
+                            // A pod selecting nodes in the "west" datacenter
+                            test.SetRSOwnerRef(pod)
+                            pod.Spec.NodeSelector = map[string]string{
+                                nodeSelectorKey: nodeSelectorValue,
+                            }
+                        }),
+                    },
+                },
+            },
+            maxPodsToEvictPerNode: 0,
+            expectedPodsEvicted:   1,
+        },
+        {
+            name: "Other node does not match pod node selector",
+            thresholds: api.ResourceThresholds{
+                v1.ResourceCPU:  30,
+                v1.ResourcePods: 30,
+            },
+            nodes: map[string]*v1.Node{
+                n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
+                n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
+            },
+            pods: map[string]*v1.PodList{
+                n1NodeName: {
+                    Items: []v1.Pod{
+                        *test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
+                        *test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
+                        *test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
+                        *test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
+                    },
+                },
+                n2NodeName: {
+                    Items: []v1.Pod{
+                        *test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
+                            // A pod selecting nodes in the "west" datacenter
+                            test.SetRSOwnerRef(pod)
+                            pod.Spec.NodeSelector = map[string]string{
+                                nodeSelectorKey: nodeSelectorValue,
+                            }
+                        }),
+                    },
+                },
+            },
+            maxPodsToEvictPerNode: 0,
+            expectedPodsEvicted:   0,
+        },
     }

     for _, test := range testCases {
@@ -514,6 +590,7 @@ func TestHighNodeUtilization(t *testing.T) {
                 NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
                     Thresholds: test.thresholds,
                 },
+                NodeFit: true,
             },
         }
         HighNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)