Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-28 14:41:10 +01:00
Merge pull request #525 from damemi/topology-spread-selector-fix
(TopologySpread) Evict pods with selectors that match multiple nodes
@@ -289,11 +289,12 @@ func balanceDomains(
 			// however we still account for it "being evicted" so the algorithm can complete
 			// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
 			// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
-			if aboveToEvict[k].Spec.NodeSelector != nil ||
-				(aboveToEvict[k].Spec.Affinity != nil &&
-					aboveToEvict[k].Spec.Affinity.NodeAffinity != nil &&
-					aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
-					nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0) {
+			isRequiredDuringSchedulingIgnoredDuringExecution := aboveToEvict[k].Spec.Affinity != nil &&
+				aboveToEvict[k].Spec.Affinity.NodeAffinity != nil &&
+				aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil
+
+			if (aboveToEvict[k].Spec.NodeSelector != nil || isRequiredDuringSchedulingIgnoredDuringExecution) &&
+				nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0 {
 				klog.V(2).InfoS("Ignoring pod for eviction due to node selector/affinity", "pod", klog.KObj(aboveToEvict[k]))
 				continue
 			}
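The hunk above relies on a nodesPodFitsOnBesidesCurrent helper that is not part of this diff. A minimal, hypothetical sketch of the idea behind that name (counting how many nodes other than the pod's current one could still host the pod), assuming only plain node-selector matching; the real helper presumably also evaluates required node affinity and other checks:

// Package and import paths are assumed for illustration only.
package strategies

import (
	v1 "k8s.io/api/core/v1"
)

// nodesPodFitsOnBesidesCurrentSketch is a hypothetical stand-in for the helper
// referenced in the diff: it counts nodes, other than the one the pod is bound
// to, whose labels satisfy the pod's node selector. The actual helper in the
// repository may also evaluate required node affinity and other predicates.
func nodesPodFitsOnBesidesCurrentSketch(pod *v1.Pod, nodeMap map[string]*v1.Node) int {
	fits := 0
	for name, node := range nodeMap {
		if name == pod.Spec.NodeName {
			continue // skip the node the pod currently runs on
		}
		if selectorMatches(pod.Spec.NodeSelector, node.Labels) {
			fits++
		}
	}
	return fits
}

// selectorMatches reports whether every key/value pair in selector is present in labels.
func selectorMatches(selector, labels map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

Under this reading, the refactored condition skips eviction only when the pod is pinned by a node selector or required node affinity and fits on zero nodes besides its current one; a pod whose selector matches multiple nodes (the case exercised by the new test below) can now be evicted.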
@@ -281,6 +281,56 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			strategy:             api.DeschedulerStrategy{},
 			namespaces:           []string{"ns1"},
 		},
+		{
+			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
+			nodes: []*v1.Node{
+				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) {
+					n.Labels["zone"] = "zoneA"
+					n.Labels["region"] = "boston"
+				}),
+				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
+					n.Labels["zone"] = "zoneB"
+					n.Labels["region"] = "boston"
+				}),
+			},
+			pods: createTestPods([]testPodList{
+				{
+					count:  1,
+					node:   "n1",
+					labels: map[string]string{"foo": "bar"},
+					constraints: []v1.TopologySpreadConstraint{
+						{
+							MaxSkew:           1,
+							TopologyKey:       "zone",
+							WhenUnsatisfiable: v1.DoNotSchedule,
+							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+						},
+					},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count:        1,
+					node:         "n1",
+					labels:       map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count:        1,
+					node:         "n1",
+					labels:       map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+				{
+					count:        1,
+					node:         "n1",
+					labels:       map[string]string{"foo": "bar"},
+					nodeSelector: map[string]string{"region": "boston"},
+				},
+			}),
+			expectedEvictedCount: 2,
+			strategy:             api.DeschedulerStrategy{},
+			namespaces:           []string{"ns1"},
+		},
 		{
 			name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
 			nodes: []*v1.Node{
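The new test case uses a createTestPods helper and a testPodList fixture that are defined elsewhere in the test file. A sketch of what those definitions plausibly look like, inferred only from the fields used in this diff and assuming a test.BuildTestPod helper analogous to the BuildTestNode calls above; the actual definitions may differ:

// Package name, import paths, and field layout are inferred, not taken from the repository.
package strategies

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/test"
)

// testPodList is the inferred shape of one entry in the table above.
type testPodList struct {
	count        int
	node         string
	labels       map[string]string
	constraints  []v1.TopologySpreadConstraint
	nodeSelector map[string]string
}

// createTestPods expands each entry into `count` pods bound to the given node,
// carrying the listed labels, node selector, and topology spread constraints.
func createTestPods(lists []testPodList) []*v1.Pod {
	pods := []*v1.Pod{}
	for _, list := range lists {
		for i := 0; i < list.count; i++ {
			// BuildTestPod(name, cpu, memory, nodeName, apply) is assumed here;
			// the resource values are placeholders.
			pod := test.BuildTestPod(fmt.Sprintf("pod-%d", len(pods)), 100, 0, list.node, nil)
			pod.Namespace = "ns1"
			pod.Labels = list.labels
			pod.Spec.NodeSelector = list.nodeSelector
			pod.Spec.TopologySpreadConstraints = list.constraints
			pods = append(pods, pod)
		}
	}
	return pods
}

Either way, the new case builds 4 matching pods on n1 with a nodeSelector matching both nodes, and expects 2 evictions to balance the [4, 0] zone split to [2, 2].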